Convert gateways into respective packages (#5200)

- Make azure gateway a package
- Make b2 gateway a package
- Make gcs gateway a package
- Make s3 gateway a package
- Make sia gateway a package
This commit is contained in:
Harshavardhana
2017-12-05 17:58:09 -08:00
committed by Dee Koder
parent 52e382b697
commit eb2894233c
31 changed files with 1586 additions and 1505 deletions

View File

@@ -0,0 +1,322 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package azure
import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd"
)
// Copied from github.com/Azure/azure-sdk-for-go/storage/container.go
// azureListBlobsGetParameters converts ListBlobsParameters into the query
// values understood by the Azure "List Blobs" REST API.
func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
	vals := url.Values{}

	if p.Prefix != "" {
		vals.Set("prefix", p.Prefix)
	}
	if p.Delimiter != "" {
		vals.Set("delimiter", p.Delimiter)
	}
	if p.Marker != "" {
		vals.Set("marker", p.Marker)
	}
	if p.Include != nil {
		// Collect the requested datasets into one comma separated value.
		var datasets []string
		if p.Include.Snapshots {
			datasets = append(datasets, "snapshots")
		}
		if p.Include.Metadata {
			datasets = append(datasets, "metadata")
		}
		if p.Include.UncommittedBlobs {
			datasets = append(datasets, "uncommittedblobs")
		}
		if p.Include.Copy {
			datasets = append(datasets, "copy")
		}
		vals.Set("include", strings.Join(datasets, ","))
	}
	if p.MaxResults != 0 {
		vals.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
	}
	if p.Timeout != 0 {
		vals.Set("timeout", fmt.Sprintf("%v", p.Timeout))
	}
	return vals
}
// azureAnonRequest makes an anonymous (unauthenticated) HTTP request to the
// Azure endpoint. Responses with a 4xx/5xx status are decoded into a
// storage.AzureStorageServiceError and returned as the error value.
func azureAnonRequest(verb, urlStr string, header http.Header) (*http.Response, error) {
	req, err := http.NewRequest(verb, urlStr, nil)
	if err != nil {
		return nil, err
	}
	if header != nil {
		req.Header = header
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	// Anything outside the 4XX/5XX error range is returned as-is.
	if resp.StatusCode < 400 || resp.StatusCode > 511 {
		return resp, nil
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if len(body) == 0 {
		// no error in response body, might happen in HEAD requests
		return nil, storage.AzureStorageServiceError{
			StatusCode: resp.StatusCode,
			Code:       resp.Status,
			Message:    "no response body was available for error status code",
		}
	}
	// Response contains Azure storage service error object.
	var storageErr storage.AzureStorageServiceError
	if err = xml.Unmarshal(body, &storageErr); err != nil {
		return nil, err
	}
	storageErr.StatusCode = resp.StatusCode
	return nil, storageErr
}
// AnonGetBucketInfo - Get bucket metadata from azure anonymously.
// A HEAD on the container URL succeeds only if the container allows
// anonymous access; the Last-Modified header is used as the created time.
func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo minio.BucketInfo, err error) {
	blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
	u, err := url.Parse(blobURL)
	if err != nil {
		return bucketInfo, azureToObjectError(errors.Trace(err))
	}
	u.RawQuery = "restype=container"
	resp, err := azureAnonRequest(http.MethodHead, u.String(), nil)
	if err != nil {
		return bucketInfo, azureToObjectError(errors.Trace(err), bucket)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return bucketInfo, azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket)), bucket)
	}
	created, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
	if err != nil {
		return bucketInfo, errors.Trace(err)
	}
	bucketInfo = minio.BucketInfo{
		Name:    bucket,
		Created: created,
	}
	return bucketInfo, nil
}
// AnonGetObject - Send GET request without authentication.
// This is needed when clients send GET requests on objects that can be downloaded without auth.
func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
	hdr := make(http.Header)
	switch {
	case startOffset > 0 && length > 0:
		hdr.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
	case startOffset > 0:
		hdr.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
	}
	blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
	resp, err := azureAnonRequest(http.MethodGet, blobURL, hdr)
	if err != nil {
		return azureToObjectError(errors.Trace(err), bucket, object)
	}
	defer resp.Body.Close()
	// 200 for full reads, 206 for ranged reads; everything else is an error.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
		return azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
	}
	_, err = io.Copy(writer, resp.Body)
	return errors.Trace(err)
}
// AnonGetObjectInfo - Send HEAD request without authentication and convert the
// result to ObjectInfo.
func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) {
	blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
	resp, err := azureAnonRequest(http.MethodHead, blobURL, nil)
	if err != nil {
		return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return objInfo, azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
	}
	var contentLength int64
	contentLengthStr := resp.Header.Get("Content-Length")
	if contentLengthStr != "" {
		// Content-Length is always a base-10 decimal; parsing with base 0
		// would misread a value with a leading zero as octal.
		contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
		if err != nil {
			// Propagate the actual parse failure instead of a generic
			// "Unexpected error" that hides the cause.
			return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
		}
	}
	t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
	if err != nil {
		return objInfo, errors.Trace(err)
	}
	objInfo.Bucket = bucket
	objInfo.Name = object
	objInfo.ModTime = t // previously assigned twice; once is enough
	objInfo.Size = contentLength
	objInfo.ETag = resp.Header.Get("Etag")
	objInfo.UserDefined = make(map[string]string)
	if enc := resp.Header.Get("Content-Encoding"); enc != "" {
		objInfo.UserDefined["Content-Encoding"] = enc
	}
	objInfo.UserDefined["Content-Type"] = resp.Header.Get("Content-Type")
	return objInfo, nil
}
// AnonListObjects - Use Azure equivalent ListBlobs.
// Issues an anonymous "List Blobs" call and converts the XML response into
// minio.ListObjectsInfo.
func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) {
	q := azureListBlobsGetParameters(storage.ListBlobsParameters{
		Prefix:     prefix,
		Marker:     marker,
		Delimiter:  delimiter,
		MaxResults: uint(maxKeys),
	})
	q.Set("restype", "container")
	q.Set("comp", "list")
	blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
	u, err := url.Parse(blobURL)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	u.RawQuery = q.Encode()
	resp, err := azureAnonRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	var listResp storage.BlobListResponse
	if err = xml.Unmarshal(data, &listResp); err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	// A non-empty NextMarker indicates a truncated listing.
	result.IsTruncated = listResp.NextMarker != ""
	result.NextMarker = listResp.NextMarker
	for _, blob := range listResp.Blobs {
		result.Objects = append(result.Objects, minio.ObjectInfo{
			Bucket:          bucket,
			Name:            blob.Name,
			ModTime:         time.Time(blob.Properties.LastModified),
			Size:            blob.Properties.ContentLength,
			ETag:            blob.Properties.Etag,
			ContentType:     blob.Properties.ContentType,
			ContentEncoding: blob.Properties.ContentEncoding,
		})
	}
	result.Prefixes = listResp.BlobPrefixes
	return result, nil
}
// AnonListObjectsV2 - List objects in V2 mode, anonymously.
// fetchOwner and startAfter are accepted for interface compatibility but
// are not used by the Azure backend.
func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) {
	params := storage.ListBlobsParameters{
		Prefix:     prefix,
		Marker:     continuationToken,
		Delimiter:  delimiter,
		MaxResults: uint(maxKeys),
	}
	q := azureListBlobsGetParameters(params)
	q.Set("restype", "container")
	q.Set("comp", "list")
	blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
	u, err := url.Parse(blobURL)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	u.RawQuery = q.Encode()
	// Use azureAnonRequest (not bare http.Get) so that 4xx/5xx responses
	// carrying an Azure storage service error body are converted into typed
	// errors, consistent with AnonListObjects above.
	resp, err := azureAnonRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	defer resp.Body.Close()
	var listResp storage.BlobListResponse
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	if err = xml.Unmarshal(data, &listResp); err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	// If NextMarker is not empty, this means response is truncated and NextContinuationToken should be set
	if listResp.NextMarker != "" {
		result.IsTruncated = true
		result.NextContinuationToken = listResp.NextMarker
	}
	for _, object := range listResp.Blobs {
		result.Objects = append(result.Objects, minio.ObjectInfo{
			Bucket:          bucket,
			Name:            object.Name,
			ModTime:         time.Time(object.Properties.LastModified),
			Size:            object.Properties.ContentLength,
			ETag:            minio.CanonicalizeETag(object.Properties.Etag),
			ContentType:     object.Properties.ContentType,
			ContentEncoding: object.Properties.ContentEncoding,
		})
	}
	result.Prefixes = listResp.BlobPrefixes
	return result, nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,376 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package azure
import (
"fmt"
"net/http"
"net/url"
"reflect"
"testing"
"github.com/Azure/azure-sdk-for-go/storage"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/pkg/errors"
)
// Test canonical metadata.
// Verifies the S3-metadata -> Azure-properties conversion: only X-Amz-Meta-
// keys become blob metadata, invalid keys are rejected, and content-md5 maps
// to the ContentMD5 property.
func TestS3MetaToAzureProperties(t *testing.T) {
	headers := map[string]string{
		"accept-encoding":          "gzip",
		"content-encoding":         "gzip",
		"cache-control":            "age: 3600",
		"content-disposition":      "dummy",
		"content-length":           "10",
		"content-type":             "application/javascript",
		"X-Amz-Meta-Hdr":           "value",
		"X-Amz-Meta-X_test_key":    "value",
		"X-Amz-Meta-X__test__key":  "value",
		"X-Amz-Meta-X-Test__key":   "value",
		"X-Amz-Meta-X-Amz-Key":     "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
		"X-Amz-Meta-X-Amz-Matdesc": "{}",
		"X-Amz-Meta-X-Amz-Iv":      "eWmyryl8kq+EVnnsE7jpOg==",
	}
	// Only X-Amz-Meta- prefixed entries will be returned in
	// Metadata (without the prefix!)
	expectedHeaders := map[string]string{
		"Hdr":              "value",
		"X__test__key":     "value",
		"X____test____key": "value",
		"X_Test____key":    "value",
		"X_Amz_Key":        "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
		"X_Amz_Matdesc":    "{}",
		"X_Amz_Iv":         "eWmyryl8kq+EVnnsE7jpOg==",
	}
	meta, _, err := s3MetaToAzureProperties(headers)
	if err != nil {
		t.Fatalf("Test failed, with %s", err)
	}
	if !reflect.DeepEqual(map[string]string(meta), expectedHeaders) {
		t.Fatalf("Test failed, expected %#v, got %#v", expectedHeaders, meta)
	}
	headers = map[string]string{
		"invalid--meta": "value",
	}
	_, _, err = s3MetaToAzureProperties(headers)
	if err = errors.Cause(err); err != nil {
		if _, ok := err.(minio.UnsupportedMetadata); !ok {
			t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err)
		}
	} else {
		// Previously a nil error here passed silently; an invalid metadata
		// key must be rejected, so a missing error is a test failure.
		t.Fatalf("Test failed, expected UnsupportedMetadata error, got <nil>")
	}
	headers = map[string]string{
		"content-md5": "Dce7bmCX61zvxzP5QmfelQ==",
	}
	_, props, err := s3MetaToAzureProperties(headers)
	if err != nil {
		t.Fatalf("Test failed, with %s", err)
	}
	if props.ContentMD5 != headers["content-md5"] {
		t.Fatalf("Test failed, expected %s, got %s", headers["content-md5"], props.ContentMD5)
	}
}
// TestAzurePropertiesToS3Meta verifies the reverse conversion: Azure blob
// metadata keys get the X-Amz-Meta- prefix back (with underscore decoding),
// and the standard blob properties map onto their HTTP header equivalents.
func TestAzurePropertiesToS3Meta(t *testing.T) {
	// Just one testcase. Adding more test cases does not add value to the testcase
	// as azureToS3Metadata() just adds a prefix.
	metadata := map[string]string{
		"first_name":       "myname",
		"x_test_key":       "value",
		"x_test__key":      "value",
		"x__test__key":     "value",
		"x____test____key": "value",
		"x_amz_key":        "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
		"x_amz_matdesc":    "{}",
		"x_amz_iv":         "eWmyryl8kq+EVnnsE7jpOg==",
	}
	// Expected S3-style metadata: single underscores become dashes, doubled
	// underscores decode back to a literal underscore.
	expectedMeta := map[string]string{
		"X-Amz-Meta-First-Name":    "myname",
		"X-Amz-Meta-X-Test-Key":    "value",
		"X-Amz-Meta-X-Test_key":    "value",
		"X-Amz-Meta-X_test_key":    "value",
		"X-Amz-Meta-X__test__key":  "value",
		"X-Amz-Meta-X-Amz-Key":     "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
		"X-Amz-Meta-X-Amz-Matdesc": "{}",
		"X-Amz-Meta-X-Amz-Iv":      "eWmyryl8kq+EVnnsE7jpOg==",
		"Cache-Control":            "max-age: 3600",
		"Content-Disposition":      "dummy",
		"Content-Encoding":         "gzip",
		"Content-Length":           "10",
		"Content-MD5":              "base64-md5",
		"Content-Type":             "application/javascript",
	}
	actualMeta := azurePropertiesToS3Meta(metadata, storage.BlobProperties{
		CacheControl:       "max-age: 3600",
		ContentDisposition: "dummy",
		ContentEncoding:    "gzip",
		ContentLength:      10,
		ContentMD5:         "base64-md5",
		ContentType:        "application/javascript",
	})
	if !reflect.DeepEqual(actualMeta, expectedMeta) {
		t.Fatalf("Test failed, expected %#v, got %#v", expectedMeta, actualMeta)
	}
}
// Add tests for azure to object error.
// Table-driven test mapping Azure storage service errors (by code and by
// HTTP status) to the corresponding minio object-layer error types.
func TestAzureToObjectError(t *testing.T) {
	testCases := []struct {
		actualErr      error
		expectedErr    error
		bucket, object string
	}{
		{
			nil, nil, "", "",
		},
		{
			errors.Trace(fmt.Errorf("Non azure error")),
			fmt.Errorf("Non azure error"), "", "",
		},
		{
			// Untraced azure errors pass through unchanged.
			storage.AzureStorageServiceError{
				Code: "ContainerAlreadyExists",
			}, storage.AzureStorageServiceError{
				Code: "ContainerAlreadyExists",
			}, "bucket", "",
		},
		{
			errors.Trace(storage.AzureStorageServiceError{
				Code: "ContainerAlreadyExists",
			}), minio.BucketExists{Bucket: "bucket"}, "bucket", "",
		},
		{
			errors.Trace(storage.AzureStorageServiceError{
				Code: "InvalidResourceName",
			}), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
		},
		{
			errors.Trace(storage.AzureStorageServiceError{
				Code: "RequestBodyTooLarge",
			}), minio.PartTooBig{}, "", "",
		},
		{
			errors.Trace(storage.AzureStorageServiceError{
				Code: "InvalidMetadata",
			}), minio.UnsupportedMetadata{}, "", "",
		},
		{
			errors.Trace(storage.AzureStorageServiceError{
				StatusCode: http.StatusNotFound,
			}), minio.ObjectNotFound{
				Bucket: "bucket",
				Object: "object",
			}, "bucket", "object",
		},
		{
			errors.Trace(storage.AzureStorageServiceError{
				StatusCode: http.StatusNotFound,
			}), minio.BucketNotFound{Bucket: "bucket"}, "bucket", "",
		},
		{
			errors.Trace(storage.AzureStorageServiceError{
				StatusCode: http.StatusBadRequest,
			}), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
		},
	}
	for i, testCase := range testCases {
		err := azureToObjectError(testCase.actualErr, testCase.bucket, testCase.object)
		if err == nil {
			// Previously a nil result was never compared against the
			// expectation, so a wrongly-swallowed error passed silently.
			if testCase.expectedErr != nil {
				t.Errorf("Test %d: Expected error %s, got <nil>", i+1, testCase.expectedErr)
			}
			continue
		}
		if testCase.expectedErr == nil {
			t.Errorf("Test %d: Expected <nil>, got %s", i+1, err)
			continue
		}
		if err.Error() != testCase.expectedErr.Error() {
			t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.expectedErr, err)
		}
	}
}
// Test azureGetBlockID().
// Checks that (partID, subPartNumber, uploadID, md5) encode into the
// expected base64 block identifiers.
func TestAzureGetBlockID(t *testing.T) {
	testCases := []struct {
		partID        int
		subPartNumber int
		uploadID      string
		md5           string
		blockID       string
	}{
		{1, 7, "f328c35cad938137", "d41d8cd98f00b204e9800998ecf8427e", "MDAwMDEuMDcuZjMyOGMzNWNhZDkzODEzNy5kNDFkOGNkOThmMDBiMjA0ZTk4MDA5OThlY2Y4NDI3ZQ=="},
		{2, 19, "abcdc35cad938137", "a7fb6b7b36ee4ed66b5546fac4690273", "MDAwMDIuMTkuYWJjZGMzNWNhZDkzODEzNy5hN2ZiNmI3YjM2ZWU0ZWQ2NmI1NTQ2ZmFjNDY5MDI3Mw=="},
	}
	for _, test := range testCases {
		if got := azureGetBlockID(test.partID, test.subPartNumber, test.uploadID, test.md5); got != test.blockID {
			t.Fatalf("%s is not equal to %s", got, test.blockID)
		}
	}
}
// Test azureParseBlockID().
// Exercises both malformed block IDs (bad base64, wrong token count, bad
// numeric fields) and valid round-trips back to their components.
func TestAzureParseBlockID(t *testing.T) {
	testCases := []struct {
		blockID       string
		partID        int
		subPartNumber int
		uploadID      string
		md5           string
		success       bool
	}{
		// Invalid base64.
		{"MDAwMDEuMDcuZjMyOGMzNWNhZDkzODEzNy5kNDFkOGNkOThmMDBiMjA0ZTk4MDA5OThlY2Y4NDI3ZQ=", 0, 0, "", "", false},
		// Invalid number of tokens.
		{"MDAwMDEuQUEuZjMyOGMzNWNhZDkzODEzNwo=", 0, 0, "", "", false},
		// Invalid encoded part ID.
		{"MDAwMGEuMDcuZjMyOGMzNWNhZDkzODEzNy5kNDFkOGNkOThmMDBiMjA0ZTk4MDA5OThlY2Y4NDI3ZQo=", 0, 0, "", "", false},
		// Invalid sub part ID.
		{"MDAwMDEuQUEuZjMyOGMzNWNhZDkzODEzNy5kNDFkOGNkOThmMDBiMjA0ZTk4MDA5OThlY2Y4NDI3ZQo=", 0, 0, "", "", false},
		{"MDAwMDEuMDcuZjMyOGMzNWNhZDkzODEzNy5kNDFkOGNkOThmMDBiMjA0ZTk4MDA5OThlY2Y4NDI3ZQ==", 1, 7, "f328c35cad938137", "d41d8cd98f00b204e9800998ecf8427e", true},
		{"MDAwMDIuMTkuYWJjZGMzNWNhZDkzODEzNy5hN2ZiNmI3YjM2ZWU0ZWQ2NmI1NTQ2ZmFjNDY5MDI3Mw==", 2, 19, "abcdc35cad938137", "a7fb6b7b36ee4ed66b5546fac4690273", true},
	}
	for i, test := range testCases {
		partID, subPartNumber, uploadID, md5, err := azureParseBlockID(test.blockID)
		if err != nil && test.success {
			t.Errorf("Test %d: Expected success but failed %s", i+1, err)
		}
		if err == nil && !test.success {
			// Fixed garbled failure message ("succeeeded insteadl").
			t.Errorf("Test %d: Expected to fail but succeeded instead", i+1)
		}
		if err == nil {
			if partID != test.partID {
				t.Errorf("Test %d: %d not equal to %d", i+1, partID, test.partID)
			}
			if subPartNumber != test.subPartNumber {
				t.Errorf("Test %d: %d not equal to %d", i+1, subPartNumber, test.subPartNumber)
			}
			if uploadID != test.uploadID {
				t.Errorf("Test %d: %s not equal to %s", i+1, uploadID, test.uploadID)
			}
			if md5 != test.md5 {
				t.Errorf("Test %d: %s not equal to %s", i+1, md5, test.md5)
			}
		}
	}
}
// Test azureListBlobsGetParameters()
// Checks that every ListBlobsParameters field is reflected in the query
// values when set, and that zero-value parameters produce empty values.
func TestAzureListBlobsGetParameters(t *testing.T) {
	// Test values set 1: all parameters populated.
	expectedURLValues := url.Values{}
	expectedURLValues.Set("prefix", "test")
	expectedURLValues.Set("delimiter", "_")
	expectedURLValues.Set("marker", "marker")
	expectedURLValues.Set("include", "metadata")
	expectedURLValues.Set("maxresults", "20")
	expectedURLValues.Set("timeout", "10")
	setBlobParameters := storage.ListBlobsParameters{
		Prefix:     "test",
		Delimiter:  "_",
		Marker:     "marker",
		Include:    &storage.IncludeBlobDataset{Metadata: true},
		MaxResults: 20,
		Timeout:    10,
	}
	// Test values set 2: all zero values, expecting no query parameters.
	expectedURLValues1 := url.Values{}
	setBlobParameters1 := storage.ListBlobsParameters{
		Prefix:     "",
		Delimiter:  "",
		Marker:     "",
		Include:    nil,
		MaxResults: 0,
		Timeout:    0,
	}
	testCases := []struct {
		name string
		args storage.ListBlobsParameters
		want url.Values
	}{
		{"TestIfValuesSet", setBlobParameters, expectedURLValues},
		{"TestIfValuesNotSet", setBlobParameters1, expectedURLValues1},
	}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			if got := azureListBlobsGetParameters(test.args); !reflect.DeepEqual(got, test.want) {
				t.Errorf("azureListBlobsGetParameters() = %v, want %v", got, test.want)
			}
		})
	}
}
// TestAnonErrToObjectErr checks the mapping of anonymous-request HTTP status
// codes to minio object-layer errors, with and without an object name.
func TestAnonErrToObjectErr(t *testing.T) {
	testCases := []struct {
		name       string
		statusCode int
		params     []string
		wantErr    error
	}{
		{"ObjectNotFound",
			http.StatusNotFound,
			[]string{"testBucket", "testObject"},
			minio.ObjectNotFound{Bucket: "testBucket", Object: "testObject"},
		},
		{"BucketNotFound",
			http.StatusNotFound,
			[]string{"testBucket", ""},
			minio.BucketNotFound{Bucket: "testBucket"},
		},
		{"ObjectNameInvalid",
			http.StatusBadRequest,
			[]string{"testBucket", "testObject"},
			minio.ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"},
		},
		{"BucketNameInvalid",
			http.StatusBadRequest,
			[]string{"testBucket", ""},
			minio.BucketNameInvalid{Bucket: "testBucket"},
		},
	}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			if err := minio.AnonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) {
				t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr)
			}
		})
	}
}
// TestCheckAzureUploadID verifies that checkAzureUploadID rejects malformed
// upload IDs and accepts well-formed ones.
func TestCheckAzureUploadID(t *testing.T) {
	for _, uploadID := range []string{
		"123456789abcdefg",
		"hello world",
		"0x1234567890",
		"1234567890abcdef1234567890abcdef",
	} {
		if err := checkAzureUploadID(uploadID); err == nil {
			t.Fatalf("%s: expected: <error>, got: <nil>", uploadID)
		}
	}
	for _, uploadID := range []string{
		"1234567890abcdef",
		"1122334455667788",
	} {
		if err := checkAzureUploadID(uploadID); err != nil {
			t.Fatalf("%s: expected: <nil>, got: %s", uploadID, err)
		}
	}
}

View File

@@ -0,0 +1,136 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package b2
import (
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd"
)
// mkRange converts offset, size into Range header equivalent.
// An empty string is returned when neither offset nor size is set; a
// half-open range ("bytes=N-") when only the offset is set.
func mkRange(offset, size int64) string {
	switch {
	case offset == 0 && size == 0:
		return ""
	case size == 0:
		return fmt.Sprintf("bytes=%d-", offset)
	default:
		return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1)
	}
}
// AnonGetObject - performs a plain http GET request on a public resource,
// fails if the resource is not public.
func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
	uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return b2ToObjectError(errors.Trace(err), bucket, object)
	}
	// Only set a Range header when a partial read was requested.
	if rng := mkRange(startOffset, length); rng != "" {
		req.Header.Set("Range", rng)
	}
	resp, err := l.anonClient.Do(req)
	if err != nil {
		return b2ToObjectError(errors.Trace(err), bucket, object)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return b2ToObjectError(errors.Trace(fmt.Errorf(resp.Status)), bucket, object)
	}
	_, err = io.Copy(writer, resp.Body)
	return b2ToObjectError(errors.Trace(err), bucket, object)
}
// Converts http Header into ObjectInfo. This function looks for all the
// standard Backblaze B2 headers to convert into ObjectInfo.
//
// Content-Length is converted to Size.
// X-Bz-Upload-Timestamp is converted to ModTime.
// X-Bz-Info-<header>:<value> is converted to <header>:<value>
// Content-Type is converted to ContentType.
// X-Bz-Content-Sha1 is converted to ETag.
func headerToObjectInfo(bucket, object string, header http.Header) (objInfo minio.ObjectInfo, err error) {
	size, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64)
	if err != nil {
		return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
	}
	// Converting upload timestamp in milliseconds to a time.Time value for ObjectInfo.ModTime.
	millis, err := strconv.ParseInt(header.Get("X-Bz-Upload-Timestamp"), 10, 64)
	if err != nil {
		return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
	}
	// Populate user metadata by looking for all the X-Bz-Info-<name>
	// HTTP headers, ignore other headers since they have their own
	// designated meaning, for more details refer B2 API documentation.
	userMetadata := make(map[string]string)
	for key := range header {
		if !strings.HasPrefix(key, "X-Bz-Info-") {
			continue
		}
		name, uerr := url.QueryUnescape(strings.TrimPrefix(key, "X-Bz-Info-"))
		if uerr != nil {
			return objInfo, b2ToObjectError(errors.Trace(uerr), bucket, object)
		}
		val, uerr := url.QueryUnescape(header.Get(key))
		if uerr != nil {
			return objInfo, b2ToObjectError(errors.Trace(uerr), bucket, object)
		}
		userMetadata[name] = val
	}
	return minio.ObjectInfo{
		Bucket:      bucket,
		Name:        object,
		ContentType: header.Get("Content-Type"),
		ModTime:     time.Unix(0, 0).Add(time.Duration(millis) * time.Millisecond),
		Size:        size,
		ETag:        header.Get("X-Bz-File-Id"),
		UserDefined: userMetadata,
	}, nil
}
// AnonGetObjectInfo - performs a plain http HEAD request on a public resource,
// fails if the resource is not public.
func (l *b2Objects) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) {
	uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
	req, rerr := http.NewRequest("HEAD", uri, nil)
	if rerr != nil {
		return objInfo, b2ToObjectError(errors.Trace(rerr), bucket, object)
	}
	resp, rerr := l.anonClient.Do(req)
	if rerr != nil {
		return objInfo, b2ToObjectError(errors.Trace(rerr), bucket, object)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return objInfo, b2ToObjectError(errors.Trace(fmt.Errorf(resp.Status)), bucket, object)
	}
	// All object information is carried in the response headers.
	return headerToObjectInfo(bucket, object, resp.Header)
}

View File

@@ -0,0 +1,752 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package b2
import (
"context"
"crypto/sha1"
"fmt"
"hash"
"io"
"io/ioutil"
"net/http"
"strings"
"sync"
"time"
b2 "github.com/minio/blazer/base"
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
h2 "github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
)
// Supported bucket types by B2 backend.
const (
	bucketTypePrivate  = "allPrivate" // default for buckets created via MakeBucketWithLocation
	bucketTypeReadOnly = "allPublic"  // presumably allows anonymous reads — see B2 bucket-type docs
	b2Backend          = "b2"         // gateway command / backend name registered in init()
)
// init registers the "b2" gateway subcommand (with its help template) so
// that `minio gateway b2` dispatches to b2GatewayMain.
func init() {
	const b2GatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}}
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: B2 account id.
MINIO_SECRET_KEY: B2 application key.
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
EXAMPLES:
1. Start minio gateway server for B2 backend.
$ export MINIO_ACCESS_KEY=accountID
$ export MINIO_SECRET_KEY=applicationKey
$ {{.HelpName}}
`
	minio.RegisterGatewayCommand(cli.Command{
		Name:               b2Backend,
		Usage:              "Backblaze B2.",
		Action:             b2GatewayMain,
		CustomHelpTemplate: b2GatewayTemplate,
		HideHelpCommand:    true,
	})
}
// Handler for 'minio gateway b2' command line.
// Starts the gateway server with the B2 backend implementation.
func b2GatewayMain(ctx *cli.Context) {
	minio.StartGateway(ctx, &B2{})
}
// B2 implements Minio Gateway
type B2 struct{}

// Name implements Gateway interface.
// Returns the backend identifier ("b2") used on the command line.
func (g *B2) Name() string {
	return b2Backend
}
// NewGatewayLayer returns b2 gateway layer, implements GatewayLayer interface to
// talk to B2 remote backend.
// creds.AccessKey / creds.SecretKey carry the B2 account id and application key.
func (g *B2) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
	ctx := context.Background()
	client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport()))
	if err != nil {
		return nil, err
	}
	return &b2Objects{
		creds:    creds,
		b2Client: client,
		// Separate plain HTTP client used for anonymous (public bucket) access.
		anonClient: &http.Client{
			Transport: minio.NewCustomHTTPTransport(),
		},
		ctx: ctx,
	}, nil
}
// Production - Ready for production use?
// Returns false: the B2 gateway is still marked experimental here.
func (g *B2) Production() bool {
	// Not ready for production use just yet.
	return false
}
// b2Object implements gateway for Minio and BackBlaze B2 compatible object storage servers.
type b2Objects struct {
	minio.GatewayUnsupported
	mu         sync.Mutex       // serializes b2Client updates during re-authorization
	creds      auth.Credentials // B2 account id (AccessKey) and application key (SecretKey)
	b2Client   *b2.B2           // authorized B2 API client
	anonClient *http.Client     // plain HTTP client for anonymous/public access
	ctx        context.Context  // background context used for all B2 API calls
}
// Convert B2 errors to minio object layer errors.
// Optional params are interpreted positionally as bucket, object, uploadID.
func b2ToObjectError(err error, params ...string) error {
	if err == nil {
		return nil
	}
	e, ok := err.(*errors.Error)
	if !ok {
		// Code should be fixed if this function is called without doing errors.Trace()
		// Else handling different situations in this function makes this function complicated.
		minio.ErrorIf(err, "Expected type *Error")
		return err
	}
	err = e.Cause
	bucket := ""
	object := ""
	uploadID := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	// Use >= so that the object is also captured when an uploadID is passed
	// as a third param (the previous `== 2` check skipped the object in the
	// three-param case, yielding wrong error types below).
	if len(params) >= 2 {
		object = params[1]
	}
	if len(params) >= 3 {
		uploadID = params[2]
	}
	// Following code is a non-exhaustive check to convert
	// B2 errors into S3 compatible errors.
	//
	// For a more complete information - https://www.backblaze.com/b2/docs/
	statusCode, code, msg := b2.Code(err)
	if statusCode == 0 {
		// We don't interpret non B2 errors. B2 errors have statusCode
		// to help us convert them to S3 object errors.
		return e
	}
	switch code {
	case "duplicate_bucket_name":
		err = minio.BucketAlreadyOwnedByYou{Bucket: bucket}
	case "bad_request":
		if object != "" {
			err = minio.ObjectNameInvalid{
				Bucket: bucket,
				Object: object,
			}
		} else if bucket != "" {
			err = minio.BucketNotFound{Bucket: bucket}
		}
	case "bad_bucket_id":
		err = minio.BucketNotFound{Bucket: bucket}
	case "file_not_present", "not_found":
		err = minio.ObjectNotFound{
			Bucket: bucket,
			Object: object,
		}
	case "cannot_delete_non_empty_bucket":
		err = minio.BucketNotEmpty{Bucket: bucket}
	}
	// Special interpretation like this is required for Multipart sessions.
	if strings.Contains(msg, "No active upload for") && uploadID != "" {
		err = minio.InvalidUploadID{UploadID: uploadID}
	}
	e.Cause = err
	return e
}
// Shutdown saves any gateway metadata to disk
// if necessary and reload upon next restart.
// Currently a no-op for the B2 backend.
func (l *b2Objects) Shutdown() error {
	// TODO
	return nil
}
// StorageInfo is not relevant to B2 backend.
// Returns the zero-value StorageInfo.
func (l *b2Objects) StorageInfo() (si minio.StorageInfo) {
	return si
}
// MakeBucketWithLocation creates a new container on B2 backend.
func (l *b2Objects) MakeBucketWithLocation(bucket, location string) error {
	// location is ignored for B2 backend.
	// All buckets are set to private by default.
	_, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil)
	return b2ToObjectError(errors.Trace(err), bucket)
}
// reAuthorizeAccount obtains a fresh B2 authorization with the stored
// credentials and swaps it into the shared client under the mutex.
func (l *b2Objects) reAuthorizeAccount() error {
	client, err := b2.AuthorizeAccount(l.ctx, l.creds.AccessKey, l.creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport()))
	if err != nil {
		return err
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	l.b2Client.Update(client)
	return nil
}
// listBuckets is a wrapper similar to ListBuckets, which re-authorizes
// the account and updates the B2 client safely. Once successfully
// authorized performs the call again and returns list of buckets.
// For any errors which are not actionable we return an error.
//
// NOTE(review): if ListBuckets keeps returning a re-authenticable error
// even after a successful re-authorization, this recursion has no bound —
// confirm the B2 client cannot loop here.
func (l *b2Objects) listBuckets(err error) ([]*b2.Bucket, error) {
	if err != nil {
		// Only re-authentication failures are actionable; everything
		// else is returned to the caller unchanged.
		if b2.Action(err) != b2.ReAuthenticate {
			return nil, err
		}
		if rerr := l.reAuthorizeAccount(); rerr != nil {
			return nil, rerr
		}
	}
	bktList, lerr := l.b2Client.ListBuckets(l.ctx)
	if lerr != nil {
		// Recurse with the new error so the re-auth branch above can act.
		return l.listBuckets(lerr)
	}
	return bktList, nil
}
// Bucket - is a helper which provides a *Bucket instance
// for performing an API operation. B2 API doesn't
// provide a direct way to access the bucket so we need
// to employ following technique.
func (l *b2Objects) Bucket(bucket string) (*b2.Bucket, error) {
	bktList, err := l.listBuckets(nil)
	if err != nil {
		return nil, b2ToObjectError(errors.Trace(err), bucket)
	}
	// Linear scan of the account's buckets for a name match.
	for _, bkt := range bktList {
		if bkt.Name != bucket {
			continue
		}
		return bkt, nil
	}
	return nil, errors.Trace(minio.BucketNotFound{Bucket: bucket})
}
// GetBucketInfo gets bucket metadata..
// B2 does not expose a creation time here, so the Unix epoch is reported.
func (l *b2Objects) GetBucketInfo(bucket string) (bi minio.BucketInfo, err error) {
	if _, err = l.Bucket(bucket); err != nil {
		return bi, err
	}
	bi = minio.BucketInfo{
		Name:    bucket,
		Created: time.Unix(0, 0),
	}
	return bi, nil
}
// ListBuckets lists all B2 buckets.
// Creation time is reported as the Unix epoch because B2 does not
// expose a bucket creation timestamp.
func (l *b2Objects) ListBuckets() ([]minio.BucketInfo, error) {
	bktList, err := l.listBuckets(nil)
	if err != nil {
		return nil, err
	}
	var bktInfo []minio.BucketInfo
	for _, bkt := range bktList {
		bktInfo = append(bktInfo, minio.BucketInfo{
			Name:    bkt.Name,
			Created: time.Unix(0, 0),
		})
	}
	return bktInfo, nil
}
// DeleteBucket deletes a bucket on B2.
// The Bucket lookup is required first because B2 deletes by bucket
// handle, not by name.
func (l *b2Objects) DeleteBucket(bucket string) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	err = bkt.DeleteBucket(l.ctx)
	return b2ToObjectError(errors.Trace(err), bucket)
}
// ListObjects lists all objects in B2 bucket filtered by prefix, returns upto at max 1000 entries at a time.
func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return loi, err
	}
	files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
	if lerr != nil {
		return loi, b2ToObjectError(errors.Trace(lerr), bucket)
	}
	// A non-empty continuation cursor from B2 means the listing is truncated.
	loi.IsTruncated = next != ""
	loi.NextMarker = next
	for _, file := range files {
		switch file.Status {
		// "folder": directory-like entry synthesized by the delimiter.
		case "folder":
			loi.Prefixes = append(loi.Prefixes, file.Name)
		// "upload": a finished file; entries with any other status are skipped.
		case "upload":
			loi.Objects = append(loi.Objects, minio.ObjectInfo{
				Bucket:      bucket,
				Name:        file.Name,
				ModTime:     file.Timestamp,
				Size:        file.Size,
				ETag:        minio.ToS3ETag(file.Info.ID),
				ContentType: file.Info.ContentType,
				UserDefined: file.Info.Info,
			})
		}
	}
	return loi, nil
}
// ListObjectsV2 lists all objects in B2 bucket filtered by prefix, returns upto max 1000 entries at a time.
// The V2 continuation token maps directly onto B2's list cursor.
func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int,
	fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
	// fetchOwner, startAfter are not supported and unused.
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return loi, err
	}
	files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter)
	if lerr != nil {
		return loi, b2ToObjectError(errors.Trace(lerr), bucket)
	}
	// A non-empty cursor means more entries remain.
	loi.IsTruncated = next != ""
	loi.ContinuationToken = continuationToken
	loi.NextContinuationToken = next
	for _, file := range files {
		switch file.Status {
		// "folder": directory-like entry synthesized by the delimiter.
		case "folder":
			loi.Prefixes = append(loi.Prefixes, file.Name)
		// "upload": a finished file; other statuses are skipped.
		case "upload":
			loi.Objects = append(loi.Objects, minio.ObjectInfo{
				Bucket:      bucket,
				Name:        file.Name,
				ModTime:     file.Timestamp,
				Size:        file.Size,
				ETag:        minio.ToS3ETag(file.Info.ID),
				ContentType: file.Info.ContentType,
				UserDefined: file.Info.Info,
			})
		}
	}
	return loi, nil
}
// GetObject reads an object from B2. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *b2Objects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	// DownloadFileByName performs the ranged read server-side.
	reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length)
	if err != nil {
		return b2ToObjectError(errors.Trace(err), bucket, object)
	}
	defer reader.Close()
	_, err = io.Copy(writer, reader)
	return b2ToObjectError(errors.Trace(err), bucket, object)
}
// GetObjectInfo reads object info and replies back ObjectInfo.
// B2 needs a file ID to query metadata, so a 1-byte download is issued
// first purely to learn the ID; its body is closed immediately.
func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return objInfo, err
	}
	f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
	if err != nil {
		return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
	}
	// Close error deliberately ignored; only f.ID is needed.
	f.Close()
	fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx)
	if err != nil {
		return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
	}
	return minio.ObjectInfo{
		Bucket:      bucket,
		Name:        object,
		ETag:        minio.ToS3ETag(fi.ID),
		Size:        fi.Size,
		ModTime:     fi.Timestamp,
		ContentType: fi.ContentType,
		UserDefined: fi.Info,
	}, nil
}
// In B2 - You must always include the X-Bz-Content-Sha1 header with
// your upload request. The value you provide can be:
// (1) the 40-character hex checksum of the file,
// (2) the string hex_digits_at_end, or
// (3) the string do_not_verify.
// For more reference - https://www.backblaze.com/b2/docs/uploading.html
//
// In our case we are going to use (2) option, i.e. the sha1 hex digest
// is streamed as the last 40 bytes of the request body (see Reader below).
const sha1AtEOF = "hex_digits_at_end"
// With the second option mentioned above, you append the 40-character hex sha1
// to the end of the request body, immediately after the contents of the file
// being uploaded. Note that the content length is the size of the file plus 40
// of the original size of the reader.
//
// newB2Reader implements a B2 compatible reader by wrapping the hash.Reader into
// a new io.Reader which will emit out the sha1 hex digits at io.EOF.
// It also means that your overall content size is now original size + 40 bytes.
// Additionally this reader also verifies Hash encapsulated inside hash.Reader
// at io.EOF if the verification failed we return an error and do not send
// the content to server.
//
// size is the original content size, excluding the sha1 trailer.
func newB2Reader(r *h2.Reader, size int64) *Reader {
	return &Reader{
		r:        r,
		size:     size,
		sha1Hash: sha1.New(),
	}
}
// Reader - is a Reader wraps the hash.Reader which will emit out the sha1
// hex digits at io.EOF. It also means that your overall content size is
// now original size + 40 bytes. Additionally this reader also verifies
// Hash encapsulated inside hash.Reader at io.EOF if the verification
// failed we return an error and do not send the content to server.
type Reader struct {
	r        *h2.Reader      // wrapped content stream
	size     int64           // original content size, excluding the sha1 trailer
	sha1Hash hash.Hash       // running sha1 of everything read so far
	isEOF    bool            // true once r has been fully consumed
	buf      *strings.Reader // 40-char sha1 hex trailer served after EOF
}
// Size - Returns the total size of Reader.
// The extra 40 bytes account for the sha1 hex trailer appended at EOF.
func (nb *Reader) Size() int64 { return nb.size + 40 }
// Read streams the wrapped content, then the 40-byte sha1 hex trailer.
func (nb *Reader) Read(p []byte) (int, error) {
	// Once the wrapped stream is exhausted, serve the buffered
	// sha1 hex trailer until it too is drained.
	if nb.isEOF {
		return nb.buf.Read(p)
	}
	// Read into hash to update the on going checksum.
	n, err := io.TeeReader(nb.r, nb.sha1Hash).Read(p)
	if err == io.EOF {
		// Stream is not corrupted on this end
		// now fill in the last 40 bytes of sha1 hex
		// so that the server can verify the stream on
		// their end.
		err = nil
		nb.isEOF = true
		nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.sha1Hash.Sum(nil)))
	}
	return n, err
}
// PutObject uploads the single upload to B2 backend by using *b2_upload_file* API, uploads upto 5GiB.
func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return objInfo, err
	}
	// Content type travels as a dedicated B2 field, not as file info,
	// so it is removed from the metadata map.
	contentType := metadata["content-type"]
	delete(metadata, "content-type")
	var u *b2.URL
	u, err = bkt.GetUploadURL(l.ctx)
	if err != nil {
		return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
	}
	// Wrap the stream so the sha1 hex digest trails the body
	// (sha1AtEOF); hr.Size() is data.Size() + 40 accordingly.
	hr := newB2Reader(data, data.Size())
	var f *b2.File
	f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, metadata)
	if err != nil {
		return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
	}
	var fi *b2.FileInfo
	fi, err = f.GetFileInfo(l.ctx)
	if err != nil {
		return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
	}
	return minio.ObjectInfo{
		Bucket:      bucket,
		Name:        object,
		ETag:        minio.ToS3ETag(fi.ID),
		Size:        fi.Size,
		ModTime:     fi.Timestamp,
		ContentType: fi.ContentType,
		UserDefined: fi.Info,
	}, nil
}
// CopyObject copies a blob from source container to destination container.
// B2 exposes no server-side copy API, so this always reports
// minio.NotImplemented and the caller falls back accordingly.
func (l *b2Objects) CopyObject(srcBucket string, srcObject string, dstBucket string,
	dstObject string, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
	err = errors.Trace(minio.NotImplemented{})
	return objInfo, err
}
// DeleteObject deletes a blob in bucket.
// B2 deletes by file ID, so a 1-byte download is issued first purely
// to resolve the object's ID.
func (l *b2Objects) DeleteObject(bucket string, object string) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
	if err != nil {
		return b2ToObjectError(errors.Trace(err), bucket, object)
	}
	// Best-effort drain so the HTTP connection can be reused; the
	// copy error is intentionally ignored — only reader.ID matters.
	io.Copy(ioutil.Discard, reader)
	reader.Close()
	err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx)
	return b2ToObjectError(errors.Trace(err), bucket, object)
}
// ListMultipartUploads lists all multipart uploads.
func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string,
	delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) {
	// keyMarker, prefix, delimiter are all ignored, Backblaze B2 doesn't support any
	// of these parameters only equivalent parameter is uploadIDMarker.
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return lmi, err
	}
	// The maximum number of files to return from this call.
	// The default value is 100, and the maximum allowed is 100.
	if maxUploads > 100 {
		maxUploads = 100
	}
	largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads)
	if err != nil {
		return lmi, b2ToObjectError(errors.Trace(err), bucket)
	}
	lmi = minio.ListMultipartsInfo{
		MaxUploads: maxUploads,
	}
	// A non-empty marker from B2 means more unfinished uploads remain.
	if nextMarker != "" {
		lmi.IsTruncated = true
		lmi.NextUploadIDMarker = nextMarker
	}
	for _, largeFile := range largeFiles {
		lmi.Uploads = append(lmi.Uploads, minio.MultipartInfo{
			Object:    largeFile.Name,
			UploadID:  largeFile.ID,
			Initiated: largeFile.Timestamp,
		})
	}
	return lmi, nil
}
// NewMultipartUpload upload object in multiple parts, uses B2's LargeFile upload API.
// Large files can range in size from 5MB to 10TB.
// Each large file must consist of at least 2 parts, and all of the parts except the
// last one must be at least 5MB in size. The last part must contain at least one byte.
// For more information - https://www.backblaze.com/b2/docs/large_files.html
//
// The B2 large-file ID doubles as the S3 upload ID.
func (l *b2Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (string, error) {
	var uploadID string
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return uploadID, err
	}
	// Content type travels as a dedicated B2 field, not as file info.
	contentType := metadata["content-type"]
	delete(metadata, "content-type")
	lf, err := bkt.StartLargeFile(l.ctx, object, contentType, metadata)
	if err != nil {
		return uploadID, b2ToObjectError(errors.Trace(err), bucket, object)
	}
	return lf.ID, nil
}
// PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API.
// The sha1 B2 returns for the part is surfaced as the part's S3 ETag.
func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *h2.Reader) (pi minio.PartInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return pi, err
	}
	fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
	if err != nil {
		return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
	}
	// Wrap the stream so the sha1 hex digest trails the body; the
	// uploaded length is therefore data.Size() + 40.
	hr := newB2Reader(data, data.Size())
	sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID)
	if err != nil {
		return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
	}
	return minio.PartInfo{
		PartNumber:   partID,
		LastModified: minio.UTCNow(),
		ETag:         minio.ToS3ETag(sha1),
		Size:         data.Size(),
	}, nil
}
// ListObjectParts returns all object parts for specified object in specified bucket, uses B2's LargeFile upload API.
func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi minio.ListPartsInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return lpi, err
	}
	lpi = minio.ListPartsInfo{
		Bucket:           bucket,
		Object:           object,
		UploadID:         uploadID,
		MaxParts:         maxParts,
		PartNumberMarker: partNumberMarker,
	}
	// startPartNumber must be in the range 1 - 10000 for B2.
	// S3's marker is exclusive (list after N), B2's is inclusive
	// (list from N), hence the increment.
	partNumberMarker++
	partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
	if err != nil {
		return lpi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
	}
	// A non-zero next part number means more parts remain.
	if next != 0 {
		lpi.IsTruncated = true
		lpi.NextPartNumberMarker = next
	}
	for _, part := range partsList {
		lpi.Parts = append(lpi.Parts, minio.PartInfo{
			PartNumber: part.Number,
			ETag:       minio.ToS3ETag(part.SHA1),
			Size:       part.Size,
		})
	}
	return lpi, nil
}
// AbortMultipartUpload aborts an ongoing multipart upload, uses B2's LargeFile upload API.
// Cancelling the large file discards all parts uploaded so far.
func (l *b2Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
	return b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API.
func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return oi, err
	}
	hashes := make(map[int]string)
	for i, uploadedPart := range uploadedParts {
		// B2 requires contiguous part numbers starting with 1, they do not support
		// hand picking part numbers, we return an S3 compatible error instead.
		if i+1 != uploadedPart.PartNumber {
			return oi, b2ToObjectError(errors.Trace(minio.InvalidPart{}), bucket, object, uploadID)
		}
		// Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag.
		hashes[uploadedPart.PartNumber] = strings.TrimSuffix(uploadedPart.ETag, "-1")
	}
	if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
		return oi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
	}
	// Re-fetch metadata so the caller sees the finalized object.
	return l.GetObjectInfo(bucket, object)
}
// SetBucketPolicies - B2 supports 2 types of bucket policies:
// bucketType.AllPublic - bucketTypeReadOnly means that anybody can download the files is the bucket;
// bucketType.AllPrivate - bucketTypePrivate means that you need an authorization token to download them.
// Default is AllPrivate for all buckets.
//
// Only a single read-only policy that covers the whole bucket
// ("bucket/*") can be mapped onto B2; anything else is rejected
// as NotImplemented.
func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
	var policies []minio.BucketAccessPolicy
	for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
		policies = append(policies, minio.BucketAccessPolicy{
			Prefix: prefix,
			Policy: policy,
		})
	}
	prefix := bucket + "/*" // For all objects inside the bucket.
	if len(policies) != 1 {
		return errors.Trace(minio.NotImplemented{})
	}
	if policies[0].Prefix != prefix {
		return errors.Trace(minio.NotImplemented{})
	}
	if policies[0].Policy != policy.BucketPolicyReadOnly {
		return errors.Trace(minio.NotImplemented{})
	}
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	bkt.Type = bucketTypeReadOnly
	_, err = bkt.Update(l.ctx)
	return b2ToObjectError(errors.Trace(err))
}
// GetBucketPolicies, returns the current bucketType from B2 backend and convert
// it into S3 compatible bucket policy info.
func (l *b2Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return policyInfo, err
	}
	// Only the read-only bucket type maps onto an S3 policy.
	if bkt.Type == bucketTypeReadOnly {
		policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
		return policyInfo, nil
	}
	// bkt.Type can also be snapshot, but it is only allowed through B2 browser console,
	// just return back as policy not found for all cases.
	// CreateBucket always sets the value to allPrivate by default.
	return policy.BucketAccessPolicy{}, errors.Trace(minio.PolicyNotFound{Bucket: bucket})
}
// DeleteBucketPolicies - resets the bucketType of bucket on B2 to 'allPrivate'.
// This is the B2 equivalent of removing all S3 bucket policies.
func (l *b2Objects) DeleteBucketPolicies(bucket string) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	bkt.Type = bucketTypePrivate
	_, err = bkt.Update(l.ctx)
	return b2ToObjectError(errors.Trace(err))
}

View File

@@ -0,0 +1,203 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package b2
import (
"fmt"
"net/http"
"testing"
b2 "github.com/minio/blazer/base"
"github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd"
)
// Tests headerToObjectInfo: converts B2 response headers into minio.ObjectInfo.
func TestHeaderToObjectInfo(t *testing.T) {
	testCases := []struct {
		bucket, object string
		header         http.Header
		objInfo        minio.ObjectInfo
	}{
		{
			bucket: "bucket",
			object: "object",
			header: http.Header{
				"Content-Length":         []string{"10"},
				"Content-Type":           []string{"application/javascript"},
				"X-Bz-Upload-Timestamp":  []string{"1000"},
				"X-Bz-Info-X-Amz-Meta-1": []string{"test1"},
				"X-Bz-File-Id":           []string{"xxxxx"},
			},
			objInfo: minio.ObjectInfo{
				Bucket:      "bucket",
				Name:        "object",
				ContentType: "application/javascript",
				Size:        10,
				UserDefined: map[string]string{
					"X-Amz-Meta-1": "test1",
				},
				ETag: "xxxxx",
			},
		},
	}
	for i, testCase := range testCases {
		gotObjInfo, err := headerToObjectInfo(testCase.bucket, testCase.object, testCase.header)
		if err != nil {
			t.Fatalf("Test %d: %s", i+1, err)
		}
		if gotObjInfo.Bucket != testCase.objInfo.Bucket {
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.Bucket, gotObjInfo.Bucket)
		}
		if gotObjInfo.Name != testCase.objInfo.Name {
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.Name, gotObjInfo.Name)
		}
		if gotObjInfo.ContentType != testCase.objInfo.ContentType {
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.ContentType, gotObjInfo.ContentType)
		}
		// Fix: Size and user-defined metadata were declared in the
		// fixture but never verified.
		if gotObjInfo.Size != testCase.objInfo.Size {
			t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.objInfo.Size, gotObjInfo.Size)
		}
		for k, v := range testCase.objInfo.UserDefined {
			if gotObjInfo.UserDefined[k] != v {
				t.Errorf("Test %d: Expected %s=%s, got %s", i+1, k, v, gotObjInfo.UserDefined[k])
			}
		}
		if gotObjInfo.ETag != testCase.objInfo.ETag {
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.ETag, gotObjInfo.ETag)
		}
	}
}
// Tests mkRange: HTTP Range header construction for a (offset, size)
// pair, where size == 0 means "to the end of the object".
func TestMkRange(t *testing.T) {
	testCases := []struct {
		offset, size int64
		expectedRng  string
	}{
		// No offset set, size not set.
		{
			offset:      0,
			size:        0,
			expectedRng: "",
		},
		// Offset set, size not set.
		{
			offset:      10,
			size:        0,
			expectedRng: "bytes=10-",
		},
		// Offset set, size set.
		{
			offset:      10,
			size:        11,
			expectedRng: "bytes=10-20",
		},
	}
	for i, testCase := range testCases {
		gotRng := mkRange(testCase.offset, testCase.size)
		if gotRng != testCase.expectedRng {
			t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedRng, gotRng)
		}
	}
}
// Test b2 object error: b2ToObjectError must translate B2 API errors
// into the equivalent S3-compatible minio errors.
func TestB2ObjectError(t *testing.T) {
	testCases := []struct {
		params      []string
		b2Err       error
		expectedErr error
	}{
		{
			[]string{}, nil, nil,
		},
		{
			[]string{}, fmt.Errorf("Not *Error"), fmt.Errorf("Not *Error"),
		},
		{
			[]string{}, errors.Trace(fmt.Errorf("Non B2 Error")), fmt.Errorf("Non B2 Error"),
		},
		{
			[]string{"bucket"}, errors.Trace(b2.Error{
				StatusCode: 1,
				Code:       "duplicate_bucket_name",
			}), minio.BucketAlreadyOwnedByYou{
				Bucket: "bucket",
			},
		},
		{
			[]string{"bucket"}, errors.Trace(b2.Error{
				StatusCode: 1,
				Code:       "bad_request",
			}), minio.BucketNotFound{
				Bucket: "bucket",
			},
		},
		{
			[]string{"bucket", "object"}, errors.Trace(b2.Error{
				StatusCode: 1,
				Code:       "bad_request",
			}), minio.ObjectNameInvalid{
				Bucket: "bucket",
				Object: "object",
			},
		},
		{
			[]string{"bucket"}, errors.Trace(b2.Error{
				StatusCode: 1,
				Code:       "bad_bucket_id",
			}), minio.BucketNotFound{Bucket: "bucket"},
		},
		{
			[]string{"bucket", "object"}, errors.Trace(b2.Error{
				StatusCode: 1,
				Code:       "file_not_present",
			}), minio.ObjectNotFound{
				Bucket: "bucket",
				Object: "object",
			},
		},
		{
			[]string{"bucket", "object"}, errors.Trace(b2.Error{
				StatusCode: 1,
				Code:       "not_found",
			}), minio.ObjectNotFound{
				Bucket: "bucket",
				Object: "object",
			},
		},
		{
			[]string{"bucket"}, errors.Trace(b2.Error{
				StatusCode: 1,
				Code:       "cannot_delete_non_empty_bucket",
			}), minio.BucketNotEmpty{
				Bucket: "bucket",
			},
		},
		{
			[]string{"bucket", "object", "uploadID"}, errors.Trace(b2.Error{
				StatusCode: 1,
				Message:    "No active upload for",
			}), minio.InvalidUploadID{
				UploadID: "uploadID",
			},
		},
	}
	for i, testCase := range testCases {
		actualErr := b2ToObjectError(testCase.b2Err, testCase.params...)
		// Fix: the old loop only compared when actualErr was non-nil,
		// so an unexpected nil result passed silently, and a non-nil
		// result against a nil expectation would panic on Error().
		switch {
		case actualErr == nil && testCase.expectedErr != nil:
			t.Errorf("Test %d: Expected %s, got nil", i+1, testCase.expectedErr)
		case actualErr != nil && testCase.expectedErr == nil:
			t.Errorf("Test %d: Expected nil, got %s", i+1, actualErr)
		case actualErr != nil && actualErr.Error() != testCase.expectedErr.Error():
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedErr, actualErr)
		}
	}
}

26
cmd/gateway/gateway.go Normal file
View File

@@ -0,0 +1,26 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gateway
import (
// Import all gateways.
_ "github.com/minio/minio/cmd/gateway/azure"
_ "github.com/minio/minio/cmd/gateway/b2"
_ "github.com/minio/minio/cmd/gateway/gcs"
_ "github.com/minio/minio/cmd/gateway/s3"
_ "github.com/minio/minio/cmd/gateway/sia"
)

View File

@@ -0,0 +1,146 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gcs
import (
"fmt"
"io"
"net/http"
"strconv"
"time"
"github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd"
)
// toGCSPublicURL builds the public, unauthenticated URL for an object
// on Google Cloud Storage (with an empty object name, the bucket root).
func toGCSPublicURL(bucket, object string) string {
	const gcsPublicHost = "storage.googleapis.com"
	return fmt.Sprintf("https://%s/%s/%s", gcsPublicHost, bucket, object)
}
// AnonGetObject - Get object anonymously.
//
// Streams bytes [startOffset, startOffset+length) of the object through
// the public GCS URL; length == 0 means "until the end of the object".
func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
	req, err := http.NewRequest("GET", toGCSPublicURL(bucket, object), nil)
	if err != nil {
		return gcsToObjectError(errors.Trace(err), bucket, object)
	}
	// Fix: a bounded Range header must be sent whenever a byte count is
	// requested, even when startOffset is 0 — the previous condition
	// (length > 0 && startOffset > 0) silently fetched the whole object
	// for ranges starting at offset 0.
	if length > 0 {
		req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
	} else if startOffset > 0 {
		req.Header.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return gcsToObjectError(errors.Trace(err), bucket, object)
	}
	defer resp.Body.Close()
	// 206 for ranged reads, 200 for full reads.
	if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
		return gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
	}
	_, err = io.Copy(writer, resp.Body)
	return gcsToObjectError(errors.Trace(err), bucket, object)
}
// AnonGetObjectInfo - Get object info anonymously via a HEAD request
// against the public GCS URL.
func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) {
	resp, err := http.Head(toGCSPublicURL(bucket, object))
	if err != nil {
		return objInfo, gcsToObjectError(errors.Trace(err), bucket, object)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return objInfo, gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
	}
	var contentLength int64
	contentLengthStr := resp.Header.Get("Content-Length")
	if contentLengthStr != "" {
		// Fix: parse explicitly in base 10; base 0 would accept "0x"/"0"
		// prefixed values, which are never valid for Content-Length.
		contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
		if err != nil {
			// Fix: surface the actual parse failure instead of the
			// uninformative "Unexpected error".
			return objInfo, gcsToObjectError(errors.Trace(err), bucket, object)
		}
	}
	t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
	if err != nil {
		return objInfo, errors.Trace(err)
	}
	objInfo.Bucket = bucket
	objInfo.Name = object
	objInfo.ModTime = t // previously assigned twice; once is enough
	objInfo.Size = contentLength
	objInfo.ETag = resp.Header.Get("Etag")
	objInfo.UserDefined = make(map[string]string)
	if ce := resp.Header.Get("Content-Encoding"); ce != "" {
		objInfo.UserDefined["Content-Encoding"] = ce
	}
	objInfo.UserDefined["Content-Type"] = resp.Header.Get("Content-Type")
	return objInfo, nil
}
// AnonListObjects - List objects anonymously.
// Delegates to the unauthenticated client and translates the result
// into the gateway's list format.
func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) {
	result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
	if err != nil {
		return minio.ListObjectsInfo{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}
	return minio.FromMinioClientListBucketResult(bucket, result), nil
}
// AnonListObjectsV2 - List objects in V2 mode, anonymously.
// The continuation token is passed through as the V1 marker.
func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) {
	// Request V1 List Object to the backend
	result, err := l.anonClient.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
	if err != nil {
		return minio.ListObjectsV2Info{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}
	// translate V1 Result to V2Info
	return minio.FromMinioClientListBucketResultToV2Info(bucket, result), nil
}
// AnonGetBucketInfo - Get bucket metadata anonymously.
// Issues a HEAD against the public bucket URL; a 200 both confirms the
// bucket exists and yields its Last-Modified header.
func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo minio.BucketInfo, err error) {
	resp, err := http.Head(toGCSPublicURL(bucket, ""))
	if err != nil {
		return bucketInfo, gcsToObjectError(errors.Trace(err))
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return bucketInfo, gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket)), bucket)
	}
	t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
	if err != nil {
		return bucketInfo, errors.Trace(err)
	}
	// Last-Modified date being returned by GCS stands in for the
	// creation time, which anonymous access cannot observe.
	return minio.BucketInfo{
		Name:    bucket,
		Created: t,
	}, nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,403 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gcs
import (
"fmt"
"io/ioutil"
"os"
"path"
"reflect"
"testing"
"github.com/minio/minio/pkg/errors"
"google.golang.org/api/googleapi"
miniogo "github.com/minio/minio-go"
minio "github.com/minio/minio/cmd"
)
// TestToGCSPageToken verifies encoding of object names into GCS
// continuation page tokens across length-prefix boundaries.
func TestToGCSPageToken(t *testing.T) {
	testCases := []struct {
		Name  string
		Token string
	}{
		{
			Name:  "A",
			Token: "CgFB",
		},
		{
			Name:  "AAAAAAAAAA",
			Token: "CgpBQUFBQUFBQUFB",
		},
		{
			Name:  "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
			Token: "CmRBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB",
		},
		{
			Name:  "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
			Token: "CpEDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUE=",
		},
		{
			Name:  "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
			Token: "CpIDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB",
		},
		{
			Name:  "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
			Token: "CpMDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQQ==",
		},
		{
			Name:  "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
			Token: "CvQDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUE=",
		},
	}
	for i, testCase := range testCases {
		// Fix: the function was called twice per case, and the
		// expected/got values were swapped in the failure message.
		if got := toGCSPageToken(testCase.Name); got != testCase.Token {
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.Token, got)
		}
	}
}
// TestValidGCSProjectIDFormat tests isValidGCSProjectIDFormat: project
// IDs must be 6-30 chars, start with a lowercase letter, and contain
// only lowercase letters, digits, and hyphens.
func TestValidGCSProjectIDFormat(t *testing.T) {
	testCases := []struct {
		ProjectID string
		Valid     bool
	}{
		{"", false},
		{"a", false},
		{"Abc", false},
		{"1bcd", false},
		// 5 chars
		{"abcdb", false},
		// 6 chars
		{"abcdbz", true},
		// 30 chars
		{"project-id-1-project-id-more-1", true},
		// 31 chars
		{"project-id-1-project-id-more-11", false},
		{"storage.googleapis.com", false},
		{"http://storage.googleapis.com", false},
		{"http://localhost:9000", false},
		{"project-id-1", true},
		{"project-id-1988832", true},
		{"projectid1414", true},
	}
	for i, testCase := range testCases {
		valid := isValidGCSProjectIDFormat(testCase.ProjectID)
		if valid != testCase.Valid {
			// Fix: expected/got were swapped in the failure message.
			t.Errorf("Test %d: Expected %v, got %v", i+1, testCase.Valid, valid)
		}
	}
}
// Test for isGCSMarker.
func TestIsGCSMarker(t *testing.T) {
	for i, tc := range []struct {
		marker   string
		expected bool
	}{
		{marker: "{minio}gcs123", expected: true},
		{marker: "{mini_no}tgcs123", expected: false},
		{marker: "{minioagainnotgcs123", expected: false},
		{marker: "obj1", expected: false},
	} {
		actual := isGCSMarker(tc.marker)
		if actual != tc.expected {
			t.Errorf("Test %d: marker is %s, expected %v but got %v",
				i+1, tc.marker, tc.expected, actual)
		}
	}
}
// Test for gcsMultipartMetaName.
func TestGCSMultipartMetaName(t *testing.T) {
	const uploadID = "a"
	// The meta file lives under the multipart staging path for the upload.
	want := path.Join(gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta)
	if got := gcsMultipartMetaName(uploadID); want != got {
		t.Errorf("expected: %s, got: %s", want, got)
	}
}
// Test for gcsMultipartDataName.
func TestGCSMultipartDataName(t *testing.T) {
	const (
		uploadID   = "a"
		etag       = "b"
		partNumber = 1
	)
	// Part data files are named "<5-digit part number>.<etag>" under the
	// upload's staging path.
	want := path.Join(gcsMinioMultipartPathV1, uploadID, fmt.Sprintf("%05d.%s", partNumber, etag))
	if got := gcsMultipartDataName(uploadID, partNumber, etag); want != got {
		t.Errorf("expected: %s, got: %s", want, got)
	}
}
// TestFromMinioClientListBucketResultToV2Info verifies that a minio-go
// ListBucketResult converts into the equivalent gateway ListObjectsV2Info.
func TestFromMinioClientListBucketResultToV2Info(t *testing.T) {
	input := miniogo.ListBucketResult{
		IsTruncated:    false,
		Marker:         "testMarker",
		NextMarker:     "testMarker2",
		CommonPrefixes: []miniogo.CommonPrefix{{Prefix: "one"}, {Prefix: "two"}},
		Contents:       []miniogo.ObjectInfo{{Key: "testobj", ContentType: ""}},
	}
	want := minio.ListObjectsV2Info{
		Prefixes:              []string{"one", "two"},
		Objects:               []minio.ObjectInfo{{Name: "testobj", Bucket: "testbucket", UserDefined: map[string]string{"Content-Type": ""}}},
		IsTruncated:           false,
		ContinuationToken:     "testMarker",
		NextContinuationToken: "testMarker2",
	}
	got := minio.FromMinioClientListBucketResultToV2Info("testbucket", input)
	if !reflect.DeepEqual(got, want) {
		t.Errorf("fromMinioClientListBucketResultToV2Info() = %v, want %v", got, want)
	}
}
// Test for gcsParseProjectID.
func TestGCSParseProjectID(t *testing.T) {
	f, err := ioutil.TempFile("", "")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	contents := `
{
	"type": "service_account",
	"project_id": "miniotesting"
}
`
	// Bug fix: the WriteString errors were previously ignored; a failed
	// write would surface as a confusing parse failure below.
	if _, err = f.WriteString(contents); err != nil {
		t.Fatal(err)
	}
	projectID, err := gcsParseProjectID(f.Name())
	if err != nil {
		t.Fatal(err)
	}
	if projectID != "miniotesting" {
		t.Errorf(`Expected projectID value to be "miniotesting"`)
	}
	// A missing credentials file must fail.
	if _, err = gcsParseProjectID("non-existent"); err == nil {
		t.Errorf(`Expected to fail but succeeded reading "non-existent"`)
	}
	// Corrupt the credentials file; parsing must now fail.
	if _, err = f.WriteString(`,}`); err != nil {
		t.Fatal(err)
	}
	if _, err := gcsParseProjectID(f.Name()); err == nil {
		t.Errorf(`Expected to fail reading corrupted credentials file`)
	}
}
// TestGCSPublicURL verifies the public URL built for a bucket/object pair.
func TestGCSPublicURL(t *testing.T) {
	const want = "https://storage.googleapis.com/bucket/testing"
	gcsURL := toGCSPublicURL("bucket", "testing")
	if gcsURL != want {
		t.Errorf(`Expected "https://storage.googleapis.com/bucket/testing", got %s"`, gcsURL)
	}
}
// TestGCSToObjectError verifies mapping of GCS SDK errors to gateway errors.
func TestGCSToObjectError(t *testing.T) {
	testCases := []struct {
		params      []string
		gcsErr      error
		expectedErr error
	}{
		{
			[]string{}, nil, nil,
		},
		{
			[]string{}, fmt.Errorf("Not *Error"), fmt.Errorf("Not *Error"),
		},
		{
			[]string{"bucket"},
			errors.Trace(fmt.Errorf("storage: bucket doesn't exist")),
			minio.BucketNotFound{
				Bucket: "bucket",
			},
		},
		{
			[]string{"bucket", "object"},
			errors.Trace(fmt.Errorf("storage: object doesn't exist")),
			minio.ObjectNotFound{
				Bucket: "bucket",
				Object: "object",
			},
		},
		{
			[]string{"bucket", "object", "uploadID"},
			errors.Trace(fmt.Errorf("storage: object doesn't exist")),
			minio.InvalidUploadID{
				UploadID: "uploadID",
			},
		},
		{
			[]string{},
			errors.Trace(fmt.Errorf("Unknown error")),
			fmt.Errorf("Unknown error"),
		},
		{
			[]string{"bucket", "object"},
			errors.Trace(&googleapi.Error{
				Message: "No list of errors",
			}),
			&googleapi.Error{
				Message: "No list of errors",
			},
		},
		{
			[]string{"bucket", "object"},
			errors.Trace(&googleapi.Error{
				Errors: []googleapi.ErrorItem{{
					Reason:  "conflict",
					Message: "You already own this bucket. Please select another name.",
				}},
			}),
			minio.BucketAlreadyOwnedByYou{
				Bucket: "bucket",
			},
		},
		{
			[]string{"bucket", "object"},
			errors.Trace(&googleapi.Error{
				Errors: []googleapi.ErrorItem{{
					Reason:  "conflict",
					Message: "Sorry, that name is not available. Please try a different one.",
				}},
			}),
			minio.BucketAlreadyExists{
				Bucket: "bucket",
			},
		},
		{
			[]string{"bucket", "object"},
			errors.Trace(&googleapi.Error{
				Errors: []googleapi.ErrorItem{{
					Reason: "conflict",
				}},
			}),
			minio.BucketNotEmpty{Bucket: "bucket"},
		},
		{
			[]string{"bucket"},
			errors.Trace(&googleapi.Error{
				Errors: []googleapi.ErrorItem{{
					Reason: "notFound",
				}},
			}),
			minio.BucketNotFound{
				Bucket: "bucket",
			},
		},
		{
			[]string{"bucket", "object"},
			errors.Trace(&googleapi.Error{
				Errors: []googleapi.ErrorItem{{
					Reason: "notFound",
				}},
			}),
			minio.ObjectNotFound{
				Bucket: "bucket",
				Object: "object",
			},
		},
		{
			[]string{"bucket"},
			errors.Trace(&googleapi.Error{
				Errors: []googleapi.ErrorItem{{
					Reason: "invalid",
				}},
			}),
			minio.BucketNameInvalid{
				Bucket: "bucket",
			},
		},
		{
			[]string{"bucket", "object"},
			errors.Trace(&googleapi.Error{
				Errors: []googleapi.ErrorItem{{
					Reason: "forbidden",
				}},
			}),
			minio.PrefixAccessDenied{
				Bucket: "bucket",
				Object: "object",
			},
		},
		{
			[]string{"bucket", "object"},
			errors.Trace(&googleapi.Error{
				Errors: []googleapi.ErrorItem{{
					Reason: "keyInvalid",
				}},
			}),
			minio.PrefixAccessDenied{
				Bucket: "bucket",
				Object: "object",
			},
		},
		{
			[]string{"bucket", "object"},
			errors.Trace(&googleapi.Error{
				Errors: []googleapi.ErrorItem{{
					Reason: "required",
				}},
			}),
			minio.PrefixAccessDenied{
				Bucket: "bucket",
				Object: "object",
			},
		},
		{
			[]string{"bucket", "object"},
			errors.Trace(&googleapi.Error{
				Errors: []googleapi.ErrorItem{{
					Reason: "unknown",
				}},
			}),
			fmt.Errorf("Unsupported error reason: unknown"),
		},
	}
	for i, testCase := range testCases {
		actualErr := gcsToObjectError(testCase.gcsErr, testCase.params...)
		if testCase.expectedErr == nil {
			if actualErr != nil {
				t.Errorf("Test %d: Expected nil, got %s", i+1, actualErr)
			}
			continue
		}
		// Bug fix: previously a nil result silently passed even when a
		// non-nil error was expected.
		if actualErr == nil {
			t.Errorf("Test %d: Expected %s, got nil", i+1, testCase.expectedErr)
			continue
		}
		if actualErr.Error() != testCase.expectedErr.Error() {
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedErr, actualErr)
		}
	}
}

View File

@@ -0,0 +1,114 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3
import (
"io"
miniogo "github.com/minio/minio-go"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
)
// AnonPutObject creates a new object anonymously with the incoming data,
func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, e error) {
	oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(),
		data.MD5HexString(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata))
	if err == nil {
		return minio.FromMinioClientObjectInfo(bucket, oi), nil
	}
	return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
}
// AnonGetObject - Get object anonymously.
//
// Streams length bytes of the object, starting at startOffset, into writer.
//
// NOTE(review): unlike GetObject below, there is no guard for a full-object
// read (length == -1); SetRange would then receive an invalid range
// (startOffset, startOffset-2). Confirm callers always pass a positive length.
func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
	opts := miniogo.GetObjectOptions{}
	// Request only the byte range [startOffset, startOffset+length-1].
	if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
	}
	object, _, err := l.anonClient.GetObject(bucket, key, opts)
	if err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
	}
	defer object.Close()
	// Copy exactly length bytes; CopyN reports an error on short reads.
	if _, err := io.CopyN(writer, object, length); err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
	}
	return nil
}
// AnonGetObjectInfo - Get object info anonymously
func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, e error) {
	oi, err := l.anonClient.StatObject(bucket, object, miniogo.StatObjectOptions{})
	if err == nil {
		return minio.FromMinioClientObjectInfo(bucket, oi), nil
	}
	return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
}
// AnonListObjects - List objects anonymously
func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
	result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
	if err == nil {
		return minio.FromMinioClientListBucketResult(bucket, result), nil
	}
	return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
}
// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
	result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
	if err == nil {
		return minio.FromMinioClientListBucketV2Result(bucket, result), nil
	}
	return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
}
// AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi minio.BucketInfo, e error) {
	exists, err := l.anonClient.BucketExists(bucket)
	if err != nil {
		return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}
	if !exists {
		return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
	}
	buckets, err := l.anonClient.ListBuckets()
	if err != nil {
		return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}
	// Scan the bucket listing to pick up the creation date of the bucket.
	for _, bucketInfo := range buckets {
		if bucketInfo.Name == bucket {
			return minio.BucketInfo{
				Name:    bucketInfo.Name,
				Created: bucketInfo.CreationDate,
			}, nil
		}
	}
	return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
}

View File

@@ -0,0 +1,416 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3
import (
"io"
"github.com/minio/cli"
miniogo "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
)
const (
	// s3Backend is the name this gateway registers under with the
	// top-level minio command ("minio gateway s3").
	s3Backend = "s3"
)
// init registers the "s3" gateway subcommand and its help template with the
// minio command framework.
func init() {
	const s3GatewayTemplate = `NAME:
  {{.HelpName}} - {{.Usage}}

USAGE:
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT]
{{if .VisibleFlags}}
FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}{{end}}
ENDPOINT:
  S3 server endpoint. Default ENDPOINT is https://s3.amazonaws.com

ENVIRONMENT VARIABLES:
  ACCESS:
     MINIO_ACCESS_KEY: Username or access key of S3 storage.
     MINIO_SECRET_KEY: Password or secret key of S3 storage.

  BROWSER:
     MINIO_BROWSER: To disable web browser access, set this value to "off".

EXAMPLES:
  1. Start minio gateway server for AWS S3 backend.
      $ export MINIO_ACCESS_KEY=accesskey
      $ export MINIO_SECRET_KEY=secretkey
      $ {{.HelpName}}

  2. Start minio gateway server for S3 backend on custom endpoint.
      $ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
      $ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
      $ {{.HelpName}} https://play.minio.io:9000
`

	// Registration happens at package import time so that importing this
	// package is enough to make the subcommand available.
	minio.RegisterGatewayCommand(cli.Command{
		Name:               s3Backend,
		Usage:              "Amazon Simple Storage Service (S3).",
		Action:             s3GatewayMain,
		CustomHelpTemplate: s3GatewayTemplate,
		HideHelpCommand:    true,
	})
}
// Handler for 'minio gateway s3' command line.
func s3GatewayMain(ctx *cli.Context) {
	// Optional S3 endpoint supplied as the first positional argument.
	host := ctx.Args().First()
	// Validate gateway arguments.
	minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")

	minio.StartGateway(ctx, &S3{host})
}
// S3 implements Gateway.
type S3 struct {
	host string // Remote S3 endpoint (host[:port]); empty selects the default AWS endpoint.
}
// Name implements Gateway interface. It returns the backend identifier ("s3").
func (g *S3) Name() string {
	return s3Backend
}
// NewGatewayLayer returns s3 gatewaylayer backed by the configured endpoint,
// using creds for the authenticated client.
func (g *S3) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
	var (
		err      error
		endpoint string
		secure   = true
	)

	// Override the defaults when a host was supplied on the command line.
	if g.host != "" {
		if endpoint, secure, err = minio.ParseGatewayEndpoint(g.host); err != nil {
			return nil, err
		}
	}
	// Fall back to the default AWS S3 endpoint.
	if endpoint == "" {
		endpoint = "s3.amazonaws.com"
	}

	// Authenticated client used for all credentialed calls.
	client, err := miniogo.NewCore(endpoint, creds.AccessKey, creds.SecretKey, secure)
	if err != nil {
		return nil, err
	}

	// Anonymous client serves unauthenticated (public) requests.
	anonClient, err := miniogo.NewCore(endpoint, "", "", secure)
	if err != nil {
		return nil, err
	}
	anonClient.SetCustomTransport(minio.NewCustomHTTPTransport())

	return &s3Objects{
		Client:     client,
		anonClient: anonClient,
	}, nil
}
// Production - s3 gateway is not production ready.
func (g *S3) Production() bool {
	return false
}
// s3Objects implements gateway for Minio and S3 compatible object storage servers.
type s3Objects struct {
	minio.GatewayUnsupported
	Client     *miniogo.Core // Authenticated client for credentialed requests.
	anonClient *miniogo.Core // Anonymous client for public (unauthenticated) requests.
}
// Shutdown saves any gateway metadata to disk
// if necessary and reload upon next restart.
// The S3 gateway keeps no local state, so there is nothing to persist.
func (l *s3Objects) Shutdown() error {
	return nil
}
// StorageInfo is not relevant to S3 backend; it returns the zero value.
func (l *s3Objects) StorageInfo() (si minio.StorageInfo) {
	return si
}
// MakeBucketWithLocation creates a new bucket on the S3 backend in the
// requested location.
func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error {
	if err := l.Client.MakeBucket(bucket, location); err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}
	return nil
}
// GetBucketInfo gets bucket metadata.
func (l *s3Objects) GetBucketInfo(bucket string) (bi minio.BucketInfo, e error) {
	// Verify if bucket name is valid.
	// A separate helper is used instead of IsValidBucketName() because some
	// users may own non-DNS-compliant buckets in us-east-1, and we should
	// not lock them out of those buckets.
	// Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
	if s3utils.CheckValidBucketName(bucket) != nil {
		return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
	}

	buckets, err := l.Client.ListBuckets()
	if err != nil {
		return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}

	// Scan the bucket listing to pick up the creation date of the bucket.
	for _, bucketInfo := range buckets {
		if bucketInfo.Name == bucket {
			return minio.BucketInfo{
				Name:    bucketInfo.Name,
				Created: bucketInfo.CreationDate,
			}, nil
		}
	}
	return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
}
// ListBuckets lists all S3 buckets
func (l *s3Objects) ListBuckets() ([]minio.BucketInfo, error) {
	buckets, err := l.Client.ListBuckets()
	if err != nil {
		return nil, minio.ErrorRespToObjectError(errors.Trace(err))
	}

	results := make([]minio.BucketInfo, 0, len(buckets))
	for _, bucketInfo := range buckets {
		results = append(results, minio.BucketInfo{
			Name:    bucketInfo.Name,
			Created: bucketInfo.CreationDate,
		})
	}
	return results, nil
}
// DeleteBucket deletes a bucket on S3
func (l *s3Objects) DeleteBucket(bucket string) error {
	if err := l.Client.RemoveBucket(bucket); err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}
	return nil
}
// ListObjects lists all blobs in S3 bucket filtered by prefix
func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
	result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
	if err == nil {
		return minio.FromMinioClientListBucketResult(bucket, result), nil
	}
	return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
}
// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
	result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
	if err == nil {
		return minio.FromMinioClientListBucketV2Result(bucket, result), nil
	}
	return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
}
// GetObject reads an object from S3. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object; -1 means read the whole
// object from startOffset.
func (l *s3Objects) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
	// Reject negative lengths other than the sentinel -1.
	if length < 0 && length != -1 {
		return minio.ErrorRespToObjectError(errors.Trace(minio.InvalidRange{}), bucket, key)
	}
	opts := miniogo.GetObjectOptions{}
	// Set a byte range only for a bounded read; for length == -1 the whole
	// object is streamed without a Range header.
	// NOTE(review): length == 0 would produce the invalid range
	// (startOffset, startOffset-1) — confirm callers never request zero bytes.
	if startOffset >= 0 && length >= 0 {
		if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
			return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
		}
	}
	object, _, err := l.Client.GetObject(bucket, key, opts)
	if err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
	}
	defer object.Close()

	if _, err := io.Copy(writer, object); err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
	}
	return nil
}
// GetObjectInfo reads object info and replies back ObjectInfo
func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) {
	oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{})
	if err == nil {
		return minio.FromMinioClientObjectInfo(bucket, oi), nil
	}
	return minio.ObjectInfo{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
}
// PutObject creates a new object with the incoming data,
func (l *s3Objects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
	oi, err := l.Client.PutObject(bucket, object, data, data.Size(),
		data.MD5HexString(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata))
	if err == nil {
		return minio.FromMinioClientObjectInfo(bucket, oi), nil
	}
	return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
}
// CopyObject copies an object from source bucket to a destination bucket.
func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket string, dstObject string, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
	// Set this header such that following CopyObject() always sets the right metadata on the destination.
	// metadata input is already a trickled down value from interpreting x-amz-metadata-directive at
	// handler layer. So what we have right now is supposed to be applied on the destination object anyways.
	// So preserve it by adding "REPLACE" directive to save all the metadata set by CopyObject API.
	// NOTE(review): this writes into the caller-supplied map; confirm no
	// caller reuses the map after this call.
	metadata["x-amz-metadata-directive"] = "REPLACE"
	if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, metadata); err != nil {
		return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), srcBucket, srcObject)
	}
	// Stat the destination to return its fresh metadata.
	return l.GetObjectInfo(dstBucket, dstObject)
}
// DeleteObject deletes a blob in bucket
func (l *s3Objects) DeleteObject(bucket string, object string) error {
	if err := l.Client.RemoveObject(bucket, object); err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
	}
	return nil
}
// ListMultipartUploads lists all multipart uploads.
func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) {
	result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
	if err != nil {
		// Consistency fix: wrap the backend error like every other method
		// in this file, instead of leaking the raw minio-go error.
		return lmi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}

	return minio.FromMinioClientListMultipartsInfo(result), nil
}
// NewMultipartUpload upload object in multiple parts
func (l *s3Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
	// Create PutObject options carrying the user metadata.
	opts := miniogo.PutObjectOptions{UserMetadata: metadata}
	if uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts); err != nil {
		return uploadID, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
	}
	return uploadID, nil
}
// PutObjectPart puts a part of object in bucket
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, e error) {
	info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data,
		data.Size(), data.MD5HexString(), data.SHA256HexString())
	if err == nil {
		return minio.FromMinioClientObjectPart(info), nil
	}
	return pi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
}
// CopyObjectPart creates a part in a multipart upload by copying
// existing object or a part of it.
func (l *s3Objects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject, uploadID string,
	partID int, startOffset, length int64, metadata map[string]string) (p minio.PartInfo, err error) {

	completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject,
		uploadID, partID, startOffset, length, metadata)
	if err != nil {
		return p, minio.ErrorRespToObjectError(errors.Trace(err), srcBucket, srcObject)
	}

	// Only the part number and ETag are meaningful to the caller.
	p = minio.PartInfo{
		PartNumber: completePart.PartNumber,
		ETag:       completePart.ETag,
	}
	return p, nil
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi minio.ListPartsInfo, e error) {
	result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
	if err != nil {
		// Consistency fix: wrap the backend error like every other method
		// in this file, instead of leaking the raw minio-go error.
		return lpi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
	}

	return minio.FromMinioClientListPartsInfo(result), nil
}
// AbortMultipartUpload aborts a ongoing multipart upload
func (l *s3Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error {
	err := l.Client.AbortMultipartUpload(bucket, object, uploadID)
	// The wrapper is applied unconditionally; on success err is nil and the
	// wrapped result is expected to stay nil.
	return minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, e error) {
	if err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts)); err != nil {
		return oi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
	}
	// Stat the finalized object to report its info back to the caller.
	return l.GetObjectInfo(bucket, object)
}
// SetBucketPolicies sets policy on bucket
func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
	err := l.Client.PutBucketPolicy(bucket, policyInfo)
	if err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
	}
	return nil
}
// GetBucketPolicies will get policy on bucket
func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
	policyInfo, err := l.Client.GetBucketPolicy(bucket)
	if err == nil {
		return policyInfo, nil
	}
	return policy.BucketAccessPolicy{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
}
// DeleteBucketPolicies deletes all policies on bucket by overwriting them
// with an empty policy document.
func (l *s3Objects) DeleteBucketPolicies(bucket string) error {
	err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{})
	if err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
	}
	return nil
}

View File

@@ -0,0 +1,127 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3
import (
"fmt"
"testing"
miniogo "github.com/minio/minio-go"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
)
// errResponse builds a minimal minio-go ErrorResponse carrying only the
// given error code.
func errResponse(code string) miniogo.ErrorResponse {
	return miniogo.ErrorResponse{Code: code}
}
// TestS3ToObjectError verifies mapping of minio-go error responses to
// gateway error types.
func TestS3ToObjectError(t *testing.T) {
	testCases := []struct {
		inputErr       error
		expectedErr    error
		bucket, object string
	}{
		{
			inputErr:    errResponse("BucketAlreadyOwnedByYou"),
			expectedErr: minio.BucketAlreadyOwnedByYou{},
		},
		{
			inputErr:    errResponse("BucketNotEmpty"),
			expectedErr: minio.BucketNotEmpty{},
		},
		{
			inputErr:    errResponse("InvalidBucketName"),
			expectedErr: minio.BucketNameInvalid{},
		},
		{
			inputErr:    errResponse("NoSuchBucketPolicy"),
			expectedErr: minio.PolicyNotFound{},
		},
		{
			inputErr:    errResponse("NoSuchBucket"),
			expectedErr: minio.BucketNotFound{},
		},
		// with empty Object in miniogo.ErrorRepsonse, NoSuchKey
		// is interpreted as BucketNotFound
		{
			inputErr:    errResponse("NoSuchKey"),
			expectedErr: minio.BucketNotFound{},
		},
		{
			inputErr:    errResponse("NoSuchUpload"),
			expectedErr: minio.InvalidUploadID{},
		},
		{
			inputErr:    errResponse("XMinioInvalidObjectName"),
			expectedErr: minio.ObjectNameInvalid{},
		},
		{
			inputErr:    errResponse("AccessDenied"),
			expectedErr: minio.PrefixAccessDenied{},
		},
		{
			inputErr:    errResponse("XAmzContentSHA256Mismatch"),
			expectedErr: hash.SHA256Mismatch{},
		},
		{
			inputErr:    errResponse("EntityTooSmall"),
			expectedErr: minio.PartTooSmall{},
		},
		{
			inputErr:    nil,
			expectedErr: nil,
		},
		// Special test case for NoSuchKey with object name
		{
			inputErr: miniogo.ErrorResponse{
				Code: "NoSuchKey",
			},
			expectedErr: minio.ObjectNotFound{
				Bucket: "bucket",
				Object: "object",
			},
			bucket: "bucket",
			object: "object",
		},

		// N B error values that aren't of expected types
		// should be left untouched.
		// Special test case for error that is not of type
		// miniogo.ErrorResponse
		{
			inputErr:    fmt.Errorf("not a minio.ErrorResponse"),
			expectedErr: fmt.Errorf("not a minio.ErrorResponse"),
		},
		// Special test case for error value that is not of
		// type (*Error)
		{
			inputErr:    fmt.Errorf("not a *Error"),
			expectedErr: fmt.Errorf("not a *Error"),
		},
	}

	for i, tc := range testCases {
		actualErr := minio.ErrorRespToObjectError(errors.Trace(tc.inputErr), tc.bucket, tc.object)
		// Bug fix: a nil or non-traced result previously passed silently
		// even when an error was expected.
		if tc.expectedErr == nil {
			if actualErr != nil {
				t.Errorf("Test case %d: Expected nil error but received error %v", i+1, actualErr)
			}
			continue
		}
		if actualErr == nil {
			t.Errorf("Test case %d: Expected error %v but received nil", i+1, tc.expectedErr)
			continue
		}
		e, ok := actualErr.(*errors.Error)
		if !ok {
			t.Errorf("Test case %d: Expected a traced error but received error %v", i+1, actualErr)
			continue
		}
		if e.Cause.Error() != tc.expectedErr.Error() {
			t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e)
		}
	}
}

View File

@@ -0,0 +1,594 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sia
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/fatih/color"
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/set"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/sha256-simd"
)
const (
	// siaBackend is the name this gateway registers under with the
	// top-level minio command ("minio gateway sia").
	siaBackend = "sia"
)
// siaObjects implements the gateway layer on top of a local Sia daemon.
type siaObjects struct {
	minio.GatewayUnsupported
	Address  string // Address and port of Sia Daemon.
	TempDir  string // Temporary storage location for file transfers.
	RootDir  string // Root directory to store files on Sia.
	password string // Sia password for uploading content in authenticated manner.
}
// init registers the "sia" gateway subcommand and its help template with the
// minio command framework.
func init() {
	const siaGatewayTemplate = `NAME:
  {{.HelpName}} - {{.Usage}}

USAGE:
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [SIA_DAEMON_ADDR]
{{if .VisibleFlags}}
FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}{{end}}
ENVIRONMENT VARIABLES: (Default values in parenthesis)
  ACCESS:
     MINIO_ACCESS_KEY: Custom access key (Do not reuse same access keys on all instances)
     MINIO_SECRET_KEY: Custom secret key (Do not reuse same secret keys on all instances)

  SIA_TEMP_DIR:        The name of the local Sia temporary storage directory. (.sia_temp)
  SIA_API_PASSWORD:    API password for Sia daemon. (default is empty)

EXAMPLES:
  1. Start minio gateway server for Sia backend.
      $ {{.HelpName}}
`
	// Registration happens at package import time so that importing this
	// package is enough to make the subcommand available.
	minio.RegisterGatewayCommand(cli.Command{
		Name:               siaBackend,
		Usage:              "Sia Decentralized Cloud.",
		Action:             siaGatewayMain,
		CustomHelpTemplate: siaGatewayTemplate,
		HideHelpCommand:    true,
	})
}
// Handler for 'minio gateway sia' command line.
func siaGatewayMain(ctx *cli.Context) {
	// Optional Sia daemon address supplied as the first positional argument.
	host := ctx.Args().First()
	// Validate gateway arguments.
	minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")

	minio.StartGateway(ctx, &Sia{host})
}
// Sia implements Gateway.
type Sia struct {
	host string // Sia daemon host address
}
// Name implements Gateway interface. It returns the backend identifier ("sia").
func (g *Sia) Name() string {
	return siaBackend
}
// NewGatewayLayer returns Sia gateway layer, implements GatewayLayer interface to
// talk to Sia backend.
func (g *Sia) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
	sia := &siaObjects{
		Address: g.host,
		// RootDir uses access key directly, provides partitioning for
		// concurrent users talking to same sia daemon.
		RootDir:  creds.AccessKey,
		TempDir:  os.Getenv("SIA_TEMP_DIR"),
		password: os.Getenv("SIA_API_PASSWORD"),
	}

	// If Address not provided on command line or ENV, default to:
	if sia.Address == "" {
		sia.Address = "127.0.0.1:9980"
	}

	// If local Sia temp directory not specified, default to:
	if sia.TempDir == "" {
		sia.TempDir = ".sia_temp"
	}

	var err error
	// Normalize to an absolute path so later file operations are
	// independent of the process working directory.
	sia.TempDir, err = filepath.Abs(sia.TempDir)
	if err != nil {
		return nil, err
	}

	// Create the temp directory with proper permissions.
	// Ignore error when dir already exists.
	if err = os.MkdirAll(sia.TempDir, 0700); err != nil {
		return nil, err
	}

	// Print the effective configuration for the operator.
	colorBlue := color.New(color.FgBlue).SprintfFunc()
	colorBold := color.New(color.Bold).SprintFunc()

	log.Println(colorBlue("\nSia Gateway Configuration:"))
	log.Println(colorBlue("  Sia Daemon API Address:") + colorBold(fmt.Sprintf(" %s\n", sia.Address)))
	log.Println(colorBlue("  Sia Temp Directory:") + colorBold(fmt.Sprintf(" %s\n", sia.TempDir)))
	return sia, nil
}
// Production - sia gateway is not ready for production use.
func (g *Sia) Production() bool {
	return false
}
// non2xx reports whether code falls outside the HTTP 2xx success range.
func non2xx(code int) bool {
	return !(code >= 200 && code <= 299)
}
// siaError mirrors the JSON error payload returned by the Sia daemon API in
// the event of an error. Only Message is required; more fields may be added
// in the future for better error reporting.
type siaError struct {
	// Message describes the error in English. Typically it is set to
	// `err.Error()`. This field is required.
	Message string `json:"message"`
}
// Error implements the error interface, returning the daemon's message.
func (s siaError) Error() string {
	return s.Message
}
// decodeError returns the siaError carried in an API response body. It should
// only be called when the response's status code is non-2xx; the returned
// error may not be of type siaError if unmarshalling the JSON fails.
func decodeError(resp *http.Response) error {
	var apiErr siaError
	err := json.NewDecoder(resp.Body).Decode(&apiErr)
	if err != nil {
		return err
	}
	return apiErr
}
// MethodNotSupported is returned when the daemon answers an API call
// with 404, i.e. the endpoint is not recognized by this daemon.
type MethodNotSupported struct {
	method string
}

// Error implements the error interface.
func (s MethodNotSupported) Error() string {
	return fmt.Sprintf("API call not recognized: %s", s.method)
}
// apiGet issues a GET against the Sia daemon and verifies the response
// carries a 2xx status. A 404 is mapped to MethodNotSupported; any
// other non-2xx status yields the decoded API error. On success the
// response body is left open for the caller to consume and close.
func apiGet(addr, call, apiPassword string) (*http.Response, error) {
	req, err := http.NewRequest("GET", "http://"+addr+call, nil)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Identify as a Sia API client.
	req.Header.Set("User-Agent", "Sia-Agent")
	if apiPassword != "" {
		// Sia API authentication uses an empty user name.
		req.SetBasicAuth("", apiPassword)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, errors.Trace(err)
	}

	switch {
	case resp.StatusCode == http.StatusNotFound:
		resp.Body.Close()
		return nil, MethodNotSupported{call}
	case non2xx(resp.StatusCode):
		derr := decodeError(resp)
		resp.Body.Close()
		return nil, derr
	}
	return resp, nil
}
// apiPost wraps a POST request with a status code check, such that if the POST
// does not return 2xx, the error will be read and returned. The response body
// is not closed on success; the caller owns it.
func apiPost(addr, call, vals, apiPassword string) (*http.Response, error) {
	req, err := http.NewRequest("POST", "http://"+addr+call, strings.NewReader(vals))
	if err != nil {
		// Trace the error for consistency with apiGet's error handling.
		return nil, errors.Trace(err)
	}
	// Identify as a Sia API client posting form-encoded values.
	req.Header.Set("User-Agent", "Sia-Agent")
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	if apiPassword != "" {
		// Sia API authentication uses an empty user name.
		req.SetBasicAuth("", apiPassword)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if resp.StatusCode == http.StatusNotFound {
		resp.Body.Close()
		return nil, MethodNotSupported{call}
	}
	if non2xx(resp.StatusCode) {
		err := decodeError(resp)
		resp.Body.Close()
		return nil, err
	}
	return resp, nil
}
// post issues an API POST call and discards the response. An error is
// returned when the response status is not 2xx.
func post(addr, call, vals, apiPassword string) error {
	resp, perr := apiPost(addr, call, vals, apiPassword)
	if perr != nil {
		return perr
	}
	defer resp.Body.Close()
	return nil
}
// list fetches all uploaded renter files from the Sia daemon and
// decodes the JSON response into obj.
func list(addr string, apiPassword string, obj *renterFiles) error {
	res, gerr := apiGet(addr, "/renter/files", apiPassword)
	if gerr != nil {
		return gerr
	}
	defer res.Body.Close()

	// A 204 carries no payload; report it instead of decoding nothing.
	if res.StatusCode == http.StatusNoContent {
		return fmt.Errorf("Expecting a response, but API returned %s", res.Status)
	}
	return json.NewDecoder(res.Body).Decode(obj)
}
// get makes an API call and discards the response. An error is
// returned if the response status is not 2xx.
func get(addr, call, apiPassword string) error {
	res, gerr := apiGet(addr, call, apiPassword)
	if gerr != nil {
		return gerr
	}
	defer res.Body.Close()
	return nil
}
// Shutdown saves any gateway metadata to disk
// if necessary and reload upon next restart.
// Currently a no-op: the Sia gateway keeps no state that needs persisting.
func (s *siaObjects) Shutdown() error {
	return nil
}
// StorageInfo is not relevant to Sia backend.
// The zero-value StorageInfo is returned; this gateway does not track
// usage or capacity figures.
func (s *siaObjects) StorageInfo() (si minio.StorageInfo) {
	return si
}
// MakeBucketWithLocation creates a new bucket on the Sia backend. Sia
// has no native bucket concept, so a zero-byte marker object named
// after the SHA-256 of the bucket name is uploaded under the bucket's
// prefix. The location argument is ignored.
func (s *siaObjects) MakeBucketWithLocation(bucket, location string) error {
	// Sia uploads take an on-disk source path, so stage an empty temp
	// file to act as the marker object.
	srcFile := path.Join(s.TempDir, minio.MustGetUUID())
	// NOTE(review): the temp file is removed on return while the daemon
	// may still be reading it — preserved from the original; verify
	// against the daemon's upload semantics.
	defer os.Remove(srcFile)

	writer, err := os.Create(srcFile)
	if err != nil {
		return err
	}
	// os.Create already produced the empty file we need; close the
	// descriptor immediately. (The original code leaked this handle and
	// issued a no-op io.Copy of zero bytes.)
	if err = writer.Close(); err != nil {
		return err
	}

	sha256sum := sha256.Sum256([]byte(bucket))
	siaObj := path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
	return post(s.Address, "/renter/upload/"+siaObj, "source="+srcFile, s.password)
}
// GetBucketInfo gets bucket metadata by attempting to download the
// bucket's hidden marker object from Sia; success implies the bucket
// exists.
func (s *siaObjects) GetBucketInfo(bucket string) (bi minio.BucketInfo, err error) {
	sum := sha256.Sum256([]byte(bucket))
	siaObj := path.Join(s.RootDir, bucket, hex.EncodeToString(sum[:]))

	// The download target is a throwaway temp file; only the call's
	// success matters.
	dstFile := path.Join(s.TempDir, minio.MustGetUUID())
	defer os.Remove(dstFile)

	if derr := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); derr != nil {
		return bi, derr
	}
	return minio.BucketInfo{Name: bucket}, nil
}
// ListBuckets will detect and return existing buckets on Sia by
// collecting the first path component of every renter file stored
// beneath this gateway's root directory.
func (s *siaObjects) ListBuckets() (buckets []minio.BucketInfo, err error) {
	sObjs, serr := s.listRenterFiles("")
	if serr != nil {
		return buckets, serr
	}

	names := make(set.StringSet)
	prefix := s.RootDir + "/"
	for _, sObj := range sObjs {
		if !strings.HasPrefix(sObj.SiaPath, prefix) {
			continue
		}
		trimmed := strings.TrimPrefix(sObj.SiaPath, prefix)
		// Keep only entries that contain a bucket/object separator.
		if idx := strings.Index(trimmed, "/"); idx > 0 {
			names.Add(trimmed[:idx])
		}
	}

	for _, bktName := range names.ToSlice() {
		buckets = append(buckets, minio.BucketInfo{
			Name: bktName,
			// Sia does not track creation times; report the Unix epoch.
			Created: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
		})
	}
	return buckets, nil
}
// DeleteBucket deletes a bucket on Sia by removing the hidden marker
// object that was uploaded when the bucket was created.
func (s *siaObjects) DeleteBucket(bucket string) error {
	sum := sha256.Sum256([]byte(bucket))
	marker := path.Join(s.RootDir, bucket, hex.EncodeToString(sum[:]))
	return post(s.Address, "/renter/delete/"+marker, "", s.password)
}
// ListObjects returns the objects stored under bucket whose names begin
// with prefix. The marker, delimiter and maxKeys arguments are accepted
// for interface compatibility but ignored: the Sia renter files API has
// no pagination, so the full listing is returned and IsTruncated is
// always false.
func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
	siaObjs, siaErr := s.listRenterFiles(bucket)
	if siaErr != nil {
		return loi, siaErr
	}

	loi.IsTruncated = false
	loi.NextMarker = ""

	root := s.RootDir + "/"
	// The bucket's hidden marker object is named after this digest and
	// must not appear in listings.
	sum := sha256.Sum256([]byte(bucket))
	markerName := hex.EncodeToString(sum[:])

	// FIXME(harsha) - No paginated output supported for Sia backend right now, only prefix
	// based filtering. Once list renter files API supports paginated output we can support
	// paginated results here as well - until then Listing is an expensive operation.
	bucketPrefix := path.Join(root, bucket) + "/"
	for _, sObj := range siaObjs {
		name := strings.TrimPrefix(sObj.SiaPath, bucketPrefix)
		// Skip the file created specially when bucket was created.
		if name == markerName {
			continue
		}
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		loi.Objects = append(loi.Objects, minio.ObjectInfo{
			Bucket: bucket,
			Name:   name,
			Size:   int64(sObj.Filesize),
			IsDir:  false,
		})
	}
	return loi, nil
}
// GetObject downloads the whole object from Sia into a local temp file
// and streams the requested [startOffset, startOffset+length) byte
// range into writer. A negative length means "to the end of the
// object".
func (s *siaObjects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
	dstFile := path.Join(s.TempDir, minio.MustGetUUID())
	defer os.Remove(dstFile)

	siaObj := path.Join(s.RootDir, bucket, object)
	if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
		return err
	}

	reader, err := os.Open(dstFile)
	if err != nil {
		return err
	}
	defer reader.Close()

	st, err := reader.Stat()
	if err != nil {
		return err
	}
	size := st.Size()

	// For negative length we read everything.
	if length < 0 {
		length = size - startOffset
	}

	// Reply back invalid range if the input offset and length fall out
	// of range. Validate before seeking/copying so nothing is streamed
	// for a request that must fail.
	if startOffset > size || startOffset+length > size {
		return errors.Trace(minio.InvalidRange{
			OffsetBegin:  startOffset,
			OffsetEnd:    length,
			ResourceSize: size,
		})
	}

	// Nothing to copy. Guarding here also avoids io.CopyBuffer's panic
	// on a non-nil zero-length buffer below.
	if length == 0 {
		return nil
	}

	// io.SeekStart replaces the deprecated os.SEEK_SET constant.
	if _, err = reader.Seek(startOffset, io.SeekStart); err != nil {
		return err
	}

	// Cap the staging buffer at 1 MiB or the requested length,
	// whichever is smaller.
	bufSize := int64(1 * humanize.MiByte)
	if bufSize > length {
		bufSize = length
	}

	// Allocate a staging buffer.
	buf := make([]byte, int(bufSize))
	_, err = io.CopyBuffer(writer, io.LimitReader(reader, length), buf)
	return err
}
// findSiaObject retrieves the siaObjectInfo for the Sia object derived
// from the given bucket/object pair, or ObjectNotFound when no renter
// file matches that Sia path.
func (s *siaObjects) findSiaObject(bucket, object string) (siaObjectInfo, error) {
	siaPath := path.Join(s.RootDir, bucket, object)

	sObjs, lerr := s.listRenterFiles("")
	if lerr != nil {
		return siaObjectInfo{}, lerr
	}

	for _, sObj := range sObjs {
		if sObj.SiaPath != siaPath {
			continue
		}
		return sObj, nil
	}

	return siaObjectInfo{}, errors.Trace(minio.ObjectNotFound{
		Bucket: bucket,
		Object: object,
	})
}
// GetObjectInfo reads object info and replies back ObjectInfo. Sia
// exposes only the file size, so the modification time is pinned to the
// Unix epoch and no further metadata is reported.
func (s *siaObjects) GetObjectInfo(bucket string, object string) (minio.ObjectInfo, error) {
	so, ferr := s.findSiaObject(bucket, object)
	if ferr != nil {
		return minio.ObjectInfo{}, ferr
	}
	return minio.ObjectInfo{
		Bucket:  bucket,
		Name:    object,
		ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
		Size:    int64(so.Filesize),
		IsDir:   false,
	}, nil
}
// PutObject creates a new object with the incoming data by staging it
// in a local temp file and asking the Sia daemon to upload that file.
// The temp file is removed once the upload reaches 100% progress (see
// deleteTempFileWhenUploadCompletes). The metadata argument is ignored
// since Sia stores no per-object metadata.
func (s *siaObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
	srcFile := path.Join(s.TempDir, minio.MustGetUUID())

	writer, err := os.Create(srcFile)
	if err != nil {
		return objInfo, err
	}

	wsize, err := io.CopyN(writer, data, data.Size())
	if err != nil {
		writer.Close()
		os.Remove(srcFile)
		return objInfo, err
	}
	// Close before handing the file to the Sia daemon; the original
	// code never closed this descriptor on any path (fd leak).
	if err = writer.Close(); err != nil {
		os.Remove(srcFile)
		return objInfo, err
	}

	if err = post(s.Address, "/renter/upload/"+path.Join(s.RootDir, bucket, object), "source="+srcFile, s.password); err != nil {
		os.Remove(srcFile)
		return objInfo, err
	}
	// Runs synchronously on return, removing srcFile once the daemon
	// reports the upload complete.
	defer s.deleteTempFileWhenUploadCompletes(srcFile, bucket, object)

	return minio.ObjectInfo{
		Name:    object,
		Bucket:  bucket,
		ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
		Size:    wsize,
		ETag:    minio.GenETag(),
	}, nil
}
// DeleteObject deletes a blob in bucket by instructing the Sia daemon
// to remove the corresponding renter file.
func (s *siaObjects) DeleteObject(bucket string, object string) error {
	target := path.Join(s.RootDir, bucket, object)
	return post(s.Address, "/renter/delete/"+target, "", s.password)
}
// siaObjectInfo represents object info stored on Sia, mirroring an
// entry of the renter file listing returned by the daemon's
// /renter/files endpoint.
type siaObjectInfo struct {
	SiaPath string `json:"siapath"` // full path of the file within the Sia renter
	LocalPath string `json:"localpath"` // local source path as reported by the daemon
	Filesize uint64 `json:"filesize"` // size in bytes; used for ObjectInfo.Size
	Available bool `json:"available"` // availability flag reported by the daemon — see Sia API docs
	Renewing bool `json:"renewing"` // renewal flag reported by the daemon — see Sia API docs
	Redundancy float64 `json:"redundancy"` // redundancy factor reported by the daemon
	UploadProgress float64 `json:"uploadprogress"` // upload percentage; 100 means fully uploaded
}

// renterFiles is the JSON envelope of the /renter/files response.
type renterFiles struct {
	Files []siaObjectInfo `json:"files"`
}
// listRenterFiles will return a list of existing objects in the bucket
// provided. An empty bucket name matches every file stored under this
// gateway's root directory.
func (s *siaObjects) listRenterFiles(bucket string) (siaObjs []siaObjectInfo, err error) {
	// Fetch the complete renter file listing from the daemon.
	var rf renterFiles
	if err = list(s.Address, s.password, &rf); err != nil {
		return siaObjs, err
	}

	root := s.RootDir + "/"
	prefix := root
	if bucket != "" {
		prefix = root + bucket + "/"
	}

	for _, f := range rf.Files {
		if strings.HasPrefix(f.SiaPath, prefix) {
			siaObjs = append(siaObjs, f)
		}
	}
	return siaObjs, nil
}
// deleteTempFileWhenUploadCompletes checks the status of a Sia file upload
// until it reaches 100% upload progress, then deletes the local temp copy from
// the filesystem. If the object can no longer be found, the poll is
// abandoned and the temp file is removed anyway.
//
// NOTE(review): this polls synchronously (15s sleep per iteration) and
// is invoked via defer from PutObject, so PutObject blocks until the
// upload completes or the lookup fails — confirm this is intended.
func (s *siaObjects) deleteTempFileWhenUploadCompletes(tempFile string, bucket, object string) {
	var soi siaObjectInfo
	// Wait until 100% upload instead of 1x redundancy because if we delete
	// after 1x redundancy, the user has to pay the cost of other hosts
	// redistributing the file.
	for soi.UploadProgress < 100.0 {
		var err error
		soi, err = s.findSiaObject(bucket, object)
		if err != nil {
			minio.ErrorIf(err, "Unable to find file uploaded to Sia path %s/%s", bucket, object)
			break
		}
		// Sleep between each check so that we're not hammering
		// the Sia daemon with requests.
		time.Sleep(15 * time.Second)
	}
	os.Remove(tempFile)
}

View File

@@ -0,0 +1,32 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sia
import (
"testing"
)
// TestSianon2xx verifies non2xx over a wide range of candidate status
// codes against an independently computed expectation.
func TestSianon2xx(t *testing.T) {
	for code := 0; code < 1000; code++ {
		want := code < 200 || code > 299
		if got := non2xx(code); got != want {
			t.Errorf("Test case %d: non2xx(%d) returned %t but expected %t", code+1, code, got, want)
		}
	}
}