Revert and bring back B2 gateway implementation (#7224)

This PR is simply a revert of commit 3265112d04, restricted to the B2 gateway.
Harshavardhana 2019-02-11 23:14:22 -08:00 committed by Nitish Tiwari
parent b8955fe577
commit 9f87283cd5
12 changed files with 2658 additions and 60 deletions


@@ -0,0 +1,850 @@
/*
* Minio Cloud Storage, (C) 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package b2
import (
"context"
"crypto/sha1"
"fmt"
"hash"
"io"
"io/ioutil"
"net/http"
"strings"
"sync"
"time"
b2 "github.com/minio/blazer/base"
"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
h2 "github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/policy/condition"
minio "github.com/minio/minio/cmd"
)
// Supported bucket types by B2 backend.
const (
bucketTypePrivate = "allPrivate"
bucketTypeReadOnly = "allPublic"
b2Backend = "b2"
)
func init() {
const b2GatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}}
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: B2 account id.
MINIO_SECRET_KEY: B2 application key.
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name.
CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
EXAMPLES:
1. Start minio gateway server for B2 backend.
$ export MINIO_ACCESS_KEY=accountID
$ export MINIO_SECRET_KEY=applicationKey
$ {{.HelpName}}
2. Start minio gateway server for B2 backend with edge caching enabled.
$ export MINIO_ACCESS_KEY=accountID
$ export MINIO_SECRET_KEY=applicationKey
$ export MINIO_CACHE_DRIVES="/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ export MINIO_CACHE_MAXUSE=80
$ {{.HelpName}}
`
minio.RegisterGatewayCommand(cli.Command{
Name: b2Backend,
Usage: "Backblaze B2",
Action: b2GatewayMain,
CustomHelpTemplate: b2GatewayTemplate,
HideHelpCommand: true,
})
}
// Handler for 'minio gateway b2' command line.
func b2GatewayMain(ctx *cli.Context) {
minio.StartGateway(ctx, &B2{})
}
// B2 implements Minio Gateway
type B2 struct{}
// Name implements Gateway interface.
func (g *B2) Name() string {
return b2Backend
}
// NewGatewayLayer returns b2 gateway layer, implements ObjectLayer interface to
// talk to B2 remote backend.
func (g *B2) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
ctx := context.Background()
client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport()))
if err != nil {
return nil, err
}
return &b2Objects{
creds: creds,
b2Client: client,
ctx: ctx,
}, nil
}
// Production - Ready for production use.
func (g *B2) Production() bool {
return true
}
// b2Objects implements the gateway layer for Minio on top of Backblaze B2 compatible object storage servers.
type b2Objects struct {
minio.GatewayUnsupported
mu sync.Mutex
creds auth.Credentials
b2Client *b2.B2
ctx context.Context
}
// Convert B2 errors to minio object layer errors.
func b2ToObjectError(err error, params ...string) error {
if err == nil {
return nil
}
bucket := ""
object := ""
uploadID := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) >= 2 {
object = params[1]
}
if len(params) >= 3 {
uploadID = params[2]
}
// The following code is a non-exhaustive check to convert
// B2 errors into S3-compatible errors.
//
// For more complete information see https://www.backblaze.com/b2/docs/
statusCode, code, msg := b2.Code(err)
if statusCode == 0 {
// We don't interpret non B2 errors. B2 errors have statusCode
// to help us convert them to S3 object errors.
return err
}
switch code {
case "duplicate_bucket_name":
err = minio.BucketAlreadyOwnedByYou{Bucket: bucket}
case "bad_request":
if object != "" {
err = minio.ObjectNameInvalid{
Bucket: bucket,
Object: object,
}
} else if bucket != "" {
err = minio.BucketNotFound{Bucket: bucket}
}
case "bad_json":
if object != "" {
err = minio.ObjectNameInvalid{
Bucket: bucket,
Object: object,
}
} else if bucket != "" {
err = minio.BucketNameInvalid{Bucket: bucket}
}
case "bad_bucket_id":
err = minio.BucketNotFound{Bucket: bucket}
case "file_not_present", "not_found":
err = minio.ObjectNotFound{
Bucket: bucket,
Object: object,
}
case "cannot_delete_non_empty_bucket":
err = minio.BucketNotEmpty{Bucket: bucket}
}
// Special interpretation like this is required for Multipart sessions.
if strings.Contains(msg, "No active upload for") && uploadID != "" {
err = minio.InvalidUploadID{UploadID: uploadID}
}
return err
}
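// exampleErrorMapping is a minimal sketch, added for illustration and not part
// of the original commit: it shows how a raw B2 error surfaces to S3 clients
// once it passes through b2ToObjectError. The bucket name "photos" is made up.
func exampleErrorMapping() error {
// B2 reports a duplicate bucket with code "duplicate_bucket_name"...
b2Err := b2.Error{StatusCode: 409, Code: "duplicate_bucket_name"}
// ...which converts to minio.BucketAlreadyOwnedByYou{Bucket: "photos"}.
return b2ToObjectError(b2Err, "photos")
}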
// Shutdown saves any gateway metadata to disk
// if necessary and reload upon next restart.
func (l *b2Objects) Shutdown(ctx context.Context) error {
// TODO
return nil
}
// StorageInfo is not relevant to B2 backend.
func (l *b2Objects) StorageInfo(ctx context.Context) (si minio.StorageInfo) {
return si
}
// MakeBucket creates a new container on B2 backend.
func (l *b2Objects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
// location is ignored for B2 backend.
// All buckets are set to private by default.
_, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket)
}
func (l *b2Objects) reAuthorizeAccount(ctx context.Context) error {
client, err := b2.AuthorizeAccount(l.ctx, l.creds.AccessKey, l.creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport()))
if err != nil {
return err
}
l.mu.Lock()
l.b2Client.Update(client)
l.mu.Unlock()
return nil
}
// listBuckets is a wrapper around ListBuckets which safely re-authorizes
// the account and updates the B2 client when the previous call failed with
// a re-authentication error. Once successfully re-authorized it performs
// the call again and returns the list of buckets; errors which are not
// actionable are returned as-is.
func (l *b2Objects) listBuckets(ctx context.Context, err error) ([]*b2.Bucket, error) {
if err != nil {
if b2.Action(err) != b2.ReAuthenticate {
return nil, err
}
if rerr := l.reAuthorizeAccount(ctx); rerr != nil {
return nil, rerr
}
}
bktList, lerr := l.b2Client.ListBuckets(l.ctx)
if lerr != nil {
return l.listBuckets(ctx, lerr)
}
return bktList, nil
}
// Bucket - is a helper which provides a *Bucket instance
// for performing an API operation. The B2 API doesn't
// provide a direct way to look up a bucket by name, so we
// list all buckets and match on the name.
func (l *b2Objects) Bucket(ctx context.Context, bucket string) (*b2.Bucket, error) {
bktList, err := l.listBuckets(ctx, nil)
if err != nil {
logger.LogIf(ctx, err)
return nil, b2ToObjectError(err, bucket)
}
for _, bkt := range bktList {
if bkt.Name == bucket {
return bkt, nil
}
}
return nil, minio.BucketNotFound{Bucket: bucket}
}
// GetBucketInfo gets bucket metadata.
func (l *b2Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
if _, err = l.Bucket(ctx, bucket); err != nil {
return bi, err
}
return minio.BucketInfo{
Name: bucket,
Created: time.Unix(0, 0),
}, nil
}
// ListBuckets lists all B2 buckets
func (l *b2Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) {
bktList, err := l.listBuckets(ctx, nil)
if err != nil {
return nil, err
}
var bktInfo []minio.BucketInfo
for _, bkt := range bktList {
bktInfo = append(bktInfo, minio.BucketInfo{
Name: bkt.Name,
Created: time.Unix(0, 0),
})
}
return bktInfo, nil
}
// DeleteBucket deletes a bucket on B2
func (l *b2Objects) DeleteBucket(ctx context.Context, bucket string) error {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return err
}
err = bkt.DeleteBucket(l.ctx)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket)
}
// ListObjects lists all objects in a B2 bucket filtered by prefix; returns up to 1000 entries at a time.
func (l *b2Objects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return loi, err
}
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
if lerr != nil {
logger.LogIf(ctx, lerr)
return loi, b2ToObjectError(lerr, bucket)
}
loi.IsTruncated = next != ""
loi.NextMarker = next
for _, file := range files {
switch file.Status {
case "folder":
loi.Prefixes = append(loi.Prefixes, file.Name)
case "upload":
loi.Objects = append(loi.Objects, minio.ObjectInfo{
Bucket: bucket,
Name: file.Name,
ModTime: file.Timestamp,
Size: file.Size,
ETag: minio.ToS3ETag(file.Info.ID),
ContentType: file.Info.ContentType,
UserDefined: file.Info.Info,
})
}
}
return loi, nil
}
// ListObjectsV2 lists all objects in a B2 bucket filtered by prefix; returns up to 1000 entries at a time.
func (l *b2Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int,
fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
// fetchOwner is not supported and unused.
marker := continuationToken
if marker == "" {
// B2's continuation token is an object name to "start at" rather than "start after"
// startAfter plus the lowest character B2 supports is used so that the startAfter
// object isn't included in the results
marker = startAfter + " "
}
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return loi, err
}
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
if lerr != nil {
logger.LogIf(ctx, lerr)
return loi, b2ToObjectError(lerr, bucket)
}
loi.IsTruncated = next != ""
loi.ContinuationToken = continuationToken
loi.NextContinuationToken = next
for _, file := range files {
switch file.Status {
case "folder":
loi.Prefixes = append(loi.Prefixes, file.Name)
case "upload":
loi.Objects = append(loi.Objects, minio.ObjectInfo{
Bucket: bucket,
Name: file.Name,
ModTime: file.Timestamp,
Size: file.Size,
ETag: minio.ToS3ETag(file.Info.ID),
ContentType: file.Info.ContentType,
UserDefined: file.Info.Info,
})
}
}
return loi, nil
}
// GetObjectNInfo - returns object info and locked object ReadCloser
func (l *b2Objects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
var objInfo minio.ObjectInfo
objInfo, err = l.GetObjectInfo(ctx, bucket, object, opts)
if err != nil {
return nil, err
}
var startOffset, length int64
startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
go func() {
err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
pw.CloseWithError(err)
}()
// Set up a cleanup function to cause the above goroutine to
// exit in case of a partial read.
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
}
// GetObject reads an object from B2. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *b2Objects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return err
}
reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length)
if err != nil {
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
}
defer reader.Close()
_, err = io.Copy(writer, reader)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
}
// GetObjectInfo reads object info and replies back ObjectInfo
func (l *b2Objects) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return objInfo, err
}
f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
if err != nil {
logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
}
f.Close()
fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx)
if err != nil {
logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
}
return minio.ObjectInfo{
Bucket: bucket,
Name: object,
ETag: minio.ToS3ETag(fi.ID),
Size: fi.Size,
ModTime: fi.Timestamp,
ContentType: fi.ContentType,
UserDefined: fi.Info,
}, nil
}
// In B2 - You must always include the X-Bz-Content-Sha1 header with
// your upload request. The value you provide can be:
// (1) the 40-character hex checksum of the file,
// (2) the string hex_digits_at_end, or
// (3) the string do_not_verify.
// For more reference - https://www.backblaze.com/b2/docs/uploading.html
//
// In our case we are going to use option (2).
const sha1AtEOF = "hex_digits_at_end"
// With option (2), you append the 40-character hex sha1 to the end of the
// request body, immediately after the contents of the file being uploaded,
// so the content length is the size of the file plus 40 bytes.
//
// newB2Reader implements a B2 compatible reader by wrapping the hash.Reader into
// a new io.Reader which will emit the sha1 hex digits at io.EOF.
// This means the overall content size is now the original size + 40 bytes.
// Additionally this reader also verifies the hash encapsulated inside hash.Reader
// at io.EOF; if the verification fails we return an error and do not send
// the content to the server.
func newB2Reader(r *h2.Reader, size int64) *Reader {
return &Reader{
r: r,
size: size,
sha1Hash: sha1.New(),
}
}
// Reader - wraps the hash.Reader and emits the sha1 hex digits at io.EOF,
// which means the overall content size is now the original size + 40 bytes.
// Additionally this reader also verifies the hash encapsulated inside
// hash.Reader at io.EOF; if the verification fails we return an error and
// do not send the content to the server.
type Reader struct {
r *h2.Reader
size int64
sha1Hash hash.Hash
isEOF bool
buf *strings.Reader
}
// Size - Returns the total size of Reader.
func (nb *Reader) Size() int64 { return nb.size + 40 }
func (nb *Reader) Read(p []byte) (int, error) {
if nb.isEOF {
return nb.buf.Read(p)
}
// Read into hash to update the ongoing checksum.
n, err := io.TeeReader(nb.r, nb.sha1Hash).Read(p)
if err == io.EOF {
// Stream is not corrupted on this end
// now fill in the last 40 bytes of sha1 hex
// so that the server can verify the stream on
// their end.
err = nil
nb.isEOF = true
nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.sha1Hash.Sum(nil)))
}
return n, err
}
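// exampleSha1AtEOF is a standalone sketch, added for illustration and not part
// of the original commit. It demonstrates the hex_digits_at_end framing that
// the Reader above implements: the payload is followed by the 40 hex characters
// of its SHA-1, so the body B2 receives is len(payload)+40 bytes long.
func exampleSha1AtEOF(payload string) (int64, io.Reader) {
sum := sha1.Sum([]byte(payload))
// 40-character hex digest appended immediately after the payload.
trailer := fmt.Sprintf("%x", sum[:])
body := io.MultiReader(strings.NewReader(payload), strings.NewReader(trailer))
return int64(len(payload)) + 40, body
}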
// PutObject uploads a single object to the B2 backend using the *b2_upload_file* API; uploads of up to 5GiB are supported.
func (l *b2Objects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
data := r.Reader
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return objInfo, err
}
contentType := opts.UserDefined["content-type"]
delete(opts.UserDefined, "content-type")
var u *b2.URL
u, err = bkt.GetUploadURL(l.ctx)
if err != nil {
logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
}
hr := newB2Reader(data, data.Size())
var f *b2.File
f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, opts.UserDefined)
if err != nil {
logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
}
var fi *b2.FileInfo
fi, err = f.GetFileInfo(l.ctx)
if err != nil {
logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
}
return minio.ObjectInfo{
Bucket: bucket,
Name: object,
ETag: minio.ToS3ETag(fi.ID),
Size: fi.Size,
ModTime: fi.Timestamp,
ContentType: fi.ContentType,
UserDefined: fi.Info,
}, nil
}
// DeleteObject deletes a blob in bucket
func (l *b2Objects) DeleteObject(ctx context.Context, bucket string, object string) error {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return err
}
reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
if err != nil {
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
}
io.Copy(ioutil.Discard, reader)
reader.Close()
err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
}
// ListMultipartUploads lists all multipart uploads.
func (l *b2Objects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string,
delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) {
// keyMarker, prefix and delimiter are all ignored; Backblaze B2 doesn't support
// any of these parameters. The only equivalent parameter is uploadIDMarker.
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return lmi, err
}
// The maximum number of files to return from this call.
// The default value is 100, and the maximum allowed is 100.
if maxUploads > 100 {
maxUploads = 100
}
largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads)
if err != nil {
logger.LogIf(ctx, err)
return lmi, b2ToObjectError(err, bucket)
}
lmi = minio.ListMultipartsInfo{
MaxUploads: maxUploads,
}
if nextMarker != "" {
lmi.IsTruncated = true
lmi.NextUploadIDMarker = nextMarker
}
for _, largeFile := range largeFiles {
lmi.Uploads = append(lmi.Uploads, minio.MultipartInfo{
Object: largeFile.Name,
UploadID: largeFile.ID,
Initiated: largeFile.Timestamp,
})
}
return lmi, nil
}
// NewMultipartUpload initiates an upload of an object in multiple parts, uses B2's LargeFile upload API.
// Large files can range in size from 5MB to 10TB.
// Each large file must consist of at least 2 parts, and all of the parts except the
// last one must be at least 5MB in size. The last part must contain at least one byte.
// For more information - https://www.backblaze.com/b2/docs/large_files.html
func (l *b2Objects) NewMultipartUpload(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (string, error) {
var uploadID string
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return uploadID, err
}
contentType := opts.UserDefined["content-type"]
delete(opts.UserDefined, "content-type")
lf, err := bkt.StartLargeFile(l.ctx, object, contentType, opts.UserDefined)
if err != nil {
logger.LogIf(ctx, err)
return uploadID, b2ToObjectError(err, bucket, object)
}
return lf.ID, nil
}
// PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API.
func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, err error) {
data := r.Reader
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return pi, err
}
fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
if err != nil {
logger.LogIf(ctx, err)
return pi, b2ToObjectError(err, bucket, object, uploadID)
}
hr := newB2Reader(data, data.Size())
sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID)
if err != nil {
logger.LogIf(ctx, err)
return pi, b2ToObjectError(err, bucket, object, uploadID)
}
return minio.PartInfo{
PartNumber: partID,
LastModified: minio.UTCNow(),
ETag: minio.ToS3ETag(sha1),
Size: data.Size(),
}, nil
}
// ListObjectParts returns all object parts for specified object in specified bucket, uses B2's LargeFile upload API.
func (l *b2Objects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, err error) {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return lpi, err
}
lpi = minio.ListPartsInfo{
Bucket: bucket,
Object: object,
UploadID: uploadID,
MaxParts: maxParts,
PartNumberMarker: partNumberMarker,
}
// startPartNumber must be in the range 1 - 10000 for B2.
partNumberMarker++
partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
if err != nil {
logger.LogIf(ctx, err)
return lpi, b2ToObjectError(err, bucket, object, uploadID)
}
if next != 0 {
lpi.IsTruncated = true
lpi.NextPartNumberMarker = next
}
for _, part := range partsList {
lpi.Parts = append(lpi.Parts, minio.PartInfo{
PartNumber: part.Number,
ETag: minio.ToS3ETag(part.SHA1),
Size: part.Size,
})
}
return lpi, nil
}
// AbortMultipartUpload aborts an ongoing multipart upload, uses B2's LargeFile upload API.
func (l *b2Objects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return err
}
err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object, uploadID)
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API.
func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, err error) {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return oi, err
}
hashes := make(map[int]string)
for i, uploadedPart := range uploadedParts {
// B2 requires contiguous part numbers starting with 1; it does not support
// hand-picking part numbers, so we return an S3-compatible error instead.
if i+1 != uploadedPart.PartNumber {
logger.LogIf(ctx, minio.InvalidPart{})
return oi, b2ToObjectError(minio.InvalidPart{}, bucket, object, uploadID)
}
// Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag.
hashes[uploadedPart.PartNumber] = strings.TrimSuffix(uploadedPart.ETag, "-1")
}
if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
logger.LogIf(ctx, err)
return oi, b2ToObjectError(err, bucket, object, uploadID)
}
return l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
}
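// exampleMultipartFlow is a sketch, added for illustration and not part of the
// original commit: it walks the calls above in the order an S3 multipart upload
// drives them. The bucket, object and part readers are placeholders.
func exampleMultipartFlow(ctx context.Context, l *b2Objects, r1, r2 *minio.PutObjReader) (minio.ObjectInfo, error) {
opts := minio.ObjectOptions{}
// b2_start_large_file
uploadID, err := l.NewMultipartUpload(ctx, "bucket", "object", opts)
if err != nil {
return minio.ObjectInfo{}, err
}
// b2_upload_part; part numbers must be contiguous and start at 1.
p1, err := l.PutObjectPart(ctx, "bucket", "object", uploadID, 1, r1, opts)
if err != nil {
return minio.ObjectInfo{}, err
}
p2, err := l.PutObjectPart(ctx, "bucket", "object", uploadID, 2, r2, opts)
if err != nil {
return minio.ObjectInfo{}, err
}
// b2_finish_large_file; the part SHA1s travel back in the ETags.
return l.CompleteMultipartUpload(ctx, "bucket", "object", uploadID, []minio.CompletePart{
{PartNumber: 1, ETag: p1.ETag},
{PartNumber: 2, ETag: p2.ETag},
}, opts)
}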
// SetBucketPolicy - B2 supports 2 types of bucket policies:
// bucketType.AllPublic - bucketTypeReadOnly means that anybody can download the files in the bucket;
// bucketType.AllPrivate - bucketTypePrivate means that you need an authorization token to download them.
// Default is AllPrivate for all buckets.
func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error {
policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy)
if err != nil {
// This should not happen.
return b2ToObjectError(err, bucket)
}
var policies []minio.BucketAccessPolicy
for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") {
policies = append(policies, minio.BucketAccessPolicy{
Prefix: prefix,
Policy: policy,
})
}
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
if policies[0].Prefix != prefix {
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
if policies[0].Policy != miniogopolicy.BucketPolicyReadOnly {
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return err
}
bkt.Type = bucketTypeReadOnly
_, err = bkt.Update(l.ctx)
logger.LogIf(ctx, err)
return b2ToObjectError(err)
}
// GetBucketPolicy returns the current bucketType from the B2 backend and converts
// it into S3-compatible bucket policy info.
func (l *b2Objects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return nil, err
}
// bkt.Type can also be "snapshot", but that is only settable through the B2
// web console; for any type other than read-only we report that no bucket
// policy is found. CreateBucket always sets the type to allPrivate by default.
if bkt.Type != bucketTypeReadOnly {
return nil, minio.BucketPolicyNotFound{Bucket: bucket}
}
return &policy.Policy{
Version: policy.DefaultVersion,
Statements: []policy.Statement{
policy.NewStatement(
policy.Allow,
policy.NewPrincipal("*"),
policy.NewActionSet(
policy.GetBucketLocationAction,
policy.ListBucketAction,
policy.GetObjectAction,
),
policy.NewResourceSet(
policy.NewResource(bucket, ""),
policy.NewResource(bucket, "*"),
),
condition.NewFunctions(),
),
},
}, nil
}
// DeleteBucketPolicy - resets the bucketType of bucket on B2 to 'allPrivate'.
func (l *b2Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return err
}
bkt.Type = bucketTypePrivate
_, err = bkt.Update(l.ctx)
logger.LogIf(ctx, err)
return b2ToObjectError(err)
}
// IsCompressionSupported returns whether compression is applicable for this layer.
func (l *b2Objects) IsCompressionSupported() bool {
return false
}


@@ -0,0 +1,119 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package b2
import (
"fmt"
"testing"
b2 "github.com/minio/blazer/base"
minio "github.com/minio/minio/cmd"
)
// Test b2 object error.
func TestB2ObjectError(t *testing.T) {
testCases := []struct {
params []string
b2Err error
expectedErr error
}{
{
[]string{}, nil, nil,
},
{
[]string{}, fmt.Errorf("Not *Error"), fmt.Errorf("Not *Error"),
},
{
[]string{}, fmt.Errorf("Non B2 Error"), fmt.Errorf("Non B2 Error"),
},
{
[]string{"bucket"}, b2.Error{
StatusCode: 1,
Code: "duplicate_bucket_name",
}, minio.BucketAlreadyOwnedByYou{
Bucket: "bucket",
},
},
{
[]string{"bucket"}, b2.Error{
StatusCode: 1,
Code: "bad_request",
}, minio.BucketNotFound{
Bucket: "bucket",
},
},
{
[]string{"bucket", "object"}, b2.Error{
StatusCode: 1,
Code: "bad_request",
}, minio.ObjectNameInvalid{
Bucket: "bucket",
Object: "object",
},
},
{
[]string{"bucket"}, b2.Error{
StatusCode: 1,
Code: "bad_bucket_id",
}, minio.BucketNotFound{Bucket: "bucket"},
},
{
[]string{"bucket", "object"}, b2.Error{
StatusCode: 1,
Code: "file_not_present",
}, minio.ObjectNotFound{
Bucket: "bucket",
Object: "object",
},
},
{
[]string{"bucket", "object"}, b2.Error{
StatusCode: 1,
Code: "not_found",
}, minio.ObjectNotFound{
Bucket: "bucket",
Object: "object",
},
},
{
[]string{"bucket"}, b2.Error{
StatusCode: 1,
Code: "cannot_delete_non_empty_bucket",
}, minio.BucketNotEmpty{
Bucket: "bucket",
},
},
{
[]string{"bucket", "object", "uploadID"}, b2.Error{
StatusCode: 1,
Message: "No active upload for",
}, minio.InvalidUploadID{
UploadID: "uploadID",
},
},
}
for i, testCase := range testCases {
actualErr := b2ToObjectError(testCase.b2Err, testCase.params...)
if actualErr == nil && testCase.expectedErr != nil {
t.Errorf("Test %d: Expected %v, got nil", i+1, testCase.expectedErr)
}
if actualErr != nil && (testCase.expectedErr == nil || actualErr.Error() != testCase.expectedErr.Error()) {
t.Errorf("Test %d: Expected %v, got %v", i+1, testCase.expectedErr, actualErr)
}
}
}


@@ -23,5 +23,9 @@ import (
_ "github.com/minio/minio/cmd/gateway/nas"
_ "github.com/minio/minio/cmd/gateway/oss"
_ "github.com/minio/minio/cmd/gateway/s3"
// B2 is specifically kept here to avoid re-ordering by goimports,
// please ask on github.com/minio/minio/issues before changing this.
_ "github.com/minio/minio/cmd/gateway/b2"
// Add your gateway here.
)


@@ -5,3 +5,5 @@ Minio Gateway adds Amazon S3 compatibility to third party cloud storage providers.
- [S3](https://github.com/minio/minio/blob/master/docs/gateway/s3.md)
- [Google Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/gcs.md)
- [Alibaba Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/oss.md)
- [Backblaze B2](https://github.com/minio/minio/blob/master/docs/gateway/b2.md)

docs/gateway/b2.md Normal file

@@ -0,0 +1,58 @@
# Minio B2 Gateway [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)
Minio Gateway adds Amazon S3 compatibility to Backblaze B2 Cloud Storage.
## Run Minio Gateway for Backblaze B2 Cloud Storage
Please follow this [guide](https://www.backblaze.com/b2/docs/quick_account.html) to create an account on backblaze.com and obtain your access credentials for B2 Cloud Storage.
### Using Docker
```
docker run -p 9000:9000 --name b2-s3 \
-e "MINIO_ACCESS_KEY=b2_account_id" \
-e "MINIO_SECRET_KEY=b2_application_key" \
minio/minio gateway b2
```
### Using Binary
```
export MINIO_ACCESS_KEY=b2_account_id
export MINIO_SECRET_KEY=b2_application_key
minio gateway b2
```
## Test using Minio Browser
Minio Gateway comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure that your server has started successfully.
![Screenshot](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/minio-browser-gateway.png)
## Test using Minio Client `mc`
`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services.
### Configure `mc`
```
mc config host add myb2 http://gateway-ip:9000 b2_account_id b2_application_key
```
### List buckets on Backblaze B2
```
mc ls myb2
[2017-02-22 01:50:43 PST] 0B ferenginar/
[2017-02-26 21:43:51 PST] 0B my-bucket/
[2017-02-26 22:10:11 PST] 0B test-bucket1/
```
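### Copy a file to Backblaze B2
As a quick sanity check you can copy a local file into one of the buckets and read it back; `my-bucket` and `myphoto.jpg` below are placeholder names:
```
mc cp ./myphoto.jpg myb2/my-bucket
mc cp myb2/my-bucket/myphoto.jpg ./myphoto-copy.jpg
```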
### Known limitations
Gateway inherits the following B2 limitations:
- No support for the CopyObject S3 API (there is no equivalent API available on Backblaze B2).
- No support for the CopyObjectPart S3 API (there is no equivalent API available on Backblaze B2).
- Only a read-only bucket policy is supported at the bucket level; all other variations return a NotImplemented API error.
- DeleteObject() might not delete the object right away on Backblaze B2, so you might still see the object immediately after a delete request.
Other limitations:
- Bucket notification APIs are not supported.
## Explore Further
- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide)
- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio)
- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide)


@@ -1,54 +0,0 @@
// Copyright 2014 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package internal // import "github.com/garyburd/redigo/internal"
import (
"strings"
)
const (
WatchState = 1 << iota
MultiState
SubscribeState
MonitorState
)
type CommandInfo struct {
Set, Clear int
}
var commandInfos = map[string]CommandInfo{
"WATCH": {Set: WatchState},
"UNWATCH": {Clear: WatchState},
"MULTI": {Set: MultiState},
"EXEC": {Clear: WatchState | MultiState},
"DISCARD": {Clear: WatchState | MultiState},
"PSUBSCRIBE": {Set: SubscribeState},
"SUBSCRIBE": {Set: SubscribeState},
"MONITOR": {Set: MonitorState},
}
func init() {
for n, ci := range commandInfos {
commandInfos[strings.ToLower(n)] = ci
}
}
func LookupCommandInfo(commandName string) CommandInfo {
if ci, ok := commandInfos[commandName]; ok {
return ci
}
return commandInfos[strings.ToUpper(commandName)]
}

vendor/github.com/minio/blazer/LICENSE generated vendored Normal file

@@ -0,0 +1,13 @@
Copyright 2016, Google
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/minio/blazer/base/base.go generated vendored Normal file

File diff suppressed because it is too large.

vendor/github.com/minio/blazer/base/strings.go generated vendored Normal file

@@ -0,0 +1,81 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package base
import (
"bytes"
"errors"
"fmt"
)
func noEscape(c byte) bool {
switch c {
case '.', '_', '-', '/', '~', '!', '$', '\'', '(', ')', '*', ';', '=', ':', '@':
return true
}
return false
}
func escape(s string) string {
// cribbed from url.go, kinda
b := &bytes.Buffer{}
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == '/':
b.WriteByte(c)
case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9':
b.WriteByte(c)
case noEscape(c):
b.WriteByte(c)
default:
fmt.Fprintf(b, "%%%X", c)
}
}
return b.String()
}
func unescape(s string) (string, error) {
b := &bytes.Buffer{}
for i := 0; i < len(s); i++ {
c := s[i]
switch c {
case '/':
b.WriteString("/")
case '+':
b.WriteString(" ")
case '%':
if len(s)-i < 3 {
return "", errors.New("unescape: bad encoding")
}
b.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
i += 2
default:
b.WriteByte(c)
}
}
return b.String(), nil
}
func unhex(c byte) byte {
switch {
case '0' <= c && c <= '9':
return c - '0'
case 'a' <= c && c <= 'f':
return c - 'a' + 10
case 'A' <= c && c <= 'F':
return c - 'A' + 10
}
return 0
}
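// exampleEscapeRoundTrip is a sketch added for illustration; it is not part of
// the original file. It demonstrates the round-trip property of the helpers
// above: '/' passes through untouched while reserved bytes are %XX-escaped.
func exampleEscapeRoundTrip() (string, error) {
enc := escape("photos/summer 2017/a+b.jpg") // "photos/summer%202017/a%2Bb.jpg"
return unescape(enc)                        // restores the original name
}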


@@ -0,0 +1,255 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package b2types implements internal types common to the B2 API.
package b2types
// You know what would be amazing? If I could autogen this from like a JSON
// file. Wouldn't that be amazing? That would be amazing.
const (
V1api = "/b2api/v1/"
)
type ErrorMessage struct {
Status int `json:"status"`
Code string `json:"code"`
Msg string `json:"message"`
}
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"`
AuthToken string `json:"authorizationToken"`
URI string `json:"apiUrl"`
DownloadURI string `json:"downloadUrl"`
MinPartSize int `json:"minimumPartSize"`
}
type LifecycleRule struct {
DaysHiddenUntilDeleted int `json:"daysFromHidingToDeleting,omitempty"`
DaysNewUntilHidden int `json:"daysFromUploadingToHiding,omitempty"`
Prefix string `json:"fileNamePrefix"`
}
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
Info map[string]string `json:"bucketInfo"`
LifecycleRules []LifecycleRule `json:"lifecycleRules"`
}
type CreateBucketResponse struct {
BucketID string `json:"bucketId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
Info map[string]string `json:"bucketInfo"`
LifecycleRules []LifecycleRule `json:"lifecycleRules"`
Revision int `json:"revision"`
}
type DeleteBucketRequest struct {
AccountID string `json:"accountId"`
BucketID string `json:"bucketId"`
}
type ListBucketsRequest struct {
AccountID string `json:"accountId"`
}
type ListBucketsResponse struct {
Buckets []CreateBucketResponse `json:"buckets"`
}
type UpdateBucketRequest struct {
AccountID string `json:"accountId"`
BucketID string `json:"bucketId"`
// bucketName is a required field according to
// https://www.backblaze.com/b2/docs/b2_update_bucket.html.
//
// However, actually setting it returns 400: unknown field in
// com.backblaze.modules.b2.data.UpdateBucketRequest: bucketName
//
//Name string `json:"bucketName"`
Type string `json:"bucketType,omitempty"`
Info map[string]string `json:"bucketInfo,omitempty"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
IfRevisionIs int `json:"ifRevisionIs,omitempty"`
}
type UpdateBucketResponse CreateBucketResponse
type GetUploadURLRequest struct {
BucketID string `json:"bucketId"`
}
type GetUploadURLResponse struct {
URI string `json:"uploadUrl"`
Token string `json:"authorizationToken"`
}
type UploadFileResponse struct {
FileID string `json:"fileId"`
Timestamp int64 `json:"uploadTimestamp"`
Action string `json:"action"`
}
type DeleteFileVersionRequest struct {
Name string `json:"fileName"`
FileID string `json:"fileId"`
}
type StartLargeFileRequest struct {
BucketID string `json:"bucketId"`
Name string `json:"fileName"`
ContentType string `json:"contentType"`
Info map[string]string `json:"fileInfo,omitempty"`
}
type StartLargeFileResponse struct {
ID string `json:"fileId"`
}
type CancelLargeFileRequest struct {
ID string `json:"fileId"`
}
type ListUnfinishedLargeFilesRequest struct {
BucketID string `json:"bucketId"`
Continuation string `json:"startFileId,omitempty"`
Count int `json:"maxFileCount,omitempty"`
}
type ListUnfinishedLargeFilesResponse struct {
NextID string `json:"nextFileId"`
Files []struct {
AccountID string `json:"accountId"`
BucketID string `json:"bucketId"`
Name string `json:"fileName"`
ID string `json:"fileId"`
Timestamp int64 `json:"uploadTimestamp"`
ContentType string `json:"contentType"`
Info map[string]string `json:"fileInfo,omitempty"`
} `json:"files"`
}
type ListPartsRequest struct {
ID string `json:"fileId"`
Start int `json:"startPartNumber"`
Count int `json:"maxPartCount"`
}
type ListPartsResponse struct {
Next int `json:"nextPartNumber"`
Parts []struct {
ID string `json:"fileId"`
Number int `json:"partNumber"`
SHA1 string `json:"contentSha1"`
Size int64 `json:"contentLength"`
} `json:"parts"`
}
type getUploadPartURLRequest struct {
ID string `json:"fileId"`
}
type getUploadPartURLResponse struct {
URL string `json:"uploadUrl"`
Token string `json:"authorizationToken"`
}
type UploadPartResponse struct {
ID string `json:"fileId"`
PartNumber int `json:"partNumber"`
Size int64 `json:"contentLength"`
SHA1 string `json:"contentSha1"`
}
type FinishLargeFileRequest struct {
ID string `json:"fileId"`
Hashes []string `json:"partSha1Array"`
}
type FinishLargeFileResponse struct {
Name string `json:"fileName"`
FileID string `json:"fileId"`
Timestamp int64 `json:"uploadTimestamp"`
Action string `json:"action"`
}
type ListFileNamesRequest struct {
BucketID string `json:"bucketId"`
Count int `json:"maxFileCount"`
Continuation string `json:"startFileName,omitempty"`
Prefix string `json:"prefix,omitempty"`
Delimiter string `json:"delimiter,omitempty"`
}
type ListFileNamesResponse struct {
Continuation string `json:"nextFileName"`
Files []GetFileInfoResponse `json:"files"`
}
type ListFileVersionsRequest struct {
BucketID string `json:"bucketId"`
Count int `json:"maxFileCount"`
StartName string `json:"startFileName,omitempty"`
StartID string `json:"startFileId,omitempty"`
Prefix string `json:"prefix,omitempty"`
Delimiter string `json:"delimiter,omitempty"`
}
type ListFileVersionsResponse struct {
NextName string `json:"nextFileName"`
NextID string `json:"nextFileId"`
Files []GetFileInfoResponse `json:"files"`
}
type HideFileRequest struct {
BucketID string `json:"bucketId"`
File string `json:"fileName"`
}
type HideFileResponse struct {
ID string `json:"fileId"`
Timestamp int64 `json:"uploadTimestamp"`
Action string `json:"action"`
}
type GetFileInfoRequest struct {
ID string `json:"fileId"`
}
type GetFileInfoResponse struct {
FileID string `json:"fileId"`
Name string `json:"fileName"`
SHA1 string `json:"contentSha1"`
Size int64 `json:"contentLength"`
ContentType string `json:"contentType"`
Info map[string]string `json:"fileInfo"`
Action string `json:"action"`
Timestamp int64 `json:"uploadTimestamp"`
}
type GetDownloadAuthorizationRequest struct {
BucketID string `json:"bucketId"`
Prefix string `json:"fileNamePrefix"`
Valid int `json:"validDurationInSeconds"`
}
type GetDownloadAuthorizationResponse struct {
BucketID string `json:"bucketId"`
Prefix string `json:"fileNamePrefix"`
Token string `json:"authorizationToken"`
}
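// Example added for illustration (not part of the original file): a typical
// B2 error body that unmarshals into the ErrorMessage type above:
//
//	{"status": 404, "code": "not_found", "message": "bucket does not exist"}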

vendor/github.com/minio/blazer/internal/blog/blog.go generated vendored Normal file

@@ -0,0 +1,54 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package blog implements a private logger, in the manner of glog, without
// polluting the flag namespace or leaving files all over /tmp.
//
// It has almost no features, and a bunch of global state.
package blog
import (
"log"
"os"
"strconv"
)
var level int32
type Verbose bool
func init() {
lvl := os.Getenv("B2_LOG_LEVEL")
i, err := strconv.ParseInt(lvl, 10, 32)
if err != nil {
return
}
level = int32(i)
}
func (v Verbose) Info(a ...interface{}) {
if v {
log.Print(a...)
}
}
func (v Verbose) Infof(format string, a ...interface{}) {
if v {
log.Printf(format, a...)
}
}
func V(target int32) Verbose {
return Verbose(target <= level)
}
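// exampleVerbosity is a sketch added for illustration; it is not part of the
// original file. With B2_LOG_LEVEL=2 set in the environment, the first call
// prints and the second stays silent.
func exampleVerbosity() {
V(1).Infof("finished upload of %d bytes", 1024)
V(3).Info("very chatty trace output")
}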

vendor/vendor.json vendored

@@ -297,12 +297,6 @@
		"revision": "42c364ba490082e4815b5222728711b3440603eb",
		"revisionTime": "2017-01-13T15:16:12Z"
	},
	{
		"checksumSHA1": "2UmMbNHc8FBr98mJFN1k8ISOIHk=",
		"path": "github.com/garyburd/redigo/internal",
		"revision": "0d253a66e6e1349f4581d6d2b300ee434ee2da9f",
		"revisionTime": "2017-02-16T21:49:44Z"
	},
	{
		"checksumSHA1": "+IH9gXMht4fL/fxKRZ4sqGBps1g=",
		"path": "github.com/go-ini/ini",
@@ -590,6 +584,24 @@
		"revision": "db96a2b759cdef4f11a34506a42eb8d1290c598e",
		"revisionTime": "2016-07-26T03:20:27Z"
	},
	{
		"checksumSHA1": "uTShVxdYNwW+3WI6SfJwOc/LQgo=",
		"path": "github.com/minio/blazer/base",
		"revision": "2081f5bf046503f576d8712253724fbf2950fffe",
		"revisionTime": "2017-11-26T20:28:54Z"
	},
	{
		"checksumSHA1": "ucCxupZ1gyxvFsBg5igP13dySLI=",
		"path": "github.com/minio/blazer/internal/b2types",
		"revision": "2081f5bf046503f576d8712253724fbf2950fffe",
		"revisionTime": "2017-11-26T20:28:54Z"
	},
	{
		"checksumSHA1": "zgBbPwwuUH2sxz8smOzOA9TrD5g=",
		"path": "github.com/minio/blazer/internal/blog",
		"revision": "2081f5bf046503f576d8712253724fbf2950fffe",
		"revisionTime": "2017-11-26T20:28:54Z"
	},
	{
		"checksumSHA1": "fUWokilZyc1QDKnIgCDJE8n1S9U=",
		"path": "github.com/minio/cli",