diff --git a/routers.go b/routers.go
index 503e50276..f724e18d8 100644
--- a/routers.go
+++ b/routers.go
@@ -17,45 +17,21 @@
package main
import (
- "net"
"net/http"
router "github.com/gorilla/mux"
- "github.com/minio/minio-go"
"github.com/minio/minio/pkg/fs"
- "github.com/minio/minio/pkg/probe"
)
// configureServerHandler returns the final handler for the http server.
func configureServerHandler(filesystem fs.Filesystem) http.Handler {
- // Access credentials.
- cred := serverConfig.GetCredential()
-
- // Server addr.
- addr := serverConfig.GetAddr()
-
// Initialize API.
api := storageAPI{
Filesystem: filesystem,
}
-
- // Split host port.
- host, port, _ := net.SplitHostPort(addr)
-
- // Default host is 'localhost', if no host present.
- if host == "" {
- host = "localhost"
- }
-
- // Initialize minio client for AWS Signature Version '4'
- insecure := !isSSL() // Insecure true when SSL is false.
- client, e := minio.NewV4(net.JoinHostPort(host, port), cred.AccessKeyID, cred.SecretAccessKey, insecure)
- fatalIf(probe.NewError(e), "Unable to initialize minio client", nil)
-
// Initialize Web.
web := &webAPI{
- FSPath: filesystem.GetRootPath(),
- Client: client,
+ Filesystem: filesystem,
}
// Initialize router.
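
For context, the hunk above swaps the minio-go loopback client for a direct handle on the filesystem layer. A minimal sketch (assumed; the real definition lives elsewhere in the tree, e.g. the web handlers file) of what the simplified `webAPI` struct looks like after this change:

```go
package main

import "github.com/minio/minio/pkg/fs"

// webAPI serves the MinioBrowser endpoints. After this diff it holds
// the filesystem layer directly instead of an FSPath string plus a
// minio-go Client that looped back over HTTP.
type webAPI struct {
	Filesystem fs.Filesystem
}
```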
diff --git a/typed-errors.go b/typed-errors.go
index 64ae986fe..95bc6a0a1 100644
--- a/typed-errors.go
+++ b/typed-errors.go
@@ -26,3 +26,6 @@ var errInvalidArgument = errors.New("Invalid arguments specified")
// errSignatureMismatch means signature did not match.
var errSignatureMismatch = errors.New("Signature does not match")
+
+// errInvalidToken is returned when the token used for authentication by the MinioBrowser has expired
+var errInvalidToken = errors.New("Invalid token")
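
A hedged sketch of how the new error might be surfaced; the `validateExpiry` helper below is hypothetical and not part of this diff:

```go
package main

import "time"

// validateExpiry is an illustrative helper: it rejects a MinioBrowser
// authentication token whose expiry timestamp has already passed by
// returning the errInvalidToken added above.
func validateExpiry(expiresAt time.Time) error {
	if time.Now().After(expiresAt) {
		return errInvalidToken
	}
	return nil
}
```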
diff --git a/vendor/github.com/minio/minio-go/API.md b/vendor/github.com/minio/minio-go/API.md
deleted file mode 100644
index dc39d40f0..000000000
--- a/vendor/github.com/minio/minio-go/API.md
+++ /dev/null
@@ -1,499 +0,0 @@
-## API Documentation
-
-### Minio client object creation
-Minio client object is created using minio-go:
-```go
-package main
-
-import (
- "fmt"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
- if err != nil {
- fmt.Println(err)
- return
- }
-}
-```
-
-s3Client can be used to perform operations on S3 storage. APIs are described below.
-
-### Bucket operations
-
-* [`MakeBucket`](#MakeBucket)
-* [`ListBuckets`](#ListBuckets)
-* [`BucketExists`](#BucketExists)
-* [`RemoveBucket`](#RemoveBucket)
-* [`ListObjects`](#ListObjects)
-* [`ListIncompleteUploads`](#ListIncompleteUploads)
-
-### Object operations
-
-* [`GetObject`](#GetObject)
-* [`PutObject`](#PutObject)
-* [`StatObject`](#StatObject)
-* [`RemoveObject`](#RemoveObject)
-* [`RemoveIncompleteUpload`](#RemoveIncompleteUpload)
-
-### File operations
-
-* [`FPutObject`](#FPutObject)
-* [`FGetObject`](#FGetObject)
-
-### Bucket policy operations
-
-* [`SetBucketPolicy`](#SetBucketPolicy)
-* [`GetBucketPolicy`](#GetBucketPolicy)
-* [`RemoveBucketPolicy`](#RemoveBucketPolicy)
-
-### Presigned operations
-
-* [`PresignedGetObject`](#PresignedGetObject)
-* [`PresignedPutObject`](#PresignedPutObject)
-* [`PresignedPostPolicy`](#PresignedPostPolicy)
-
-### Bucket operations
----------------------------------------
-
-#### MakeBucket(bucketName, location)
-Create a new bucket.
-
-__Arguments__
-* `bucketName` _string_ - Name of the bucket.
-* `location` _string_ - region; valid values are _us-west-1_, _us-west-2_, _eu-west-1_, _eu-central-1_, _ap-southeast-1_, _ap-northeast-1_, _ap-southeast-2_, _sa-east-1_
-
-__Example__
-```go
-err := s3Client.MakeBucket("mybucket", "us-west-1")
-if err != nil {
- fmt.Println(err)
- return
-}
-fmt.Println("Successfully created mybucket.")
-```
----------------------------------------
-
-#### ListBuckets()
-List all buckets.
-
-`ListBuckets` returns the list of buckets; each bucket is of the format:
-* `bucket.Name` _string_: bucket name
-* `bucket.CreationDate` _time.Time_: date when bucket was created
-
-__Example__
-```go
-buckets, err := s3Client.ListBuckets()
-if err != nil {
- fmt.Println(err)
- return
-}
-for _, bucket := range buckets {
- fmt.Println(bucket)
-}
-```
----------------------------------------
-
-#### BucketExists(bucketName)
-Check if bucket exists.
-
-__Arguments__
-* `bucketName` _string_ : name of the bucket
-
-__Example__
-```go
-err := s3Client.BucketExists("mybucket")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
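
Note that `BucketExists` reports a missing bucket through its error return. A sketch (assuming the `ToErrorResponse` helper defined later in this diff) of telling "not found" apart from other failures:

```go
err := s3Client.BucketExists("mybucket")
if err != nil {
	// "NoSuchBucket" means the bucket genuinely does not exist;
	// anything else (network error, access denied) is a real failure.
	if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
		fmt.Println("mybucket does not exist")
		return
	}
	fmt.Println(err)
	return
}
fmt.Println("mybucket exists")
```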
----------------------------------------
-
-#### RemoveBucket(bucketName)
-Remove a bucket.
-
-__Arguments__
-* `bucketName` _string_ : name of the bucket
-
-__Example__
-```go
-err := s3Client.RemoveBucket("mybucket")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### GetBucketPolicy(bucketName, objectPrefix)
-Get access permissions on a bucket or a prefix.
-
-__Arguments__
-* `bucketName` _string_ : name of the bucket
-* `objectPrefix` _string_ : name of the object prefix
-
-__Example__
-```go
-bucketPolicy, err := s3Client.GetBucketPolicy("mybucket", "")
-if err != nil {
- fmt.Println(err)
- return
-}
-fmt.Println("Access permissions for mybucket is", bucketPolicy)
-```
----------------------------------------
-
-#### SetBucketPolicy(bucketName, objectPrefix, policy)
-Set access permissions on a bucket or an object prefix.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectPrefix` _string_ : name of the object prefix
-* `policy` _BucketPolicy_: policy can be _none_, _readonly_, _readwrite_, _writeonly_
-
-__Example__
-```go
-err := s3Client.SetBucketPolicy("mybucket", "myprefix", "readwrite")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### RemoveBucketPolicy(bucketName, objectPrefix)
-Remove existing permissions on a bucket or an object prefix.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectPrefix` _string_ : name of the object prefix
-
-__Example__
-```go
-err := s3Client.RemoveBucketPolicy("mybucket", "myprefix")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
----------------------------------------
-
-#### ListObjects(bucketName, prefix, recursive, doneCh)
-List objects in a bucket.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectPrefix` _string_: the prefix of the objects that should be listed
-* `recursive` _bool_: `true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'
-* `doneCh` _chan struct{}_ : channel used to proactively close the internal goroutine
-
-__Return Value__
-* `<-chan ObjectInfo` _chan ObjectInfo_: Read channel for all the objects in the bucket; each object is of the format:
- * `objectInfo.Key` _string_: name of the object
- * `objectInfo.Size` _int64_: size of the object
- * `objectInfo.ETag` _string_: etag of the object
- * `objectInfo.LastModified` _time.Time_: modified time stamp
-
-__Example__
-```go
-// Create a done channel to control 'ListObjects' goroutine.
-doneCh := make(chan struct{})
-
-// Indicate to our routine to exit cleanly upon return.
-defer close(doneCh)
-
-isRecursive := true
-objectCh := s3Client.ListObjects("mybucket", "myprefix", isRecursive, doneCh)
-for object := range objectCh {
- if object.Err != nil {
- fmt.Println(object.Err)
- return
- }
- fmt.Println(object)
-}
-
-```
-
----------------------------------------
-
-#### ListIncompleteUploads(bucketName, prefix, recursive, doneCh)
-List partially uploaded objects in a bucket.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `prefix` _string_: prefix of the object names that are partially uploaded
-* `recursive` _bool_: directory style listing when false, recursive listing when true
-* `doneCh` _chan struct{}_ : channel used to proactively close the internal goroutine
-
-__Return Value__
-* `<-chan ObjectMultipartInfo` _chan ObjectMultipartInfo_ : emits multipart objects of the format:
- * `multiPartObjInfo.Key` _string_: name of the incomplete object
- * `multiPartObjInfo.UploadID` _string_: upload ID of the incomplete object
- * `multiPartObjInfo.Size` _int64_: size of the incompletely uploaded object
-
-__Example__
-```go
-// Create a done channel to control 'ListIncompleteUploads' goroutine.
-doneCh := make(chan struct{})
-
-// Indicate to our routine to exit cleanly upon return.
-defer close(doneCh)
-
-isRecursive := true
-multiPartObjectCh := s3Client.ListIncompleteUploads("mybucket", "myprefix", isRecursive, doneCh)
-for multiPartObject := range multiPartObjectCh {
- if multiPartObject.Err != nil {
- fmt.Println(multiPartObject.Err)
- return
- }
- fmt.Println(multiPartObject)
-}
-```
-
----------------------------------------
-### Object operations
-
-#### GetObject(bucketName, objectName)
-Download an object.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-
-__Return Value__
-* `object` _*minio.Object_ : _minio.Object_ represents an object reader.
-
-__Example__
-```go
-object, err := s3Client.GetObject("mybucket", "photo.jpg")
-if err != nil {
- fmt.Println(err)
- return
-}
-localFile, err := os.Create("/tmp/local-file")
-if err != nil {
-    fmt.Println(err)
-    return
-}
-if _, err = io.Copy(localFile, object); err != nil {
- fmt.Println(err)
- return
-}
-```
---------------------------------------
-
-#### FGetObject(bucketName, objectName, filePath)
-Downloads and saves the object as a file in the local filesystem.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-* `filePath` _string_: path to which the object data will be written
-
-__Example__
-```go
-err := s3Client.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### PutObject(bucketName, objectName, reader, contentType)
-Upload an object from a stream (`io.Reader`).
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-* `reader` _io.Reader_: Any golang object implementing io.Reader
-* `contentType` _string_: content type of the object.
-
-__Example__
-```go
-file, err := os.Open("my-testfile")
-if err != nil {
- fmt.Println(err)
- return
-}
-defer file.Close()
-
-n, err := s3Client.PutObject("my-bucketname", "my-objectname", file, "application/octet-stream")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
----------------------------------------
-
-#### FPutObject(bucketName, objectName, filePath, contentType)
-Uploads the object using contents from a file.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-* `filePath` _string_: file path of the file to be uploaded
-* `contentType` _string_: content type of the object
-
-__Example__
-```go
-n, err := s3Client.FPutObject("my-bucketname", "my-objectname", "/tmp/my-filename.csv", "application/csv")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### StatObject(bucketName, objectName)
-Get metadata of an object.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-
-__Return Value__
-* `objInfo` _ObjectInfo_ : object stat info in the following format:
- * `objInfo.Size` _int64_: size of the object
- * `objInfo.ETag` _string_: etag of the object
- * `objInfo.ContentType` _string_: Content-Type of the object
- * `objInfo.LastModified` _time.Time_: modified time stamp
-
-__Example__
-```go
-objInfo, err := s3Client.StatObject("mybucket", "photo.jpg")
-if err != nil {
- fmt.Println(err)
- return
-}
-fmt.Println(objInfo)
-```
----------------------------------------
-
-#### RemoveObject(bucketName, objectName)
-Remove an object.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-
-__Example__
-```go
-err := s3Client.RemoveObject("mybucket", "photo.jpg")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### RemoveIncompleteUpload(bucketName, objectName)
-Remove a partially uploaded object.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-
-__Example__
-```go
-err := s3Client.RemoveIncompleteUpload("mybucket", "photo.jpg")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
-### Presigned operations
----------------------------------------
-
-#### PresignedGetObject(bucketName, objectName, expiry)
-Generate a presigned URL for GET.
-
-__Arguments__
-* `bucketName` _string_: name of the bucket.
-* `objectName` _string_: name of the object.
-* `expiry` _time.Duration_: expiry in seconds.
-* `reqParams` _url.Values_ : additional response header overrides; supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_
-
-__Example__
-```go
-// Set request parameters for content-disposition.
-reqParams := make(url.Values)
-reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
-
-// Generates a presigned url which expires in a day.
-presignedURL, err := s3Client.PresignedGetObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60, reqParams)
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
----------------------------------------
-
-#### PresignedPutObject(bucketName, objectName, expiry)
-Generate a presigned URL for PUT.
-
-NOTE: you can upload to S3 only with the specified object name.
-
-
-__Arguments__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-* `expiry` _time.Duration_: expiry in seconds
-
-__Example__
-```go
-// Generates a url which expires in a day.
-presignedURL, err := s3Client.PresignedPutObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60)
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
----------------------------------------
-
-#### PresignedPostPolicy
-With PresignedPostPolicy we can provide policies specifying conditions restricting
-what you want to allow in a POST request, such as the bucket name where objects can be
-uploaded, key name prefixes that you want to allow for the object being created, and more.
-
-We need to create our policy first:
-```go
-policy := minio.NewPostPolicy()
-```
-Apply upload policy restrictions:
-```go
-policy.SetBucket("my-bucketname")
-policy.SetKey("my-objectname")
-policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
-
-// Only allow 'png' images.
-policy.SetContentType("image/png")
-
-// Only allow content size in range 1KB to 1MB.
-policy.SetContentLengthRange(1024, 1024*1024)
-```
-Get the POST form key/value object:
-```go
-formData, err := s3Client.PresignedPostPolicy(policy)
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
-POST your content from the command line using `curl`:
-```go
-fmt.Printf("curl ")
-for k, v := range formData {
- fmt.Printf("-F %s=%s ", k, v)
-}
-fmt.Printf("-F file=@/etc/bash.bashrc ")
-fmt.Printf("https://my-bucketname.s3.amazonaws.com\n")
-```
diff --git a/vendor/github.com/minio/minio-go/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/CONTRIBUTING.md
deleted file mode 100644
index 8b1ee86c6..000000000
--- a/vendor/github.com/minio/minio-go/CONTRIBUTING.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-### Developer Guidelines
-
-``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
-
-* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
- - Fork it
- - Create your feature branch (git checkout -b my-new-feature)
- - Commit your changes (git commit -am 'Add some feature')
- - Push to the branch (git push origin my-new-feature)
- - Create new Pull Request
-
-* When you're ready to create a pull request, be sure to:
- - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
- - Run `go fmt`
- - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
- - Make sure `go test -race ./...` and `go build` complete.
- NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set your credentials as the environment variables
- ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests please use ``go test -short -race ./...``
-
-* Read the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project
- - `minio-go` project is strictly conformant with Golang style
- - if you happen to observe offending code, please feel free to send a pull request
diff --git a/vendor/github.com/minio/minio-go/INSTALLGO.md b/vendor/github.com/minio/minio-go/INSTALLGO.md
deleted file mode 100644
index 81c3d53f5..000000000
--- a/vendor/github.com/minio/minio-go/INSTALLGO.md
+++ /dev/null
@@ -1,83 +0,0 @@
-## Ubuntu (Kylin) 14.04
-### Build Dependencies
-This installation guide is based on Ubuntu 14.04+ on x86-64 platform.
-
-##### Install Git, GCC
-```sh
-$ sudo apt-get install git build-essential
-```
-
-##### Install Go 1.5+
-
-Download Go 1.5+ from [https://golang.org/dl/](https://golang.org/dl/).
-
-```sh
-$ wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
-$ mkdir -p ${HOME}/bin/
-$ mkdir -p ${HOME}/go/
-$ tar -C ${HOME}/bin/ -xzf go1.5.1.linux-amd64.tar.gz
-```
-##### Setup GOROOT and GOPATH
-
-Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries
-and GOPATH specifies the location of your project workspace.
-
-```sh
-export GOROOT=${HOME}/bin/go
-export GOPATH=${HOME}/go
-export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin
-```
-```sh
-$ source ~/.bashrc
-```
-
-##### Testing it all
-```sh
-$ go env
-```
-
-## OS X (Yosemite) 10.10
-### Build Dependencies
-This installation document assumes OS X Yosemite 10.10+ on x86-64 platform.
-
-##### Install brew
-```sh
-$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
-```
-
-##### Install Git, Python
-```sh
-$ brew install git python
-```
-
-##### Install Go 1.5+
-
-Install golang binaries using `brew`
-
-```sh
-$ brew install go
-$ mkdir -p $HOME/go
-```
-
-##### Setup GOROOT and GOPATH
-
-Add the following exports to your ``~/.bash_profile``. Environment variable GOROOT specifies the location of your golang binaries
-and GOPATH specifies the location of your project workspace.
-
-```sh
-export GOPATH=${HOME}/go
-export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6)
-export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
-export PATH=$PATH:${GOPATH}/bin
-```
-
-##### Source the new environment
-
-```sh
-$ source ~/.bash_profile
-```
-
-##### Testing it all
-```sh
-$ go env
-```
diff --git a/vendor/github.com/minio/minio-go/LICENSE b/vendor/github.com/minio/minio-go/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/vendor/github.com/minio/minio-go/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md
deleted file mode 100644
index 6dbef6265..000000000
--- a/vendor/github.com/minio/minio-go/MAINTAINERS.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# For maintainers only
-
-## Responsibilities
-
-Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
-
-### Making new releases
-
-Edit `libraryVersion` constant in `api.go`.
-
-```
-$ grep libraryVersion api.go
- libraryVersion = "0.3.0"
-```
-
-```
-$ git tag 0.3.0
-$ git push --tags
-```
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
deleted file mode 100644
index 3db4f7b8f..000000000
--- a/vendor/github.com/minio/minio-go/README.md
+++ /dev/null
@@ -1,105 +0,0 @@
-# Minio Go Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-## Description
-
-Minio Go library is a simple client library for S3 compatible cloud storage servers. Supports AWS Signature Version 4 and 2. AWS Signature Version 4 is chosen as default.
-
-List of supported cloud storage providers.
-
- - AWS Signature Version 4
- - Amazon S3
- - Minio
-
- - AWS Signature Version 2
- - Google Cloud Storage (Compatibility Mode)
- - Openstack Swift + Swift3 middleware
- - Ceph Object Gateway
- - Riak CS
-
-## Install
-
-If you do not have a working Golang environment, please follow [Install Golang](./INSTALLGO.md).
-
-```sh
-$ go get github.com/minio/minio-go
-```
-
-## Example
-
-### ListBuckets()
-
-This example shows how to list your buckets.
-
-```go
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
- // This boolean value is the last argument for New().
-
- // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
- // determined based on the Endpoint value.
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
- if err != nil {
- log.Fatalln(err)
- }
- buckets, err := s3Client.ListBuckets()
- if err != nil {
- log.Fatalln(err)
- }
- for _, bucket := range buckets {
- log.Println(bucket)
- }
-}
-```
-
-## Documentation
-
-[API documentation](./API.md)
-
-## Examples
-
-### Bucket Operations.
-* [MakeBucket(bucketName, location) error](examples/s3/makebucket.go)
-* [BucketExists(bucketName) error](examples/s3/bucketexists.go)
-* [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
-* [ListBuckets() []BucketInfo](examples/s3/listbuckets.go)
-* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectInfo](examples/s3/listobjects.go)
-* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go)
-
-### Object Operations.
-* [PutObject(bucketName, objectName, io.Reader, contentType) error](examples/s3/putobject.go)
-* [GetObject(bucketName, objectName) (*Object, error)](examples/s3/getobject.go)
-* [StatObject(bucketName, objectName) (ObjectInfo, error)](examples/s3/statobject.go)
-* [RemoveObject(bucketName, objectName) error](examples/s3/removeobject.go)
-* [RemoveIncompleteUpload(bucketName, objectName) <-chan error](examples/s3/removeincompleteupload.go)
-
-### File Object Operations.
-* [FPutObject(bucketName, objectName, filePath, contentType) (size, error)](examples/s3/fputobject.go)
-* [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go)
-
-### Presigned Operations.
-* [PresignedGetObject(bucketName, objectName, time.Duration, url.Values) (string, error)](examples/s3/presignedgetobject.go)
-* [PresignedPutObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedputobject.go)
-* [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go)
-
-### Bucket Policy Operations.
-* [SetBucketPolicy(bucketName, objectPrefix, BucketPolicy) error](examples/s3/setbucketpolicy.go)
-* [GetBucketPolicy(bucketName, objectPrefix) (BucketPolicy, error)](examples/s3/getbucketpolicy.go)
-* [RemoveBucketPolicy(bucketName, objectPrefix) error](examples/s3/removebucketpolicy.go)
-
-### API Reference
-
-[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/minio/minio-go)
-
-## Contribute
-
-[Contributors Guide](./CONTRIBUTING.md)
-
-[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) [![Build status](https://ci.appveyor.com/api/projects/status/1ep7n2resn6fk1w6?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
diff --git a/vendor/github.com/minio/minio-go/api-datatypes.go b/vendor/github.com/minio/minio-go/api-datatypes.go
deleted file mode 100644
index 0871b1cfb..000000000
--- a/vendor/github.com/minio/minio-go/api-datatypes.go
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import "time"
-
-// BucketInfo container for bucket metadata.
-type BucketInfo struct {
- // The name of the bucket.
- Name string `json:"name"`
- // Date the bucket was created.
- CreationDate time.Time `json:"creationDate"`
-}
-
-// ObjectInfo container for object metadata.
-type ObjectInfo struct {
- // An ETag is optionally set to md5sum of an object. In case of multipart objects,
- // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
- // each part concatenated into one string.
- ETag string `json:"etag"`
-
- Key string `json:"name"` // Name of the object
- LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
- Size int64 `json:"size"` // Size in bytes of the object.
- ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
-
- // Owner name.
- Owner struct {
- DisplayName string `json:"name"`
- ID string `json:"id"`
- } `json:"owner"`
-
- // The class of storage used to store the object.
- StorageClass string `json:"storageClass"`
-
- // Error
- Err error `json:"-"`
-}
-
-// ObjectMultipartInfo container for multipart object metadata.
-type ObjectMultipartInfo struct {
- // Date and time at which the multipart upload was initiated.
- Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
-
- Initiator initiator
- Owner owner
-
- // The type of storage to use for the object. Defaults to 'STANDARD'.
- StorageClass string
-
- // Key of the object for which the multipart upload was initiated.
- Key string
-
- // Size in bytes of the object.
- Size int64
-
- // Upload ID that identifies the multipart upload.
- UploadID string `xml:"UploadId"`
-
- // Error
- Err error
-}
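
Both structs above embed an `Err` field because listings are streamed over channels, so per-item errors ride along with the data instead of aborting the stream. A short consumption sketch (assuming a `client` created with `minio.New` as in the README above):

```go
doneCh := make(chan struct{})
defer close(doneCh)

for info := range client.ListObjects("mybucket", "", true, doneCh) {
	// Each ObjectInfo carries its own Err; check it before
	// trusting the remaining fields.
	if info.Err != nil {
		log.Fatalln(info.Err)
	}
	log.Printf("%s (%d bytes, etag %s)", info.Key, info.Size, info.ETag)
}
```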
diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go
deleted file mode 100644
index e6789aff5..000000000
--- a/vendor/github.com/minio/minio-go/api-error-response.go
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "encoding/xml"
- "fmt"
- "net/http"
- "strconv"
-)
-
-/* **** SAMPLE ERROR RESPONSE ****
-   <?xml version="1.0" encoding="UTF-8"?>
-   <Error>
-      <Code>AccessDenied</Code>
-      <Message>Access Denied</Message>
-      <BucketName>bucketName</BucketName>
-      <Key>objectName</Key>
-      <RequestId>F19772218238A85A</RequestId>
-      <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
-   </Error>
-*/
-
-// ErrorResponse - Is the typed error returned by all API operations.
-type ErrorResponse struct {
- XMLName xml.Name `xml:"Error" json:"-"`
- Code string
- Message string
- BucketName string
- Key string
- RequestID string `xml:"RequestId"`
- HostID string `xml:"HostId"`
-
- // Region where the bucket is located. This header is returned
- // only in HEAD bucket and ListObjects response.
- Region string
-}
-
-// ToErrorResponse - Returns parsed ErrorResponse struct from body and
-// http headers.
-//
-// For example:
-//
-// import s3 "github.com/minio/minio-go"
-// ...
-// ...
-// reader, stat, err := s3.GetObject(...)
-// if err != nil {
-// resp := s3.ToErrorResponse(err)
-// }
-// ...
-func ToErrorResponse(err error) ErrorResponse {
- switch err := err.(type) {
- case ErrorResponse:
- return err
- default:
- return ErrorResponse{}
- }
-}
-
-// Error - Returns HTTP error string
-func (e ErrorResponse) Error() string {
- return e.Message
-}
-
-// Common string for errors to report issue location in unexpected
-// cases.
-const (
- reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
-)
-
-// httpRespToErrorResponse returns a new encoded ErrorResponse
-// structure as error.
-func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
- if resp == nil {
- msg := "Response is empty. " + reportIssue
- return ErrInvalidArgument(msg)
- }
- var errResp ErrorResponse
- err := xmlDecoder(resp.Body, &errResp)
- // XML decoding failed with no body, fall back to HTTP headers.
- if err != nil {
- switch resp.StatusCode {
- case http.StatusNotFound:
- if objectName == "" {
- errResp = ErrorResponse{
- Code: "NoSuchBucket",
- Message: "The specified bucket does not exist.",
- BucketName: bucketName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
- }
- } else {
- errResp = ErrorResponse{
- Code: "NoSuchKey",
- Message: "The specified key does not exist.",
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
- }
- }
- case http.StatusForbidden:
- errResp = ErrorResponse{
- Code: "AccessDenied",
- Message: "Access Denied.",
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
- }
- case http.StatusConflict:
- errResp = ErrorResponse{
- Code: "Conflict",
- Message: "Bucket not empty.",
- BucketName: bucketName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
- }
- default:
- errResp = ErrorResponse{
- Code: resp.Status,
- Message: resp.Status,
- BucketName: bucketName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
- }
- }
- }
- return errResp
-}
-
-// ErrEntityTooLarge - Input size is larger than supported maximum.
-func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
- msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
- return ErrorResponse{
- Code: "EntityTooLarge",
- Message: msg,
- BucketName: bucketName,
- Key: objectName,
- }
-}
-
-// ErrEntityTooSmall - Input size is smaller than supported minimum.
-func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
- msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
- return ErrorResponse{
- Code: "EntityTooLarge",
- Message: msg,
- BucketName: bucketName,
- Key: objectName,
- }
-}
-
-// ErrUnexpectedEOF - Unexpected end of file reached.
-func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
- msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
- strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
- return ErrorResponse{
- Code: "UnexpectedEOF",
- Message: msg,
- BucketName: bucketName,
- Key: objectName,
- }
-}
-
-// ErrInvalidBucketName - Invalid bucket name response.
-func ErrInvalidBucketName(message string) error {
- return ErrorResponse{
- Code: "InvalidBucketName",
- Message: message,
- RequestID: "minio",
- }
-}
-
-// ErrInvalidObjectName - Invalid object name response.
-func ErrInvalidObjectName(message string) error {
- return ErrorResponse{
- Code: "NoSuchKey",
- Message: message,
- RequestID: "minio",
- }
-}
-
-// ErrInvalidParts - Invalid number of parts.
-func ErrInvalidParts(expectedParts, uploadedParts int) error {
- msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", expectedParts, uploadedParts)
- return ErrorResponse{
- Code: "InvalidParts",
- Message: msg,
- RequestID: "minio",
- }
-}
-
-// ErrInvalidObjectPrefix - Invalid object prefix response is
-// similar to object name response.
-var ErrInvalidObjectPrefix = ErrInvalidObjectName
-
-// ErrInvalidArgument - Invalid argument response.
-func ErrInvalidArgument(message string) error {
- return ErrorResponse{
- Code: "InvalidArgument",
- Message: message,
- RequestID: "minio",
- }
-}
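
As a usage sketch, callers can recover the typed response and branch on its `Code`; the values below are the ones synthesized by httpRespToErrorResponse above:

```go
_, err := s3Client.GetObject("mybucket", "photo.jpg")
if err != nil {
	errResp := minio.ToErrorResponse(err)
	switch errResp.Code {
	case "NoSuchBucket", "NoSuchKey":
		fmt.Println("not found:", errResp.Message)
	case "AccessDenied":
		fmt.Println("permission problem:", errResp.Message)
	default:
		fmt.Println("unexpected error:", errResp)
	}
	return
}
```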
diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go
deleted file mode 100644
index 265a58eea..000000000
--- a/vendor/github.com/minio/minio-go/api-get-object-file.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "io"
- "os"
- "path/filepath"
-)
-
-// FGetObject - download contents of an object to a local file.
-func (c Client) FGetObject(bucketName, objectName, filePath string) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
- if err := isValidObjectName(objectName); err != nil {
- return err
- }
-
- // Verify if destination already exists.
- st, err := os.Stat(filePath)
- if err == nil {
- // If the destination exists and is a directory.
- if st.IsDir() {
- return ErrInvalidArgument("fileName is a directory.")
- }
- }
-
- // Proceed if file does not exist, return for all other errors.
- if err != nil {
- if !os.IsNotExist(err) {
- return err
- }
- }
-
- // Extract top level directory.
- objectDir, _ := filepath.Split(filePath)
- if objectDir != "" {
- // Create any missing top level directories.
- if err := os.MkdirAll(objectDir, 0700); err != nil {
- return err
- }
- }
-
- // Gather md5sum.
- objectStat, err := c.StatObject(bucketName, objectName)
- if err != nil {
- return err
- }
-
- // Write to a temporary file "fileName.part.minio" before saving.
- filePartPath := filePath + objectStat.ETag + ".part.minio"
-
- // If it exists, open in append mode. If not, create it as a part file.
- filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
- if err != nil {
- return err
- }
-
- // Issue Stat to get the current offset.
- st, err = filePart.Stat()
- if err != nil {
- return err
- }
-
- // Seek to current position for incoming reader.
- objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0)
- if err != nil {
- return err
- }
-
- // Write to the part file.
- if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
- return err
- }
-
- // Close the file before rename, this is specifically needed for Windows users.
- if err = filePart.Close(); err != nil {
- return err
- }
-
- // Safely completed. Now commit by renaming to actual filename.
- if err = os.Rename(filePartPath, filePath); err != nil {
- return err
- }
-
- // Return.
- return nil
-}
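
FGetObject above relies on a common commit pattern: write to a side file, then rename, so the final path never exposes a partially written object. A stripped-down, standalone sketch of just that pattern (generic Go, not minio-go API):

```go
package main

import (
	"io"
	"os"
	"strings"
)

// commitAtomically streams src into path+".part" and renames it into
// place only after a successful write and close, so readers never see
// a half-written file at the final path.
func commitAtomically(path string, src io.Reader) error {
	part := path + ".part"
	f, err := os.OpenFile(part, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	if _, err = io.Copy(f, src); err != nil {
		f.Close()
		return err
	}
	// Close before rename; required on Windows, harmless elsewhere.
	if err = f.Close(); err != nil {
		return err
	}
	return os.Rename(part, path)
}

func main() {
	if err := commitAtomically("/tmp/hello.txt", strings.NewReader("hello\n")); err != nil {
		panic(err)
	}
}
```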
diff --git a/vendor/github.com/minio/minio-go/api-get.go b/vendor/github.com/minio/minio-go/api-get.go
deleted file mode 100644
index d3e9937e9..000000000
--- a/vendor/github.com/minio/minio-go/api-get.go
+++ /dev/null
@@ -1,542 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "sort"
- "strings"
- "sync"
- "time"
-)
-
-// GetBucketPolicy - get bucket policy at a given path.
-func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy BucketPolicy, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return BucketPolicyNone, err
- }
- if err := isValidObjectPrefix(objectPrefix); err != nil {
- return BucketPolicyNone, err
- }
- policy, err := c.getBucketPolicy(bucketName, objectPrefix)
- if err != nil {
- return BucketPolicyNone, err
- }
- if policy.Statements == nil {
- return BucketPolicyNone, nil
- }
- if isBucketPolicyReadWrite(policy.Statements, bucketName, objectPrefix) {
- return BucketPolicyReadWrite, nil
- } else if isBucketPolicyWriteOnly(policy.Statements, bucketName, objectPrefix) {
- return BucketPolicyWriteOnly, nil
- } else if isBucketPolicyReadOnly(policy.Statements, bucketName, objectPrefix) {
- return BucketPolicyReadOnly, nil
- }
- return BucketPolicyNone, nil
-}
-
-func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (BucketAccessPolicy, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return BucketAccessPolicy{}, err
- }
- if err := isValidObjectPrefix(objectPrefix); err != nil {
- return BucketAccessPolicy{}, err
- }
- // Get resources properly escaped and lined up before
- // using them in http request.
- urlValues := make(url.Values)
- urlValues.Set("policy", "")
-
- // Execute GET on bucket to fetch the bucket policy.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- })
- defer closeResponse(resp)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- errResponse := httpRespToErrorResponse(resp, bucketName, "")
- if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" {
- return BucketAccessPolicy{Version: "2012-10-17"}, nil
- }
- return BucketAccessPolicy{}, errResponse
- }
- }
- // Read access policy up to maxAccessPolicySize.
- // http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
- // bucket policies are limited to 20KB in size, using a limit reader.
- bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(resp.Body, maxAccessPolicySize))
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- policy, err := unMarshalBucketPolicy(bucketPolicyBuf)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- // Sort the policy actions and resources for convenience.
- for _, statement := range policy.Statements {
- sort.Strings(statement.Actions)
- sort.Strings(statement.Resources)
- }
- return policy, nil
-}
-
-// GetObject - returns a seekable, readable object.
-func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return nil, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return nil, err
- }
-
- // Start the request as soon as GetObject is initiated.
- httpReader, objectInfo, err := c.getObject(bucketName, objectName, 0, 0)
- if err != nil {
- return nil, err
- }
-
- // Create request channel.
- reqCh := make(chan readRequest)
- // Create response channel.
- resCh := make(chan readResponse)
- // Create done channel.
- doneCh := make(chan struct{})
-
- // This routine feeds partial object data as and when the caller reads.
- go func() {
- defer close(reqCh)
- defer close(resCh)
-
- // Loop through the incoming control messages and read data.
- for {
- select {
- // When the done channel is closed exit our routine.
- case <-doneCh:
- return
- // Request message.
- case req := <-reqCh:
- // If the offset changed, fetch the object again at the new offset.
- if req.DidOffsetChange {
- // Read from offset.
- httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, 0)
- if err != nil {
- resCh <- readResponse{
- Error: err,
- }
- return
- }
- }
-
- // Read at least req.Buffer bytes, if not we have
- // reached our EOF.
- size, err := io.ReadFull(httpReader, req.Buffer)
- if err == io.ErrUnexpectedEOF {
- // If an EOF happens after reading some but not
- // all the bytes ReadFull returns ErrUnexpectedEOF
- err = io.EOF
- }
- // Reply back how much was read.
- resCh <- readResponse{
- Size: int(size),
- Error: err,
- }
- }
- }
- }()
- // Return the readerAt backed by routine.
- return newObject(reqCh, resCh, doneCh, objectInfo), nil
-}
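
Because the returned *Object implements Read, ReadAt, Seek and Close, it can stand in wherever an io.ReadSeeker is expected. A small usage sketch (the 512-byte offset is arbitrary, purely illustrative):

```go
object, err := s3Client.GetObject("mybucket", "photo.jpg")
if err != nil {
	log.Fatalln(err)
}
defer object.Close()

// Skip an assumed fixed-size header, then stream the remainder.
if _, err = object.Seek(512, 0); err != nil {
	log.Fatalln(err)
}
if _, err = io.Copy(os.Stdout, object); err != nil {
	log.Fatalln(err)
}
```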
-
-// Read response message container to reply back for the request.
-type readResponse struct {
- Size int
- Error error
-}
-
-// Read request message container to communicate with internal
-// go-routine.
-type readRequest struct {
- Buffer []byte
- Offset int64 // readAt offset.
- DidOffsetChange bool
-}
-
-// Object represents an open object. It implements Read, ReadAt,
-// Seeker, Close for an HTTP stream.
-type Object struct {
- // Mutex.
- mutex *sync.Mutex
-
- // User allocated and defined.
- reqCh chan<- readRequest
- resCh <-chan readResponse
- doneCh chan<- struct{}
- prevOffset int64
- currOffset int64
- objectInfo ObjectInfo
-
- // Keeps track of closed call.
- isClosed bool
-
- // Previous error saved for future calls.
- prevErr error
-}
-
-// Read reads up to len(p) bytes into p. It returns the number of
-// bytes read (0 <= n <= len(p)) and any error encountered. Returns
-// io.EOF upon end of file.
-func (o *Object) Read(b []byte) (n int, err error) {
- if o == nil {
- return 0, ErrInvalidArgument("Object is nil")
- }
-
- // Locking.
- o.mutex.Lock()
- defer o.mutex.Unlock()
-
- // prevErr is previous error saved from previous operation.
- if o.prevErr != nil || o.isClosed {
- return 0, o.prevErr
- }
-
- // If current offset has reached Size limit, return EOF.
- if o.currOffset >= o.objectInfo.Size {
- return 0, io.EOF
- }
-
- // Send current information over control channel to indicate we are ready.
- reqMsg := readRequest{}
- // Send the pointer to the buffer over the channel.
- reqMsg.Buffer = b
-
- // Verify if offset has changed from the previous
- // offset, perhaps due to Seek().
- offsetChange := o.prevOffset - o.currOffset
- if offsetChange < 0 {
- offsetChange = -offsetChange
- }
- if offsetChange > 0 {
- // Fetch the new reader at the current offset again.
- reqMsg.Offset = o.currOffset
- reqMsg.DidOffsetChange = true
- } else {
- // No offset changes no need to fetch new reader, continue
- // reading.
- reqMsg.DidOffsetChange = false
- reqMsg.Offset = 0
- }
-
- // Send read request over the control channel.
- o.reqCh <- reqMsg
-
- // Get data over the response channel.
- dataMsg := <-o.resCh
-
- // Bytes read.
- bytesRead := int64(dataMsg.Size)
-
- // Update current offset.
- o.currOffset += bytesRead
-
- // Save the current offset as previous offset.
- o.prevOffset = o.currOffset
-
- if dataMsg.Error == nil {
- // If currOffset read is equal to objectSize
- // We have reached end of file, we return io.EOF.
- if o.currOffset >= o.objectInfo.Size {
- return dataMsg.Size, io.EOF
- }
- return dataMsg.Size, nil
- }
-
- // Save any error.
- o.prevErr = dataMsg.Error
- return dataMsg.Size, dataMsg.Error
-}
-
-// Stat returns the ObjectInfo structure describing object.
-func (o *Object) Stat() (ObjectInfo, error) {
- if o == nil {
- return ObjectInfo{}, ErrInvalidArgument("Object is nil")
- }
- // Locking.
- o.mutex.Lock()
- defer o.mutex.Unlock()
-
- if o.prevErr != nil || o.isClosed {
- return ObjectInfo{}, o.prevErr
- }
-
- return o.objectInfo, nil
-}
-
-// ReadAt reads len(b) bytes from the File starting at byte offset
-// off. It returns the number of bytes read and the error, if any.
-// ReadAt always returns a non-nil error when n < len(b). At end of
-// file, that error is io.EOF.
-func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
- if o == nil {
- return 0, ErrInvalidArgument("Object is nil")
- }
-
- // Locking.
- o.mutex.Lock()
- defer o.mutex.Unlock()
-
- // prevErr is error which was saved in previous operation.
- if o.prevErr != nil || o.isClosed {
- return 0, o.prevErr
- }
-
- // If offset is negative or greater than or equal to
- // object size we return EOF.
- if offset < 0 || offset >= o.objectInfo.Size {
- return 0, io.EOF
- }
-
- // Send current information over control channel to indicate we
- // are ready.
- reqMsg := readRequest{}
-
- // Send the offset and pointer to the buffer over the channel.
- reqMsg.Buffer = b
-
- // For ReadAt the offset almost always changes; as a minor
- // optimization, when offset equals currOffset we skip re-fetching.
- reqMsg.DidOffsetChange = offset != o.currOffset
- if reqMsg.DidOffsetChange {
- // Set new offset.
- reqMsg.Offset = offset
- // Save new offset as current offset.
- o.currOffset = offset
- }
-
- // Send read request over the control channel.
- o.reqCh <- reqMsg
-
- // Get data over the response channel.
- dataMsg := <-o.resCh
-
- // Bytes read.
- bytesRead := int64(dataMsg.Size)
-
- // Update current offset.
- o.currOffset += bytesRead
-
- // Save current offset as previous offset before returning.
- o.prevOffset = o.currOffset
-
- if dataMsg.Error == nil {
-		// If currOffset has reached objectInfo.Size we are at
-		// end of file; return io.EOF.
- if o.currOffset >= o.objectInfo.Size {
- return dataMsg.Size, io.EOF
- }
- return dataMsg.Size, nil
- }
-
- // Save any error.
- o.prevErr = dataMsg.Error
- return dataMsg.Size, dataMsg.Error
-}
-
-// Seek sets the offset for the next Read or Write to offset,
-// interpreted according to whence: 0 means relative to the
-// origin of the file, 1 means relative to the current offset,
-// and 2 means relative to the end.
-// Seek returns the new offset and an error, if any.
-//
-// Seeking to a negative offset is an error. Seeking to any positive
-// offset is legal; subsequent io operations succeed as long as the
-// underlying object is not closed.
-func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
- if o == nil {
- return 0, ErrInvalidArgument("Object is nil")
- }
-
- // Locking.
- o.mutex.Lock()
- defer o.mutex.Unlock()
-
- if o.prevErr != nil {
-		// Seeking is legal at EOF, so allow only io.EOF here; for
-		// any other saved error we return.
- if o.prevErr != io.EOF {
- return 0, o.prevErr
- }
- }
-
- // Negative offset is valid for whence of '2'.
- if offset < 0 && whence != 2 {
- return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
- }
-
- // Save current offset as previous offset.
- o.prevOffset = o.currOffset
-
- // Switch through whence.
- switch whence {
- default:
- return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
- case 0:
- if offset > o.objectInfo.Size {
- return 0, io.EOF
- }
- o.currOffset = offset
- case 1:
- if o.currOffset+offset > o.objectInfo.Size {
- return 0, io.EOF
- }
- o.currOffset += offset
- case 2:
- // Seeking to positive offset is valid for whence '2', but
- // since we are backing a Reader we have reached 'EOF' if
- // offset is positive.
- if offset > 0 {
- return 0, io.EOF
- }
-		// Seeking to a negative position is not allowed for this whence.
- if o.objectInfo.Size+offset < 0 {
- return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
- }
- o.currOffset += offset
- }
- // Return the effective offset.
- return o.currOffset, nil
-}
-
-// Close - Closes the underlying object. The first call succeeds;
-// subsequent Close() calls return an error.
-func (o *Object) Close() (err error) {
- if o == nil {
- return ErrInvalidArgument("Object is nil")
- }
- // Locking.
- o.mutex.Lock()
- defer o.mutex.Unlock()
-
- // if already closed return an error.
- if o.isClosed {
- return o.prevErr
- }
-
- // Close successfully.
- close(o.doneCh)
-
- // Save for future operations.
- errMsg := "Object is already closed. Bad file descriptor."
- o.prevErr = errors.New(errMsg)
- // Save here that we closed done channel successfully.
- o.isClosed = true
- return nil
-}
-
-// newObject instantiates a new *minio.Object.
-func newObject(reqCh chan<- readRequest, resCh <-chan readResponse, doneCh chan<- struct{}, objectInfo ObjectInfo) *Object {
- return &Object{
- mutex: &sync.Mutex{},
- reqCh: reqCh,
- resCh: resCh,
- doneCh: doneCh,
- objectInfo: objectInfo,
- }
-}
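The Read/ReadAt/Seek methods above multiplex every call over the reqCh/resCh pair that newObject wires up, while a single goroutine owns the actual HTTP body. A minimal, self-contained sketch of that protocol, using simplified stand-ins for the unexported readRequest/readResponse types (the real ones carry more state):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// Simplified stand-ins for the library's internal message types.
type readRequest struct {
	Buffer          []byte
	Offset          int64
	DidOffsetChange bool
}

type readResponse struct {
	Size  int
	Error error
}

func main() {
	data := []byte("hello, object storage")
	reqCh := make(chan readRequest)
	resCh := make(chan readResponse)
	doneCh := make(chan struct{})

	// Server goroutine: plays the role of the HTTP body reader,
	// re-seeking only when the client signals an offset change.
	go func() {
		rd := bytes.NewReader(data)
		for {
			select {
			case req := <-reqCh:
				if req.DidOffsetChange {
					rd.Seek(req.Offset, io.SeekStart) // error ignored for brevity
				}
				n, err := rd.Read(req.Buffer)
				resCh <- readResponse{Size: n, Error: err}
			case <-doneCh:
				return
			}
		}
	}()

	buf := make([]byte, 5)
	reqCh <- readRequest{Buffer: buf}
	res := <-resCh
	fmt.Printf("read %d bytes: %q\n", res.Size, buf[:res.Size]) // read 5 bytes: "hello"
	close(doneCh)
}
```

Re-seeking only on DidOffsetChange is what lets sequential Reads reuse one underlying stream instead of issuing a new ranged GET per call.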
-
-// getObject - retrieve object from Object Storage.
-//
-// Additionally this function also takes range arguments to download the specified
-// range bytes of an object. Setting offset and length = 0 will download the full object.
-//
-// For more information about the HTTP Range header,
-// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) {
- // Validate input arguments.
- if err := isValidBucketName(bucketName); err != nil {
- return nil, ObjectInfo{}, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return nil, ObjectInfo{}, err
- }
-
- customHeader := make(http.Header)
- // Set ranges if length and offset are valid.
- if length > 0 && offset >= 0 {
- customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
- } else if offset > 0 && length == 0 {
- customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset))
- } else if length < 0 && offset == 0 {
- customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
- }
-
- // Execute GET on objectName.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeader,
- })
- if err != nil {
- return nil, ObjectInfo{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
- return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
- }
- }
-
-	// Trim the surrounding double quotes from the ETag.
- md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
- md5sum = strings.TrimSuffix(md5sum, "\"")
-
- // Parse the date.
- date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
- if err != nil {
- msg := "Last-Modified time format not recognized. " + reportIssue
- return nil, ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
- Message: msg,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
- }
- }
- // Get content-type.
- contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
- if contentType == "" {
- contentType = "application/octet-stream"
- }
- var objectStat ObjectInfo
- objectStat.ETag = md5sum
- objectStat.Key = objectName
- objectStat.Size = resp.ContentLength
- objectStat.LastModified = date
- objectStat.ContentType = contentType
-
- // do not close body here, caller will close
- return resp.Body, objectStat, nil
-}
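The three Range branches in getObject map directly onto the RFC 2616 byte-range forms: a bounded range, an open-ended range, and a suffix range. A small standalone sketch of the same branching, using the hypothetical helper name rangeHeader:

```go
package main

import "fmt"

// rangeHeader mirrors the branching in getObject: a bounded range when
// both offset and length are set, an open-ended range for offset only,
// and a suffix range when length is negative.
func rangeHeader(offset, length int64) string {
	switch {
	case length > 0 && offset >= 0:
		return fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)
	case offset > 0 && length == 0:
		return fmt.Sprintf("bytes=%d-", offset)
	case length < 0 && offset == 0:
		return fmt.Sprintf("bytes=%d", length)
	}
	return ""
}

func main() {
	fmt.Println(rangeHeader(100, 50)) // bytes=100-149
	fmt.Println(rangeHeader(100, 0))  // bytes=100-
	fmt.Println(rangeHeader(0, -50))  // bytes=-50 (last 50 bytes)
}
```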
diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go
deleted file mode 100644
index b17a51acc..000000000
--- a/vendor/github.com/minio/minio-go/api-list.go
+++ /dev/null
@@ -1,520 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "fmt"
- "net/http"
- "net/url"
- "strings"
-)
-
-// ListBuckets lists all buckets owned by this authenticated user.
-//
-// This call requires explicit authentication, no anonymous requests are
-// allowed for listing buckets.
-//
-// api := client.New(....)
-// for message := range api.ListBuckets() {
-// fmt.Println(message)
-// }
-//
-func (c Client) ListBuckets() ([]BucketInfo, error) {
- // Execute GET on service.
- resp, err := c.executeMethod("GET", requestMetadata{})
- defer closeResponse(resp)
- if err != nil {
- return nil, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return nil, httpRespToErrorResponse(resp, "", "")
- }
- }
- listAllMyBucketsResult := listAllMyBucketsResult{}
- err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
- if err != nil {
- return nil, err
- }
- return listAllMyBucketsResult.Buckets.Bucket, nil
-}
-
-// ListObjects - (List Objects) - List some objects or all recursively.
-//
-// ListObjects lists all objects matching the objectPrefix from
-// the specified bucket. If recursion is enabled it lists
-// all subdirectories and all their contents.
-//
-// The input parameters are bucketName, objectPrefix, recursive
-// and a done channel for proactively closing the internal
-// goroutine. If recursive is 'true' this function returns all
-// the objects in the given bucket name under the given object
-// prefix.
-//
-// api := client.New(....)
-// // Create a done channel.
-// doneCh := make(chan struct{})
-// defer close(doneCh)
-//	// Recursively list all objects in 'mytestbucket'
-// recursive := true
-// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) {
-// fmt.Println(message)
-// }
-//
-func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
- // Allocate new list objects channel.
- objectStatCh := make(chan ObjectInfo, 1000)
- // Default listing is delimited at "/"
- delimiter := "/"
- if recursive {
- // If recursive we do not delimit.
- delimiter = ""
- }
- // Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
- defer close(objectStatCh)
- objectStatCh <- ObjectInfo{
- Err: err,
- }
- return objectStatCh
- }
- // Validate incoming object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
- defer close(objectStatCh)
- objectStatCh <- ObjectInfo{
- Err: err,
- }
- return objectStatCh
- }
-
- // Initiate list objects goroutine here.
- go func(objectStatCh chan<- ObjectInfo) {
- defer close(objectStatCh)
- // Save marker for next request.
- var marker string
- for {
- // Get list of objects a maximum of 1000 per request.
- result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000)
- if err != nil {
- objectStatCh <- ObjectInfo{
- Err: err,
- }
- return
- }
-
- // If contents are available loop through and send over channel.
- for _, object := range result.Contents {
- // Save the marker.
- marker = object.Key
- select {
- // Send object content.
- case objectStatCh <- object:
-				// If the caller signals done, return here.
- case <-doneCh:
- return
- }
- }
-
- // Send all common prefixes if any.
- // NOTE: prefixes are only present if the request is delimited.
- for _, obj := range result.CommonPrefixes {
- object := ObjectInfo{}
- object.Key = obj.Prefix
- object.Size = 0
- select {
- // Send object prefixes.
- case objectStatCh <- object:
-				// If the caller signals done, return here.
- case <-doneCh:
- return
- }
- }
-
- // If next marker present, save it for next request.
- if result.NextMarker != "" {
- marker = result.NextMarker
- }
-
-			// Listing ends when the result is not truncated; return right here.
- if !result.IsTruncated {
- return
- }
- }
- }(objectStatCh)
- return objectStatCh
-}
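One consequence of the select on doneCh above: a consumer can stop a listing early without leaking the internal goroutine, as long as it closes the done channel. A hedged usage sketch, with placeholder endpoint, credentials and bucket name in the style of the package docs:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Closing doneCh tears down the listing goroutine as soon as we
	// stop ranging, so an early return does not leak it.
	doneCh := make(chan struct{})
	defer close(doneCh)
	for object := range s3Client.ListObjects("mytestbucket", "", true, doneCh) {
		if object.Err != nil {
			fmt.Println(object.Err)
			return
		}
		if strings.HasSuffix(object.Key, ".log") {
			fmt.Println("first log object:", object.Key)
			return // doneCh is closed by the deferred call
		}
	}
}
```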
-
-/// Bucket Read Operations.
-
-// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
-//
-// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
-// request parameters :-
-// ---------
-// ?marker - Specifies the key to start with when listing objects in a bucket.
-// ?delimiter - A delimiter is a character you use to group keys.
-// ?prefix - Limits the response to keys that begin with the specified prefix.
-// ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) {
- // Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
- return listBucketResult{}, err
- }
- // Validate object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
- return listBucketResult{}, err
- }
- // Get resources properly escaped and lined up before
- // using them in http request.
- urlValues := make(url.Values)
- // Set object prefix.
- if objectPrefix != "" {
- urlValues.Set("prefix", objectPrefix)
- }
- // Set object marker.
- if objectMarker != "" {
- urlValues.Set("marker", objectMarker)
- }
- // Set delimiter.
- if delimiter != "" {
- urlValues.Set("delimiter", delimiter)
- }
-
- // maxkeys should default to 1000 or less.
- if maxkeys == 0 || maxkeys > 1000 {
- maxkeys = 1000
- }
- // Set max keys.
- urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
-
- // Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- })
- defer closeResponse(resp)
- if err != nil {
- return listBucketResult{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return listBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
- }
- }
- // Decode listBuckets XML.
- listBucketResult := listBucketResult{}
- err = xmlDecoder(resp.Body, &listBucketResult)
- if err != nil {
- return listBucketResult, err
- }
- return listBucketResult, nil
-}
-
-// ListIncompleteUploads - List incompletely uploaded multipart objects.
-//
-// ListIncompleteUploads lists all incomplete objects matching the
-// objectPrefix from the specified bucket. If recursion is enabled
-// it lists all subdirectories and all their contents.
-//
-// The input parameters are bucketName, objectPrefix, recursive
-// and a done channel to proactively close the internal goroutine.
-// If recursive is 'true' this function returns all
-// the multipart objects in the given bucket name.
-//
-// api := client.New(....)
-// // Create a done channel.
-// doneCh := make(chan struct{})
-// defer close(doneCh)
-//	// Recursively list all objects in 'mytestbucket'
-// recursive := true
-//	for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
-// fmt.Println(message)
-// }
-//
-func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
- // Turn on size aggregation of individual parts.
- isAggregateSize := true
- return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
-}
-
-// listIncompleteUploads lists all incomplete uploads.
-func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
- // Allocate channel for multipart uploads.
- objectMultipartStatCh := make(chan ObjectMultipartInfo, 1000)
- // Delimiter is set to "/" by default.
- delimiter := "/"
- if recursive {
- // If recursive do not delimit.
- delimiter = ""
- }
- // Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
- defer close(objectMultipartStatCh)
- objectMultipartStatCh <- ObjectMultipartInfo{
- Err: err,
- }
- return objectMultipartStatCh
- }
- // Validate incoming object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
- defer close(objectMultipartStatCh)
- objectMultipartStatCh <- ObjectMultipartInfo{
- Err: err,
- }
- return objectMultipartStatCh
- }
- go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
- defer close(objectMultipartStatCh)
- // object and upload ID marker for future requests.
- var objectMarker string
- var uploadIDMarker string
- for {
- // list all multipart uploads.
- result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
- if err != nil {
- objectMultipartStatCh <- ObjectMultipartInfo{
- Err: err,
- }
- return
- }
- // Save objectMarker and uploadIDMarker for next request.
- objectMarker = result.NextKeyMarker
- uploadIDMarker = result.NextUploadIDMarker
- // Send all multipart uploads.
- for _, obj := range result.Uploads {
- // Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
- if aggregateSize {
- // Get total multipart size.
- obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
- if err != nil {
- objectMultipartStatCh <- ObjectMultipartInfo{
- Err: err,
- }
- }
- }
- select {
- // Send individual uploads here.
- case objectMultipartStatCh <- obj:
-				// If the done channel is closed, return here.
- case <-doneCh:
- return
- }
- }
- // Send all common prefixes if any.
- // NOTE: prefixes are only present if the request is delimited.
- for _, obj := range result.CommonPrefixes {
- object := ObjectMultipartInfo{}
- object.Key = obj.Prefix
- object.Size = 0
- select {
- // Send delimited prefixes here.
- case objectMultipartStatCh <- object:
-				// If the done channel is closed, return here.
- case <-doneCh:
- return
- }
- }
-			// Listing ends if the result is not truncated; return right here.
- if !result.IsTruncated {
- return
- }
- }
- }(objectMultipartStatCh)
- // return.
- return objectMultipartStatCh
-}
-
-// listMultipartUploadsQuery - (List Multipart Uploads).
-// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
-//
-// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
-// request parameters. :-
-// ---------
-// ?key-marker - Specifies the multipart upload after which listing should begin.
-// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
-// ?delimiter - A delimiter is a character you use to group keys.
-// ?prefix - Limits the response to keys that begin with the specified prefix.
-// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
-func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) {
- // Get resources properly escaped and lined up before using them in http request.
- urlValues := make(url.Values)
- // Set uploads.
- urlValues.Set("uploads", "")
- // Set object key marker.
- if keyMarker != "" {
- urlValues.Set("key-marker", keyMarker)
- }
- // Set upload id marker.
- if uploadIDMarker != "" {
- urlValues.Set("upload-id-marker", uploadIDMarker)
- }
- // Set prefix marker.
- if prefix != "" {
- urlValues.Set("prefix", prefix)
- }
- // Set delimiter.
- if delimiter != "" {
- urlValues.Set("delimiter", delimiter)
- }
-
- // maxUploads should be 1000 or less.
- if maxUploads == 0 || maxUploads > 1000 {
- maxUploads = 1000
- }
- // Set max-uploads.
- urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
-
- // Execute GET on bucketName to list multipart uploads.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- })
- defer closeResponse(resp)
- if err != nil {
- return listMultipartUploadsResult{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return listMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
- }
- }
- // Decode response body.
- listMultipartUploadsResult := listMultipartUploadsResult{}
- err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
- if err != nil {
- return listMultipartUploadsResult, err
- }
- return listMultipartUploadsResult, nil
-}
-
-// listObjectParts lists all uploaded parts of an object, 1000 at a time.
-func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]objectPart, err error) {
-	// Part number marker for the next batch of requests.
- var nextPartNumberMarker int
- partsInfo = make(map[int]objectPart)
- for {
- // Get list of uploaded parts a maximum of 1000 per request.
- listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
- if err != nil {
- return nil, err
- }
- // Append to parts info.
- for _, part := range listObjPartsResult.ObjectParts {
-			// Trim the surrounding double quotes from the ETag.
- part.ETag = strings.TrimPrefix(part.ETag, "\"")
- part.ETag = strings.TrimSuffix(part.ETag, "\"")
- partsInfo[part.PartNumber] = part
- }
- // Keep part number marker, for the next iteration.
- nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
-		// Listing ends when the result is not truncated; return right here.
- if !listObjPartsResult.IsTruncated {
- break
- }
- }
-
- // Return all the parts.
- return partsInfo, nil
-}
-
-// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name.
-func (c Client) findUploadID(bucketName, objectName string) (uploadID string, err error) {
- // Make list incomplete uploads recursive.
- isRecursive := true
- // Turn off size aggregation of individual parts, in this request.
- isAggregateSize := false
- // latestUpload to track the latest multipart info for objectName.
- var latestUpload ObjectMultipartInfo
- // Create done channel to cleanup the routine.
- doneCh := make(chan struct{})
- defer close(doneCh)
- // List all incomplete uploads.
- for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
- if mpUpload.Err != nil {
- return "", mpUpload.Err
- }
- if objectName == mpUpload.Key {
- if mpUpload.Initiated.Sub(latestUpload.Initiated) > 0 {
- latestUpload = mpUpload
- }
- }
- }
- // Return the latest upload id.
- return latestUpload.UploadID, nil
-}
-
-// getTotalMultipartSize - calculate total uploaded size for a given multipart object.
-func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
- // Iterate over all parts and aggregate the size.
- partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- for _, partInfo := range partsInfo {
- size += partInfo.Size
- }
- return size, nil
-}
-
-// listObjectPartsQuery (List Parts query)
-// - lists some or all (up to 1000) parts that have been uploaded
-// for a specific multipart upload
-//
-// You can use the request parameters as selection criteria to return
-// a subset of the parts for an upload, request parameters :-
-// ---------
-// ?part-number-marker - Specifies the part after which listing should
-// begin.
-// ?max-parts - Maximum parts to be listed per request.
-func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
- // Get resources properly escaped and lined up before using them in http request.
- urlValues := make(url.Values)
- // Set part number marker.
- urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
- // Set upload id.
- urlValues.Set("uploadId", uploadID)
-
- // maxParts should be 1000 or less.
- if maxParts == 0 || maxParts > 1000 {
- maxParts = 1000
- }
- // Set max parts.
- urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
-
- // Execute GET on objectName to get list of parts.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- })
- defer closeResponse(resp)
- if err != nil {
- return listObjectPartsResult{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return listObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
- }
- }
- // Decode list object parts XML.
- listObjectPartsResult := listObjectPartsResult{}
- err = xmlDecoder(resp.Body, &listObjectPartsResult)
- if err != nil {
- return listObjectPartsResult, err
- }
- return listObjectPartsResult, nil
-}
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
deleted file mode 100644
index 0d3285253..000000000
--- a/vendor/github.com/minio/minio-go/api-presigned.go
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "errors"
- "net/url"
- "time"
-)
-
-// supportedGetReqParams - supported request parameters for GET
-// presigned request.
-var supportedGetReqParams = map[string]struct{}{
- "response-expires": struct{}{},
- "response-content-type": struct{}{},
- "response-cache-control": struct{}{},
- "response-content-disposition": struct{}{},
-}
-
-// presignURL - Returns a presigned URL for an input 'method'.
-// Expires maximum is 7 days (i.e. 604800 seconds) and minimum is 1 second.
-func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (urlStr string, err error) {
- // Input validation.
- if method == "" {
- return "", ErrInvalidArgument("method cannot be empty.")
- }
- if err := isValidBucketName(bucketName); err != nil {
- return "", err
- }
- if err := isValidObjectName(objectName); err != nil {
- return "", err
- }
- if err := isValidExpiry(expires); err != nil {
- return "", err
- }
-
- // Convert expires into seconds.
- expireSeconds := int64(expires / time.Second)
- reqMetadata := requestMetadata{
- presignURL: true,
- bucketName: bucketName,
- objectName: objectName,
- expires: expireSeconds,
- }
-
- // For "GET" we are handling additional request parameters to
- // override its response headers.
- if method == "GET" {
- // Verify if input map has unsupported params, if yes exit.
- for k := range reqParams {
- if _, ok := supportedGetReqParams[k]; !ok {
- return "", ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
- }
- }
- // Save the request parameters to be used in presigning for
- // GET request.
- reqMetadata.queryValues = reqParams
- }
-
- // Instantiate a new request.
- // Since expires is set newRequest will presign the request.
- req, err := c.newRequest(method, reqMetadata)
- if err != nil {
- return "", err
- }
- return req.URL.String(), nil
-}
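A sketch of how the reqParams plumbing is meant to be used from the caller's side: overriding Content-Disposition on a presigned GET. Endpoint, credentials and object names are placeholders; only the four response-* keys in supportedGetReqParams are accepted:

```go
package main

import (
	"fmt"
	"net/url"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Override the Content-Disposition the object will be served with.
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", "attachment; filename=\"report.csv\"")
	presignedURL, err := s3Client.PresignedGetObject("mytestbucket", "report.csv", 24*time.Hour, reqParams)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(presignedURL)
}
```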
-
-// PresignedGetObject - Returns a presigned URL to access an object
-// without credentials. Expires maximum is 7 days (i.e. 604800 seconds)
-// and minimum is 1 second. Additionally you can override a set of response
-// headers using the query parameters.
-func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (url string, err error) {
- return c.presignURL("GET", bucketName, objectName, expires, reqParams)
-}
-
-// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
-// Expires maximum is 7 days (i.e. 604800 seconds) and minimum is 1 second.
-func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (url string, err error) {
- return c.presignURL("PUT", bucketName, objectName, expires, nil)
-}
-
-// PresignedPostPolicy - Returns POST form data to upload an object at a location.
-func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
- // Validate input arguments.
- if p.expiration.IsZero() {
- return nil, errors.New("Expiration time must be specified")
- }
- if _, ok := p.formData["key"]; !ok {
- return nil, errors.New("object key must be specified")
- }
- if _, ok := p.formData["bucket"]; !ok {
- return nil, errors.New("bucket name must be specified")
- }
-
- bucketName := p.formData["bucket"]
- // Fetch the bucket location.
- location, err := c.getBucketLocation(bucketName)
- if err != nil {
- return nil, err
- }
-
- // Keep time.
- t := time.Now().UTC()
- // For signature version '2' handle here.
- if c.signature.isV2() {
- policyBase64 := p.base64()
- p.formData["policy"] = policyBase64
- // For Google endpoint set this value to be 'GoogleAccessId'.
- if isGoogleEndpoint(c.endpointURL) {
- p.formData["GoogleAccessId"] = c.accessKeyID
- } else {
- // For all other endpoints set this value to be 'AWSAccessKeyId'.
- p.formData["AWSAccessKeyId"] = c.accessKeyID
- }
- // Sign the policy.
- p.formData["signature"] = postPresignSignatureV2(policyBase64, c.secretAccessKey)
- return p.formData, nil
- }
-
- // Add date policy.
- if err = p.addNewPolicy(policyCondition{
- matchType: "eq",
- condition: "$x-amz-date",
- value: t.Format(iso8601DateFormat),
- }); err != nil {
- return nil, err
- }
-
- // Add algorithm policy.
- if err = p.addNewPolicy(policyCondition{
- matchType: "eq",
- condition: "$x-amz-algorithm",
- value: signV4Algorithm,
- }); err != nil {
- return nil, err
- }
-
- // Add a credential policy.
- credential := getCredential(c.accessKeyID, location, t)
- if err = p.addNewPolicy(policyCondition{
- matchType: "eq",
- condition: "$x-amz-credential",
- value: credential,
- }); err != nil {
- return nil, err
- }
-
- // Get base64 encoded policy.
- policyBase64 := p.base64()
- // Fill in the form data.
- p.formData["policy"] = policyBase64
- p.formData["x-amz-algorithm"] = signV4Algorithm
- p.formData["x-amz-credential"] = credential
- p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
- p.formData["x-amz-signature"] = postPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
- return p.formData, nil
-}
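For completeness, a hedged usage sketch of PresignedPostPolicy, assuming the PostPolicy constructor and setters (NewPostPolicy, SetBucket, SetKey, SetExpires) from the same package vintage; the returned map holds the form fields a browser POST must include:

```go
package main

import (
	"fmt"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		fmt.Println(err)
		return
	}
	policy := minio.NewPostPolicy()
	policy.SetBucket("mytestbucket")
	policy.SetKey("uploads/photo.jpg")
	policy.SetExpires(time.Now().UTC().Add(24 * time.Hour))
	formData, err := s3Client.PresignedPostPolicy(policy)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Each key/value pair becomes one field of the multipart form POST.
	for k, v := range formData {
		fmt.Printf("%s=%s\n", k, v)
	}
}
```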
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
deleted file mode 100644
index c0bb210c9..000000000
--- a/vendor/github.com/minio/minio-go/api-put-bucket.go
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/hex"
- "encoding/json"
- "encoding/xml"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
-)
-
-/// Bucket operations
-
-// MakeBucket creates a new bucket with bucketName.
-//
-// Location is an optional argument, by default all buckets are
-// created in US Standard Region.
-//
-// For more supported Amazon S3 regions, see http://docs.aws.amazon.com/general/latest/gr/rande.html
-// For more supported Google Cloud Storage regions, see https://cloud.google.com/storage/docs/bucket-locations
-func (c Client) MakeBucket(bucketName string, location string) error {
- // Validate the input arguments.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
-
-	// If location is empty, treat it as the default region 'us-east-1'.
- if location == "" {
- location = "us-east-1"
- }
-
- // Instantiate the request.
- req, err := c.makeBucketRequest(bucketName, location)
- if err != nil {
- return err
- }
-
- // Execute the request.
- resp, err := c.do(req)
- defer closeResponse(resp)
- if err != nil {
- return err
- }
-
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return httpRespToErrorResponse(resp, bucketName, "")
- }
- }
-
- // Save the location into cache on a successful makeBucket response.
- c.bucketLocCache.Set(bucketName, location)
-
- // Return.
- return nil
-}
-
-// makeBucketRequest constructs request for makeBucket.
-func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
- // Validate input arguments.
- if err := isValidBucketName(bucketName); err != nil {
- return nil, err
- }
-
-	// In the case of Amazon S3, a make-bucket request issued on an
-	// already existing bucket would fail with an 'AuthorizationMalformed'
-	// error if virtual-host style is used. So we default to 'path style'
-	// as that is the preferred method here. The final location of the
-	// 'bucket' is provided through XML LocationConstraint data with
-	// the request.
- targetURL := *c.endpointURL
- targetURL.Path = "/" + bucketName + "/"
-
- // get a new HTTP request for the method.
- req, err := http.NewRequest("PUT", targetURL.String(), nil)
- if err != nil {
- return nil, err
- }
-
- // set UserAgent for the request.
- c.setUserAgent(req)
-
- // set sha256 sum for signature calculation only with signature version '4'.
- if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
- }
-
- // If location is not 'us-east-1' create bucket location config.
- if location != "us-east-1" && location != "" {
- createBucketConfig := createBucketConfiguration{}
- createBucketConfig.Location = location
- var createBucketConfigBytes []byte
- createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
- if err != nil {
- return nil, err
- }
- createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
- req.Body = ioutil.NopCloser(createBucketConfigBuffer)
- req.ContentLength = int64(len(createBucketConfigBytes))
- // Set content-md5.
- req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
- if c.signature.isV4() {
- // Set sha256.
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
- }
- }
-
- // Sign the request.
- if c.signature.isV4() {
- // Signature calculated for MakeBucket request should be for 'us-east-1',
- // regardless of the bucket's location constraint.
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
- } else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
- }
-
- // Return signed request.
- return req, nil
-}
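The LocationConstraint body that makeBucketRequest marshals for non-'us-east-1' regions is tiny. A standalone sketch with a stand-in for the unexported createBucketConfiguration type (the real type may also carry an xmlns attribute):

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Stand-in for the library's unexported createBucketConfiguration type.
type createBucketConfiguration struct {
	XMLName  xml.Name `xml:"CreateBucketConfiguration"`
	Location string   `xml:"LocationConstraint"`
}

func main() {
	out, err := xml.Marshal(createBucketConfiguration{Location: "eu-west-1"})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(out))
	// <CreateBucketConfiguration><LocationConstraint>eu-west-1</LocationConstraint></CreateBucketConfiguration>
}
```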
-
-// SetBucketPolicy sets the access permissions on an existing bucket.
-//
-// For example
-//
-// none - owner gets full access [default].
-// readonly - anonymous get access for everyone at a given object prefix.
-// readwrite - anonymous list/put/delete access to a given object prefix.
-// writeonly - anonymous put/delete access to a given object prefix.
-func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy BucketPolicy) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
- if err := isValidObjectPrefix(objectPrefix); err != nil {
- return err
- }
- if !bucketPolicy.isValidBucketPolicy() {
- return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
- }
- policy, err := c.getBucketPolicy(bucketName, objectPrefix)
- if err != nil {
- return err
- }
-	// For a bucket policy of 'none' with no existing statements there is nothing to remove.
-	if bucketPolicy == BucketPolicyNone && policy.Statements == nil {
- // No policies to set, return success.
- return nil
- }
- // Remove any previous policies at this path.
- policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
-
- bucketResourceStatement := &Statement{}
- objectResourceStatement := &Statement{}
- if bucketPolicy == BucketPolicyReadWrite {
- // Read write policy.
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = readWriteBucketActions
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = readWriteObjectActions
- // Save the read write policy.
- policy.Statements = append(policy.Statements, *bucketResourceStatement, *objectResourceStatement)
- } else if bucketPolicy == BucketPolicyReadOnly {
- // Read only policy.
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = readOnlyBucketActions
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = readOnlyObjectActions
- // Save the read only policy.
- policy.Statements = append(policy.Statements, *bucketResourceStatement, *objectResourceStatement)
- } else if bucketPolicy == BucketPolicyWriteOnly {
- // Write only policy.
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = writeOnlyBucketActions
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = writeOnlyObjectActions
- // Save the write only policy.
- policy.Statements = append(policy.Statements, *bucketResourceStatement, *objectResourceStatement)
- }
- // Save the updated policies.
- return c.putBucketPolicy(bucketName, policy)
-}
-
-// Saves a new bucket policy.
-func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
-
-	// If there are no policy statements, we should remove the entire policy.
- if len(policy.Statements) == 0 {
- return c.removeBucketPolicy(bucketName)
- }
-
- // Get resources properly escaped and lined up before
- // using them in http request.
- urlValues := make(url.Values)
- urlValues.Set("policy", "")
-
- policyBytes, err := json.Marshal(&policy)
- if err != nil {
- return err
- }
-
- policyBuffer := bytes.NewReader(policyBytes)
- reqMetadata := requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentBody: policyBuffer,
- contentLength: int64(len(policyBytes)),
- contentMD5Bytes: sumMD5(policyBytes),
- contentSHA256Bytes: sum256(policyBytes),
- }
-
- // Execute PUT to upload a new bucket policy.
- resp, err := c.executeMethod("PUT", reqMetadata)
- defer closeResponse(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusNoContent {
- return httpRespToErrorResponse(resp, bucketName, "")
- }
- }
- return nil
-}
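A short usage sketch of SetBucketPolicy with placeholder credentials: granting anonymous read access under a prefix generates the bucket- and object-level statements built above.

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Anonymous read access for everything under 'public/' in 'mytestbucket'.
	if err := s3Client.SetBucketPolicy("mytestbucket", "public/", minio.BucketPolicyReadOnly); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("read-only policy applied")
}
```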
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
deleted file mode 100644
index 937c74d46..000000000
--- a/vendor/github.com/minio/minio-go/api-put-object-common.go
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "crypto/md5"
- "crypto/sha256"
- "fmt"
- "hash"
- "io"
- "math"
- "os"
-)
-
-// Verify if reader is *os.File
-func isFile(reader io.Reader) (ok bool) {
- _, ok = reader.(*os.File)
- return
-}
-
-// Verify if reader is *minio.Object
-func isObject(reader io.Reader) (ok bool) {
- _, ok = reader.(*Object)
- return
-}
-
-// Verify if reader is a generic ReaderAt
-func isReadAt(reader io.Reader) (ok bool) {
- _, ok = reader.(io.ReaderAt)
- return
-}
-
-// shouldUploadPart - verify if part should be uploaded.
-func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
-	// If the part is not found, it should be uploaded.
- uploadedPart, found := objectParts[objPart.PartNumber]
- if !found {
- return true
- }
-	// If the size mismatches, the part should be uploaded again.
- if objPart.Size != uploadedPart.Size {
- return true
- }
-	// If the md5sum mismatches, the part should be uploaded again.
- if objPart.ETag != uploadedPart.ETag {
- return true
- }
- return false
-}
-
-// optimalPartInfo - calculate the optimal part info for a given
-// object size.
-//
-// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
-// object storage it will have the following parameters as constants.
-//
-// maxPartsCount - 10000
-// minPartSize - 5MiB
-// maxMultipartPutObjectSize - 5TiB
-//
-func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
-	// If object size is '-1', set it to 5TiB.
- if objectSize == -1 {
- objectSize = maxMultipartPutObjectSize
- }
-	// If object size is larger than the supported maximum, fail.
- if objectSize > maxMultipartPutObjectSize {
- err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
- return
- }
- // Use floats for part size for all calculations to avoid
- // overflows during float64 to int64 conversions.
- partSizeFlt := math.Ceil(float64(objectSize / maxPartsCount))
- partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
- // Total parts count.
- totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
- // Part size.
- partSize = int64(partSizeFlt)
- // Last part size.
- lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
- return totalPartsCount, partSize, lastPartSize, nil
-}
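A worked example of optimalPartInfo's arithmetic for a 1GiB object, using the constants from the NOTE above (10000 parts max, 5MiB minimum part size). The sketch performs the first division in floating point; for these inputs the result matches the integer-division form used above:

```go
package main

import (
	"fmt"
	"math"
)

const (
	maxPartsCount = 10000
	minPartSize   = 5 * 1024 * 1024 // 5MiB
)

func main() {
	objectSize := int64(1 << 30) // 1GiB
	// Part size: at least objectSize/maxPartsCount, rounded up to a
	// multiple of minPartSize.
	partSizeFlt := math.Ceil(float64(objectSize) / maxPartsCount)
	partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
	partSize := int64(partSizeFlt)
	totalPartsCount := int(math.Ceil(float64(objectSize) / partSizeFlt))
	lastPartSize := objectSize - int64(totalPartsCount-1)*partSize
	fmt.Println(partSize, totalPartsCount, lastPartSize)
	// 5242880 205 4194304: 204 full 5MiB parts plus one final 4MiB part.
}
```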
-
-// hashCopyBuffer is identical to hashCopyN except that it doesn't take
-// a size argument; it takes a buffer argument instead, and the reader
-// must implement the io.ReaderAt interface.
-//
-// Stages reads from offsets into the buffer; if the buffer is nil it is
-// initialized to optimalReadBufferSize.
-func (c Client) hashCopyBuffer(writer io.Writer, reader io.ReaderAt, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
- // MD5 and SHA256 hasher.
- var hashMD5, hashSHA256 hash.Hash
- // MD5 and SHA256 hasher.
- hashMD5 = md5.New()
- hashWriter := io.MultiWriter(writer, hashMD5)
- if c.signature.isV4() {
- hashSHA256 = sha256.New()
- hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
- }
-
- // Buffer is nil, initialize.
- if buf == nil {
- buf = make([]byte, optimalReadBufferSize)
- }
-
- // Offset to start reading from.
- var readAtOffset int64
-
-	// The following block reads data at an offset from the input
-	// reader and copies it to the destination hash writer.
- for {
- readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
- if rerr != nil {
- if rerr != io.EOF {
- return nil, nil, 0, rerr
- }
- }
- writeSize, werr := hashWriter.Write(buf[:readAtSize])
- if werr != nil {
- return nil, nil, 0, werr
- }
- if readAtSize != writeSize {
- return nil, nil, 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
- }
- readAtOffset += int64(writeSize)
- size += int64(writeSize)
- if rerr == io.EOF {
- break
- }
- }
-
- // Finalize md5 sum and sha256 sum.
- md5Sum = hashMD5.Sum(nil)
- if c.signature.isV4() {
- sha256Sum = hashSHA256.Sum(nil)
- }
- return md5Sum, sha256Sum, size, err
-}
-
-// hashCopy is identical to hashCopyN except that it doesn't take
-// any size argument.
-func (c Client) hashCopy(writer io.Writer, reader io.Reader) (md5Sum, sha256Sum []byte, size int64, err error) {
- // MD5 and SHA256 hasher.
- var hashMD5, hashSHA256 hash.Hash
- // MD5 and SHA256 hasher.
- hashMD5 = md5.New()
- hashWriter := io.MultiWriter(writer, hashMD5)
- if c.signature.isV4() {
- hashSHA256 = sha256.New()
- hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
- }
-
-	// Copy the full stream through the hash writer in a single pass.
- size, err = io.Copy(hashWriter, reader)
- if err != nil {
- return nil, nil, 0, err
- }
-
- // Finalize md5 sum and sha256 sum.
- md5Sum = hashMD5.Sum(nil)
- if c.signature.isV4() {
- sha256Sum = hashSHA256.Sum(nil)
- }
- return md5Sum, sha256Sum, size, err
-}
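The hashWriter trick above is plain io.MultiWriter fan-out: one pass over the stream feeds MD5, SHA256 and, in hashCopy, the staging writer simultaneously. A minimal standalone sketch:

```go
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	hashMD5 := md5.New()
	hashSHA256 := sha256.New()
	// A single read of the source feeds both hashers; a staging file
	// could be added as a third writer, exactly as hashCopy does.
	n, err := io.Copy(io.MultiWriter(hashMD5, hashSHA256), strings.NewReader("hello"))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(n, hex.EncodeToString(hashMD5.Sum(nil)), hex.EncodeToString(hashSHA256.Sum(nil)))
}
```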
-
-// hashCopyN - Calculates MD5 and SHA256 sums for up to partSize bytes.
-func (c Client) hashCopyN(writer io.Writer, reader io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) {
- // MD5 and SHA256 hasher.
- var hashMD5, hashSHA256 hash.Hash
- // MD5 and SHA256 hasher.
- hashMD5 = md5.New()
- hashWriter := io.MultiWriter(writer, hashMD5)
- if c.signature.isV4() {
- hashSHA256 = sha256.New()
- hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
- }
-
-	// Copy up to partSize bytes from reader into the hash writer.
- size, err = io.CopyN(hashWriter, reader, partSize)
- if err != nil {
- // If not EOF return error right here.
- if err != io.EOF {
- return nil, nil, 0, err
- }
- }
-
-	// Finalize md5 sum and sha256 sum.
- md5Sum = hashMD5.Sum(nil)
- if c.signature.isV4() {
- sha256Sum = hashSHA256.Sum(nil)
- }
- return md5Sum, sha256Sum, size, err
-}
-
-// getUploadID - fetch upload id if already present for an object name
-// or initiate a new request to fetch a new upload id.
-func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return "", false, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return "", false, err
- }
-
- // Set content Type to default if empty string.
- if contentType == "" {
- contentType = "application/octet-stream"
- }
-
- // Find upload id for previous upload for an object.
- uploadID, err = c.findUploadID(bucketName, objectName)
- if err != nil {
- return "", false, err
- }
- if uploadID == "" {
- // Initiate multipart upload for an object.
- initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
- if err != nil {
- return "", false, err
- }
- // Save the new upload id.
- uploadID = initMultipartUploadResult.UploadID
- // Indicate that this is a new upload id.
- isNew = true
- }
- return uploadID, isNew, nil
-}
-
-// computeHash - Calculates MD5 and SHA256 for an input io.ReadSeeker.
-func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
- // MD5 and SHA256 hasher.
- var hashMD5, hashSHA256 hash.Hash
- // MD5 and SHA256 hasher.
- hashMD5 = md5.New()
- hashWriter := io.MultiWriter(hashMD5)
- if c.signature.isV4() {
- hashSHA256 = sha256.New()
- hashWriter = io.MultiWriter(hashMD5, hashSHA256)
- }
-
-	// If no buffer is provided, there is no need to allocate one; just use io.Copy.
- size, err = io.Copy(hashWriter, reader)
- if err != nil {
- return nil, nil, 0, err
- }
-
- // Seek back reader to the beginning location.
- if _, err := reader.Seek(0, 0); err != nil {
- return nil, nil, 0, err
- }
-
-	// Finalize md5 sum and sha256 sum.
- md5Sum = hashMD5.Sum(nil)
- if c.signature.isV4() {
- sha256Sum = hashSHA256.Sum(nil)
- }
- return md5Sum, sha256Sum, size, nil
-}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go
deleted file mode 100644
index d4772c53a..000000000
--- a/vendor/github.com/minio/minio-go/api-put-object-file.go
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "encoding/hex"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "sort"
-)
-
-// FPutObject - Create an object in a bucket, with contents from file at filePath.
-func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
-
- // Open the referenced file.
- fileReader, err := os.Open(filePath)
-	// If there is any error, fail quickly here.
- if err != nil {
- return 0, err
- }
- defer fileReader.Close()
-
- // Save the file stat.
- fileStat, err := fileReader.Stat()
- if err != nil {
- return 0, err
- }
-
- // Save the file size.
- fileSize := fileStat.Size()
-
- // Check for largest object size allowed.
- if fileSize > int64(maxMultipartPutObjectSize) {
- return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
- }
-
- // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
- // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
- if isGoogleEndpoint(c.endpointURL) {
- if fileSize > int64(maxSinglePutObjectSize) {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
- Key: objectName,
- BucketName: bucketName,
- }
- }
- // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
- }
-
- // NOTE: S3 doesn't allow anonymous multipart requests.
- if isAmazonEndpoint(c.endpointURL) && c.anonymous {
- if fileSize > int64(maxSinglePutObjectSize) {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize),
- Key: objectName,
- BucketName: bucketName,
- }
- }
- // Do not compute MD5 for anonymous requests to Amazon
- // S3. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
- }
-
-	// Small object upload is initiated for input data sizes smaller than 5MiB.
- if fileSize < minPartSize && fileSize >= 0 {
- return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
- }
- // Upload all large objects as multipart.
- n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType, nil)
- if err != nil {
- errResp := ToErrorResponse(err)
-		// If multipart functionality is not available, fall back
-		// to a single PutObject operation.
- if errResp.Code == "NotImplemented" {
- // If size of file is greater than '5GiB' fail.
- if fileSize > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Fall back to uploading as single PutObject operation.
- return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
- }
- return n, err
- }
- return n, nil
-}
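A usage sketch of FPutObject with placeholder endpoint, credentials and paths; files of 5MiB and larger take the resumable multipart path defined just below, smaller files a single PUT:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Anything >= 5MiB is uploaded as resumable multipart parts;
	// smaller files go through putObjectSingle.
	n, err := s3Client.FPutObject("mytestbucket", "backups/backup.tar.gz", "/tmp/backup.tar.gz", "application/gzip")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("uploaded", n, "bytes")
}
```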
-
-// putObjectMultipartFromFile - Creates object from contents of *os.File
-//
-// NOTE: This function is meant to be used for readers backed by a local
-// file, as in *os.File. It resumes an upload by skipping parts which were
-// already uploaded, verifying them against the MD5 sum of each individual
-// part. It also effectively utilizes the file system's ability to read
-// from specific sections, avoiding the need for temporary files.
-func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, contentType string, progress io.Reader) (int64, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
-
- // Get upload id for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
- if err != nil {
- return 0, err
- }
-
-	// Total data read and written to the server; should equal 'size' at the end of the call.
- var totalUploadedSize int64
-
- // Complete multipart upload.
- var completeMultipartUpload completeMultipartUpload
-
- // A map of all uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
-	// If this session is a continuation of a previous session, fetch all
- // previously uploaded parts info.
- if !isNew {
-		// Fetch previously uploaded parts and maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
- // Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, _, err := optimalPartInfo(fileSize)
- if err != nil {
- return 0, err
- }
-
- // Part number always starts with '1'.
- partNumber := 1
-
- for partNumber <= totalPartsCount {
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize)
-
- // Calculates MD5 and SHA256 sum for a section reader.
- var md5Sum, sha256Sum []byte
- var prtSize int64
- md5Sum, sha256Sum, prtSize, err = c.computeHash(sectionReader)
- if err != nil {
- return 0, err
- }
-
- var reader io.Reader
- // Update progress reader appropriately to the latest offset
- // as we read from the source.
- reader = newHook(sectionReader, progress)
-
- // Verify if part should be uploaded.
- if shouldUploadPart(objectPart{
- ETag: hex.EncodeToString(md5Sum),
- PartNumber: partNumber,
- Size: prtSize,
- }, partsInfo) {
- // Proceed to upload the part.
- var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
- md5Sum, sha256Sum, prtSize)
- if err != nil {
- return totalUploadedSize, err
- }
- // Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
- } else {
- // Update the progress reader for the skipped part.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
- return totalUploadedSize, err
- }
- }
- }
-
- // Save successfully uploaded size.
- totalUploadedSize += prtSize
-
- // Increment part number.
- partNumber++
- }
-
- // Verify if we uploaded all data.
- if totalUploadedSize != fileSize {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
- }
-
- // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
- for _, part := range partsInfo {
- var complPart completePart
- complPart.ETag = part.ETag
- complPart.PartNumber = part.PartNumber
- completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
- }
-
- // Verify if totalPartsCount is not equal to total list of parts.
- if totalPartsCount != len(completeMultipartUpload.Parts) {
- return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
- }
-
- // Sort all completed parts.
- sort.Sort(completedParts(completeMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
- if err != nil {
- return totalUploadedSize, err
- }
-
- // Return final size.
- return totalUploadedSize, nil
-}
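The io.NewSectionReader call above is what lets the resume logic re-read (and re-hash) any byte range of the source file without staging temporary copies. A tiny standalone demonstration:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("abcdefghij") // any io.ReaderAt works here
	// Read "part 2" of a 4-bytes-per-part layout without touching the rest.
	section := io.NewSectionReader(r, 4, 4)
	buf := make([]byte, 4)
	n, _ := section.Read(buf)
	fmt.Println(string(buf[:n])) // efgh
}
```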
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
deleted file mode 100644
index 8b3db1039..000000000
--- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "encoding/hex"
- "encoding/xml"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "os"
- "sort"
- "strconv"
- "strings"
-)
-
-// Comprehensive put object operation involving multipart resumable uploads.
-//
-// The following code handles these types of readers:
-//
-//  - *os.File
-//  - *minio.Object
-//  - Any reader which has a method 'ReadAt()'
-//
-// If we exhaust all the known types, the code falls back to using the
-// stream as is, where each part is staged in a temporary buffer,
-// checksummed and verified before upload.
-func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
- if size > 0 && size >= minPartSize {
- // Verify if reader is *os.File, then use file system functionalities.
- if isFile(reader) {
- return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress)
- }
- // Verify if reader is *minio.Object or io.ReaderAt.
-		// NOTE: Verification of object is kept for a specific purpose:
-		// while it is going to be duck typed similar to io.ReaderAt,
-		// it indicates that *minio.Object implements io.ReaderAt,
-		// and such functionality is used in the subsequent code
-		// path.
- if isObject(reader) || isReadAt(reader) {
- return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress)
- }
- }
-	// For any other data size and reader type we do a generic multipart
-	// approach by staging data in a temporary buffer and uploading each part.
- return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress)
-}
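-
-// A minimal caller-side sketch (illustrative, not upstream code) of how the
-// reader type selects one of the strategies above, assuming each input is at
-// least minPartSize. Bucket and object names are placeholders; getReaderSize
-// is defined elsewhere in this package.
-func exampleMultipartDispatch(c Client, f *os.File, buf []byte, stream io.Reader) error {
-	size, err := getReaderSize(f)
-	if err != nil {
-		return err
-	}
-	// *os.File is detected by isFile() -> putObjectMultipartFromFile.
-	if _, err = c.putObjectMultipart("mybucket", "from-file", f, size, "", nil); err != nil {
-		return err
-	}
-	// *bytes.Reader implements io.ReaderAt -> putObjectMultipartFromReadAt.
-	br := bytes.NewReader(buf)
-	if _, err = c.putObjectMultipart("mybucket", "from-readat", br, br.Size(), "", nil); err != nil {
-		return err
-	}
-	// A plain stream of unknown size (-1) -> putObjectMultipartStream.
-	_, err = c.putObjectMultipart("mybucket", "from-stream", stream, -1, "", nil)
-	return err
-}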
-
-// putObjectMultipartStream uploads files bigger than 5MiB, and also supports
-// the special case where size is unknown, i.e. '-1'.
-func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
-
-	// Total data read and written to server; should be equal to 'size' at the end of the call.
- var totalUploadedSize int64
-
- // Complete multipart upload.
- var complMultipartUpload completeMultipartUpload
-
- // A map of all previously uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
-	// getUploadID for an object initiates a new multipart request
-	// if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
- if err != nil {
- return 0, err
- }
-
-	// If this session is a continuation of a previous session, fetch all
-	// previously uploaded parts info.
- if !isNew {
- // Fetch previously uploaded parts and maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
- // Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, _, err := optimalPartInfo(size)
- if err != nil {
- return 0, err
- }
-
- // Part number always starts with '1'.
- partNumber := 1
-
- // Initialize a temporary buffer.
- tmpBuffer := new(bytes.Buffer)
-
- for partNumber <= totalPartsCount {
- // Calculates MD5 and SHA256 sum while copying partSize bytes
- // into tmpBuffer.
- md5Sum, sha256Sum, prtSize, rErr := c.hashCopyN(tmpBuffer, reader, partSize)
- if rErr != nil {
- if rErr != io.EOF {
- return 0, rErr
- }
- }
-
- var reader io.Reader
- // Update progress reader appropriately to the latest offset
- // as we read from the source.
- reader = newHook(bytes.NewReader(tmpBuffer.Bytes()), progress)
-
- // Verify if part should be uploaded.
- if shouldUploadPart(objectPart{
- ETag: hex.EncodeToString(md5Sum),
- PartNumber: partNumber,
- Size: prtSize,
- }, partsInfo) {
- // Proceed to upload the part.
- var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize)
- if err != nil {
- // Reset the temporary buffer upon any error.
- tmpBuffer.Reset()
- return totalUploadedSize, err
- }
- // Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
- } else {
- // Update the progress reader for the skipped part.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
- return totalUploadedSize, err
- }
- }
- }
-
- // Reset the temporary buffer.
- tmpBuffer.Reset()
-
- // Save successfully uploaded size.
- totalUploadedSize += prtSize
-
-		// For unknown size, break away when we read EOF.
- // We do not have to upload till totalPartsCount.
- if size < 0 && rErr == io.EOF {
- break
- }
-
- // Increment part number.
- partNumber++
- }
-
- // Verify if we uploaded all the data.
- if size > 0 {
- if totalUploadedSize != size {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
- }
- }
-
- // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
- for _, part := range partsInfo {
- var complPart completePart
- complPart.ETag = part.ETag
- complPart.PartNumber = part.PartNumber
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
- }
-
- if size > 0 {
-		// Verify that totalPartsCount matches the number of completed parts.
- if totalPartsCount != len(complMultipartUpload.Parts) {
- return totalUploadedSize, ErrInvalidParts(partNumber, len(complMultipartUpload.Parts))
- }
- }
-
- // Sort all completed parts.
- sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
- if err != nil {
- return totalUploadedSize, err
- }
-
- // Return final size.
- return totalUploadedSize, nil
-}
-
-// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
-func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return initiateMultipartUploadResult{}, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return initiateMultipartUploadResult{}, err
- }
-
- // Initialize url queries.
- urlValues := make(url.Values)
- urlValues.Set("uploads", "")
-
- if contentType == "" {
- contentType = "application/octet-stream"
- }
-
- // Set ContentType header.
- customHeader := make(http.Header)
- customHeader.Set("Content-Type", contentType)
-
- reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- customHeader: customHeader,
- }
-
- // Execute POST on an objectName to initiate multipart upload.
- resp, err := c.executeMethod("POST", reqMetadata)
- defer closeResponse(resp)
- if err != nil {
- return initiateMultipartUploadResult{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
- }
- }
- // Decode xml for new multipart upload.
- initiateMultipartUploadResult := initiateMultipartUploadResult{}
- err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
- if err != nil {
- return initiateMultipartUploadResult, err
- }
- return initiateMultipartUploadResult, nil
-}
-
-// uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return objectPart{}, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return objectPart{}, err
- }
- if size > maxPartSize {
- return objectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
- }
- if size <= -1 {
- return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
- }
- if partNumber <= 0 {
- return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
- }
- if uploadID == "" {
- return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
- }
-
- // Get resources properly escaped and lined up before using them in http request.
- urlValues := make(url.Values)
- // Set part number.
- urlValues.Set("partNumber", strconv.Itoa(partNumber))
- // Set upload id.
- urlValues.Set("uploadId", uploadID)
-
- reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- contentBody: reader,
- contentLength: size,
- contentMD5Bytes: md5Sum,
- contentSHA256Bytes: sha256Sum,
- }
-
- // Execute PUT on each part.
- resp, err := c.executeMethod("PUT", reqMetadata)
- defer closeResponse(resp)
- if err != nil {
- return objectPart{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return objectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
- }
- }
- // Once successfully uploaded, return completed part.
- objPart := objectPart{}
- objPart.Size = size
- objPart.PartNumber = partNumber
-	// Trim off the odd double quotes from ETag at the beginning and end.
- objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
- objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
- return objPart, nil
-}
-
-// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
-func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return completeMultipartUploadResult{}, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return completeMultipartUploadResult{}, err
- }
-
- // Initialize url queries.
- urlValues := make(url.Values)
- urlValues.Set("uploadId", uploadID)
-
- // Marshal complete multipart body.
- completeMultipartUploadBytes, err := xml.Marshal(complete)
- if err != nil {
- return completeMultipartUploadResult{}, err
- }
-
-	// Instantiate the complete multipart upload body buffer.
- completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
- reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- contentBody: completeMultipartUploadBuffer,
- contentLength: int64(len(completeMultipartUploadBytes)),
- contentSHA256Bytes: sum256(completeMultipartUploadBytes),
- }
-
- // Execute POST to complete multipart upload for an objectName.
- resp, err := c.executeMethod("POST", reqMetadata)
- defer closeResponse(resp)
- if err != nil {
- return completeMultipartUploadResult{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
- }
- }
- // Decode completed multipart upload response on success.
- completeMultipartUploadResult := completeMultipartUploadResult{}
- err = xmlDecoder(resp.Body, &completeMultipartUploadResult)
- if err != nil {
- return completeMultipartUploadResult, err
- }
- return completeMultipartUploadResult, nil
-}
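-
-// Minimal end-to-end sketch (illustrative, not upstream code) tying together
-// the three primitives above: initiate, upload one part, complete. Real
-// callers go through putObjectMultipart, which adds hashing, resume and
-// progress handling. Names are placeholders.
-func exampleManualMultipart(c Client, payload []byte) error {
-	initiated, err := c.initiateMultipartUpload("mybucket", "manual.bin", "application/octet-stream")
-	if err != nil {
-		return err
-	}
-	part, err := c.uploadPart("mybucket", "manual.bin", initiated.UploadID,
-		bytes.NewReader(payload), 1, nil, nil, int64(len(payload)))
-	if err != nil {
-		return err
-	}
-	var complete completeMultipartUpload
-	complete.Parts = append(complete.Parts, completePart{ETag: part.ETag, PartNumber: part.PartNumber})
-	sort.Sort(completedParts(complete.Parts))
-	_, err = c.completeMultipartUpload("mybucket", "manual.bin", initiated.UploadID, complete)
-	return err
-}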
diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go
deleted file mode 100644
index ebbc380c3..000000000
--- a/vendor/github.com/minio/minio-go/api-put-object-progress.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import "io"
-
-// PutObjectWithProgress - With progress.
-func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
- if reader == nil {
- return 0, ErrInvalidArgument("Input reader is invalid, cannot be nil.")
- }
-
- // Size of the object.
- var size int64
-
- // Get reader size.
- size, err = getReaderSize(reader)
- if err != nil {
- return 0, err
- }
-
- // Check for largest object size allowed.
- if size > int64(maxMultipartPutObjectSize) {
- return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
- }
-
- // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
- // So we fall back to single PUT operation with the maximum limit of 5GiB.
- if isGoogleEndpoint(c.endpointURL) {
- if size <= -1 {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
- Key: objectName,
- BucketName: bucketName,
- }
- }
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
- }
-
- // NOTE: S3 doesn't allow anonymous multipart requests.
- if isAmazonEndpoint(c.endpointURL) && c.anonymous {
- if size <= -1 {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: "Content-Length cannot be negative for anonymous requests.",
- Key: objectName,
- BucketName: bucketName,
- }
- }
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Do not compute MD5 for anonymous requests to Amazon
- // S3. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
- }
-
-	// Put small objects with a single PUT operation.
- if size < minPartSize && size >= 0 {
- return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
- }
- // For all sizes greater than 5MiB do multipart.
- n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType, progress)
- if err != nil {
- errResp := ToErrorResponse(err)
- // Verify if multipart functionality is not available, if not
- // fall back to single PutObject operation.
- if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied." {
- // Verify if size of reader is greater than '5GiB'.
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Fall back to uploading as single PutObject operation.
- return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
- }
- return n, err
- }
- return n, nil
-}
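-
-// Illustrative sketch (not upstream code): the progress argument is any
-// io.Reader that the library advances by the number of bytes uploaded or
-// skipped. A trivial counter like this hypothetical one is enough:
-//
-//	c.PutObjectWithProgress("mybucket", "obj", data, "", &exampleByteCounter{})
-type exampleByteCounter struct{ n int64 }
-
-func (b *exampleByteCounter) Read(p []byte) (int, error) {
-	b.n += int64(len(p)) // each Read advances the counter by the bytes consumed
-	return len(p), nil
-}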
diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go
deleted file mode 100644
index 0501cbec5..000000000
--- a/vendor/github.com/minio/minio-go/api-put-object-readat.go
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "sort"
-)
-
-// shouldUploadPartReadAt - verify if part should be uploaded.
-func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) bool {
-	// If part not found, part should be uploaded.
- uploadedPart, found := objectParts[objPart.PartNumber]
- if !found {
- return true
- }
-	// If size mismatches, part should be uploaded.
- if uploadedPart.Size != objPart.Size {
- return true
- }
- return false
-}
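-
-// Quick illustration (not upstream code) of the resume check above: a part
-// already present with a matching size is skipped; anything else is re-uploaded.
-func exampleShouldUpload() (skip, upload bool) {
-	uploaded := map[int]objectPart{1: {PartNumber: 1, Size: 5 * 1024 * 1024}}
-	skip = !shouldUploadPartReadAt(objectPart{PartNumber: 1, Size: 5 * 1024 * 1024}, uploaded)
-	upload = shouldUploadPartReadAt(objectPart{PartNumber: 2, Size: 5 * 1024 * 1024}, uploaded)
-	return skip, upload // true, true
-}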
-
-// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports readers
-// of a type which implements the io.ReaderAt interface (ReadAt method).
-//
-// NOTE: This function is meant to be used for all readers which
-// implement io.ReaderAt, which allows us to resume multipart
-// uploads by reading at an offset, avoiding a re-read of the
-// data which was already uploaded. Internally this function stages
-// each part in a temporary buffer, which is reset once the part has
-// been uploaded.
-func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
-
- // Get upload id for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
- if err != nil {
- return 0, err
- }
-
-	// Total data read and written to server; should be equal to 'size' at the end of the call.
- var totalUploadedSize int64
-
- // Complete multipart upload.
- var complMultipartUpload completeMultipartUpload
-
- // A map of all uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // Fetch all parts info previously uploaded.
- if !isNew {
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
- // Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
- if err != nil {
- return 0, err
- }
-
- // Used for readability, lastPartNumber is always
- // totalPartsCount.
- lastPartNumber := totalPartsCount
-
- // partNumber always starts with '1'.
- partNumber := 1
-
- // Initialize a temporary buffer.
- tmpBuffer := new(bytes.Buffer)
-
-	// Reads default to a 5MiB buffer.
- readAtBuffer := make([]byte, optimalReadBufferSize)
-
- // Upload all the missing parts.
- for partNumber <= lastPartNumber {
-		// Verify if the object part is already uploaded.
- verifyObjPart := objectPart{
- PartNumber: partNumber,
- Size: partSize,
- }
- // Special case if we see a last part number, save last part
- // size as the proper part size.
- if partNumber == lastPartNumber {
- verifyObjPart = objectPart{
- PartNumber: lastPartNumber,
- Size: lastPartSize,
- }
- }
-
- // Verify if part should be uploaded.
- if !shouldUploadPartReadAt(verifyObjPart, partsInfo) {
-			// Increment part number for the skipped part.
- partNumber++
- if progress != nil {
- // Update the progress reader for the skipped part.
- if _, err = io.CopyN(ioutil.Discard, progress, verifyObjPart.Size); err != nil {
- return 0, err
- }
- }
- continue
- }
-
- // If partNumber was not uploaded we calculate the missing
- // part offset and size. For all other part numbers we
- // calculate offset based on multiples of partSize.
- readOffset := int64(partNumber-1) * partSize
- missingPartSize := partSize
-
- // As a special case if partNumber is lastPartNumber, we
- // calculate the offset based on the last part size.
- if partNumber == lastPartNumber {
- readOffset = (size - lastPartSize)
- missingPartSize = lastPartSize
- }
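-
-		// Worked example (illustrative): for size = 13MiB with partSize = 5MiB,
-		// optimalPartInfo yields totalPartsCount = 3 and lastPartSize = 3MiB;
-		// part 2 reads at offset (2-1)*5MiB for 5MiB, while part 3 reads at
-		// 13MiB-3MiB = 10MiB for 3MiB.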
-
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
-
- // Calculates MD5 and SHA256 sum for a section reader.
- var md5Sum, sha256Sum []byte
- var prtSize int64
- md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readAtBuffer)
- if err != nil {
- return 0, err
- }
-
- var reader io.Reader
- // Update progress reader appropriately to the latest offset
- // as we read from the source.
- reader = newHook(bytes.NewReader(tmpBuffer.Bytes()), progress)
-
- // Proceed to upload the part.
- var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize)
- if err != nil {
- // Reset the buffer upon any error.
- tmpBuffer.Reset()
- return 0, err
- }
-
- // Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
-
- // Increment part number here after successful part upload.
- partNumber++
-
- // Reset the buffer.
- tmpBuffer.Reset()
- }
-
- // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
- for _, part := range partsInfo {
- var complPart completePart
- complPart.ETag = part.ETag
- complPart.PartNumber = part.PartNumber
- totalUploadedSize += part.Size
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
- }
-
- // Verify if we uploaded all the data.
- if totalUploadedSize != size {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
- }
-
-	// Verify that totalPartsCount matches the number of completed parts.
- if totalPartsCount != len(complMultipartUpload.Parts) {
- return totalUploadedSize, ErrInvalidParts(totalPartsCount, len(complMultipartUpload.Parts))
- }
-
- // Sort all completed parts.
- sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
- if err != nil {
- return totalUploadedSize, err
- }
-
- // Return final size.
- return totalUploadedSize, nil
-}
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
deleted file mode 100644
index 1586580ed..000000000
--- a/vendor/github.com/minio/minio-go/api-put-object.go
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "reflect"
- "runtime"
- "strings"
-)
-
-// getReaderSize - Determine the size of Reader if available.
-func getReaderSize(reader io.Reader) (size int64, err error) {
- var result []reflect.Value
- size = -1
- if reader != nil {
- // Verify if there is a method by name 'Size'.
- lenFn := reflect.ValueOf(reader).MethodByName("Size")
- if lenFn.IsValid() {
- if lenFn.Kind() == reflect.Func {
- // Call the 'Size' function and save its return value.
- result = lenFn.Call([]reflect.Value{})
- if result != nil && len(result) == 1 {
- lenValue := result[0]
- if lenValue.IsValid() {
- switch lenValue.Kind() {
- case reflect.Int:
- fallthrough
- case reflect.Int8:
- fallthrough
- case reflect.Int16:
- fallthrough
- case reflect.Int32:
- fallthrough
- case reflect.Int64:
- size = lenValue.Int()
- }
- }
- }
- }
- } else {
-			// Fall back to the Stat() method; two possible Stat() structs
-			// exist.
- switch v := reader.(type) {
- case *os.File:
- var st os.FileInfo
- st, err = v.Stat()
- if err != nil {
-					// Handle this case specially for "windows": for
-					// certain files, for example 'Stdin', 'Stdout' and
-					// 'Stderr', it is not allowed to fetch file information.
- if runtime.GOOS == "windows" {
- if strings.Contains(err.Error(), "GetFileInformationByHandle") {
- return -1, nil
- }
- }
- return
- }
-				// If input is a directory, return an error.
- if st.Mode().IsDir() {
- return -1, ErrInvalidArgument("Input file cannot be a directory.")
- }
-				// Ignore 'Stdin', 'Stdout' and 'Stderr', since they
-				// are of *os.File type but internally do not
-				// implement seek calls; treat them like a stream
-				// with unknown length.
- switch st.Name() {
- case "stdin":
- fallthrough
- case "stdout":
- fallthrough
- case "stderr":
- return
- }
- size = st.Size()
- case *Object:
- var st ObjectInfo
- st, err = v.Stat()
- if err != nil {
- return
- }
- size = st.Size
- }
- }
- }
- // Returns the size here.
- return size, err
-}
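-
-// Illustrative expectations (not upstream code) for getReaderSize above.
-func exampleReaderSizes() {
-	s1, _ := getReaderSize(bytes.NewReader(make([]byte, 42))) // 42, via the reflected Size() method
-	s2, _ := getReaderSize(os.Stdin)                          // -1, stdin is treated as a stream
-	s3, _ := getReaderSize(nil)                               // -1, unknown
-	_, _, _ = s1, s2, s3
-}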
-
-// completedParts is a collection of parts sortable by their part numbers.
-// used for sorting the uploaded parts before completing the multipart request.
-type completedParts []completePart
-
-func (a completedParts) Len() int { return len(a) }
-func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
-
-// PutObject creates an object in a bucket.
-//
-// You must have WRITE permissions on a bucket to create an object.
-//
-// - For size smaller than 5MiB PutObject automatically does a single atomic Put operation.
-// - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation.
-// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
-// Maximum object size that can be uploaded through this operation will be 5TiB.
-//
-// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
-// So we fall back to single PUT operation with the maximum limit of 5GiB.
-//
-// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation.
-func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
- return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil)
-}
-
-// putObjectNoChecksum is a special function used for Google Cloud Storage,
-// since Google's multipart API is not S3 compatible.
-func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
-
- // Update progress reader appropriately to the latest offset as we
- // read from the source.
- readSeeker := newHook(reader, progress)
-
- // This function does not calculate sha256 and md5sum for payload.
- // Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, contentType)
- if err != nil {
- return 0, err
- }
- if st.Size != size {
- return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
- }
- return size, nil
-}
-
-// putObjectSingle is a special function for uploading a single put object request.
-// This special function is used as a fallback when multipart upload fails.
-func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
-	// If input is a stream of unknown size, upload up to 5GiB.
- if size <= -1 {
- size = maxSinglePutObjectSize
- }
- var md5Sum, sha256Sum []byte
- if size <= minPartSize {
- // Initialize a new temporary buffer.
- tmpBuffer := new(bytes.Buffer)
- md5Sum, sha256Sum, size, err = c.hashCopyN(tmpBuffer, reader, size)
- reader = bytes.NewReader(tmpBuffer.Bytes())
- tmpBuffer.Reset()
- } else {
- // Initialize a new temporary file.
- var tmpFile *tempFile
- tmpFile, err = newTempFile("single$-putobject-single")
- if err != nil {
- return 0, err
- }
- defer tmpFile.Close()
- md5Sum, sha256Sum, size, err = c.hashCopyN(tmpFile, reader, size)
- // Seek back to beginning of the temporary file.
- if _, err = tmpFile.Seek(0, 0); err != nil {
- return 0, err
- }
- reader = tmpFile
- }
-	// Return error if it is not io.EOF.
- if err != nil {
- if err != io.EOF {
- return 0, err
- }
- }
- // Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, reader, md5Sum, sha256Sum, size, contentType)
- if err != nil {
- return 0, err
- }
- if st.Size != size {
- return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
- }
- // Progress the reader to the size if putObjectDo is successful.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
- return size, err
- }
- }
- return size, nil
-}
-
-// putObjectDo - executes the put object http operation.
-// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return ObjectInfo{}, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return ObjectInfo{}, err
- }
-
- if size <= -1 {
- return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName)
- }
-
- if size > maxSinglePutObjectSize {
- return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
-
- if strings.TrimSpace(contentType) == "" {
- contentType = "application/octet-stream"
- }
-
- // Set headers.
- customHeader := make(http.Header)
- customHeader.Set("Content-Type", contentType)
-
- // Populate request metadata.
- reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeader,
- contentBody: reader,
- contentLength: size,
- contentMD5Bytes: md5Sum,
- contentSHA256Bytes: sha256Sum,
- }
-
-	// Execute PUT on objectName.
- resp, err := c.executeMethod("PUT", reqMetadata)
- defer closeResponse(resp)
- if err != nil {
- return ObjectInfo{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
- }
- }
-
- var metadata ObjectInfo
-	// Trim off the odd double quotes from ETag at the beginning and end.
- metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
- metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
- // A success here means data was written to server successfully.
- metadata.Size = size
-
- // Return here.
- return metadata, nil
-}
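-
-// End-to-end usage sketch (illustrative; endpoint, credentials and names are
-// placeholders, not working values).
-func examplePutObject() error {
-	c, err := New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
-	if err != nil {
-		return err
-	}
-	// Below minPartSize this takes the single-PUT path; larger or unknown
-	// sizes take the multipart paths defined earlier in this package.
-	_, err = c.PutObject("mybucket", "hello.txt", strings.NewReader("hello world"), "text/plain")
-	return err
-}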
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
deleted file mode 100644
index bd5842828..000000000
--- a/vendor/github.com/minio/minio-go/api-remove.go
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "net/http"
- "net/url"
-)
-
-// RemoveBucket deletes the named bucket.
-//
-// All objects (including all object versions and delete markers)
-// in the bucket must be deleted before successfully attempting this request.
-func (c Client) RemoveBucket(bucketName string) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
- // Execute DELETE on bucket.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- })
- defer closeResponse(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusNoContent {
- return httpRespToErrorResponse(resp, bucketName, "")
- }
- }
-
- // Remove the location from cache on a successful delete.
- c.bucketLocCache.Delete(bucketName)
-
- return nil
-}
-
-// RemoveBucketPolicy removes a bucket policy on the given path.
-func (c Client) RemoveBucketPolicy(bucketName, objectPrefix string) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
- if err := isValidObjectPrefix(objectPrefix); err != nil {
- return err
- }
- policy, err := c.getBucketPolicy(bucketName, objectPrefix)
- if err != nil {
- return err
- }
-	// No bucket policy found, nothing to remove; return success.
- if policy.Statements == nil {
- return nil
- }
-
- // Save new statements after removing requested bucket policy.
- policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
-
-	// Commit the updated policy.
- return c.putBucketPolicy(bucketName, policy)
-}
-
-// removeBucketPolicy removes all policies on a bucket.
-func (c Client) removeBucketPolicy(bucketName string) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
- // Get resources properly escaped and lined up before
- // using them in http request.
- urlValues := make(url.Values)
- urlValues.Set("policy", "")
-
- // Execute DELETE on objectName.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- })
- defer closeResponse(resp)
- if err != nil {
- return err
- }
- return nil
-}
-
-// RemoveObject removes an object from a bucket.
-func (c Client) RemoveObject(bucketName, objectName string) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
- if err := isValidObjectName(objectName); err != nil {
- return err
- }
- // Execute DELETE on objectName.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- })
- defer closeResponse(resp)
- if err != nil {
- return err
- }
- // DeleteObject always responds with http '204' even for
- // objects which do not exist. So no need to handle them
- // specifically.
- return nil
-}
-
-// RemoveIncompleteUpload aborts a partially uploaded object.
-// Requires explicit authentication; no anonymous requests are allowed for the multipart API.
-func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
- if err := isValidObjectName(objectName); err != nil {
- return err
- }
- // Find multipart upload id of the object to be aborted.
- uploadID, err := c.findUploadID(bucketName, objectName)
- if err != nil {
- return err
- }
- if uploadID != "" {
- // Upload id found, abort the incomplete multipart upload.
- err := c.abortMultipartUpload(bucketName, objectName, uploadID)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// abortMultipartUpload aborts a multipart upload for the given
-// uploadID; all previously uploaded parts are deleted.
-func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
- if err := isValidObjectName(objectName); err != nil {
- return err
- }
-
- // Initialize url queries.
- urlValues := make(url.Values)
- urlValues.Set("uploadId", uploadID)
-
- // Execute DELETE on multipart upload.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- })
- defer closeResponse(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusNoContent {
- // Abort has no response body, handle it for any errors.
- var errorResponse ErrorResponse
- switch resp.StatusCode {
- case http.StatusNotFound:
- // This is needed specifically for abort and it cannot
- // be converged into default case.
- errorResponse = ErrorResponse{
- Code: "NoSuchUpload",
- Message: "The specified multipart upload does not exist.",
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
- }
- default:
- return httpRespToErrorResponse(resp, bucketName, objectName)
- }
- return errorResponse
- }
- }
- return nil
-}
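-
-// Illustrative cleanup sketch (not upstream code): remove an object and any
-// stale multipart upload left behind for it. Names are placeholders.
-func exampleCleanup(c Client) error {
-	if err := c.RemoveObject("mybucket", "old.bin"); err != nil {
-		return err
-	}
-	return c.RemoveIncompleteUpload("mybucket", "old.bin")
-}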
diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
deleted file mode 100644
index cd0f6aa4d..000000000
--- a/vendor/github.com/minio/minio-go/api-s3-datatypes.go
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "encoding/xml"
- "time"
-)
-
-// listAllMyBucketsResult container for listBuckets response.
-type listAllMyBucketsResult struct {
- // Container for one or more buckets.
- Buckets struct {
- Bucket []BucketInfo
- }
- Owner owner
-}
-
-// owner container for bucket owner information.
-type owner struct {
- DisplayName string
- ID string
-}
-
-// commonPrefix container for prefix response.
-type commonPrefix struct {
- Prefix string
-}
-
-// listBucketResult container for listObjects response.
-type listBucketResult struct {
- // A response can contain CommonPrefixes only if you have
- // specified a delimiter.
- CommonPrefixes []commonPrefix
- // Metadata about each object returned.
- Contents []ObjectInfo
- Delimiter string
-
- // Encoding type used to encode object keys in the response.
- EncodingType string
-
- // A flag that indicates whether or not ListObjects returned all of the results
- // that satisfied the search criteria.
- IsTruncated bool
- Marker string
- MaxKeys int64
- Name string
-
- // When response is truncated (the IsTruncated element value in
- // the response is true), you can use the key name in this field
-	// as marker in the subsequent request to get the next set of objects.
-	// Object storage lists objects in alphabetical order. Note: This
-	// element is returned only if you have the delimiter request
-	// parameter specified. If the response does not include the NextMarker
- // and it is truncated, you can use the value of the last Key in
- // the response as the marker in the subsequent request to get the
- // next set of object keys.
- NextMarker string
- Prefix string
-}
-
-// listMultipartUploadsResult container for ListMultipartUploads response
-type listMultipartUploadsResult struct {
- Bucket string
- KeyMarker string
- UploadIDMarker string `xml:"UploadIdMarker"`
- NextKeyMarker string
- NextUploadIDMarker string `xml:"NextUploadIdMarker"`
- EncodingType string
- MaxUploads int64
- IsTruncated bool
- Uploads []ObjectMultipartInfo `xml:"Upload"`
- Prefix string
- Delimiter string
- // A response can contain CommonPrefixes only if you specify a delimiter.
- CommonPrefixes []commonPrefix
-}
-
-// initiator container for who initiated multipart upload.
-type initiator struct {
- ID string
- DisplayName string
-}
-
-// objectPart container for particular part of an object.
-type objectPart struct {
- // Part number identifies the part.
- PartNumber int
-
- // Date and time the part was uploaded.
- LastModified time.Time
-
- // Entity tag returned when the part was uploaded, usually md5sum
- // of the part.
- ETag string
-
- // Size of the uploaded part data.
- Size int64
-}
-
-// listObjectPartsResult container for ListObjectParts response.
-type listObjectPartsResult struct {
- Bucket string
- Key string
- UploadID string `xml:"UploadId"`
-
- Initiator initiator
- Owner owner
-
- StorageClass string
- PartNumberMarker int
- NextPartNumberMarker int
- MaxParts int
-
- // Indicates whether the returned list of parts is truncated.
- IsTruncated bool
- ObjectParts []objectPart `xml:"Part"`
-
- EncodingType string
-}
-
-// initiateMultipartUploadResult container for InitiateMultiPartUpload
-// response.
-type initiateMultipartUploadResult struct {
- Bucket string
- Key string
- UploadID string `xml:"UploadId"`
-}
-
-// completeMultipartUploadResult container for completed multipart
-// upload response.
-type completeMultipartUploadResult struct {
- Location string
- Bucket string
- Key string
- ETag string
-}
-
-// completePart sub container lists individual part numbers and their
-// md5sum, part of completeMultipartUpload.
-type completePart struct {
- XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
-
- // Part number identifies the part.
- PartNumber int
- ETag string
-}
-
-// completeMultipartUpload container for completing multipart upload.
-type completeMultipartUpload struct {
- XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
- Parts []completePart `xml:"Part"`
-}
-
-// createBucketConfiguration container for bucket configuration.
-type createBucketConfiguration struct {
- XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
- Location string `xml:"LocationConstraint"`
-}
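-
-// Illustrative sketch (not upstream code): marshaling the container above
-// yields the XML body that completeMultipartUpload sends to the server.
-func exampleCompleteBody() ([]byte, error) {
-	upload := completeMultipartUpload{
-		Parts: []completePart{
-			{PartNumber: 1, ETag: "etag-1"},
-			{PartNumber: 2, ETag: "etag-2"},
-		},
-	}
-	// Produces <CompleteMultipartUpload><Part>...</Part>...</CompleteMultipartUpload>
-	// in the S3 2006-03-01 namespace declared by the XMLName tags.
-	return xml.Marshal(upload)
-}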
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
deleted file mode 100644
index b5db7fedc..000000000
--- a/vendor/github.com/minio/minio-go/api-stat.go
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "net/http"
- "strconv"
- "strings"
- "time"
-)
-
-// BucketExists verifies if bucket exists and you have permission to access it.
-func (c Client) BucketExists(bucketName string) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
-
- // Execute HEAD on bucketName.
- resp, err := c.executeMethod("HEAD", requestMetadata{
- bucketName: bucketName,
- })
- defer closeResponse(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return httpRespToErrorResponse(resp, bucketName, "")
- }
- }
- return nil
-}
-
-// StatObject verifies if object exists and you have permission to access it.
-func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return ObjectInfo{}, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return ObjectInfo{}, err
- }
-
- // Execute HEAD on objectName.
- resp, err := c.executeMethod("HEAD", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- })
- defer closeResponse(resp)
- if err != nil {
- return ObjectInfo{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
- }
- }
-
-	// Trim off the odd double quotes from ETag at the beginning and end.
- md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
- md5sum = strings.TrimSuffix(md5sum, "\"")
-
- // Parse content length.
- size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
- if err != nil {
- return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
- Message: "Content-Length is invalid. " + reportIssue,
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
- }
- }
-	// Parse Last-Modified, which has http time format.
- date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
- if err != nil {
- return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
- Message: "Last-Modified time format is invalid. " + reportIssue,
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
- }
- }
- // Fetch content type if any present.
- contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
- if contentType == "" {
- contentType = "application/octet-stream"
- }
- // Save object metadata info.
- var objectStat ObjectInfo
- objectStat.ETag = md5sum
- objectStat.Key = objectName
- objectStat.Size = size
- objectStat.LastModified = date
- objectStat.ContentType = contentType
- return objectStat, nil
-}
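-
-// Illustrative usage (not upstream code) of the existence checks above.
-// Names are placeholders.
-func exampleStat(c Client) {
-	if err := c.BucketExists("mybucket"); err != nil {
-		return // bucket is missing or inaccessible
-	}
-	info, err := c.StatObject("mybucket", "hello.txt")
-	if err != nil {
-		return
-	}
-	_ = info.Size // parsed above from the Content-Length header
-}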
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
deleted file mode 100644
index af9ad6b1d..000000000
--- a/vendor/github.com/minio/minio-go/api.go
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/hex"
- "fmt"
- "io"
- "io/ioutil"
- "math/rand"
- "net/http"
- "net/http/httputil"
- "net/url"
- "os"
- "regexp"
- "runtime"
- "strings"
- "sync"
- "time"
-)
-
-// Client implements Amazon S3 compatible methods.
-type Client struct {
- /// Standard options.
-
- // AccessKeyID required for authorized requests.
- accessKeyID string
- // SecretAccessKey required for authorized requests.
- secretAccessKey string
- // Choose a signature type if necessary.
- signature SignatureType
- // Set to 'true' if Client has no access and secret keys.
- anonymous bool
-
- // User supplied.
- appInfo struct {
- appName string
- appVersion string
- }
- endpointURL *url.URL
-
- // Needs allocation.
- httpClient *http.Client
- bucketLocCache *bucketLocationCache
-
- // Advanced functionality.
- isTraceEnabled bool
- traceOutput io.Writer
-
- // Random seed.
- random *rand.Rand
-}
-
-// Global constants.
-const (
- libraryName = "minio-go"
- libraryVersion = "1.0.0"
-)
-
-// User Agent should always follow the below style.
-// Please open an issue to discuss any new changes here.
-//
-// Minio (OS; ARCH) LIB/VER APP/VER
-const (
- libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
- libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
-)
-
-// NewV2 - instantiate minio client with Amazon S3 signature version
-// '2' compatibility.
-func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
- clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
- if err != nil {
- return nil, err
- }
- // Set to use signature version '2'.
- clnt.signature = SignatureV2
- return clnt, nil
-}
-
-// NewV4 - instantiate minio client with Amazon S3 signature version
-// '4' compatibility.
-func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
- clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
- if err != nil {
- return nil, err
- }
- // Set to use signature version '4'.
- clnt.signature = SignatureV4
- return clnt, nil
-}
-
-// New - instantiates minio client, adds automatic verification
-// of signature.
-func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
- clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
- if err != nil {
- return nil, err
- }
- // Google cloud storage should be set to signature V2, force it if
- // not.
- if isGoogleEndpoint(clnt.endpointURL) {
- clnt.signature = SignatureV2
- }
-	// If Amazon S3, set to signature v4.
- if isAmazonEndpoint(clnt.endpointURL) {
- clnt.signature = SignatureV4
- }
- return clnt, nil
-}
-
-// lockedRandSource provides protected rand source, implements rand.Source interface.
-type lockedRandSource struct {
- lk sync.Mutex
- src rand.Source
-}
-
-// Int63 returns a non-negative pseudo-random 63-bit integer as an
-// int64.
-func (r *lockedRandSource) Int63() (n int64) {
- r.lk.Lock()
- n = r.src.Int63()
- r.lk.Unlock()
- return
-}
-
-// Seed uses the provided seed value to initialize the generator to a
-// deterministic state.
-func (r *lockedRandSource) Seed(seed int64) {
- r.lk.Lock()
- r.src.Seed(seed)
- r.lk.Unlock()
-}
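-
-// Illustrative sketch (not upstream code): wrapping a rand.Source in
-// lockedRandSource makes a single *rand.Rand safe for concurrent use,
-// which is how privateNew below seeds the client's random source.
-func exampleLockedRand() int64 {
-	r := rand.New(&lockedRandSource{src: rand.NewSource(42)})
-	return r.Int63() // safe to call from multiple goroutines
-}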
-
-func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
- // construct endpoint.
- endpointURL, err := getEndpointURL(endpoint, insecure)
- if err != nil {
- return nil, err
- }
-
- // instantiate new Client.
- clnt := new(Client)
- clnt.accessKeyID = accessKeyID
- clnt.secretAccessKey = secretAccessKey
- if clnt.accessKeyID == "" || clnt.secretAccessKey == "" {
- clnt.anonymous = true
- }
-
- // Save endpoint URL, user agent for future uses.
- clnt.endpointURL = endpointURL
-
- // Instantiate http client and bucket location cache.
- clnt.httpClient = &http.Client{
-		// Setting a sensible timeout of 2 minutes to wait for response
-		// headers. Request is proactively cancelled after 2 minutes
-		// if no response was received from the server.
- Timeout: 2 * time.Minute,
- Transport: http.DefaultTransport,
- }
-
-	// Instantiate bucket location cache.
- clnt.bucketLocCache = newBucketLocationCache()
-
- // Introduce a new locked random seed.
- clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
-
- // Return.
- return clnt, nil
-}
-
-// SetAppInfo - add application details to user agent.
-func (c *Client) SetAppInfo(appName string, appVersion string) {
-	// If app name and version are not set, we do not set a new user
-	// agent.
- if appName != "" && appVersion != "" {
- c.appInfo = struct {
- appName string
- appVersion string
- }{}
- c.appInfo.appName = appName
- c.appInfo.appVersion = appVersion
- }
-}
-
-// SetCustomTransport - set new custom transport.
-func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
- // Set this to override default transport
- // ``http.DefaultTransport``.
- //
- // This transport is usually needed for debugging OR to add your
- // own custom TLS certificates on the client transport, for custom
- // CA's and certs which are not part of standard certificate
- // authority follow this example :-
- //
- // tr := &http.Transport{
- // TLSClientConfig: &tls.Config{RootCAs: pool},
- // DisableCompression: true,
- // }
- // api.SetTransport(tr)
- //
- if c.httpClient != nil {
- c.httpClient.Transport = customHTTPTransport
- }
-}
-
-// TraceOn - enable HTTP tracing.
-func (c *Client) TraceOn(outputStream io.Writer) {
- // if outputStream is nil then default to os.Stdout.
- if outputStream == nil {
- outputStream = os.Stdout
- }
- // Sets a new output stream.
- c.traceOutput = outputStream
-
- // Enable tracing.
- c.isTraceEnabled = true
-}
-
-// TraceOff - disable HTTP tracing.
-func (c *Client) TraceOff() {
- // Disable tracing.
- c.isTraceEnabled = false
-}
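-
-// Illustrative usage (not upstream code): route the redacted HTTP trace
-// produced by dumpHTTP below to stderr while debugging.
-func exampleTracing(c *Client) {
-	c.TraceOn(os.Stderr)
-	defer c.TraceOff()
-	// ... issue requests here; request/response headers are dumped with
-	// signatures redacted by filterSignature.
-}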
-
-// requestMetadata - is a container for all the values to make a
-// request.
-type requestMetadata struct {
- // If set newRequest presigns the URL.
- presignURL bool
-
- // User supplied.
- bucketName string
- objectName string
- queryValues url.Values
- customHeader http.Header
- expires int64
-
- // Generated by our internal code.
- bucketLocation string
- contentBody io.Reader
- contentLength int64
- contentSHA256Bytes []byte
- contentMD5Bytes []byte
-}
-
-// Filter out signature value from Authorization header.
-func (c Client) filterSignature(req *http.Request) {
- // For anonymous requests, no need to filter.
- if c.anonymous {
- return
- }
- // Handle if Signature V2.
- if c.signature.isV2() {
- // Set a temporary redacted auth
- req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
- return
- }
-
- /// Signature V4 authorization header.
-
- // Save the original auth.
- origAuth := req.Header.Get("Authorization")
- // Strip out accessKeyID from:
-	// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
- regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
- newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
-
- // Strip out 256-bit signature from: Signature=<256-bit signature>
-	regSign := regexp.MustCompile("Signature=([0-9a-f]+)")
- newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
-
- // Set a temporary redacted auth
- req.Header.Set("Authorization", newAuth)
- return
-}
-
-// dumpHTTP - dump HTTP request and response.
-func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
- // Starts http dump.
- _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
- if err != nil {
- return err
- }
-
- // Filter out Signature field from Authorization header.
- c.filterSignature(req)
-
- // Only display request header.
- reqTrace, err := httputil.DumpRequestOut(req, false)
- if err != nil {
- return err
- }
-
- // Write request to trace output.
- _, err = fmt.Fprint(c.traceOutput, string(reqTrace))
- if err != nil {
- return err
- }
-
- // Only display response header.
- var respTrace []byte
-
- // For errors we make sure to dump response body as well.
- if resp.StatusCode != http.StatusOK &&
- resp.StatusCode != http.StatusPartialContent &&
- resp.StatusCode != http.StatusNoContent {
- respTrace, err = httputil.DumpResponse(resp, true)
- if err != nil {
- return err
- }
- } else {
- // WORKAROUND for https://github.com/golang/go/issues/13942.
- // httputil.DumpResponse does not print response headers for
- // all successful calls which have response ContentLength set
- // to zero. Keep this workaround until the above bug is fixed.
- if resp.ContentLength == 0 {
- var buffer bytes.Buffer
- if err = resp.Header.Write(&buffer); err != nil {
- return err
- }
- respTrace = buffer.Bytes()
- respTrace = append(respTrace, []byte("\r\n")...)
- } else {
- respTrace, err = httputil.DumpResponse(resp, false)
- if err != nil {
- return err
- }
- }
- }
- // Write response to trace output.
- _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
- if err != nil {
- return err
- }
-
- // Ends the http dump.
- _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
- if err != nil {
- return err
- }
-
- // Returns success.
- return nil
-}
-
-// do - execute http request.
-func (c Client) do(req *http.Request) (*http.Response, error) {
- // do the request.
- resp, err := c.httpClient.Do(req)
- if err != nil {
- // Handle this specifically for now until future Golang
- // versions fix this issue properly.
- urlErr, ok := err.(*url.Error)
- if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
- return nil, &url.Error{
- Op: urlErr.Op,
- URL: urlErr.URL,
- Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
- }
- }
- return nil, err
- }
-
-	// Response cannot be nil, report if that is the case.
- if resp == nil {
- msg := "Response is empty. " + reportIssue
- return nil, ErrInvalidArgument(msg)
- }
-
- // If trace is enabled, dump http request and response.
- if c.isTraceEnabled {
- err = c.dumpHTTP(req, resp)
- if err != nil {
- return nil, err
- }
- }
- return resp, nil
-}
-
-// List of success status.
-var successStatus = []int{
- http.StatusOK,
- http.StatusNoContent,
- http.StatusPartialContent,
-}
-
-// executeMethod - instantiates a given method, and retries the
-// request upon any error up to maxRetries attempts using a standard
-// exponential backoff algorithm.
-func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
- var isRetryable bool // Indicates if request can be retried.
- var bodySeeker io.Seeker // Extracted seeker from io.Reader.
- if metadata.contentBody != nil {
- // Check if body is seekable then it is retryable.
- bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
- }
-
- // Retry executes the following function body if the request has an
- // error, until maxRetries have been exhausted. Retry attempts are
- // performed after waiting for a period of time that grows
- // exponentially with each attempt.
- for range c.newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter) {
- if isRetryable {
- // Seek back to beginning for each attempt.
- if _, err = bodySeeker.Seek(0, 0); err != nil {
- // If seek failed, no need to retry.
- return nil, err
- }
- }
-
- // Instantiate a new request.
- var req *http.Request
- req, err = c.newRequest(method, metadata)
- if err != nil {
- errResponse := ToErrorResponse(err)
- if isS3CodeRetryable(errResponse.Code) {
- continue // Retry.
- }
- return nil, err
- }
-
- // Initiate the request.
- res, err = c.do(req)
- if err != nil {
- // For supported network errors verify.
- if isNetErrorRetryable(err) {
- continue // Retry.
- }
- // For other errors, return here no need to retry.
- return nil, err
- }
-
- // For any known successful http status, return quickly.
- for _, httpStatus := range successStatus {
- if httpStatus == res.StatusCode {
- return res, nil
- }
- }
-
- // Read the body to be saved later.
- errBodyBytes, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return nil, err
- }
- // Save the body.
- errBodySeeker := bytes.NewReader(errBodyBytes)
- res.Body = ioutil.NopCloser(errBodySeeker)
-
- // For errors, verify if it's retryable, otherwise fail quickly.
- errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
- // If the bucket region is set in the error response, retry the
- // request with the new region.
- if errResponse.Region != "" {
- c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
- continue // Retry.
- }
-
- // Verify if error response code is retryable.
- if isS3CodeRetryable(errResponse.Code) {
- continue // Retry.
- }
-
- // Verify if http status code is retryable.
- if isHTTPStatusRetryable(res.StatusCode) {
- continue // Retry.
- }
-
- // Save the body back again.
- errBodySeeker.Seek(0, 0) // Seek back to starting point.
- res.Body = ioutil.NopCloser(errBodySeeker)
-
- // For all other cases break out of the retry loop.
- break
- }
- return res, err
-}
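The retry loop above only treats a request as retryable when its body happens to implement io.Seeker, and rewinds it before each attempt. A small sketch of why that distinction matters; `readTwice` is a hypothetical helper, not part of the library:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// readTwice consumes r fully, then tries to rewind and read it again,
// mirroring what each retry attempt must do with a request body.
func readTwice(r io.Reader) {
	first, _ := ioutil.ReadAll(r)
	fmt.Printf("first read: %q\n", first)
	if s, ok := r.(io.Seeker); ok {
		s.Seek(0, 0) // Seek back to the beginning, as the retry loop does.
		second, _ := ioutil.ReadAll(r)
		fmt.Printf("retry read: %q\n", second)
		return
	}
	fmt.Println("not seekable: the body cannot be replayed, so no retry")
}

func main() {
	readTwice(bytes.NewReader([]byte("hello")))          // seekable: retryable
	readTwice(ioutil.NopCloser(strings.NewReader("hi"))) // NopCloser hides Seeker: not retryable
}
```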
-
-// newRequest - instantiate a new HTTP request for a given method.
-func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
- // If no method is supplied default to 'POST'.
- if method == "" {
- method = "POST"
- }
-
- // Gather location only if bucketName is present.
- location := "us-east-1" // Default all other requests to "us-east-1".
- if metadata.bucketName != "" {
- location, err = c.getBucketLocation(metadata.bucketName)
- if err != nil {
- return nil, err
- }
- }
-
- // Save location.
- metadata.bucketLocation = location
-
- // Construct a new target URL.
- targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues)
- if err != nil {
- return nil, err
- }
-
- // Initialize a new HTTP request for the method.
- req, err = http.NewRequest(method, targetURL.String(), nil)
- if err != nil {
- return nil, err
- }
-
- // Generate presign url if needed, return right here.
- if metadata.expires != 0 && metadata.presignURL {
- if c.anonymous {
- return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
- }
- if c.signature.isV2() {
- // Presign URL with signature v2.
- req = preSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
- } else {
- // Presign URL with signature v4.
- req = preSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
- }
- return req, nil
- }
-
- // Set content body if available.
- if metadata.contentBody != nil {
- req.Body = ioutil.NopCloser(metadata.contentBody)
- }
-
- // set 'Expect' header for the request.
- req.Header.Set("Expect", "100-continue")
-
- // set 'User-Agent' header for the request.
- c.setUserAgent(req)
-
- // Set all headers.
- for k, v := range metadata.customHeader {
- req.Header.Set(k, v[0])
- }
-
- // set incoming content-length.
- if metadata.contentLength > 0 {
- req.ContentLength = metadata.contentLength
- }
-
- // Set sha256 sum only for non-anonymous credentials.
- if !c.anonymous {
- // set sha256 sum for signature calculation only with
- // signature version '4'.
- if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
- if metadata.contentSHA256Bytes != nil {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSHA256Bytes))
- }
- }
- }
-
- // set md5Sum for content protection.
- if metadata.contentMD5Bytes != nil {
- req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
- }
-
- // Sign the request for all authenticated requests.
- if !c.anonymous {
- if c.signature.isV2() {
- // Add signature version '2' authorization header.
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
- } else if c.signature.isV4() {
- // Add signature version '4' authorization header.
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, location)
- }
- }
-
- // Return request.
- return req, nil
-}
-
-// set User agent.
-func (c Client) setUserAgent(req *http.Request) {
- req.Header.Set("User-Agent", libraryUserAgent)
- if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
- req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
- }
-}
-
-// makeTargetURL make a new target url.
-func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
- // Save host.
- host := c.endpointURL.Host
- // For Amazon S3 endpoint, try to fetch location based endpoint.
- if isAmazonEndpoint(c.endpointURL) {
- // Fetch new host based on the bucket location.
- host = getS3Endpoint(bucketLocation)
- }
- // Save scheme.
- scheme := c.endpointURL.Scheme
-
- urlStr := scheme + "://" + host + "/"
- // Make URL only if bucketName is available, otherwise use the
- // endpoint URL.
- if bucketName != "" {
- // Check if the target URL supports virtual-host-style buckets.
- isVirtualHostStyle := isVirtualHostSupported(c.endpointURL, bucketName)
-
- // If endpoint supports virtual host style use that always.
- // Currently only S3 and Google Cloud Storage would support
- // virtual host style.
- if isVirtualHostStyle {
- urlStr = scheme + "://" + bucketName + "." + host + "/"
- if objectName != "" {
- urlStr = urlStr + urlEncodePath(objectName)
- }
- } else {
- // If not fall back to using path style.
- urlStr = urlStr + bucketName + "/"
- if objectName != "" {
- urlStr = urlStr + urlEncodePath(objectName)
- }
- }
- }
- // If there are any query values, add them to the end.
- if len(queryValues) > 0 {
- urlStr = urlStr + "?" + queryEncode(queryValues)
- }
- u, err := url.Parse(urlStr)
- if err != nil {
- return nil, err
- }
-
- return u, nil
-}
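For reference, the two URL shapes makeTargetURL chooses between reduce to a few lines. A hedged sketch with placeholder host, bucket, and object names; `buildURL` is hypothetical and skips the path encoding the real function performs:

```go
package main

import "fmt"

// buildURL is a reduction of makeTargetURL's two layouts: virtual-host
// style puts the bucket in the host, path style puts it in the path.
func buildURL(scheme, host, bucket, object string, virtualHost bool) string {
	if virtualHost {
		return scheme + "://" + bucket + "." + host + "/" + object
	}
	return scheme + "://" + host + "/" + bucket + "/" + object
}

func main() {
	fmt.Println(buildURL("https", "s3.amazonaws.com", "mybucket", "photos/a.png", true))
	// https://mybucket.s3.amazonaws.com/photos/a.png
	fmt.Println(buildURL("https", "localhost:9000", "mybucket", "photos/a.png", false))
	// https://localhost:9000/mybucket/photos/a.png
}
```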
diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml
deleted file mode 100644
index 5b8824d45..000000000
--- a/vendor/github.com/minio/minio-go/appveyor.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-# version format
-version: "{build}"
-
-# Operating system (build VM template)
-os: Windows Server 2012 R2
-
-clone_folder: c:\gopath\src\github.com\minio\minio-go
-
-# environment variables
-environment:
- GOPATH: c:\gopath
- GO15VENDOREXPERIMENT: 1
-
-# scripts that run after cloning repository
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
- - go env
- - go get -u github.com/golang/lint/golint
- - go get -u golang.org/x/tools/cmd/vet
- - go get -u github.com/remyoudompheng/go-misc/deadcode
-
-# to run your custom scripts instead of automatic MSBuild
-build_script:
- - go vet ./...
- - gofmt -s -l .
- - golint github.com/minio/minio-go...
- - deadcode
- - go test -short -v
- - go test -short -race -v
-
-# to disable automatic tests
-test: off
-
-# to disable deployment
-deploy: off
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
deleted file mode 100644
index 50679a380..000000000
--- a/vendor/github.com/minio/minio-go/bucket-cache.go
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "encoding/hex"
- "net/http"
- "net/url"
- "path"
- "strings"
- "sync"
-)
-
-// bucketLocationCache - Provides a simple mechanism to hold bucket
-// locations in memory.
-type bucketLocationCache struct {
- // mutex is used for handling the concurrent
- // read/write requests for cache.
- sync.RWMutex
-
- // items holds the cached bucket locations.
- items map[string]string
-}
-
-// newBucketLocationCache - Provides a new bucket location cache to be
-// used internally with the client object.
-func newBucketLocationCache() *bucketLocationCache {
- return &bucketLocationCache{
- items: make(map[string]string),
- }
-}
-
-// Get - Returns a value of a given key if it exists.
-func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
- r.RLock()
- defer r.RUnlock()
- location, ok = r.items[bucketName]
- return
-}
-
-// Set - Will persist a value into cache.
-func (r *bucketLocationCache) Set(bucketName string, location string) {
- r.Lock()
- defer r.Unlock()
- r.items[bucketName] = location
-}
-
-// Delete - Deletes a bucket name from cache.
-func (r *bucketLocationCache) Delete(bucketName string) {
- r.Lock()
- defer r.Unlock()
- delete(r.items, bucketName)
-}
-
-// getBucketLocation - Get location for the bucketName from location map cache.
-func (c Client) getBucketLocation(bucketName string) (string, error) {
- if location, ok := c.bucketLocCache.Get(bucketName); ok {
- return location, nil
- }
-
- // Initialize a new request.
- req, err := c.getBucketLocationRequest(bucketName)
- if err != nil {
- return "", err
- }
-
- // Initiate the request.
- resp, err := c.do(req)
- defer closeResponse(resp)
- if err != nil {
- return "", err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- err = httpRespToErrorResponse(resp, bucketName, "")
- errResp := ToErrorResponse(err)
- // For access denied error, it could be an anonymous
- // request. Move forward and let the top level callers
- // succeed if possible based on their policy.
- if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
- return "us-east-1", nil
- }
- return "", err
- }
- }
-
- // Extract location.
- var locationConstraint string
- err = xmlDecoder(resp.Body, &locationConstraint)
- if err != nil {
- return "", err
- }
-
- location := locationConstraint
- // An empty location defaults to 'us-east-1'.
- if location == "" {
- location = "us-east-1"
- }
-
- // Location can be 'EU' convert it to meaningful 'eu-west-1'.
- if location == "EU" {
- location = "eu-west-1"
- }
-
- // Save the location into cache.
- c.bucketLocCache.Set(bucketName, location)
-
- // Return.
- return location, nil
-}
-
-// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
-func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
- // Set location query.
- urlValues := make(url.Values)
- urlValues.Set("location", "")
-
- // Always make the get bucket location request in path style.
- targetURL := c.endpointURL
- targetURL.Path = path.Join(bucketName, "") + "/"
- targetURL.RawQuery = urlValues.Encode()
-
- // Get a new HTTP request for the method.
- req, err := http.NewRequest("GET", targetURL.String(), nil)
- if err != nil {
- return nil, err
- }
-
- // Set UserAgent for the request.
- c.setUserAgent(req)
-
- // Set sha256 sum for signature calculation only with signature version '4'.
- if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
- }
-
- // Sign the request.
- if c.signature.isV4() {
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
- } else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
- }
- return req, nil
-}
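The cache above is a plain map guarded by a sync.RWMutex, so concurrent lookups can proceed in parallel while writes stay exclusive. A self-contained usage sketch of the same shape; `locationCache` is a hypothetical stand-in for bucketLocationCache:

```go
package main

import (
	"fmt"
	"sync"
)

// locationCache mirrors bucketLocationCache: a map guarded by a
// RWMutex so concurrent readers do not block each other.
type locationCache struct {
	sync.RWMutex
	items map[string]string
}

func (c *locationCache) Get(bucket string) (string, bool) {
	c.RLock()
	defer c.RUnlock()
	loc, ok := c.items[bucket]
	return loc, ok
}

func (c *locationCache) Set(bucket, location string) {
	c.Lock()
	defer c.Unlock()
	c.items[bucket] = location
}

func main() {
	cache := &locationCache{items: make(map[string]string)}
	cache.Set("mybucket", "eu-west-1")
	if loc, ok := cache.Get("mybucket"); ok {
		fmt.Println("cached location:", loc) // cached location: eu-west-1
	}
}
```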
diff --git a/vendor/github.com/minio/minio-go/bucket-policy.go b/vendor/github.com/minio/minio-go/bucket-policy.go
deleted file mode 100644
index 5b8a5113d..000000000
--- a/vendor/github.com/minio/minio-go/bucket-policy.go
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "encoding/json"
- "sort"
-)
-
-// maximum supported access policy size.
-const maxAccessPolicySize = 20 * 1024 * 1024 // 20MiB.
-
-// Resource prefix for all aws resources.
-const awsResourcePrefix = "arn:aws:s3:::"
-
-// BucketPolicy - Bucket level policy.
-type BucketPolicy string
-
-// Different types of Policies currently supported for buckets.
-const (
- BucketPolicyNone BucketPolicy = "none"
- BucketPolicyReadOnly = "readonly"
- BucketPolicyReadWrite = "readwrite"
- BucketPolicyWriteOnly = "writeonly"
-)
-
-// isValidBucketPolicy - Is provided policy value supported.
-func (p BucketPolicy) isValidBucketPolicy() bool {
- switch p {
- case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
- return true
- }
- return false
-}
-
-// User - canonical users list.
-type User struct {
- AWS []string
-}
-
-// Statement - minio policy statement
-type Statement struct {
- Sid string
- Effect string
- Principal User `json:"Principal"`
- Actions []string `json:"Action"`
- Resources []string `json:"Resource"`
- Conditions map[string]map[string]string `json:"Condition,omitempty"`
-}
-
-// BucketAccessPolicy - minio policy collection
-type BucketAccessPolicy struct {
- Version string // date in YYYY-MM-DD format
- Statements []Statement `json:"Statement"`
-}
-
-// Read write actions.
-var (
- readWriteBucketActions = []string{
- "s3:GetBucketLocation",
- "s3:ListBucket",
- "s3:ListBucketMultipartUploads",
- // Add more bucket level read-write actions here.
- }
- readWriteObjectActions = []string{
- "s3:AbortMultipartUpload",
- "s3:DeleteObject",
- "s3:GetObject",
- "s3:ListMultipartUploadParts",
- "s3:PutObject",
- // Add more object level read-write actions here.
- }
-)
-
-// Write only actions.
-var (
- writeOnlyBucketActions = []string{
- "s3:GetBucketLocation",
- "s3:ListBucketMultipartUploads",
- // Add more bucket level write actions here.
- }
- writeOnlyObjectActions = []string{
- "s3:AbortMultipartUpload",
- "s3:DeleteObject",
- "s3:ListMultipartUploadParts",
- "s3:PutObject",
- // Add more object level write actions here.
- }
-)
-
-// Read only actions.
-var (
- readOnlyBucketActions = []string{
- "s3:GetBucketLocation",
- "s3:ListBucket",
- // Add more bucket level read actions here.
- }
- readOnlyObjectActions = []string{
- "s3:GetObject",
- // Add more object level read actions here.
- }
-)
-
-// subsetActions returns true if the first array is completely
-// contained in the second array. There must be at least
-// the same number of duplicate values in second as there
-// are in first.
-func subsetActions(first, second []string) bool {
- set := make(map[string]int)
- for _, value := range second {
- set[value]++
- }
- for _, value := range first {
- if count, found := set[value]; !found {
- return false
- } else if count < 1 {
- return false
- } else {
- set[value] = count - 1
- }
- }
- return true
-}
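subsetActions is a multiset-containment check: duplicates in the first slice must be matched by duplicates in the second. A compressed standalone version; `subset` is hypothetical and leans on Go's zero value for missing map keys:

```go
package main

import "fmt"

// subset reports whether every element of first appears in second,
// counting duplicates, the same multiset check subsetActions performs.
func subset(first, second []string) bool {
	set := make(map[string]int)
	for _, v := range second {
		set[v]++
	}
	for _, v := range first {
		if set[v] < 1 { // missing keys read as 0
			return false
		}
		set[v]--
	}
	return true
}

func main() {
	readOnly := []string{"s3:GetBucketLocation", "s3:ListBucket"}
	granted := []string{"s3:GetBucketLocation", "s3:ListBucket", "s3:PutObject"}
	fmt.Println(subset(readOnly, granted)) // true: read-only actions are covered
	fmt.Println(subset(granted, readOnly)) // false: PutObject is missing
}
```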
-
-// Verifies if we have read/write policy set at bucketName, objectPrefix.
-func isBucketPolicyReadWrite(statements []Statement, bucketName string, objectPrefix string) bool {
- var commonActions, readWrite bool
- sort.Strings(readWriteBucketActions)
- sort.Strings(readWriteObjectActions)
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- if subsetActions(readWriteBucketActions, statement.Actions) {
- commonActions = true
- continue
- }
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- if subsetActions(readWriteObjectActions, statement.Actions) {
- readWrite = true
- }
- }
- }
- }
- return commonActions && readWrite
-}
-
-// Verifies if we have write only policy set at bucketName, objectPrefix.
-func isBucketPolicyWriteOnly(statements []Statement, bucketName string, objectPrefix string) bool {
- var commonActions, writeOnly bool
- sort.Strings(writeOnlyBucketActions)
- sort.Strings(writeOnlyObjectActions)
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- if subsetActions(writeOnlyBucketActions, statement.Actions) {
- commonActions = true
- continue
- }
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- if subsetActions(writeOnlyObjectActions, statement.Actions) {
- writeOnly = true
- }
- }
- }
- }
- return commonActions && writeOnly
-}
-
-// Verifies if we have read only policy set at bucketName, objectPrefix.
-func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPrefix string) bool {
- var commonActions, readOnly bool
- sort.Strings(readOnlyBucketActions)
- sort.Strings(readOnlyObjectActions)
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- if subsetActions(readOnlyBucketActions, statement.Actions) {
- commonActions = true
- continue
- }
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- if subsetActions(readOnlyObjectActions, statement.Actions) {
- readOnly = true
- break
- }
- }
- }
- }
- return commonActions && readOnly
-}
-
-// Removes read write bucket policy if found.
-func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName string, objectPrefix string) []Statement {
- var newStatements []Statement
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- var newActions []string
- for _, action := range statement.Actions {
- switch action {
- case "s3:GetBucketLocation", "s3:ListBucket", "s3:ListBucketMultipartUploads":
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- var newActions []string
- for _, action := range statement.Actions {
- switch action {
- case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject", "s3:GetObject":
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// Removes write only bucket policy if found.
-func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
- var newStatements []Statement
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- var newActions []string
- for _, action := range statement.Actions {
- switch action {
- case "s3:GetBucketLocation", "s3:ListBucketMultipartUploads":
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- var newActions []string
- for _, action := range statement.Actions {
- switch action {
- case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject":
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// Removes read only bucket policy if found.
-func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
- var newStatements []Statement
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- var newActions []string
- for _, action := range statement.Actions {
- switch action {
- case "s3:GetBucketLocation", "s3:ListBucket":
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- var newActions []string
- for _, action := range statement.Actions {
- if action == "s3:GetObject" {
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// Remove bucket policies based on the type.
-func removeBucketPolicyStatement(statements []Statement, bucketName string, objectPrefix string) []Statement {
- // Verify type of policy to be removed.
- if isBucketPolicyReadWrite(statements, bucketName, objectPrefix) {
- statements = removeBucketPolicyStatementReadWrite(statements, bucketName, objectPrefix)
- } else if isBucketPolicyWriteOnly(statements, bucketName, objectPrefix) {
- statements = removeBucketPolicyStatementWriteOnly(statements, bucketName, objectPrefix)
- } else if isBucketPolicyReadOnly(statements, bucketName, objectPrefix) {
- statements = removeBucketPolicyStatementReadOnly(statements, bucketName, objectPrefix)
- }
- return statements
-}
-
-// Unmarshals bucket policy byte array into a structured bucket access policy.
-func unMarshalBucketPolicy(bucketPolicyBuf []byte) (BucketAccessPolicy, error) {
- // Untyped lazy JSON struct.
- type bucketAccessPolicyUntyped struct {
- Version string
- Statement []struct {
- Sid string
- Effect string
- Principal struct {
- AWS json.RawMessage
- }
- Action json.RawMessage
- Resource json.RawMessage
- Condition map[string]map[string]string
- }
- }
- var policyUntyped = bucketAccessPolicyUntyped{}
- // Unmarshal incoming policy into an untyped structure, to be
- // evaluated lazily later.
- err := json.Unmarshal(bucketPolicyBuf, &policyUntyped)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- var policy = BucketAccessPolicy{}
- policy.Version = policyUntyped.Version
- for _, stmtUntyped := range policyUntyped.Statement {
- statement := Statement{}
- // These are properly typed messages.
- statement.Sid = stmtUntyped.Sid
- statement.Effect = stmtUntyped.Effect
- statement.Conditions = stmtUntyped.Condition
-
- // AWS user can have two different types, either as []string
- // or as a regular 'string'. We fall back to trying both
- // since there is no easier way to handle this.
- err = json.Unmarshal(stmtUntyped.Principal.AWS, &statement.Principal.AWS)
- if err != nil {
- var awsUser string
- err = json.Unmarshal(stmtUntyped.Principal.AWS, &awsUser)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- statement.Principal.AWS = []string{awsUser}
- }
- // Actions can have two different types, either as []string
- // or as a regular 'string'. We fall back to trying both
- // since there is no easier way to handle this.
- err = json.Unmarshal(stmtUntyped.Action, &statement.Actions)
- if err != nil {
- var action string
- err = json.Unmarshal(stmtUntyped.Action, &action)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- statement.Actions = []string{action}
- }
- // Resources can have two different types, either as []string
- // or as a regular 'string'. We fall back to trying both
- // since there is no easier way to handle this.
- err = json.Unmarshal(stmtUntyped.Resource, &statement.Resources)
- if err != nil {
- var resource string
- err = json.Unmarshal(stmtUntyped.Resource, &resource)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- statement.Resources = []string{resource}
- }
- // Append the typed policy.
- policy.Statements = append(policy.Statements, statement)
- }
- return policy, nil
-}
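The untyped decode above exists because S3 policy JSON allows `Principal.AWS`, `Action`, and `Resource` to be either a single string or an array of strings. A minimal sketch of that fallback; `stringOrSlice` is a hypothetical helper:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// stringOrSlice decodes a JSON value that may be either "x" or
// ["x","y"] into a []string, the same fallback unMarshalBucketPolicy
// applies to each raw field.
func stringOrSlice(raw json.RawMessage) ([]string, error) {
	var many []string
	if err := json.Unmarshal(raw, &many); err == nil {
		return many, nil
	}
	var one string
	if err := json.Unmarshal(raw, &one); err != nil {
		return nil, err
	}
	return []string{one}, nil
}

func main() {
	a, _ := stringOrSlice(json.RawMessage(`"s3:GetObject"`))
	b, _ := stringOrSlice(json.RawMessage(`["s3:GetObject","s3:PutObject"]`))
	fmt.Println(a) // [s3:GetObject]
	fmt.Println(b) // [s3:GetObject s3:PutObject]
}
```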
diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go
deleted file mode 100644
index b0aa009d8..000000000
--- a/vendor/github.com/minio/minio-go/constants.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-/// Multipart upload defaults.
-
-// minPartSize - minimum part size 5MiB per object, after which
-// putObject behaves internally as multipart.
-const minPartSize = 1024 * 1024 * 5
-
-// maxPartsCount - maximum number of parts for a single multipart session.
-const maxPartsCount = 10000
-
-// maxPartSize - maximum part size 5GiB for a single multipart upload
-// operation.
-const maxPartSize = 1024 * 1024 * 1024 * 5
-
-// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
-// operation.
-const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
-
-// maxMultipartPutObjectSize - maximum size 5TiB of object for
-// Multipart operation.
-const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
-
-// optimalReadBufferSize - optimal buffer 5MiB used for reading
-// through Read operation.
-const optimalReadBufferSize = 1024 * 1024 * 5
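These limits jointly constrain how a large upload must be split: at least 5MiB per part and at most 10000 parts. A sketch of one way to pick a part size under those constraints; `partSizeFor` is illustrative only, not the library's actual sizing policy:

```go
package main

import "fmt"

const (
	minPartSize   = 1024 * 1024 * 5 // 5MiB
	maxPartsCount = 10000
)

// partSizeFor picks a part size, starting at minPartSize and doubling,
// until objectSize fits into at most maxPartsCount parts.
func partSizeFor(objectSize int64) int64 {
	partSize := int64(minPartSize)
	// Ceiling division: number of parts at the current part size.
	for (objectSize+partSize-1)/partSize > maxPartsCount {
		partSize *= 2
	}
	return partSize
}

func main() {
	size := int64(1024) * 1024 * 1024 * 1024 // 1TiB object
	fmt.Printf("part size: %d MiB\n", partSizeFor(size)/(1024*1024))
	// 1TiB / 10000 parts needs roughly 105MiB, so doubling from 5MiB lands on 160MiB.
}
```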
diff --git a/vendor/github.com/minio/minio-go/hook-reader.go b/vendor/github.com/minio/minio-go/hook-reader.go
deleted file mode 100644
index bc9ece049..000000000
--- a/vendor/github.com/minio/minio-go/hook-reader.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import "io"
-
-// hookReader hooks additional reader in the source stream. It is
-// useful for making progress bars. Second reader is appropriately
-// notified about the exact number of bytes read from the primary
-// source on each Read operation.
-type hookReader struct {
- source io.Reader
- hook io.Reader
-}
-
-// Seek implements io.Seeker. Seeks the source if it implements
-// io.Seeker, otherwise falls back to seeking the hook if it does.
-func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
- // If the source has an embedded Seeker, use it.
- sourceSeeker, ok := hr.source.(io.Seeker)
- if ok {
- return sourceSeeker.Seek(offset, whence)
- }
- // Verify if hook has embedded Seeker, use it.
- hookSeeker, ok := hr.hook.(io.Seeker)
- if ok {
- return hookSeeker.Seek(offset, whence)
- }
- return n, nil
-}
-
-// Read implements io.Reader. Always reads from the source, the return
-// value 'n' number of bytes are reported through the hook. Returns
-// error for all non io.EOF conditions.
-func (hr *hookReader) Read(b []byte) (n int, err error) {
- n, err = hr.source.Read(b)
- if err != nil && err != io.EOF {
- return n, err
- }
- // Progress the hook with the total read bytes from the source.
- if _, herr := hr.hook.Read(b[:n]); herr != nil {
- if herr != io.EOF {
- return n, herr
- }
- }
- return n, err
-}
-
-// newHook returns a io.ReadSeeker which implements hookReader that
-// reports the data read from the source to the hook.
-func newHook(source, hook io.Reader) io.Reader {
- if hook == nil {
- return source
- }
- return &hookReader{source, hook}
-}
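A usage sketch of the hook pattern above, with a byte-counting hook standing in for a progress bar; `progressHook` and `hooked` are hypothetical reductions of hookReader:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// progressHook counts the bytes it is shown; a real progress bar
// would render an update here instead.
type progressHook struct{ n int64 }

func (p *progressHook) Read(b []byte) (int, error) {
	p.n += int64(len(b))
	return len(b), nil
}

// hooked mirrors hookReader.Read: read from the source, then report
// the same bytes to the hook.
type hooked struct {
	source io.Reader
	hook   io.Reader
}

func (h *hooked) Read(b []byte) (int, error) {
	n, err := h.source.Read(b)
	if err != nil && err != io.EOF {
		return n, err
	}
	if n > 0 {
		h.hook.Read(b[:n])
	}
	return n, err
}

func main() {
	hook := &progressHook{}
	r := &hooked{source: strings.NewReader("some object payload"), hook: hook}
	ioutil.ReadAll(r)
	fmt.Println("bytes reported:", hook.n) // 19
}
```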
diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go
deleted file mode 100644
index 2a675d770..000000000
--- a/vendor/github.com/minio/minio-go/post-policy.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package minio
-
-import (
- "encoding/base64"
- "fmt"
- "strings"
- "time"
-)
-
-// expirationDateFormat date format for expiration key in json policy.
-const expirationDateFormat = "2006-01-02T15:04:05.999Z"
-
-// policyCondition explanation:
-// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
-//
-// Example:
-//
-// policyCondition {
-// matchType: "eq",
-// condition: "$Content-Type",
-// value: "image/png",
-// }
-//
-type policyCondition struct {
- matchType string
- condition string
- value string
-}
-
-// PostPolicy - Provides strict static type conversion and validation
-// for Amazon S3's POST policy JSON string.
-type PostPolicy struct {
- // Expiration date and time of the POST policy.
- expiration time.Time
- // Collection of different policy conditions.
- conditions []policyCondition
- // ContentLengthRange minimum and maximum allowable size for the
- // uploaded content.
- contentLengthRange struct {
- min int64
- max int64
- }
-
- // Post form data.
- formData map[string]string
-}
-
-// NewPostPolicy - Instantiate new post policy.
-func NewPostPolicy() *PostPolicy {
- p := &PostPolicy{}
- p.conditions = make([]policyCondition, 0)
- p.formData = make(map[string]string)
- return p
-}
-
-// SetExpires - Sets expiration time for the new policy.
-func (p *PostPolicy) SetExpires(t time.Time) error {
- if t.IsZero() {
- return ErrInvalidArgument("No expiry time set.")
- }
- p.expiration = t
- return nil
-}
-
-// SetKey - Sets an object name for the policy based upload.
-func (p *PostPolicy) SetKey(key string) error {
- if strings.TrimSpace(key) == "" || key == "" {
- return ErrInvalidArgument("Object name is empty.")
- }
- policyCond := policyCondition{
- matchType: "eq",
- condition: "$key",
- value: key,
- }
- if err := p.addNewPolicy(policyCond); err != nil {
- return err
- }
- p.formData["key"] = key
- return nil
-}
-
-// SetKeyStartsWith - Sets an object name that a policy based upload
-// can start with.
-func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
- if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
- return ErrInvalidArgument("Object prefix is empty.")
- }
- policyCond := policyCondition{
- matchType: "starts-with",
- condition: "$key",
- value: keyStartsWith,
- }
- if err := p.addNewPolicy(policyCond); err != nil {
- return err
- }
- p.formData["key"] = keyStartsWith
- return nil
-}
-
-// SetBucket - Sets bucket at which objects will be uploaded to.
-func (p *PostPolicy) SetBucket(bucketName string) error {
- if strings.TrimSpace(bucketName) == "" || bucketName == "" {
- return ErrInvalidArgument("Bucket name is empty.")
- }
- policyCond := policyCondition{
- matchType: "eq",
- condition: "$bucket",
- value: bucketName,
- }
- if err := p.addNewPolicy(policyCond); err != nil {
- return err
- }
- p.formData["bucket"] = bucketName
- return nil
-}
-
-// SetContentType - Sets content-type of the object for this policy
-// based upload.
-func (p *PostPolicy) SetContentType(contentType string) error {
- if strings.TrimSpace(contentType) == "" || contentType == "" {
- return ErrInvalidArgument("No content type specified.")
- }
- policyCond := policyCondition{
- matchType: "eq",
- condition: "$Content-Type",
- value: contentType,
- }
- if err := p.addNewPolicy(policyCond); err != nil {
- return err
- }
- p.formData["Content-Type"] = contentType
- return nil
-}
-
-// SetContentLengthRange - Set new min and max content length
-// condition for all incoming uploads.
-func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
- if min > max {
- return ErrInvalidArgument("Minimum limit is larger than maximum limit.")
- }
- if min < 0 {
- return ErrInvalidArgument("Minimum limit cannot be negative.")
- }
- if max < 0 {
- return ErrInvalidArgument("Maximum limit cannot be negative.")
- }
- p.contentLengthRange.min = min
- p.contentLengthRange.max = max
- return nil
-}
-
-// addNewPolicy - internal helper to validate adding new policies.
-func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
- if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
- return ErrInvalidArgument("Policy fields are empty.")
- }
- p.conditions = append(p.conditions, policyCond)
- return nil
-}
-
-// Stringer interface for printing policy in json formatted string.
-func (p PostPolicy) String() string {
- return string(p.marshalJSON())
-}
-
-// marshalJSON - Provides Marshalled JSON in bytes.
-func (p PostPolicy) marshalJSON() []byte {
- expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
- var conditionsStr string
- conditions := []string{}
- for _, po := range p.conditions {
- conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
- }
- if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
- conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
- p.contentLengthRange.min, p.contentLengthRange.max))
- }
- if len(conditions) > 0 {
- conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
- }
- retStr := "{"
- retStr = retStr + expirationStr + ","
- retStr = retStr + conditionsStr
- retStr = retStr + "}"
- return []byte(retStr)
-}
-
-// base64 - Produces base64 of PostPolicy's Marshalled json.
-func (p PostPolicy) base64() string {
- return base64.StdEncoding.EncodeToString(p.marshalJSON())
-}
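For reference, the JSON that marshalJSON and base64() emit can be approximated by hand. A sketch with made-up bucket, key, and expiration values, matching the condition layout built above:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// A hand-rolled version of what marshalJSON produces for a pair of
	// "eq" conditions plus a content-length-range condition.
	conditions := []string{
		`["eq","$bucket","mybucket"]`,
		`["eq","$key","uploads/photo.png"]`,
		`["content-length-range", 1024, 10485760]`,
	}
	policy := `{"expiration":"2016-12-31T12:00:00.000Z",` +
		`"conditions":[` + strings.Join(conditions, ",") + `]}`
	fmt.Println(policy)
	// base64() of the marshalled policy is what gets signed and posted.
	fmt.Println(base64.StdEncoding.EncodeToString([]byte(policy)))
}
```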
diff --git a/vendor/github.com/minio/minio-go/request-signature-v2.go b/vendor/github.com/minio/minio-go/request-signature-v2.go
deleted file mode 100644
index d753a7b8a..000000000
--- a/vendor/github.com/minio/minio-go/request-signature-v2.go
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/sha1"
- "encoding/base64"
- "fmt"
- "net/http"
- "net/url"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-// Signature and API related constants.
-const (
- signV2Algorithm = "AWS"
-)
-
-// Encode input URL path to URL encoded path.
-func encodeURL2Path(u *url.URL) (path string) {
- // Encode URL path.
- if isS3, _ := filepath.Match("*.s3*.amazonaws.com", u.Host); isS3 {
- hostSplits := strings.SplitN(u.Host, ".", 4)
- // First element is the bucket name.
- bucketName := hostSplits[0]
- path = "/" + bucketName
- path += u.Path
- path = urlEncodePath(path)
- return
- }
- if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
- path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
- path += u.Path
- path = urlEncodePath(path)
- return
- }
- path = urlEncodePath(u.Path)
- return
-}
-
-// preSignV2 - presign the request in following style.
-// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
-func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
- // Presign is not needed for anonymous credentials.
- if accessKeyID == "" || secretAccessKey == "" {
- return &req
- }
- d := time.Now().UTC()
- // Add date if not present.
- if date := req.Header.Get("Date"); date == "" {
- req.Header.Set("Date", d.Format(http.TimeFormat))
- }
-
- // Get encoded URL path.
- path := encodeURL2Path(req.URL)
- if len(req.URL.Query()) > 0 {
- // Keep the usual queries unescaped for string to sign.
- query, _ := url.QueryUnescape(queryEncode(req.URL.Query()))
- path = path + "?" + query
- }
-
- // Find epoch expires when the request will expire.
- epochExpires := d.Unix() + expires
-
- // Get string to sign.
- stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path)
- hm := hmac.New(sha1.New, []byte(secretAccessKey))
- hm.Write([]byte(stringToSign))
-
- // Calculate signature.
- signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
-
- query := req.URL.Query()
- // Handle specially for Google Cloud Storage.
- if strings.Contains(req.URL.Host, ".storage.googleapis.com") {
- query.Set("GoogleAccessId", accessKeyID)
- } else {
- query.Set("AWSAccessKeyId", accessKeyID)
- }
-
- // Fill in Expires for presigned query.
- query.Set("Expires", strconv.FormatInt(epochExpires, 10))
-
- // Encode query and save.
- req.URL.RawQuery = queryEncode(query)
-
- // Save signature finally.
- req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
-
- // Return.
- return &req
-}
-
-// postPresignSignatureV2 - presigned signature for PostPolicy
-// request.
-func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
- hm := hmac.New(sha1.New, []byte(secretAccessKey))
- hm.Write([]byte(policyBase64))
- signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
- return signature
-}
-
-// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
-// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
-//
-// StringToSign = HTTP-Verb + "\n" +
-// Content-Md5 + "\n" +
-// Content-Type + "\n" +
-// Date + "\n" +
-// CanonicalizedProtocolHeaders +
-// CanonicalizedResource;
-//
-// CanonicalizedResource = [ "/" + Bucket ] +
-// <HTTP-Request-URI, from the protocol name up to the query string> +
-// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
-//
-// CanonicalizedProtocolHeaders = <described below>
-
-// signV2 sign the request before Do() (AWS Signature Version 2).
-func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
- // Signature calculation is not needed for anonymous credentials.
- if accessKeyID == "" || secretAccessKey == "" {
- return &req
- }
-
- // Initial time.
- d := time.Now().UTC()
-
- // Add date if not present.
- if date := req.Header.Get("Date"); date == "" {
- req.Header.Set("Date", d.Format(http.TimeFormat))
- }
-
- // Calculate HMAC for secretAccessKey.
- stringToSign := getStringToSignV2(req)
- hm := hmac.New(sha1.New, []byte(secretAccessKey))
- hm.Write([]byte(stringToSign))
-
- // Prepare auth header.
- authHeader := new(bytes.Buffer)
- authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
- encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
- encoder.Write(hm.Sum(nil))
- encoder.Close()
-
- // Set Authorization header.
- req.Header.Set("Authorization", authHeader.String())
-
- return &req
-}
-
-// From the Amazon docs:
-//
-// StringToSign = HTTP-Verb + "\n" +
-// Content-Md5 + "\n" +
-// Content-Type + "\n" +
-// Date + "\n" +
-// CanonicalizedProtocolHeaders +
-// CanonicalizedResource;
-func getStringToSignV2(req http.Request) string {
- buf := new(bytes.Buffer)
- // Write standard headers.
- writeDefaultHeaders(buf, req)
- // Write canonicalized protocol headers if any.
- writeCanonicalizedHeaders(buf, req)
- // Write canonicalized Query resources if any.
- writeCanonicalizedResource(buf, req)
- return buf.String()
-}
-
-// writeDefaultHeader - write all default necessary headers
-func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) {
- buf.WriteString(req.Method)
- buf.WriteByte('\n')
- buf.WriteString(req.Header.Get("Content-Md5"))
- buf.WriteByte('\n')
- buf.WriteString(req.Header.Get("Content-Type"))
- buf.WriteByte('\n')
- buf.WriteString(req.Header.Get("Date"))
- buf.WriteByte('\n')
-}
-
-// writeCanonicalizedHeaders - write canonicalized headers.
-func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
- var protoHeaders []string
- vals := make(map[string][]string)
- for k, vv := range req.Header {
- // All the AMZ headers should be lowercase
- lk := strings.ToLower(k)
- if strings.HasPrefix(lk, "x-amz") {
- protoHeaders = append(protoHeaders, lk)
- vals[lk] = vv
- }
- }
- sort.Strings(protoHeaders)
- for _, k := range protoHeaders {
- buf.WriteString(k)
- buf.WriteByte(':')
- for idx, v := range vals[k] {
- if idx > 0 {
- buf.WriteByte(',')
- }
- if strings.Contains(v, "\n") {
- // TODO: "Unfold" long headers that
- // span multiple lines (as allowed by
- // RFC 2616, section 4.2) by replacing
- // the folding white-space (including
- // new-line) by a single space.
- buf.WriteString(v)
- } else {
- buf.WriteString(v)
- }
- }
- buf.WriteByte('\n')
- }
-}
-
-// Must be sorted:
-var resourceList = []string{
- "acl",
- "location",
- "logging",
- "notification",
- "partNumber",
- "policy",
- "response-content-type",
- "response-content-language",
- "response-expires",
- "response-cache-control",
- "response-content-disposition",
- "response-content-encoding",
- "requestPayment",
- "torrent",
- "uploadId",
- "uploads",
- "versionId",
- "versioning",
- "versions",
- "website",
-}
-
-// From the Amazon docs:
-//
-// CanonicalizedResource = [ "/" + Bucket ] +
-// <HTTP-Request-URI, from the protocol name up to the query string> +
-// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
-func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) {
- // Save request URL.
- requestURL := req.URL
- // Get encoded URL path.
- path := encodeURL2Path(requestURL)
- buf.WriteString(path)
-
- sort.Strings(resourceList)
- if requestURL.RawQuery != "" {
- var n int
- vals, _ := url.ParseQuery(requestURL.RawQuery)
- // Verify if any sub-resource queries are present, if yes
- // canonicalize them.
- for _, resource := range resourceList {
- if vv, ok := vals[resource]; ok && len(vv) > 0 {
- n++
- // First element
- switch n {
- case 1:
- buf.WriteByte('?')
- // The rest
- default:
- buf.WriteByte('&')
- }
- buf.WriteString(resource)
- // Request parameters
- if len(vv[0]) > 0 {
- buf.WriteByte('=')
- buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1))
- }
- }
- }
- }
-}
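The V2 signature itself is just base64(HMAC-SHA1(secret, stringToSign)). A standalone sketch with dummy credentials; the access key, secret, date, and resource path are placeholders:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// Dummy credentials for illustration only.
	secretAccessKey := "SECRETEXAMPLEKEY"

	// StringToSign = HTTP-Verb \n Content-Md5 \n Content-Type \n Date \n
	//                CanonicalizedProtocolHeaders + CanonicalizedResource
	stringToSign := "GET\n\n\nSun, 01 Jan 2016 00:00:00 GMT\n/mybucket/object.txt"

	hm := hmac.New(sha1.New, []byte(secretAccessKey))
	hm.Write([]byte(stringToSign))
	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))

	// The header layout signV2 constructs: "AWS" + accessKeyID + ":" + signature.
	fmt.Println("Authorization: AWS AKIAEXAMPLE:" + signature)
}
```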
diff --git a/vendor/github.com/minio/minio-go/request-signature-v4.go b/vendor/github.com/minio/minio-go/request-signature-v4.go
deleted file mode 100644
index dfd11e9e4..000000000
--- a/vendor/github.com/minio/minio-go/request-signature-v4.go
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "encoding/hex"
- "net/http"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-// Signature and API related constants.
-const (
- signV4Algorithm = "AWS4-HMAC-SHA256"
- iso8601DateFormat = "20060102T150405Z"
- yyyymmdd = "20060102"
-)
-
-///
-/// Excerpts from @lsegal -
-/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
-///
-/// User-Agent:
-///
-/// This is ignored from signing because signing this causes
-/// problems with generating pre-signed URLs (that are executed
-/// by other agents) or when customers pass requests through
-/// proxies, which may modify the user-agent.
-///
-/// Content-Length:
-///
-/// This is ignored from signing because generating a pre-signed
-/// URL should not provide a content-length constraint,
-/// specifically when vending a S3 pre-signed PUT URL. The
-/// corollary to this is that when sending regular requests
-/// (non-pre-signed), the signature contains a checksum of the
-/// body, which implicitly validates the payload length (since
-/// changing the number of bytes would change the checksum)
-/// and therefore this header is not valuable in the signature.
-///
-/// Content-Type:
-///
-/// Signing this header causes quite a number of problems in
-/// browser environments, where browsers like to modify and
-/// normalize the content-type header in different ways. There is
-/// more information on this in https://goo.gl/2E9gyy. Avoiding
-/// this field simplifies logic and reduces the possibility of
-/// future bugs.
-///
-/// Authorization:
-///
-/// Is skipped for obvious reasons
-///
-var ignoredHeaders = map[string]bool{
- "Authorization": true,
- "Content-Type": true,
- "Content-Length": true,
- "User-Agent": true,
-}
-
-// getSigningKey hmac seed to calculate final signature.
-func getSigningKey(secret, loc string, t time.Time) []byte {
- date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
- location := sumHMAC(date, []byte(loc))
- service := sumHMAC(location, []byte("s3"))
- signingKey := sumHMAC(service, []byte("aws4_request"))
- return signingKey
-}
-
-// getSignature final signature in hexadecimal form.
-func getSignature(signingKey []byte, stringToSign string) string {
- return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
-}
-
-// getScope generate a string of a specific date, an AWS region, and a
-// service.
-func getScope(location string, t time.Time) string {
- scope := strings.Join([]string{
- t.Format(yyyymmdd),
- location,
- "s3",
- "aws4_request",
- }, "/")
- return scope
-}
-
-// getCredential generate a credential string.
-func getCredential(accessKeyID, location string, t time.Time) string {
- scope := getScope(location, t)
- return accessKeyID + "/" + scope
-}
-
-// getHashedPayload get the hexadecimal value of the SHA256 hash of
-// the request payload.
-func getHashedPayload(req http.Request) string {
- hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
- if hashedPayload == "" {
- // Presign does not have a payload, use S3 recommended value.
- hashedPayload = "UNSIGNED-PAYLOAD"
- }
- return hashedPayload
-}
-
-// getCanonicalHeaders generate a list of request headers for
-// signature.
-func getCanonicalHeaders(req http.Request) string {
- var headers []string
- vals := make(map[string][]string)
- for k, vv := range req.Header {
- if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
- continue // ignored header
- }
- headers = append(headers, strings.ToLower(k))
- vals[strings.ToLower(k)] = vv
- }
- headers = append(headers, "host")
- sort.Strings(headers)
-
- var buf bytes.Buffer
- // Save all the headers in canonical form : newline
- // separated for each header.
- for _, k := range headers {
- buf.WriteString(k)
- buf.WriteByte(':')
- switch {
- case k == "host":
- buf.WriteString(req.URL.Host)
- fallthrough
- default:
- for idx, v := range vals[k] {
- if idx > 0 {
- buf.WriteByte(',')
- }
- buf.WriteString(v)
- }
- buf.WriteByte('\n')
- }
- }
- return buf.String()
-}
-
-// getSignedHeaders generate all signed request headers.
-// i.e. lexically sorted, semicolon-separated list of lowercase
-// request header names.
-func getSignedHeaders(req http.Request) string {
- var headers []string
- for k := range req.Header {
- if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
- continue // Ignored header found continue.
- }
- headers = append(headers, strings.ToLower(k))
- }
- headers = append(headers, "host")
- sort.Strings(headers)
- return strings.Join(headers, ";")
-}
-
-// getCanonicalRequest generate a canonical request of style.
-//
-// canonicalRequest =
-// <HTTPMethod>\n
-// <CanonicalURI>\n
-// <CanonicalQueryString>\n
-// <CanonicalHeaders>\n
-// <SignedHeaders>\n
-// <HashedPayload>
-//
-func getCanonicalRequest(req http.Request) string {
- req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
- canonicalRequest := strings.Join([]string{
- req.Method,
- urlEncodePath(req.URL.Path),
- req.URL.RawQuery,
- getCanonicalHeaders(req),
- getSignedHeaders(req),
- getHashedPayload(req),
- }, "\n")
- return canonicalRequest
-}
-
-// getStringToSign a string based on selected query values.
-func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
- stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
- stringToSign = stringToSign + getScope(location, t) + "\n"
- stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
- return stringToSign
-}
-
-// preSignV4 presign the request, in accordance with
-// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
-func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
- // Presign is not needed for anonymous credentials.
- if accessKeyID == "" || secretAccessKey == "" {
- return &req
- }
-
- // Initial time.
- t := time.Now().UTC()
-
- // Get credential string.
- credential := getCredential(accessKeyID, location, t)
-
- // Get all signed headers.
- signedHeaders := getSignedHeaders(req)
-
- // Set URL query.
- query := req.URL.Query()
- query.Set("X-Amz-Algorithm", signV4Algorithm)
- query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
- query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
- query.Set("X-Amz-SignedHeaders", signedHeaders)
- query.Set("X-Amz-Credential", credential)
- req.URL.RawQuery = query.Encode()
-
- // Get canonical request.
- canonicalRequest := getCanonicalRequest(req)
-
- // Get string to sign from canonical request.
- stringToSign := getStringToSignV4(t, location, canonicalRequest)
-
- // Get hmac signing key.
- signingKey := getSigningKey(secretAccessKey, location, t)
-
- // Calculate signature.
- signature := getSignature(signingKey, stringToSign)
-
- // Add signature header to RawQuery.
- req.URL.RawQuery += "&X-Amz-Signature=" + signature
-
- return &req
-}
-
-// postPresignSignatureV4 - presigned signature for PostPolicy
-// requests.
-func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
- // Get signing key.
- signingkey := getSigningKey(secretAccessKey, location, t)
- // Calculate signature.
- signature := getSignature(signingkey, policyBase64)
- return signature
-}
-
-// signV4 sign the request before Do(), in accordance with
-// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
-func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
- // Signature calculation is not needed for anonymous credentials.
- if accessKeyID == "" || secretAccessKey == "" {
- return &req
- }
-
- // Initial time.
- t := time.Now().UTC()
-
- // Set x-amz-date.
- req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
-
- // Get canonical request.
- canonicalRequest := getCanonicalRequest(req)
-
- // Get string to sign from canonical request.
- stringToSign := getStringToSignV4(t, location, canonicalRequest)
-
- // Get hmac signing key.
- signingKey := getSigningKey(secretAccessKey, location, t)
-
- // Get credential string.
- credential := getCredential(accessKeyID, location, t)
-
- // Get all signed headers.
- signedHeaders := getSignedHeaders(req)
-
- // Calculate signature.
- signature := getSignature(signingKey, stringToSign)
-
- // If regular request, construct the final authorization header.
- parts := []string{
- signV4Algorithm + " Credential=" + credential,
- "SignedHeaders=" + signedHeaders,
- "Signature=" + signature,
- }
-
- // Set authorization header.
- auth := strings.Join(parts, ", ")
- req.Header.Set("Authorization", auth)
-
- return &req
-}
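The V4 derivation above chains four HMAC-SHA256 operations before the final signature is computed over the string-to-sign. A standalone sketch with dummy inputs; the canonical-request hash is left as a placeholder rather than computed:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// sumHMAC mirrors the helper the signer relies on.
func sumHMAC(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Dummy inputs for illustration only.
	secret, region, date := "SECRETEXAMPLEKEY", "us-east-1", "20160101"

	// The chain from getSigningKey:
	// HMAC("AWS4"+secret, date) -> region -> "s3" -> "aws4_request".
	dateKey := sumHMAC([]byte("AWS4"+secret), []byte(date))
	regionKey := sumHMAC(dateKey, []byte(region))
	serviceKey := sumHMAC(regionKey, []byte("s3"))
	signingKey := sumHMAC(serviceKey, []byte("aws4_request"))

	// Signing the string-to-sign with the derived key yields the hex
	// signature placed in the Authorization header.
	stringToSign := "AWS4-HMAC-SHA256\n20160101T000000Z\n" +
		date + "/" + region + "/s3/aws4_request\n" +
		"<hex-sha256-of-canonical-request>" // placeholder for the real hash
	signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
	fmt.Println("Signature=" + signature)
}
```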
diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go
deleted file mode 100644
index d9fbe12f5..000000000
--- a/vendor/github.com/minio/minio-go/retry.go
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "net"
- "net/http"
- "net/url"
- "strings"
- "time"
-)
-
-// MaxRetry is the maximum number of retries before stopping.
-var MaxRetry = 5
-
-// MaxJitter will randomize over the full exponential backoff time
-const MaxJitter = 1.0
-
-// NoJitter disables the use of jitter for randomizing the exponential backoff time
-const NoJitter = 0.0
-
-// newRetryTimer creates a timer with exponentially increasing delays
-// until the maximum retry attempts are reached.
-func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int {
- attemptCh := make(chan int)
-
- // computes the exponential backoff duration according to
- // https://www.awsarchitectureblog.com/2015/03/backoff.html
- exponentialBackoffWait := func(attempt int) time.Duration {
- // normalize jitter to the range [0, 1.0]
- if jitter < NoJitter {
- jitter = NoJitter
- }
- if jitter > MaxJitter {
- jitter = MaxJitter
- }
-
- //sleep = random_between(0, min(cap, base * 2 ** attempt))
- sleep := unit * time.Duration(1<<uint(attempt))
- if sleep > cap {
- sleep = cap
- }
- if jitter != NoJitter {
- sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
- }
- return sleep
- }
-
- go func() {
- defer close(attemptCh)
- for i := 0; i < maxRetry; i++ {
- attemptCh <- i + 1 // Attempts start from 1.
- time.Sleep(exponentialBackoffWait(i))
- }
- }()
- return attemptCh
-}
-
-// isNetErrorRetryable - is network error retryable.
-func isNetErrorRetryable(err error) bool {
- switch err.(type) {
- case *net.DNSError, *net.OpError, net.UnknownNetworkError:
- return true
- case *url.Error:
- // For a URL error, where it replies back "connection closed"
- // retry again.
- if strings.Contains(err.Error(), "Connection closed by foreign host") {
- return true
- }
- }
- return false
-}
-
-// List of AWS S3 error codes which are retryable.
-var retryableS3Codes = map[string]struct{}{
- "RequestError": {},
- "RequestTimeout": {},
- "Throttling": {},
- "ThrottlingException": {},
- "RequestLimitExceeded": {},
- "RequestThrottled": {},
- "InternalError": {},
- "ExpiredToken": {},
- "ExpiredTokenException": {},
- // Add more AWS S3 codes here.
-}
-
-// isS3CodeRetryable - is s3 error code retryable.
-func isS3CodeRetryable(s3Code string) (ok bool) {
- _, ok = retryableS3Codes[s3Code]
- return ok
-}
-
-// List of HTTP status codes which are retryable.
-var retryableHTTPStatusCodes = map[int]struct{}{
- 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
- http.StatusInternalServerError: {},
- http.StatusBadGateway: {},
- http.StatusServiceUnavailable: {},
- // Add more HTTP status codes here.
-}
-
-// isHTTPStatusRetryable - is HTTP error code retryable.
-func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
- _, ok = retryableHTTPStatusCodes[httpStatusCode]
- return ok
-}
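The retry timer being deleted implements exponential backoff with full jitter, per the AWS architecture blog post cited in the code. A self-contained sketch of that backoff computation, runnable outside the client (the constants here are illustrative; the deleted code took unit, cap, and jitter as parameters):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// Illustrative defaults, not the library's values.
const (
	unit     = 50 * time.Millisecond // base delay
	maxDelay = 2 * time.Second       // cap on any single delay
	jitter   = 1.0                   // MaxJitter: randomize the full window
)

// backoff reproduces the deleted formula:
// sleep = min(cap, unit * 2^attempt), minus a random jitter fraction.
func backoff(attempt int) time.Duration {
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > maxDelay {
		sleep = maxDelay
	}
	if jitter != 0 {
		sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
	}
	return sleep
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Printf("attempt %d: wait %v\n", i+1, backoff(i))
	}
}
```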
diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go
deleted file mode 100644
index 8c9ff5e88..000000000
--- a/vendor/github.com/minio/minio-go/s3-endpoints.go
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-// awsS3EndpointMap Amazon S3 endpoint map.
-var awsS3EndpointMap = map[string]string{
- "us-east-1": "s3.amazonaws.com",
- "us-west-2": "s3-us-west-2.amazonaws.com",
- "us-west-1": "s3-us-west-1.amazonaws.com",
- "eu-west-1": "s3-eu-west-1.amazonaws.com",
- "eu-central-1": "s3-eu-central-1.amazonaws.com",
- "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
- "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
- "ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
- "sa-east-1": "s3-sa-east-1.amazonaws.com",
-}
-
-// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
-func getS3Endpoint(bucketLocation string) (s3Endpoint string) {
- s3Endpoint, ok := awsS3EndpointMap[bucketLocation]
- if !ok {
- // Default to 's3.amazonaws.com' endpoint.
- s3Endpoint = "s3.amazonaws.com"
- }
- return s3Endpoint
-}
diff --git a/vendor/github.com/minio/minio-go/signature-type.go b/vendor/github.com/minio/minio-go/signature-type.go
deleted file mode 100644
index cae74cd01..000000000
--- a/vendor/github.com/minio/minio-go/signature-type.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-// SignatureType is type of Authorization requested for a given HTTP request.
-type SignatureType int
-
-// Different types of supported signatures - default is Latest i.e SignatureV4.
-const (
- Latest SignatureType = iota
- SignatureV4
- SignatureV2
-)
-
-// isV2 - is signature SignatureV2?
-func (s SignatureType) isV2() bool {
- return s == SignatureV2
-}
-
-// isV4 - is signature SignatureV4?
-func (s SignatureType) isV4() bool {
- return s == SignatureV4 || s == Latest
-}
diff --git a/vendor/github.com/minio/minio-go/tempfile.go b/vendor/github.com/minio/minio-go/tempfile.go
deleted file mode 100644
index 65c7b0da1..000000000
--- a/vendor/github.com/minio/minio-go/tempfile.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "io/ioutil"
- "os"
- "sync"
-)
-
-// tempFile - temporary file container.
-type tempFile struct {
- *os.File
- mutex *sync.Mutex
-}
-
-// newTempFile returns a new temporary file, once closed it automatically deletes itself.
-func newTempFile(prefix string) (*tempFile, error) {
- // use platform specific temp directory.
- file, err := ioutil.TempFile(os.TempDir(), prefix)
- if err != nil {
- return nil, err
- }
- return &tempFile{
- File: file,
- mutex: &sync.Mutex{},
- }, nil
-}
-
-// Close - closer wrapper to close and remove temporary file.
-func (t *tempFile) Close() error {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- if t.File != nil {
- // Close the file.
- if err := t.File.Close(); err != nil {
- return err
- }
- // Remove file.
- if err := os.Remove(t.File.Name()); err != nil {
- return err
- }
- t.File = nil
- }
- return nil
-}
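The wrapper's only job is to tie file removal to Close. A minimal sketch of the equivalent pattern with plain defers (the function and names here are hypothetical, not from the library); note the LIFO defer order guarantees the close happens before the remove:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// writeScratch gives the same guarantee the deleted wrapper bundled into a
// single Close: the scratch file is closed and removed when we are done.
func writeScratch(data []byte) error {
	f, err := ioutil.TempFile(os.TempDir(), "scratch-")
	if err != nil {
		return err
	}
	defer os.Remove(f.Name()) // runs second: delete after close
	defer f.Close()           // runs first (defers unwind LIFO)
	_, err = f.Write(data)
	return err
}

func main() {
	if err := writeScratch([]byte("hello")); err != nil {
		fmt.Println(err)
	}
}
```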
diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go
deleted file mode 100644
index 816fbed46..000000000
--- a/vendor/github.com/minio/minio-go/utils.go
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/md5"
- "crypto/sha256"
- "encoding/hex"
- "encoding/xml"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "regexp"
- "sort"
- "strings"
- "time"
- "unicode/utf8"
-)
-
-// xmlDecoder provide decoded value in xml.
-func xmlDecoder(body io.Reader, v interface{}) error {
- d := xml.NewDecoder(body)
- return d.Decode(v)
-}
-
-// sum256 calculate sha256 sum for an input byte array.
-func sum256(data []byte) []byte {
- hash := sha256.New()
- hash.Write(data)
- return hash.Sum(nil)
-}
-
-// sumMD5 calculate sumMD5 sum for an input byte array.
-func sumMD5(data []byte) []byte {
- hash := md5.New()
- hash.Write(data)
- return hash.Sum(nil)
-}
-
-// sumHMAC calculate hmac between two input byte array.
-func sumHMAC(key []byte, data []byte) []byte {
- hash := hmac.New(sha256.New, key)
- hash.Write(data)
- return hash.Sum(nil)
-}
-
-// getEndpointURL - construct a new endpoint.
-func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) {
- if strings.Contains(endpoint, ":") {
- host, _, err := net.SplitHostPort(endpoint)
- if err != nil {
- return nil, err
- }
- if !isValidIP(host) && !isValidDomain(host) {
- msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
- return nil, ErrInvalidArgument(msg)
- }
- } else {
- if !isValidIP(endpoint) && !isValidDomain(endpoint) {
- msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
- return nil, ErrInvalidArgument(msg)
- }
- }
- // if inSecure is true, use 'http' scheme.
- scheme := "https"
- if inSecure {
- scheme = "http"
- }
-
- // Construct a secured endpoint URL.
- endpointURLStr := scheme + "://" + endpoint
- endpointURL, err := url.Parse(endpointURLStr)
- if err != nil {
- return nil, err
- }
-
- // Validate incoming endpoint URL.
- if err := isValidEndpointURL(endpointURL); err != nil {
- return nil, err
- }
- return endpointURL, nil
-}
-
-// isValidDomain validates if input string is a valid domain name.
-func isValidDomain(host string) bool {
- // See RFC 1035, RFC 3696.
- host = strings.TrimSpace(host)
- if len(host) == 0 || len(host) > 255 {
- return false
- }
- // host cannot start or end with "-"
- if host[len(host)-1:] == "-" || host[:1] == "-" {
- return false
- }
- // host cannot start or end with "_"
- if host[len(host)-1:] == "_" || host[:1] == "_" {
- return false
- }
- // host cannot start or end with a "."
- if host[len(host)-1:] == "." || host[:1] == "." {
- return false
- }
- // All non alphanumeric characters are invalid.
- if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
- return false
- }
- // No need to regexp match, since the list is non-exhaustive.
- // We let it valid and fail later.
- return true
-}
-
-// isValidIP parses input string for ip address validity.
-func isValidIP(ip string) bool {
- return net.ParseIP(ip) != nil
-}
-
-// closeResponse close non nil response with any response Body.
-// convenient wrapper to drain any remaining data on response body.
-//
-// Subsequently this allows golang http RoundTripper
-// to re-use the same connection for future requests.
-func closeResponse(resp *http.Response) {
- // Callers should close resp.Body when done reading from it.
- // If resp.Body is not closed, the Client's underlying RoundTripper
- // (typically Transport) may not be able to re-use a persistent TCP
- // connection to the server for a subsequent "keep-alive" request.
- if resp != nil && resp.Body != nil {
- // Drain any remaining Body and then close the connection.
- // Without this closing connection would disallow re-using
- // the same connection for future uses.
- // - http://stackoverflow.com/a/17961593/4465767
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
- }
-}
-
-// isVirtualHostSupported - verifies if bucketName can be part of
-// virtual host. Currently only Amazon S3 and Google Cloud Storage would
-// support this.
-func isVirtualHostSupported(endpointURL *url.URL, bucketName string) bool {
- // bucketName can be valid but '.' in the hostname will fail SSL
- // certificate validation. So do not use host-style for such buckets.
- if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
- return false
- }
- // Return true for all other cases
- return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
-}
-
-// Match if it is exactly Amazon S3 endpoint.
-func isAmazonEndpoint(endpointURL *url.URL) bool {
- if endpointURL == nil {
- return false
- }
- if endpointURL.Host == "s3.amazonaws.com" {
- return true
- }
- return false
-}
-
-// Match if it is exactly Google cloud storage endpoint.
-func isGoogleEndpoint(endpointURL *url.URL) bool {
- if endpointURL == nil {
- return false
- }
- if endpointURL.Host == "storage.googleapis.com" {
- return true
- }
- return false
-}
-
-// Verify if input endpoint URL is valid.
-func isValidEndpointURL(endpointURL *url.URL) error {
- if endpointURL == nil {
- return ErrInvalidArgument("Endpoint url cannot be empty.")
- }
- if endpointURL.Path != "/" && endpointURL.Path != "" {
- return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
- }
- if strings.Contains(endpointURL.Host, ".amazonaws.com") {
- if !isAmazonEndpoint(endpointURL) {
- return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
- }
- }
- if strings.Contains(endpointURL.Host, ".googleapis.com") {
- if !isGoogleEndpoint(endpointURL) {
- return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
- }
- }
- return nil
-}
-
-// Verify if input expires value is valid.
-func isValidExpiry(expires time.Duration) error {
- expireSeconds := int64(expires / time.Second)
- if expireSeconds < 1 {
- return ErrInvalidArgument("Expires cannot be lesser than 1 second.")
- }
- if expireSeconds > 604800 {
- return ErrInvalidArgument("Expires cannot be greater than 7 days.")
- }
- return nil
-}
-
-// We support '.' with bucket names but we fallback to using path
-// style requests instead for such buckets.
-var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
-
-// isValidBucketName - verify bucket name in accordance with
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
-func isValidBucketName(bucketName string) error {
- if strings.TrimSpace(bucketName) == "" {
- return ErrInvalidBucketName("Bucket name cannot be empty.")
- }
- if len(bucketName) < 3 {
- return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
- }
- if len(bucketName) > 63 {
- return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
- }
- if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
- return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
- }
- if match, _ := regexp.MatchString("\\.\\.", bucketName); match == true {
- return ErrInvalidBucketName("Bucket name cannot have successive periods.")
- }
- if !validBucketName.MatchString(bucketName) {
- return ErrInvalidBucketName("Bucket name contains invalid characters.")
- }
- return nil
-}
-
-// isValidObjectName - verify object name in accordance with
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
-func isValidObjectName(objectName string) error {
- if strings.TrimSpace(objectName) == "" {
- return ErrInvalidObjectName("Object name cannot be empty.")
- }
- if len(objectName) > 1024 {
- return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
- }
- if !utf8.ValidString(objectName) {
- return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.")
- }
- return nil
-}
-
-// isValidObjectPrefix - verify if object prefix is valid.
-func isValidObjectPrefix(objectPrefix string) error {
- if len(objectPrefix) > 1024 {
- return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
- }
- if !utf8.ValidString(objectPrefix) {
- return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.")
- }
- return nil
-}
-
-// queryEncode - encodes query values in their URL encoded form.
-func queryEncode(v url.Values) string {
- if v == nil {
- return ""
- }
- var buf bytes.Buffer
- keys := make([]string, 0, len(v))
- for k := range v {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- vs := v[k]
- prefix := urlEncodePath(k) + "="
- for _, v := range vs {
- if buf.Len() > 0 {
- buf.WriteByte('&')
- }
- buf.WriteString(prefix)
- buf.WriteString(urlEncodePath(v))
- }
- }
- return buf.String()
-}
-
-// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
-//
-// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
-// non english characters cannot be parsed due to the nature in which url.Encode() is written
-//
-// This function on the other hand is a direct replacement for url.Encode() technique to support
-// pretty much every UTF-8 character.
-func urlEncodePath(pathName string) string {
- // if object matches reserved string, no need to encode them
- reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
- if reservedNames.MatchString(pathName) {
- return pathName
- }
- var encodedPathname string
- for _, s := range pathName {
- if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
- encodedPathname = encodedPathname + string(s)
- continue
- }
- switch s {
- case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
- encodedPathname = encodedPathname + string(s)
- continue
- default:
- len := utf8.RuneLen(s)
- if len < 0 {
- // if utf8 cannot convert return the same string as is
- return pathName
- }
- u := make([]byte, len)
- utf8.EncodeRune(u, s)
- for _, r := range u {
- hex := hex.EncodeToString([]byte{r})
- encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
- }
- }
- }
- return encodedPathname
-}
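Why a hand-rolled encoder? The standard query-style escapers emit `+` for spaces and escape `/`, while S3 object keys need RFC 3986 path escaping with `/` preserved as the key separator. A rough standalone approximation using the standard library (an approximation of the deleted urlEncodePath, not its exact behavior):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// s3EscapePath approximates the deleted urlEncodePath: percent-encode
// everything except the RFC 3986 unreserved characters and '/'.
func s3EscapePath(p string) string {
	escaped := url.QueryEscape(p)
	// QueryEscape emits '+' for spaces and escapes '/'; undo both.
	escaped = strings.Replace(escaped, "+", "%20", -1)
	return strings.Replace(escaped, "%2F", "/", -1)
}

func main() {
	fmt.Println(s3EscapePath("photos/March 19/ä.png"))
	// Output: photos/March%2019/%C3%A4.png
}
```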
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 0866cfe42..ebbd41ed6 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -87,11 +87,6 @@
"revision": "db6b4f13442b26995f04b3b2b31b006cae7786e6",
"revisionTime": "2016-02-29T08:42:30-08:00"
},
- {
- "path": "github.com/minio/minio-go",
- "revision": "ec610a695d37f1f04b9516a91857bfca007e3740",
- "revisionTime": "2016-03-19T15:53:30-07:00"
- },
{
"path": "github.com/minio/miniobrowser",
"revision": "0e3078dae386f6404476d7cf9adf0128bb4f25f5",
diff --git a/web-handlers.go b/web-handlers.go
index 62e8d6b20..32d055714 100644
--- a/web-handlers.go
+++ b/web-handlers.go
@@ -18,23 +18,20 @@ package main
import (
"fmt"
- "net"
"net/http"
- "net/url"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
- "strings"
"time"
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/dustin/go-humanize"
+ "github.com/gorilla/mux"
"github.com/gorilla/rpc/v2/json2"
- "github.com/minio/minio-go"
"github.com/minio/minio/pkg/disk"
- "github.com/minio/minio/pkg/probe"
+ "github.com/minio/minio/pkg/fs"
"github.com/minio/miniobrowser"
)
@@ -73,11 +70,8 @@ type ServerInfoRep struct {
UIVersion string `json:"uiVersion"`
}
-// ServerInfoArgs - server info args.
-type ServerInfoArgs struct{}
-
// ServerInfo - get server info.
-func (web *webAPI) ServerInfo(r *http.Request, args *ServerInfoArgs, reply *ServerInfoRep) error {
+func (web *webAPI) ServerInfo(r *http.Request, args *GenericArgs, reply *ServerInfoRep) error {
if !isJWTReqAuthenticated(r) {
return &json2.Error{Message: "Unauthorized request"}
}
@@ -105,9 +99,6 @@ func (web *webAPI) ServerInfo(r *http.Request, args *ServerInfoArgs, reply *Serv
return nil
}
-// DiskInfoArgs - disk info args.
-type DiskInfoArgs struct{}
-
// DiskInfoRep - disk info reply.
type DiskInfoRep struct {
DiskInfo disk.Info `json:"diskInfo"`
@@ -115,11 +106,11 @@ type DiskInfoRep struct {
}
// DiskInfo - get disk statistics.
-func (web *webAPI) DiskInfo(r *http.Request, args *DiskInfoArgs, reply *DiskInfoRep) error {
+func (web *webAPI) DiskInfo(r *http.Request, args *GenericArgs, reply *DiskInfoRep) error {
if !isJWTReqAuthenticated(r) {
return &json2.Error{Message: "Unauthorized request"}
}
- info, e := disk.GetInfo(web.FSPath)
+ info, e := disk.GetInfo(web.Filesystem.GetRootPath())
if e != nil {
return &json2.Error{Message: e.Error()}
}
@@ -139,16 +130,13 @@ func (web *webAPI) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *Gene
return &json2.Error{Message: "Unauthorized request"}
}
reply.UIVersion = miniobrowser.UIVersion
- e := web.Client.MakeBucket(args.BucketName, "")
+ e := web.Filesystem.MakeBucket(args.BucketName)
if e != nil {
- return &json2.Error{Message: e.Error()}
+ return &json2.Error{Message: e.Cause.Error()}
}
return nil
}
-// ListBucketsArgs - list bucket args.
-type ListBucketsArgs struct{}
-
// ListBucketsRep - list buckets response
type ListBucketsRep struct {
Buckets []BucketInfo `json:"buckets"`
@@ -164,20 +152,20 @@ type BucketInfo struct {
}
// ListBuckets - list buckets api.
-func (web *webAPI) ListBuckets(r *http.Request, args *ListBucketsArgs, reply *ListBucketsRep) error {
+func (web *webAPI) ListBuckets(r *http.Request, args *GenericArgs, reply *ListBucketsRep) error {
if !isJWTReqAuthenticated(r) {
return &json2.Error{Message: "Unauthorized request"}
}
- buckets, e := web.Client.ListBuckets()
+ buckets, e := web.Filesystem.ListBuckets()
if e != nil {
- return &json2.Error{Message: e.Error()}
+ return &json2.Error{Message: e.Cause.Error()}
}
for _, bucket := range buckets {
// List all buckets which are not private.
if bucket.Name != path.Base(reservedBucket) {
reply.Buckets = append(reply.Buckets, BucketInfo{
Name: bucket.Name,
- CreationDate: bucket.CreationDate,
+ CreationDate: bucket.Created,
})
}
}
@@ -211,118 +199,36 @@ type ObjectInfo struct {
// ListObjects - list objects api.
func (web *webAPI) ListObjects(r *http.Request, args *ListObjectsArgs, reply *ListObjectsRep) error {
+ marker := ""
if !isJWTReqAuthenticated(r) {
return &json2.Error{Message: "Unauthorized request"}
}
- doneCh := make(chan struct{})
- defer close(doneCh)
- for object := range web.Client.ListObjects(args.BucketName, args.Prefix, false, doneCh) {
- if object.Err != nil {
- return &json2.Error{Message: object.Err.Error()}
+ for {
+ lo, err := web.Filesystem.ListObjects(args.BucketName, args.Prefix, marker, "/", 1000)
+ if err != nil {
+ return &json2.Error{Message: err.Cause.Error()}
}
- objectInfo := ObjectInfo{
- Key: object.Key,
- LastModified: object.LastModified,
- Size: object.Size,
+ marker = lo.NextMarker
+ for _, obj := range lo.Objects {
+ reply.Objects = append(reply.Objects, ObjectInfo{
+ Key: obj.Name,
+ LastModified: obj.ModifiedTime,
+ Size: obj.Size,
+ })
}
- // TODO - This can get slower for large directories, we can
- // perhaps extend the ListObjects XML to reply back
- // ContentType as well.
- if !strings.HasSuffix(object.Key, "/") && object.Size > 0 {
- objectStatInfo, e := web.Client.StatObject(args.BucketName, object.Key)
- if e != nil {
- return &json2.Error{Message: e.Error()}
- }
- objectInfo.ContentType = objectStatInfo.ContentType
+ for _, prefix := range lo.Prefixes {
+ reply.Objects = append(reply.Objects, ObjectInfo{
+ Key: prefix,
+ })
+ }
+ if !lo.IsTruncated {
+ break
}
- reply.Objects = append(reply.Objects, objectInfo)
}
reply.UIVersion = miniobrowser.UIVersion
return nil
}
-// PutObjectURLArgs - args to generate url for upload access.
-type PutObjectURLArgs struct {
- TargetHost string `json:"targetHost"`
- TargetProto string `json:"targetProto"`
- BucketName string `json:"bucketName"`
- ObjectName string `json:"objectName"`
-}
-
-// PutObjectURLRep - reply for presigned upload url request.
-type PutObjectURLRep struct {
- URL string `json:"url"`
- UIVersion string `json:"uiVersion"`
-}
-
-// PutObjectURL - generates url for upload access.
-func (web *webAPI) PutObjectURL(r *http.Request, args *PutObjectURLArgs, reply *PutObjectURLRep) error {
- if !isJWTReqAuthenticated(r) {
- return &json2.Error{Message: "Unauthorized request"}
- }
-
- // disableSSL is true if no 'https:' proto is found.
- disableSSL := (args.TargetProto != "https:")
- cred := serverConfig.GetCredential()
- client, e := minio.New(args.TargetHost, cred.AccessKeyID, cred.SecretAccessKey, disableSSL)
- if e != nil {
- return &json2.Error{Message: e.Error()}
- }
- signedURLStr, e := client.PresignedPutObject(args.BucketName, args.ObjectName, time.Duration(60*60)*time.Second)
- if e != nil {
- return &json2.Error{Message: e.Error()}
- }
- reply.URL = signedURLStr
- reply.UIVersion = miniobrowser.UIVersion
- return nil
-}
-
-// GetObjectURLArgs - args to generate url for download access.
-type GetObjectURLArgs struct {
- TargetHost string `json:"targetHost"`
- TargetProto string `json:"targetProto"`
- BucketName string `json:"bucketName"`
- ObjectName string `json:"objectName"`
-}
-
-// GetObjectURLRep - reply for presigned download url request.
-type GetObjectURLRep struct {
- URL string `json:"url"`
- UIVersion string `json:"uiVersion"`
-}
-
-// GetObjectURL - generates url for download access.
-func (web *webAPI) GetObjectURL(r *http.Request, args *GetObjectURLArgs, reply *GetObjectURLRep) error {
- if !isJWTReqAuthenticated(r) {
- return &json2.Error{Message: "Unauthorized request"}
- }
-
- // See if object exists.
- _, e := web.Client.StatObject(args.BucketName, args.ObjectName)
- if e != nil {
- return &json2.Error{Message: e.Error()}
- }
-
- // disableSSL is true if no 'https:' proto is found.
- disableSSL := (args.TargetProto != "https:")
- cred := serverConfig.GetCredential()
- client, e := minio.New(args.TargetHost, cred.AccessKeyID, cred.SecretAccessKey, disableSSL)
- if e != nil {
- return &json2.Error{Message: e.Error()}
- }
-
- reqParams := make(url.Values)
- // Set content disposition for browser to download the file.
- reqParams.Set("response-content-disposition", fmt.Sprintf(`attachment; filename="%s"`, filepath.Base(args.ObjectName)))
- signedURLStr, e := client.PresignedGetObject(args.BucketName, args.ObjectName, time.Duration(60*60)*time.Second, reqParams)
- if e != nil {
- return &json2.Error{Message: e.Error()}
- }
- reply.URL = signedURLStr
- reply.UIVersion = miniobrowser.UIVersion
- return nil
-}
-
// RemoveObjectArgs - args to remove an object
type RemoveObjectArgs struct {
TargetHost string `json:"targetHost"`
@@ -336,9 +242,9 @@ func (web *webAPI) RemoveObject(r *http.Request, args *RemoveObjectArgs, reply *
return &json2.Error{Message: "Unauthorized request"}
}
reply.UIVersion = miniobrowser.UIVersion
- e := web.Client.RemoveObject(args.BucketName, args.ObjectName)
+ e := web.Filesystem.DeleteObject(args.BucketName, args.ObjectName)
if e != nil {
- return &json2.Error{Message: e.Error()}
+ return &json2.Error{Message: e.Cause.Error()}
}
return nil
}
@@ -417,20 +323,6 @@ func (web *webAPI) SetAuth(r *http.Request, args *SetAuthArgs, reply *SetAuthRep
return &json2.Error{Message: err.Cause.Error()}
}
- // Split host port.
- host, port, e := net.SplitHostPort(serverConfig.GetAddr())
- fatalIf(probe.NewError(e), "Unable to parse web addess.", nil)
-
- // Default host is 'localhost', if no host present.
- if host == "" {
- host = "localhost"
- }
-
- client, e := minio.NewV4(net.JoinHostPort(host, port), args.AccessKey, args.SecretKey, !isSSL())
- if e != nil {
- return &json2.Error{Message: e.Error()}
- }
- web.Client = client
jwt := initJWT()
if !jwt.Authenticate(args.AccessKey, args.SecretKey) {
return &json2.Error{Message: "Invalid credentials"}
@@ -443,3 +335,108 @@ func (web *webAPI) SetAuth(r *http.Request, args *SetAuthArgs, reply *SetAuthRep
reply.UIVersion = miniobrowser.UIVersion
return nil
}
+
+// GetAuthReply - Reply current credentials.
+type GetAuthReply struct {
+ AccessKey string `json:"accessKey"`
+ SecretKey string `json:"secretKey"`
+ UIVersion string `json:"uiVersion"`
+}
+
+// GetAuth - return accessKey and secretKey credentials.
+func (web *webAPI) GetAuth(r *http.Request, args *GenericArgs, reply *GetAuthReply) error {
+ if !isJWTReqAuthenticated(r) {
+ return &json2.Error{Message: "Unauthorized request"}
+ }
+ creds := serverConfig.GetCredential()
+ reply.AccessKey = creds.AccessKeyID
+ reply.SecretKey = creds.SecretAccessKey
+ reply.UIVersion = miniobrowser.UIVersion
+ return nil
+}
+
+// Upload - file upload handler.
+func (web *webAPI) Upload(w http.ResponseWriter, r *http.Request) {
+ if !isJWTReqAuthenticated(r) {
+ writeWebErrorResponse(w, errInvalidToken)
+ return
+ }
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+ object := vars["object"]
+ if _, err := web.Filesystem.CreateObject(bucket, object, -1, r.Body, nil); err != nil {
+ writeWebErrorResponse(w, err.ToGoError())
+ }
+}
+
+// Download - file download handler.
+func (web *webAPI) Download(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+ object := vars["object"]
+ token := r.URL.Query().Get("token")
+
+ jwt := initJWT()
+ jwttoken, e := jwtgo.Parse(token, func(token *jwtgo.Token) (interface{}, error) {
+ if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
+ return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
+ }
+ return []byte(jwt.SecretAccessKey), nil
+ })
+ if e != nil || !jwttoken.Valid {
+ writeWebErrorResponse(w, errInvalidToken)
+ return
+ }
+ w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filepath.Base(object)))
+
+ if _, err := web.Filesystem.GetObject(w, bucket, object, 0, 0); err != nil {
+ writeWebErrorResponse(w, err.ToGoError())
+ }
+}
+
+// writeWebErrorResponse - set HTTP status code and write error description to the body.
+func writeWebErrorResponse(w http.ResponseWriter, err error) {
+ if err == errInvalidToken {
+ w.WriteHeader(http.StatusForbidden)
+ w.Write([]byte(err.Error()))
+ return
+ }
+ switch err.(type) {
+ case fs.RootPathFull:
+ apiErr := getAPIError(ErrRootPathFull)
+ w.WriteHeader(apiErr.HTTPStatusCode)
+ w.Write([]byte(apiErr.Description))
+ case fs.BucketNotFound:
+ apiErr := getAPIError(ErrNoSuchBucket)
+ w.WriteHeader(apiErr.HTTPStatusCode)
+ w.Write([]byte(apiErr.Description))
+ case fs.BucketNameInvalid:
+ apiErr := getAPIError(ErrInvalidBucketName)
+ w.WriteHeader(apiErr.HTTPStatusCode)
+ w.Write([]byte(apiErr.Description))
+ case fs.BadDigest:
+ apiErr := getAPIError(ErrBadDigest)
+ w.WriteHeader(apiErr.HTTPStatusCode)
+ w.Write([]byte(apiErr.Description))
+ case fs.IncompleteBody:
+ apiErr := getAPIError(ErrIncompleteBody)
+ w.WriteHeader(apiErr.HTTPStatusCode)
+ w.Write([]byte(apiErr.Description))
+ case fs.ObjectExistsAsPrefix:
+ apiErr := getAPIError(ErrObjectExistsAsPrefix)
+ w.WriteHeader(apiErr.HTTPStatusCode)
+ w.Write([]byte(apiErr.Description))
+ case fs.ObjectNotFound:
+ apiErr := getAPIError(ErrNoSuchKey)
+ w.WriteHeader(apiErr.HTTPStatusCode)
+ w.Write([]byte(apiErr.Description))
+ case fs.ObjectNameInvalid:
+ apiErr := getAPIError(ErrNoSuchKey)
+ w.WriteHeader(apiErr.HTTPStatusCode)
+ w.Write([]byte(apiErr.Description))
+ default:
+ apiErr := getAPIError(ErrInternalError)
+ w.WriteHeader(apiErr.HTTPStatusCode)
+ w.Write([]byte(apiErr.Description))
+ }
+}
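To exercise the new handlers end to end, a hypothetical client for the upload route might look like the sketch below. The `/minio` path prefix and the Bearer `Authorization` header are assumptions based on how the browser client talks to these handlers; the token is a JWT obtained at login:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

// upload PUTs a local file to the new browser upload route registered in
// web-router.go. Host, path prefix, and auth header are assumptions here.
func upload(host, token, bucket, object, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	url := fmt.Sprintf("http://%s/minio/upload/%s/%s", host, bucket, object)
	req, err := http.NewRequest("PUT", url, f)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("upload failed: %s", resp.Status)
	}
	return nil
}

func main() {
	if err := upload("localhost:9000", "JWT-TOKEN", "mybucket", "photo.png", "photo.png"); err != nil {
		fmt.Println(err)
	}
}
```

Downloads are the mirror image, except the Download handler above verifies the JWT from a `token` query parameter rather than a header, so a plain `GET /minio/download/{bucket}/{object}?token=...` works from a browser link.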
diff --git a/web-router.go b/web-router.go
index bf2535530..97f76e23c 100644
--- a/web-router.go
+++ b/web-router.go
@@ -25,16 +25,13 @@ import (
router "github.com/gorilla/mux"
jsonrpc "github.com/gorilla/rpc/v2"
"github.com/gorilla/rpc/v2/json2"
- "github.com/minio/minio-go"
+ "github.com/minio/minio/pkg/fs"
"github.com/minio/miniobrowser"
)
// webAPI container for Web API.
type webAPI struct {
- // FSPath filesystem path.
- FSPath string
- // Minio client instance.
- Client *minio.Client
+ Filesystem fs.Filesystem
}
// indexHandler - Handler to serve index.html
@@ -76,7 +73,9 @@ func registerWebRouter(mux *router.Router, web *webAPI) {
webRPC.RegisterService(web, "Web")
// RPC handler at URI - /minio/rpc
- webBrowserRouter.Path("/rpc").Handler(webRPC)
+ webBrowserRouter.Methods("POST").Path("/rpc").Handler(webRPC)
+ webBrowserRouter.Methods("PUT").Path("/upload/{bucket}/{object:.+}").HandlerFunc(web.Upload)
+ webBrowserRouter.Methods("GET").Path("/download/{bucket}/{object:.+}").Queries("token", "").HandlerFunc(web.Download)
// Add compression for assets.
compressedAssets := handlers.CompressHandler(http.StripPrefix(reservedBucket, http.FileServer(assetFS())))