From 3f1c4bb4b02a523d4bf73e693d48067c130dd19f Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Sat, 23 Jan 2016 19:44:32 -0800 Subject: [PATCH] Bring in the list APIs implemented by Bala --- JWT.md | 68 +- ...ric-web-handlers.go => jwt-auth-handler.go | 5 +- jwt.go | 17 +- routers.go | 40 +- server-main.go | 38 +- typed-errors.go | 3 + vendor/github.com/gorilla/rpc/v2/LICENSE | 27 + vendor/github.com/gorilla/rpc/v2/README.md | 6 + .../gorilla/rpc/v2/compression_selector.go | 90 ++ vendor/github.com/gorilla/rpc/v2/doc.go | 81 ++ .../gorilla/rpc/v2/encoder_selector.go | 43 + .../github.com/gorilla/rpc/v2/json/client.go | 61 + vendor/github.com/gorilla/rpc/v2/json/doc.go | 58 + .../gorilla/rpc/v2/json/json_test.go | 132 +++ .../github.com/gorilla/rpc/v2/json/server.go | 155 +++ vendor/github.com/gorilla/rpc/v2/map.go | 164 +++ vendor/github.com/gorilla/rpc/v2/server.go | 158 +++ .../github.com/gorilla/rpc/v2/server_test.go | 54 + .../github.com/minio/minio-go/CONTRIBUTING.md | 21 + vendor/github.com/minio/minio-go/INSTALLGO.md | 83 ++ vendor/github.com/minio/minio-go/LICENSE | 202 ++++ .../github.com/minio/minio-go/MAINTAINERS.md | 19 + vendor/github.com/minio/minio-go/README.md | 98 ++ .../minio/minio-go/api-definitions.go | 76 ++ .../minio/minio-go/api-error-response.go | 236 ++++ .../minio/minio-go/api-get-object-file.go | 104 ++ vendor/github.com/minio/minio-go/api-get.go | 537 +++++++++ vendor/github.com/minio/minio-go/api-list.go | 539 +++++++++ .../minio/minio-go/api-presigned.go | 144 +++ .../minio/minio-go/api-put-bucket.go | 214 ++++ .../minio/minio-go/api-put-object-common.go | 240 ++++ .../minio/minio-go/api-put-object-file.go | 240 ++++ .../minio-go/api-put-object-multipart.go | 379 ++++++ .../minio/minio-go/api-put-object-progress.go | 105 ++ .../minio/minio-go/api-put-object-readat.go | 208 ++++ .../minio/minio-go/api-put-object.go | 287 +++++ .../github.com/minio/minio-go/api-remove.go | 167 +++ .../minio/minio-go/api-s3-definitions.go | 197 ++++ vendor/github.com/minio/minio-go/api-stat.go | 125 ++ vendor/github.com/minio/minio-go/api.go | 533 +++++++++ .../minio/minio-go/api_functional_v2_test.go | 945 +++++++++++++++ .../minio/minio-go/api_functional_v4_test.go | 1026 +++++++++++++++++ .../minio/minio-go/api_unit_test.go | 583 ++++++++++ vendor/github.com/minio/minio-go/appveyor.yml | 36 + .../github.com/minio/minio-go/bucket-acl.go | 75 ++ .../github.com/minio/minio-go/bucket-cache.go | 154 +++ vendor/github.com/minio/minio-go/constants.go | 42 + .../github.com/minio/minio-go/hook-reader.go | 54 + .../github.com/minio/minio-go/post-policy.go | 191 +++ .../minio/minio-go/request-signature-v2.go | 289 +++++ .../minio/minio-go/request-signature-v4.go | 303 +++++ .../github.com/minio/minio-go/s3-endpoints.go | 40 + .../minio/minio-go/signature-type.go | 37 + vendor/github.com/minio/minio-go/tempfile.go | 60 + vendor/github.com/minio/minio-go/utils.go | 305 +++++ vendor/vendor.json | 15 + web-auth-handlers.go | 104 -- web-definitions.go | 21 +- web-handlers.go | 122 ++ 59 files changed, 10172 insertions(+), 184 deletions(-) rename generic-web-handlers.go => jwt-auth-handler.go (88%) create mode 100644 vendor/github.com/gorilla/rpc/v2/LICENSE create mode 100644 vendor/github.com/gorilla/rpc/v2/README.md create mode 100644 vendor/github.com/gorilla/rpc/v2/compression_selector.go create mode 100644 vendor/github.com/gorilla/rpc/v2/doc.go create mode 100644 vendor/github.com/gorilla/rpc/v2/encoder_selector.go create mode 100644 vendor/github.com/gorilla/rpc/v2/json/client.go 
create mode 100644 vendor/github.com/gorilla/rpc/v2/json/doc.go create mode 100644 vendor/github.com/gorilla/rpc/v2/json/json_test.go create mode 100644 vendor/github.com/gorilla/rpc/v2/json/server.go create mode 100644 vendor/github.com/gorilla/rpc/v2/map.go create mode 100644 vendor/github.com/gorilla/rpc/v2/server.go create mode 100644 vendor/github.com/gorilla/rpc/v2/server_test.go create mode 100644 vendor/github.com/minio/minio-go/CONTRIBUTING.md create mode 100644 vendor/github.com/minio/minio-go/INSTALLGO.md create mode 100644 vendor/github.com/minio/minio-go/LICENSE create mode 100644 vendor/github.com/minio/minio-go/MAINTAINERS.md create mode 100644 vendor/github.com/minio/minio-go/README.md create mode 100644 vendor/github.com/minio/minio-go/api-definitions.go create mode 100644 vendor/github.com/minio/minio-go/api-error-response.go create mode 100644 vendor/github.com/minio/minio-go/api-get-object-file.go create mode 100644 vendor/github.com/minio/minio-go/api-get.go create mode 100644 vendor/github.com/minio/minio-go/api-list.go create mode 100644 vendor/github.com/minio/minio-go/api-presigned.go create mode 100644 vendor/github.com/minio/minio-go/api-put-bucket.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-common.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-file.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-multipart.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-progress.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-readat.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object.go create mode 100644 vendor/github.com/minio/minio-go/api-remove.go create mode 100644 vendor/github.com/minio/minio-go/api-s3-definitions.go create mode 100644 vendor/github.com/minio/minio-go/api-stat.go create mode 100644 vendor/github.com/minio/minio-go/api.go create mode 100644 vendor/github.com/minio/minio-go/api_functional_v2_test.go create mode 100644 vendor/github.com/minio/minio-go/api_functional_v4_test.go create mode 100644 vendor/github.com/minio/minio-go/api_unit_test.go create mode 100644 vendor/github.com/minio/minio-go/appveyor.yml create mode 100644 vendor/github.com/minio/minio-go/bucket-acl.go create mode 100644 vendor/github.com/minio/minio-go/bucket-cache.go create mode 100644 vendor/github.com/minio/minio-go/constants.go create mode 100644 vendor/github.com/minio/minio-go/hook-reader.go create mode 100644 vendor/github.com/minio/minio-go/post-policy.go create mode 100644 vendor/github.com/minio/minio-go/request-signature-v2.go create mode 100644 vendor/github.com/minio/minio-go/request-signature-v4.go create mode 100644 vendor/github.com/minio/minio-go/s3-endpoints.go create mode 100644 vendor/github.com/minio/minio-go/signature-type.go create mode 100644 vendor/github.com/minio/minio-go/tempfile.go create mode 100644 vendor/github.com/minio/minio-go/utils.go delete mode 100644 web-auth-handlers.go create mode 100644 web-handlers.go diff --git a/JWT.md b/JWT.md index 02b35f225..f5f1882b7 100644 --- a/JWT.md +++ b/JWT.md @@ -17,41 +17,45 @@ openssl rsa -in ~/.minio/web/private.key -outform PEM -pubout -out ~/.minio/web/ minio server ``` -### Now you can make curl requests to the server at port 9001. +### Implemented JSON RPC APIs. -Currently username and password are defaulted for testing purposes. 
+Namespace `Web`
+
+All methods below are served over JSON-RPC at the `/rpc` endpoint.
 
-```
-curl -X POST -H "Content-Type: application/json" -d '{"username":"WLGDGYAQYIGI833EV05A", "password": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"}' http://127.0.0.1:9001/login
-{"token":"eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE0NTM1NDM0MjMsImlhdCI6MTQ1MzUwNzQyMywic3ViIjoiV0xHREdZQVFZSUdJODMzRVYwNUEifQ.zhL0vG5dwwak3SvpysW0CzdPRjpadrCLIpte2QHSxj2XjIQb2oK0dDD9Yvl-45E14CMVQhV3CCsf9LFaK2C94I5aop6nP7sSCyG2_l4w2xrfEPWKgyOY9P0QxUIPV3o43o2XjnMlU_6xE2mk8S9N7psk15sf0Ma1EoXkQlfqEZzbxyQjwKx4UxzkVpwN4k6wavtwU-rgVU0QwJwXXss0hVhY7HWtOzUGrhVWL42pOwNwZ73lrHpJkSyQi6fbc5lIALgFoeei_iSUXxRaJjvm36rn4vui3qLCoH79E-WhkoP_mqDvf_YfiTqcFHgdgnu2wtlQl90RNh2-wgR-XJiedQ"}
+* Login - expects 'username' and 'password' and on success replies with a new JWT token.
+* ResetToken - resets the token; requires the password and current token.
+* Logout - currently a dummy operation.
+* ListBuckets - lists buckets; requires a valid token.
+* ListObjects - lists objects; requires a valid token.
+* GetObjectURL - generates a URL for download access; requires a valid token.
+
+### Now you can use `webrpc.js` to make requests.
+
+- Login example
+```js
+var webRPC = require('webrpc');
+var web = new webRPC("http://localhost:9001/rpc");
+
+// Generate JWT Token.
+web.Login({"username": "YOUR-ACCESS-KEY-ID", "password": "YOUR-SECRET-ACCESS-KEY"})
+  .then(function(data) {
+    console.log("success : ", data);
+  })
+  .catch(function(error) {
+    console.log("fail : ", error.toString());
+  });
 ```
 
-Replies back with a token which can be used to logout
+- ListBuckets example
+```js
+var webRPC = require('webrpc');
+var web = new webRPC("http://localhost:9001/rpc", "my-token");
 
-```
-curl -i -X GET -H "Authorization: Bearer eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE0NTM1NDM0MjMsImlhdCI6MTQ1MzUwNzQyMywic3ViIjoiV0xHREdZQVFZSUdJODMzRVYwNUEifQ.zhL0vG5dwwak3SvpysW0CzdPRjpadrCLIpte2QHSxj2XjIQb2oK0dDD9Yvl-45E14CMVQhV3CCsf9LFaK2C94I5aop6nP7sSCyG2_l4w2xrfEPWKgyOY9P0QxUIPV3o43o2XjnMlU_6xE2mk8S9N7psk15sf0Ma1EoXkQlfqEZzbxyQjwKx4UxzkVpwN4k6wavtwU-rgVU0QwJwXXss0hVhY7HWtOzUGrhVWL42pOwNwZ73lrHpJkSyQi6fbc5lIALgFoeei_iSUXxRaJjvm36rn4vui3qLCoH79E-WhkoP_mqDvf_YfiTqcFHgdgnu2wtlQl90RNh2-wgR-XJiedQ" http://127.0.0.1:9001/logout
-HTTP/1.1 200 OK
-Content-Type: application/json
-Date: Sat, 23 Jan 2016 00:05:02 GMT
-Content-Length: 0
-```
-
-
-Now attempt with wrong authorization, you should get 401.
-
-```
-$ curl -i -X GET -H "Authorization: Bearer testing123" http://127.0.0.1:9001/logout
-HTTP/1.1 401 Unauthorized
-Date: Sat, 23 Jan 2016 00:05:58 GMT
-Content-Length: 0
-Content-Type: text/plain; charset=utf-8
-```
-
-Without authorization logout is not possible.
-```
-$ curl -i -X GET http://127.0.0.1:9001/logout
-HTTP/1.1 401 Unauthorized
-Date: Sat, 23 Jan 2016 00:07:00 GMT
-Content-Length: 0
-Content-Type: text/plain; charset=utf-8
+// List buckets using the token.
+web.ListBuckets()
+  .then(function(data) {
+    console.log("success : ", data);
+  })
+  .catch(function(error) {
+    console.log("fail : ", error.toString());
+  });
 ```
diff --git a/generic-web-handlers.go b/jwt-auth-handler.go
similarity index 88%
rename from generic-web-handlers.go
rename to jwt-auth-handler.go
index c6b065f4f..62739b0c8 100644
--- a/generic-web-handlers.go
+++ b/jwt-auth-handler.go
@@ -19,8 +19,9 @@ func AuthHandler(h http.Handler) http.Handler {
 
 // Ignore request if authorization header is not valid.
 func (h authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	// For login attempts please issue a new token.
- if r.Method == "POST" && r.URL.Path == "/login-token" { + // Let the top level caller handle if the requests should be + // allowed. + if r.Header.Get("Authorization") == "" { h.handler.ServeHTTP(w, r) return } diff --git a/jwt.go b/jwt.go index 17ee9340b..5ab31817c 100644 --- a/jwt.go +++ b/jwt.go @@ -45,14 +45,10 @@ func (b *JWTAuthBackend) GenerateToken(userName string) (string, error) { } // Authenticate - -func (b *JWTAuthBackend) Authenticate(user *User) bool { - hashedPassword, _ := bcrypt.GenerateFromPassword([]byte("BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"), 10) - testUser := User{ - Username: "WLGDGYAQYIGI833EV05A", - Password: string(hashedPassword), - } - if user.Username == testUser.Username { - return bcrypt.CompareHashAndPassword([]byte(testUser.Password), []byte(user.Password)) == nil +func (b *JWTAuthBackend) Authenticate(args *LoginArgs, accessKeyID, secretAccessKey string) bool { + hashedPassword, _ := bcrypt.GenerateFromPassword([]byte(secretAccessKey), 10) + if args.Username == accessKeyID { + return bcrypt.CompareHashAndPassword(hashedPassword, []byte(args.Password)) == nil } return false } @@ -69,9 +65,8 @@ func (b *JWTAuthBackend) getTokenRemainingValidity(timestamp interface{}) int { return expireOffset } -// Logout - logout -func (b *JWTAuthBackend) Logout(tokenString string, token *jwt.Token) error { - b.getTokenRemainingValidity(token.Claims["exp"]) +// Logout - logout is not implemented yet. +func (b *JWTAuthBackend) Logout(tokenString string) error { return nil } diff --git a/routers.go b/routers.go index 6efdf3160..42bdc598a 100644 --- a/routers.go +++ b/routers.go @@ -20,6 +20,8 @@ import ( "net/http" router "github.com/gorilla/mux" + jsonrpc "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/json" "github.com/minio/minio/pkg/fs" ) @@ -32,22 +34,16 @@ type CloudStorageAPI struct { // WebAPI container for Web API. type WebAPI struct { - Anonymous bool - AccessLog bool + Anonymous bool + AccessLog bool + AccessKeyID string + SecretAccessKey string } -// registerWebAPI - register all the handlers to their respective paths -func registerWebAPI(mux *router.Router, w WebAPI) { - // root Router - root := mux.NewRoute().PathPrefix("/").Subrouter() - root.Methods("POST").Path("/login").HandlerFunc(w.LoginHandler) - root.Methods("GET").Path("/logout").HandlerFunc(w.LogoutHandler) - root.Methods("GET").Path("/login-refresh-token").HandlerFunc(w.RefreshTokenHandler) -} - -func getWebAPIHandler(web WebAPI) http.Handler { +func getWebAPIHandler(web *WebAPI) http.Handler { var mwHandlers = []MiddlewareHandler{ - CorsHandler, // CORS added only for testing purposes. + TimeValidityHandler, // Validate time. + CorsHandler, // CORS added only for testing purposes. } if !web.Anonymous { mwHandlers = append(mwHandlers, AuthHandler) @@ -55,8 +51,17 @@ func getWebAPIHandler(web WebAPI) http.Handler { if web.AccessLog { mwHandlers = append(mwHandlers, AccessLogHandler) } + + s := jsonrpc.NewServer() + codec := json.NewCodec() + s.RegisterCodec(codec, "application/json") + s.RegisterCodec(codec, "application/json; charset=UTF-8") + s.RegisterService(web, "Web") mux := router.NewRouter() - registerWebAPI(mux, web) + // Add new RPC services here + mux.Handle("/rpc", s) + // Enable this when we add assets. + // mux.Handle("/{file:.*}", http.FileServer(assetFS())) return registerCustomMiddleware(mux, mwHandlers...) } @@ -94,11 +99,14 @@ func registerCloudStorageAPI(mux *router.Router, a CloudStorageAPI) { } // getNewWebAPI instantiate a new WebAPI. 
-func getNewWebAPI(conf cloudServerConfig) WebAPI {
-	return WebAPI{
+func getNewWebAPI(conf cloudServerConfig) *WebAPI {
+	web := &WebAPI{
 		Anonymous: conf.Anonymous,
 		AccessLog: conf.AccessLog,
 	}
+	web.AccessKeyID = conf.AccessKeyID
+	web.SecretAccessKey = conf.SecretAccessKey
+	return web
 }
 
 // getNewCloudStorageAPI instantiate a new CloudStorageAPI.
diff --git a/server-main.go b/server-main.go
index cf51e0c57..716cc181f 100644
--- a/server-main.go
+++ b/server-main.go
@@ -74,6 +74,10 @@ type cloudServerConfig struct {
 	AccessLog bool // Enable access log handler
 	Anonymous bool // No signature turn off
 
+	// Credentials.
+	AccessKeyID     string // Access key id.
+	SecretAccessKey string // Secret access key.
+
 	/// FS options
 	Path        string // Path to export for cloud storage
 	MinFreeDisk int64  // Minimum free disk space for filesystem
@@ -293,13 +297,13 @@ func (a accessKeys) JSON() string {
 }
 
 // initServer initialize server
-func initServer() *probe.Error {
+func initServer() (*configV2, *probe.Error) {
 	conf, err := getConfig()
 	if err != nil {
-		return err.Trace()
+		return nil, err.Trace()
 	}
 	if err := setLogger(conf); err != nil {
-		return err.Trace()
+		return nil, err.Trace()
 	}
 	if conf != nil {
 		Println()
@@ -319,7 +323,7 @@ func initServer() *probe.Error {
 		Println("\t$ ./mc cp --recursive ~/Photos localhost:9000/photobucket")
 	}
 	Println()
-	return nil
+	return conf, nil
 }
 
 func checkServerSyntax(c *cli.Context) {
@@ -338,7 +342,7 @@ func checkServerSyntax(c *cli.Context) {
 func serverMain(c *cli.Context) {
 	checkServerSyntax(c)
 
-	perr := initServer()
+	conf, perr := initServer()
 	fatalIf(perr.Trace(), "Failed to read config for minio.", nil)
 
 	certFile := c.GlobalString("cert")
@@ -390,17 +394,19 @@ func serverMain(c *cli.Context) {
 	}
 	tls := (certFile != "" && keyFile != "")
 	apiServerConfig := cloudServerConfig{
-		Address:     c.GlobalString("address"),
-		WebAddress:  c.GlobalString("web-address"),
-		AccessLog:   c.GlobalBool("enable-accesslog"),
-		Anonymous:   c.GlobalBool("anonymous"),
-		Path:        path,
-		MinFreeDisk: minFreeDisk,
-		Expiry:      expiration,
-		TLS:         tls,
-		CertFile:    certFile,
-		KeyFile:     keyFile,
-		RateLimit:   c.GlobalInt("ratelimit"),
+		Address:         c.GlobalString("address"),
+		WebAddress:      c.GlobalString("web-address"),
+		AccessLog:       c.GlobalBool("enable-accesslog"),
+		Anonymous:       c.GlobalBool("anonymous"),
+		AccessKeyID:     conf.Credentials.AccessKeyID,
+		SecretAccessKey: conf.Credentials.SecretAccessKey,
+		Path:            path,
+		MinFreeDisk:     minFreeDisk,
+		Expiry:          expiration,
+		TLS:             tls,
+		CertFile:        certFile,
+		KeyFile:         keyFile,
+		RateLimit:       c.GlobalInt("ratelimit"),
 	}
 	perr = startServer(apiServerConfig)
 	errorIf(perr.Trace(), "Failed to start the minio server.", nil)
diff --git a/typed-errors.go b/typed-errors.go
index 562460f23..293a85805 100644
--- a/typed-errors.go
+++ b/typed-errors.go
@@ -18,6 +18,9 @@ package main
 
 import "errors"
 
+// errUnAuthorizedRequest - unauthorized request.
+var errUnAuthorizedRequest = errors.New("Unauthorized request")
+
 // errSysLogNotSupported - this message is only meaningful on windows
 var errSysLogNotSupported = errors.New("Syslog logger not supported on windows")
diff --git a/vendor/github.com/gorilla/rpc/v2/LICENSE b/vendor/github.com/gorilla/rpc/v2/LICENSE
new file mode 100644
index 000000000..0e5fb8728
--- /dev/null
+++ b/vendor/github.com/gorilla/rpc/v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/rpc/v2/README.md b/vendor/github.com/gorilla/rpc/v2/README.md new file mode 100644 index 000000000..8f9af9a8d --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/README.md @@ -0,0 +1,6 @@ +rpc +=== + +gorilla/rpc is a foundation for RPC over HTTP services, providing access to the exported methods of an object through HTTP requests. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/rpc diff --git a/vendor/github.com/gorilla/rpc/v2/compression_selector.go b/vendor/github.com/gorilla/rpc/v2/compression_selector.go new file mode 100644 index 000000000..bbf3fd1ef --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/compression_selector.go @@ -0,0 +1,90 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "compress/flate" + "compress/gzip" + "io" + "net/http" + "strings" + "unicode" +) + +// gzipWriter writes and closes the gzip writer. +type gzipWriter struct { + w *gzip.Writer +} + +func (gw *gzipWriter) Write(p []byte) (n int, err error) { + defer gw.w.Close() + return gw.w.Write(p) +} + +// gzipEncoder implements the gzip compressed http encoder. +type gzipEncoder struct { +} + +func (enc *gzipEncoder) Encode(w http.ResponseWriter) io.Writer { + w.Header().Set("Content-Encoding", "gzip") + return &gzipWriter{gzip.NewWriter(w)} +} + +// flateWriter writes and closes the flate writer. +type flateWriter struct { + w *flate.Writer +} + +func (fw *flateWriter) Write(p []byte) (n int, err error) { + defer fw.w.Close() + return fw.w.Write(p) +} + +// flateEncoder implements the flate compressed http encoder. 
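+//
+// Note that constructing the flate writer can fail for an invalid
+// compression level; in that case Encode below falls back to the plain
+// ResponseWriter and sets no Content-Encoding header.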
+type flateEncoder struct { +} + +func (enc *flateEncoder) Encode(w http.ResponseWriter) io.Writer { + fw, err := flate.NewWriter(w, flate.DefaultCompression) + if err != nil { + return w + } + w.Header().Set("Content-Encoding", "deflate") + return &flateWriter{fw} +} + +// CompressionSelector generates the compressed http encoder. +type CompressionSelector struct { +} + +// acceptedEnc returns the first compression type in "Accept-Encoding" header +// field of the request. +func acceptedEnc(req *http.Request) string { + encHeader := req.Header.Get("Accept-Encoding") + if encHeader == "" { + return "" + } + encTypes := strings.FieldsFunc(encHeader, func(r rune) bool { + return unicode.IsSpace(r) || r == ',' + }) + for _, enc := range encTypes { + if enc == "gzip" || enc == "deflate" { + return enc + } + } + return "" +} + +// Select method selects the correct compression encoder based on http HEADER. +func (_ *CompressionSelector) Select(r *http.Request) Encoder { + switch acceptedEnc(r) { + case "gzip": + return &gzipEncoder{} + case "flate": + return &flateEncoder{} + } + return DefaultEncoder +} diff --git a/vendor/github.com/gorilla/rpc/v2/doc.go b/vendor/github.com/gorilla/rpc/v2/doc.go new file mode 100644 index 000000000..301d5dc06 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/doc.go @@ -0,0 +1,81 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/rpc is a foundation for RPC over HTTP services, providing +access to the exported methods of an object through HTTP requests. + +This package derives from the standard net/rpc package but uses a single HTTP +request per call instead of persistent connections. Other differences +compared to net/rpc: + + - Multiple codecs can be registered in the same server. + - A codec is chosen based on the "Content-Type" header from the request. + - Service methods also receive http.Request as parameter. + - This package can be used on Google App Engine. + +Let's setup a server and register a codec and service: + + import ( + "http" + "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/json" + ) + + func init() { + s := rpc.NewServer() + s.RegisterCodec(json.NewCodec(), "application/json") + s.RegisterService(new(HelloService), "") + http.Handle("/rpc", s) + } + +This server handles requests to the "/rpc" path using a JSON codec. +A codec is tied to a content type. In the example above, the JSON codec is +registered to serve requests with "application/json" as the value for the +"Content-Type" header. If the header includes a charset definition, it is +ignored; only the media-type part is taken into account. + +A service can be registered using a name. If the name is empty, like in the +example above, it will be inferred from the service type. + +That's all about the server setup. Now let's define a simple service: + + type HelloArgs struct { + Who string + } + + type HelloReply struct { + Message string + } + + type HelloService struct {} + + func (h *HelloService) Say(r *http.Request, args *HelloArgs, reply *HelloReply) error { + reply.Message = "Hello, " + args.Who + "!" + return nil + } + +The example above defines a service with a method "HelloService.Say" and +the arguments and reply related to that method. + +The service must be exported (begin with an upper case letter) or local +(defined in the package registering the service). 
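+
+As a sketch only (this snippet is not part of the package), a client could
+call the service above using the json sub-package, assuming "bytes", "log"
+and "net/http" are imported:
+
+	buf, _ := json.EncodeClientRequest("HelloService.Say", &HelloArgs{Who: "World"})
+	resp, err := http.Post("http://localhost:8080/rpc", "application/json", bytes.NewBuffer(buf))
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+	var reply HelloReply
+	if err := json.DecodeClientResponse(resp.Body, &reply); err != nil {
+		log.Fatal(err)
+	}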
+ +When a service is registered, the server inspects the service methods +and make available the ones that follow these rules: + + - The method name is exported. + - The method has three arguments: *http.Request, *args, *reply. + - All three arguments are pointers. + - The second and third arguments are exported or local. + - The method has return type error. + +All other methods are ignored. + +Gorilla has packages with common RPC codecs. Check out their documentation: + + JSON: http://gorilla-web.appspot.com/pkg/rpc/json +*/ +package rpc diff --git a/vendor/github.com/gorilla/rpc/v2/encoder_selector.go b/vendor/github.com/gorilla/rpc/v2/encoder_selector.go new file mode 100644 index 000000000..333361f3a --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/encoder_selector.go @@ -0,0 +1,43 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "io" + "net/http" +) + +// Encoder interface contains the encoder for http response. +// Eg. gzip, flate compressions. +type Encoder interface { + Encode(w http.ResponseWriter) io.Writer +} + +type encoder struct { +} + +func (_ *encoder) Encode(w http.ResponseWriter) io.Writer { + return w +} + +var DefaultEncoder = &encoder{} + +// EncoderSelector interface provides a way to select encoder using the http +// request. Typically people can use this to check HEADER of the request and +// figure out client capabilities. +// Eg. "Accept-Encoding" tells about supported compressions. +type EncoderSelector interface { + Select(r *http.Request) Encoder +} + +type encoderSelector struct { +} + +func (_ *encoderSelector) Select(_ *http.Request) Encoder { + return DefaultEncoder +} + +var DefaultEncoderSelector = &encoderSelector{} diff --git a/vendor/github.com/gorilla/rpc/v2/json/client.go b/vendor/github.com/gorilla/rpc/v2/json/client.go new file mode 100644 index 000000000..be05788e2 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/json/client.go @@ -0,0 +1,61 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012-2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "encoding/json" + "fmt" + "io" + "math/rand" +) + +// ---------------------------------------------------------------------------- +// Request and Response +// ---------------------------------------------------------------------------- + +// clientRequest represents a JSON-RPC request sent by a client. +type clientRequest struct { + // A String containing the name of the method to be invoked. + Method string `json:"method"` + // Object to pass as request parameter to the method. + Params [1]interface{} `json:"params"` + // The request id. This can be of any type. It is used to match the + // response with the request that it is replying to. + Id uint64 `json:"id"` +} + +// clientResponse represents a JSON-RPC response returned to a client. +type clientResponse struct { + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` + Id uint64 `json:"id"` +} + +// EncodeClientRequest encodes parameters for a JSON-RPC client request. 
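+//
+// The args value is wrapped in a one-element "params" array, and a random
+// uint64 id is attached so the response can be matched to this request.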
+func EncodeClientRequest(method string, args interface{}) ([]byte, error) { + c := &clientRequest{ + Method: method, + Params: [1]interface{}{args}, + Id: uint64(rand.Int63()), + } + return json.Marshal(c) +} + +// DecodeClientResponse decodes the response body of a client request into +// the interface reply. +func DecodeClientResponse(r io.Reader, reply interface{}) error { + var c clientResponse + if err := json.NewDecoder(r).Decode(&c); err != nil { + return err + } + if c.Error != nil { + return &Error{Data: c.Error} + } + if c.Result == nil { + return fmt.Errorf("Unexpected null result") + } + return json.Unmarshal(*c.Result, reply) +} diff --git a/vendor/github.com/gorilla/rpc/v2/json/doc.go b/vendor/github.com/gorilla/rpc/v2/json/doc.go new file mode 100644 index 000000000..3f92b9cb3 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/json/doc.go @@ -0,0 +1,58 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/rpc/json provides a codec for JSON-RPC over HTTP services. + +To register the codec in a RPC server: + + import ( + "http" + "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/json" + ) + + func init() { + s := rpc.NewServer() + s.RegisterCodec(json.NewCodec(), "application/json") + // [...] + http.Handle("/rpc", s) + } + +A codec is tied to a content type. In the example above, the server will use +the JSON codec for requests with "application/json" as the value for the +"Content-Type" header. + +This package follows the JSON-RPC 1.0 specification: + + http://json-rpc.org/wiki/specification + +Request format is: + + method: + The name of the method to be invoked, as a string in dotted notation + as in "Service.Method". + params: + An array with a single object to pass as argument to the method. + id: + The request id, a uint. It is used to match the response with the + request that it is replying to. + +Response format is: + + result: + The Object that was returned by the invoked method, + or null in case there was an error invoking the method. + error: + An Error object if there was an error invoking the method, + or null if there was no error. + id: + The same id as the request it is responding to. + +Check the gorilla/rpc documentation for more details: + + http://gorilla-web.appspot.com/pkg/rpc +*/ +package json diff --git a/vendor/github.com/gorilla/rpc/v2/json/json_test.go b/vendor/github.com/gorilla/rpc/v2/json/json_test.go new file mode 100644 index 000000000..50017db74 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/json/json_test.go @@ -0,0 +1,132 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package json + +import ( + "bytes" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "github.com/gorilla/rpc/v2" +) + +var ( + ErrResponseError = errors.New("response error") + ErrResponseJsonError = &Error{Data: map[string]interface{}{ + "stackstrace": map[string]interface{}{"0": "foo()"}, + "error": "a message", + }} +) + +type Service1Request struct { + A int + B int +} + +type Service1Response struct { + Result int +} + +type Service1 struct { +} + +func (t *Service1) Multiply(r *http.Request, req *Service1Request, res *Service1Response) error { + res.Result = req.A * req.B + return nil +} + +func (t *Service1) ResponseError(r *http.Request, req *Service1Request, res *Service1Response) error { + return ErrResponseError +} + +func (t *Service1) ResponseJsonError(r *http.Request, req *Service1Request, res *Service1Response) error { + return ErrResponseJsonError +} + +func execute(t *testing.T, s *rpc.Server, method string, req, res interface{}) error { + if !s.HasMethod(method) { + t.Fatal("Expected to be registered:", method) + } + + buf, _ := EncodeClientRequest(method, req) + body := bytes.NewBuffer(buf) + r, _ := http.NewRequest("POST", "http://localhost:8080/", body) + r.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + s.ServeHTTP(w, r) + + return DecodeClientResponse(w.Body, res) +} + +func executeRaw(t *testing.T, s *rpc.Server, req json.RawMessage) (int, *bytes.Buffer) { + r, _ := http.NewRequest("POST", "http://localhost:8080/", bytes.NewBuffer(req)) + r.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + s.ServeHTTP(w, r) + + return w.Code, w.Body +} + +func field(name string, blob json.RawMessage) (v interface{}, ok bool) { + var obj map[string]interface{} + if err := json.Unmarshal(blob, &obj); err != nil { + return nil, false + } + v, ok = obj[name] + return +} + +func TestService(t *testing.T) { + s := rpc.NewServer() + s.RegisterCodec(NewCodec(), "application/json") + s.RegisterService(new(Service1), "") + + var res Service1Response + if err := execute(t, s, "Service1.Multiply", &Service1Request{4, 2}, &res); err != nil { + t.Error("Expected err to be nil, but got", err) + } + if res.Result != 8 { + t.Error("Expected res.Result to be 8, but got", res.Result) + } + if err := execute(t, s, "Service1.ResponseError", &Service1Request{4, 2}, &res); err == nil { + t.Errorf("Expected to get %q, but got nil", ErrResponseError) + } else if err.Error() != ErrResponseError.Error() { + t.Errorf("Expected to get %q, but got %q", ErrResponseError, err) + } + if code, res := executeRaw(t, s, json.RawMessage(`{"method":"Service1.Multiply","params":null,"id":5}`)); code != 400 { + t.Error("Expected response code to be 400, but got", code) + } else if v, ok := field("result", res.Bytes()); !ok || v != nil { + t.Errorf("Expected ok to be true and v to be nil, but got %v and %v", ok, v) + } + if err := execute(t, s, "Service1.ResponseJsonError", &Service1Request{4, 2}, &res); err == nil { + t.Errorf("Expected to get %q, but got nil", ErrResponseError) + } else if jsonErr, ok := err.(*Error); !ok { + t.Error("Expected err to be of a *json.Error type") + } else if !reflect.DeepEqual(jsonErr.Data, ErrResponseJsonError.Data) { + t.Errorf("Expected jsonErr to be %q, but got %q", ErrResponseJsonError, jsonErr) + } +} + +func TestClientNullResult(t *testing.T) { + data := `{"jsonrpc": "2.0", "id": 8674665223082153551, "result": null}` + reader := bytes.NewReader([]byte(data)) + + var reply 
interface{} + + err := DecodeClientResponse(reader, &reply) + if err == nil { + t.Fatal(err) + } + if err.Error() != "Unexpected null result" { + t.Fatalf("Unexpected error: %s", err) + } +} diff --git a/vendor/github.com/gorilla/rpc/v2/json/server.go b/vendor/github.com/gorilla/rpc/v2/json/server.go new file mode 100644 index 000000000..8fafbe3ad --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/json/server.go @@ -0,0 +1,155 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/gorilla/rpc/v2" +) + +var null = json.RawMessage([]byte("null")) + +// An Error is a wrapper for a JSON interface value. It can be used by either +// a service's handler func to write more complex JSON data to an error field +// of a server's response, or by a client to read it. +type Error struct { + Data interface{} +} + +func (e *Error) Error() string { + return fmt.Sprintf("%v", e.Data) +} + +// ---------------------------------------------------------------------------- +// Request and Response +// ---------------------------------------------------------------------------- + +// serverRequest represents a JSON-RPC request received by the server. +type serverRequest struct { + // A String containing the name of the method to be invoked. + Method string `json:"method"` + // An Array of objects to pass as arguments to the method. + Params *json.RawMessage `json:"params"` + // The request id. This can be of any type. It is used to match the + // response with the request that it is replying to. + Id *json.RawMessage `json:"id"` +} + +// serverResponse represents a JSON-RPC response returned by the server. +type serverResponse struct { + // The Object that was returned by the invoked method. This must be null + // in case there was an error invoking the method. + Result interface{} `json:"result"` + // An Error object if there was an error invoking the method. It must be + // null if there was no error. + Error interface{} `json:"error"` + // This must be the same id as the request it is responding to. + Id *json.RawMessage `json:"id"` +} + +// ---------------------------------------------------------------------------- +// Codec +// ---------------------------------------------------------------------------- + +// NewCodec returns a new JSON Codec. +func NewCodec() *Codec { + return &Codec{} +} + +// Codec creates a CodecRequest to process each request. +type Codec struct { +} + +// NewRequest returns a CodecRequest. +func (c *Codec) NewRequest(r *http.Request) rpc.CodecRequest { + return newCodecRequest(r) +} + +// ---------------------------------------------------------------------------- +// CodecRequest +// ---------------------------------------------------------------------------- + +// newCodecRequest returns a new CodecRequest. +func newCodecRequest(r *http.Request) rpc.CodecRequest { + // Decode the request body and check if RPC method is valid. + req := new(serverRequest) + err := json.NewDecoder(r.Body).Decode(req) + r.Body.Close() + return &CodecRequest{request: req, err: err} +} + +// CodecRequest decodes and encodes a single request. +type CodecRequest struct { + request *serverRequest + err error +} + +// Method returns the RPC method for the current request. +// +// The method uses a dotted notation as in "Service.Method". 
+func (c *CodecRequest) Method() (string, error) { + if c.err == nil { + return c.request.Method, nil + } + return "", c.err +} + +// ReadRequest fills the request object for the RPC method. +func (c *CodecRequest) ReadRequest(args interface{}) error { + if c.err == nil { + if c.request.Params != nil { + // JSON params is array value. RPC params is struct. + // Unmarshal into array containing the request struct. + params := [1]interface{}{args} + c.err = json.Unmarshal(*c.request.Params, ¶ms) + } else { + c.err = errors.New("rpc: method request ill-formed: missing params field") + } + } + return c.err +} + +// WriteResponse encodes the response and writes it to the ResponseWriter. +func (c *CodecRequest) WriteResponse(w http.ResponseWriter, reply interface{}) { + if c.request.Id != nil { + // Id is null for notifications and they don't have a response. + res := &serverResponse{ + Result: reply, + Error: &null, + Id: c.request.Id, + } + c.writeServerResponse(w, 200, res) + } +} + +func (c *CodecRequest) WriteError(w http.ResponseWriter, _ int, err error) { + res := &serverResponse{ + Result: &null, + Id: c.request.Id, + } + if jsonErr, ok := err.(*Error); ok { + res.Error = jsonErr.Data + } else { + res.Error = err.Error() + } + c.writeServerResponse(w, 400, res) +} + +func (c *CodecRequest) writeServerResponse(w http.ResponseWriter, status int, res *serverResponse) { + b, err := json.Marshal(res) + if err == nil { + w.WriteHeader(status) + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Write(b) + } else { + // Not sure in which case will this happen. But seems harmless. + rpc.WriteError(w, 400, err.Error()) + } +} diff --git a/vendor/github.com/gorilla/rpc/v2/map.go b/vendor/github.com/gorilla/rpc/v2/map.go new file mode 100644 index 000000000..dda42161c --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/map.go @@ -0,0 +1,164 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "fmt" + "net/http" + "reflect" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +var ( + // Precompute the reflect.Type of error and http.Request + typeOfError = reflect.TypeOf((*error)(nil)).Elem() + typeOfRequest = reflect.TypeOf((*http.Request)(nil)).Elem() +) + +// ---------------------------------------------------------------------------- +// service +// ---------------------------------------------------------------------------- + +type service struct { + name string // name of service + rcvr reflect.Value // receiver of methods for the service + rcvrType reflect.Type // type of the receiver + methods map[string]*serviceMethod // registered methods +} + +type serviceMethod struct { + method reflect.Method // receiver method + argsType reflect.Type // type of the request argument + replyType reflect.Type // type of the response argument +} + +// ---------------------------------------------------------------------------- +// serviceMap +// ---------------------------------------------------------------------------- + +// serviceMap is a registry for services. +type serviceMap struct { + mutex sync.Mutex + services map[string]*service +} + +// register adds a new service using reflection to extract its methods. +func (m *serviceMap) register(rcvr interface{}, name string) error { + // Setup service. 
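+	// Capture the receiver's value and type up front; the loop below
+	// walks the receiver's method set via reflection and keeps only
+	// methods of the form m(*http.Request, *args, *reply) error.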
+ s := &service{ + name: name, + rcvr: reflect.ValueOf(rcvr), + rcvrType: reflect.TypeOf(rcvr), + methods: make(map[string]*serviceMethod), + } + if name == "" { + s.name = reflect.Indirect(s.rcvr).Type().Name() + if !isExported(s.name) { + return fmt.Errorf("rpc: type %q is not exported", s.name) + } + } + if s.name == "" { + return fmt.Errorf("rpc: no service name for type %q", + s.rcvrType.String()) + } + // Setup methods. + for i := 0; i < s.rcvrType.NumMethod(); i++ { + method := s.rcvrType.Method(i) + mtype := method.Type + // Method must be exported. + if method.PkgPath != "" { + continue + } + // Method needs four ins: receiver, *http.Request, *args, *reply. + if mtype.NumIn() != 4 { + continue + } + // First argument must be a pointer and must be http.Request. + reqType := mtype.In(1) + if reqType.Kind() != reflect.Ptr || reqType.Elem() != typeOfRequest { + continue + } + // Second argument must be a pointer and must be exported. + args := mtype.In(2) + if args.Kind() != reflect.Ptr || !isExportedOrBuiltin(args) { + continue + } + // Third argument must be a pointer and must be exported. + reply := mtype.In(3) + if reply.Kind() != reflect.Ptr || !isExportedOrBuiltin(reply) { + continue + } + // Method needs one out: error. + if mtype.NumOut() != 1 { + continue + } + if returnType := mtype.Out(0); returnType != typeOfError { + continue + } + s.methods[method.Name] = &serviceMethod{ + method: method, + argsType: args.Elem(), + replyType: reply.Elem(), + } + } + if len(s.methods) == 0 { + return fmt.Errorf("rpc: %q has no exported methods of suitable type", + s.name) + } + // Add to the map. + m.mutex.Lock() + defer m.mutex.Unlock() + if m.services == nil { + m.services = make(map[string]*service) + } else if _, ok := m.services[s.name]; ok { + return fmt.Errorf("rpc: service already defined: %q", s.name) + } + m.services[s.name] = s + return nil +} + +// get returns a registered service given a method name. +// +// The method name uses a dotted notation as in "Service.Method". +func (m *serviceMap) get(method string) (*service, *serviceMethod, error) { + parts := strings.Split(method, ".") + if len(parts) != 2 { + err := fmt.Errorf("rpc: service/method request ill-formed: %q", method) + return nil, nil, err + } + m.mutex.Lock() + service := m.services[parts[0]] + m.mutex.Unlock() + if service == nil { + err := fmt.Errorf("rpc: can't find service %q", method) + return nil, nil, err + } + serviceMethod := service.methods[parts[1]] + if serviceMethod == nil { + err := fmt.Errorf("rpc: can't find method %q", method) + return nil, nil, err + } + return service, serviceMethod, nil +} + +// isExported returns true of a string is an exported (upper case) name. +func isExported(name string) bool { + rune, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(rune) +} + +// isExportedOrBuiltin returns true if a type is exported or a builtin. +func isExportedOrBuiltin(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + // PkgPath will be non-empty even for an exported type, + // so we need to check the type name as well. + return isExported(t.Name()) || t.PkgPath() == "" +} diff --git a/vendor/github.com/gorilla/rpc/v2/server.go b/vendor/github.com/gorilla/rpc/v2/server.go new file mode 100644 index 000000000..b552cb3c7 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/server.go @@ -0,0 +1,158 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "fmt" + "net/http" + "reflect" + "strings" +) + +// ---------------------------------------------------------------------------- +// Codec +// ---------------------------------------------------------------------------- + +// Codec creates a CodecRequest to process each request. +type Codec interface { + NewRequest(*http.Request) CodecRequest +} + +// CodecRequest decodes a request and encodes a response using a specific +// serialization scheme. +type CodecRequest interface { + // Reads the request and returns the RPC method name. + Method() (string, error) + // Reads the request filling the RPC method args. + ReadRequest(interface{}) error + // Writes the response using the RPC method reply. + WriteResponse(http.ResponseWriter, interface{}) + // Writes an error produced by the server. + WriteError(w http.ResponseWriter, status int, err error) +} + +// ---------------------------------------------------------------------------- +// Server +// ---------------------------------------------------------------------------- + +// NewServer returns a new RPC server. +func NewServer() *Server { + return &Server{ + codecs: make(map[string]Codec), + services: new(serviceMap), + } +} + +// Server serves registered RPC services using registered codecs. +type Server struct { + codecs map[string]Codec + services *serviceMap +} + +// RegisterCodec adds a new codec to the server. +// +// Codecs are defined to process a given serialization scheme, e.g., JSON or +// XML. A codec is chosen based on the "Content-Type" header from the request, +// excluding the charset definition. +func (s *Server) RegisterCodec(codec Codec, contentType string) { + s.codecs[strings.ToLower(contentType)] = codec +} + +// RegisterService adds a new service to the server. +// +// The name parameter is optional: if empty it will be inferred from +// the receiver type name. +// +// Methods from the receiver will be extracted if these rules are satisfied: +// +// - The receiver is exported (begins with an upper case letter) or local +// (defined in the package registering the service). +// - The method name is exported. +// - The method has three arguments: *http.Request, *args, *reply. +// - All three arguments are pointers. +// - The second and third arguments are exported or local. +// - The method has return type error. +// +// All other methods are ignored. +func (s *Server) RegisterService(receiver interface{}, name string) error { + return s.services.register(receiver, name) +} + +// HasMethod returns true if the given method is registered. +// +// The method uses a dotted notation as in "Service.Method". +func (s *Server) HasMethod(method string) bool { + if _, _, err := s.services.get(method); err == nil { + return true + } + return false +} + +// ServeHTTP +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + WriteError(w, 405, "rpc: POST method required, received "+r.Method) + return + } + contentType := r.Header.Get("Content-Type") + idx := strings.Index(contentType, ";") + if idx != -1 { + contentType = contentType[:idx] + } + codec := s.codecs[strings.ToLower(contentType)] + if codec == nil { + WriteError(w, 415, "rpc: unrecognized Content-Type: "+contentType) + return + } + // Create a new codec request. + codecReq := codec.NewRequest(r) + // Get service method to be called. 
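+	// Method names use the dotted "Service.Method" form; a decode failure
+	// or an unknown method is reported back through the codec as a 400.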
+ method, errMethod := codecReq.Method() + if errMethod != nil { + codecReq.WriteError(w, 400, errMethod) + return + } + serviceSpec, methodSpec, errGet := s.services.get(method) + if errGet != nil { + codecReq.WriteError(w, 400, errGet) + return + } + // Decode the args. + args := reflect.New(methodSpec.argsType) + if errRead := codecReq.ReadRequest(args.Interface()); errRead != nil { + codecReq.WriteError(w, 400, errRead) + return + } + // Call the service method. + reply := reflect.New(methodSpec.replyType) + errValue := methodSpec.method.Func.Call([]reflect.Value{ + serviceSpec.rcvr, + reflect.ValueOf(r), + args, + reply, + }) + // Cast the result to error if needed. + var errResult error + errInter := errValue[0].Interface() + if errInter != nil { + errResult = errInter.(error) + } + // Prevents Internet Explorer from MIME-sniffing a response away + // from the declared content-type + w.Header().Set("x-content-type-options", "nosniff") + // Encode the response. + if errResult == nil { + codecReq.WriteResponse(w, reply.Interface()) + } else { + codecReq.WriteError(w, 400, errResult) + } +} + +func WriteError(w http.ResponseWriter, status int, msg string) { + w.WriteHeader(status) + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprint(w, msg) +} diff --git a/vendor/github.com/gorilla/rpc/v2/server_test.go b/vendor/github.com/gorilla/rpc/v2/server_test.go new file mode 100644 index 000000000..d2cddfca2 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/server_test.go @@ -0,0 +1,54 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "net/http" + "testing" +) + +type Service1Request struct { + A int + B int +} + +type Service1Response struct { + Result int +} + +type Service1 struct { +} + +func (t *Service1) Multiply(r *http.Request, req *Service1Request, res *Service1Response) error { + res.Result = req.A * req.B + return nil +} + +type Service2 struct { +} + +func TestRegisterService(t *testing.T) { + var err error + s := NewServer() + service1 := new(Service1) + service2 := new(Service2) + + // Inferred name. + err = s.RegisterService(service1, "") + if err != nil || !s.HasMethod("Service1.Multiply") { + t.Errorf("Expected to be registered: Service1.Multiply") + } + // Provided name. + err = s.RegisterService(service1, "Foo") + if err != nil || !s.HasMethod("Foo.Multiply") { + t.Errorf("Expected to be registered: Foo.Multiply") + } + // No methods. + err = s.RegisterService(service2, "") + if err == nil { + t.Errorf("Expected error on service2") + } +} diff --git a/vendor/github.com/minio/minio-go/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/CONTRIBUTING.md new file mode 100644 index 000000000..b4b224eef --- /dev/null +++ b/vendor/github.com/minio/minio-go/CONTRIBUTING.md @@ -0,0 +1,21 @@ + +### Developer Guidelines + +``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following: + +* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. + - Fork it + - Create your feature branch (git checkout -b my-new-feature) + - Commit your changes (git commit -am 'Add some feature') + - Push to the branch (git push origin my-new-feature) + - Create new Pull Request + +* When you're ready to create a pull request, be sure to: + - Have test cases for the new code. 
If you have questions about how to do it, please ask in your pull request. + - Run `go fmt` + - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. + - Make sure `go test -race ./...` and `go build` completes. + +* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project + - `minio-go` project is strictly conformant with Golang style + - if you happen to observe offending code, please feel free to send a pull request diff --git a/vendor/github.com/minio/minio-go/INSTALLGO.md b/vendor/github.com/minio/minio-go/INSTALLGO.md new file mode 100644 index 000000000..c3762bbfc --- /dev/null +++ b/vendor/github.com/minio/minio-go/INSTALLGO.md @@ -0,0 +1,83 @@ +## Ubuntu (Kylin) 14.04 +### Build Dependencies +This installation guide is based on Ubuntu 14.04+ on x86-64 platform. + +##### Install Git, GCC +```sh +$ sudo apt-get install git build-essential +``` + +##### Install Go 1.5+ + +Download Go 1.5+ from [https://golang.org/dl/](https://golang.org/dl/). + +```sh +$ wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz +$ mkdir -p ${HOME}/bin/ +$ mkdir -p ${HOME}/go/ +$ tar -C ${HOME}/bin/ -xzf go1.5.1.linux-amd64.tar.gz +``` +##### Setup GOROOT and GOPATH + +Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries +and GOPATH specifies the location of your project workspace. + +```sh +export GOROOT=${HOME}/bin/go +export GOPATH=${HOME}/go +export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin +``` +```sh +$ source ~/.bashrc +``` + +##### Testing it all +```sh +$ go env +``` + +## OS X (Yosemite) 10.10 +### Build Dependencies +This installation document assumes OS X Yosemite 10.10+ on x86-64 platform. + +##### Install brew +```sh +$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" +``` + +##### Install Git, Python +```sh +$ brew install git python +``` + +##### Install Go 1.5+ + +Install golang binaries using `brew` + +```sh +$ brew install go +$ mkdir -p $HOME/go +``` + +##### Setup GOROOT and GOPATH + +Add the following exports to your ``~/.bash_profile``. Environment variable GOROOT specifies the location of your golang binaries +and GOPATH specifies the location of your project workspace. + +```sh +export GOPATH=${HOME}/go +export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6) +export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec +export PATH=$PATH:${GOPATH}/bin +``` + +##### Source the new enviornment + +```sh +$ source ~/.bash_profile +``` + +##### Testing it all +```sh +$ go env +``` diff --git a/vendor/github.com/minio/minio-go/LICENSE b/vendor/github.com/minio/minio-go/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/minio/minio-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md
new file mode 100644
index 000000000..6dbef6265
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/MAINTAINERS.md
@@ -0,0 +1,19 @@
+# For maintainers only
+
+## Responsibilities
+
+Please go through this link: [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
+
+### Making new releases
+
+Edit the `libraryVersion` constant in `api.go`.
+
+```
+$ grep libraryVersion api.go
+	libraryVersion = "0.3.0"
+```
+
+```
+$ git tag 0.3.0
+$ git push --tags
+```
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
new file mode 100644
index 000000000..8e4da4317
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -0,0 +1,98 @@
+# Minio Go Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+## Description
+
+The Minio Go library is a simple client library for S3 compatible cloud storage servers. It supports AWS Signature Version 4 and 2; AWS Signature Version 4 is the default.
+
+List of supported cloud storage providers.
+
+ - AWS Signature Version 4
+   - Amazon S3
+   - Minio
+
+ - AWS Signature Version 2
+   - Google Cloud Storage (Compatibility Mode)
+   - Openstack Swift + Swift3 middleware
+   - Ceph Object Gateway
+   - Riak CS
+
+## Install
+
+If you do not have a working Golang environment, please follow [Install Golang](./INSTALLGO.md).
+
+```sh
+$ go get github.com/minio/minio-go
+```
+
+## Example
+
+### ListBuckets()
+
+This example shows how to list your buckets.
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/minio/minio-go"
+)
+
+func main() {
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().
+
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	buckets, err := s3Client.ListBuckets()
+	if err != nil {
+		log.Fatalln(err)
+	}
+	for _, bucket := range buckets {
+		log.Println(bucket)
+	}
+}
+```
+
+## Documentation
+
+### Bucket Operations.
+* [MakeBucket(bucketName, BucketACL, location) error](examples/s3/makebucket.go)
+* [BucketExists(bucketName) error](examples/s3/bucketexists.go)
+* [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
+* [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go)
+* [SetBucketACL(bucketName, BucketACL) error](examples/s3/setbucketacl.go)
+* [ListBuckets() []BucketInfo](examples/s3/listbuckets.go)
+* [ListObjects(bucketName, objectPrefix, recursive, <-chan struct{}) <-chan ObjectInfo](examples/s3/listobjects.go)
+* [ListIncompleteUploads(bucketName, prefix, recursive, <-chan struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go)
+
+### Object Operations.
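+
+The calls in this section are listed below with their signatures. As a quick orientation, here is a minimal sketch (not a complete program) that stats an object and then streams it to stdout. It reuses the `s3Client` from the example above, additionally needs the `io` and `os` imports, and the bucket and object names are placeholders:
+
+```go
+// Fetch object metadata first (name, size, content type).
+objInfo, err := s3Client.StatObject("my-bucketname", "my-objectname")
+if err != nil {
+	log.Fatalln(err)
+}
+log.Println(objInfo.Key, objInfo.Size, objInfo.ContentType)
+
+// GetObject returns a seekable, readable stream; remember to close it.
+object, err := s3Client.GetObject("my-bucketname", "my-objectname")
+if err != nil {
+	log.Fatalln(err)
+}
+defer object.Close()
+if _, err = io.Copy(os.Stdout, object); err != nil {
+	log.Fatalln(err)
+}
+```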
+* [PutObject(bucketName, objectName, io.Reader, contentType) error](examples/s3/putobject.go) +* [GetObject(bucketName, objectName) (*Object, error)](examples/s3/getobject.go) +* [StatObject(bucketName, objectName) (ObjectInfo, error)](examples/s3/statobject.go) +* [RemoveObject(bucketName, objectName) error](examples/s3/removeobject.go) +* [RemoveIncompleteUpload(bucketName, objectName) <-chan error](examples/s3/removeincompleteupload.go) + +### File Object Operations. +* [FPutObject(bucketName, objectName, filePath, contentType) (size, error)](examples/s3/fputobject.go) +* [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go) + +### Presigned Operations. +* [PresignedGetObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedgetobject.go) +* [PresignedPutObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedputobject.go) +* [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go) + +### API Reference + +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/minio/minio-go) + +## Contribute + +[Contributors Guide](./CONTRIBUTING.md) + +[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) [![Build status](https://ci.appveyor.com/api/projects/status/1ep7n2resn6fk1w6?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) diff --git a/vendor/github.com/minio/minio-go/api-definitions.go b/vendor/github.com/minio/minio-go/api-definitions.go new file mode 100644 index 000000000..43532de60 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-definitions.go @@ -0,0 +1,76 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import "time" + +// BucketInfo container for bucket metadata. +type BucketInfo struct { + // The name of the bucket. + Name string `json:"name"` + // Date the bucket was created. + CreationDate time.Time `json:"creationDate"` +} + +// ObjectInfo container for object metadata. +type ObjectInfo struct { + // An ETag is optionally set to md5sum of an object. In case of multipart objects, + // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of + // each parts concatenated into one string. + ETag string `json:"etag"` + + Key string `json:"name"` // Name of the object + LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. + Size int64 `json:"size"` // Size in bytes of the object. + ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. + + // Owner name. + Owner struct { + DisplayName string `json:"name"` + ID string `json:"id"` + } `json:"owner"` + + // The class of storage used to store the object. 
+ StorageClass string `json:"storageClass"` + + // Error + Err error `json:"-"` +} + +// ObjectMultipartInfo container for multipart object metadata. +type ObjectMultipartInfo struct { + // Date and time at which the multipart upload was initiated. + Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Initiator initiator + Owner owner + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass string + + // Key of the object for which the multipart upload was initiated. + Key string + + // Size in bytes of the object. + Size int64 + + // Upload ID that identifies the multipart upload. + UploadID string `xml:"UploadId"` + + // Error + Err error +} diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go new file mode 100644 index 000000000..647165112 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-error-response.go @@ -0,0 +1,236 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/xml" + "fmt" + "net/http" + "strconv" +) + +/* **** SAMPLE ERROR RESPONSE **** + + + AccessDenied + Access Denied + bucketName + objectName + F19772218238A85A + GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD + +*/ + +// ErrorResponse - Is the typed error returned by all API operations. +type ErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string + Message string + BucketName string + Key string + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` + + // Region where the bucket is located. This header is returned + // only in HEAD bucket and ListObjects response. + AmzBucketRegion string +} + +// ToErrorResponse - Returns parsed ErrorResponse struct from body and +// http headers. +// +// For example: +// +// import s3 "github.com/minio/minio-go" +// ... +// ... +// reader, stat, err := s3.GetObject(...) +// if err != nil { +// resp := s3.ToErrorResponse(err) +// } +// ... +func ToErrorResponse(err error) ErrorResponse { + switch err := err.(type) { + case ErrorResponse: + return err + default: + return ErrorResponse{} + } +} + +// Error - Returns HTTP error string +func (e ErrorResponse) Error() string { + return e.Message +} + +// Common string for errors to report issue location in unexpected +// cases. +const ( + reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." +) + +// httpRespToErrorResponse returns a new encoded ErrorResponse +// structure as error. +func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { + if resp == nil { + msg := "Response is empty. " + reportIssue + return ErrInvalidArgument(msg) + } + var errResp ErrorResponse + err := xmlDecoder(resp.Body, &errResp) + // Xml decoding failed with no body, fall back to HTTP headers. 
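+	// The fallback below is a best-effort mapping: 404 becomes NoSuchBucket
+	// or NoSuchKey (depending on whether an object name was passed in), 403
+	// becomes AccessDenied, 409 becomes Conflict, and anything else simply
+	// echoes the HTTP status line.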
+	if err != nil {
+		switch resp.StatusCode {
+		case http.StatusNotFound:
+			if objectName == "" {
+				errResp = ErrorResponse{
+					Code:            "NoSuchBucket",
+					Message:         "The specified bucket does not exist.",
+					BucketName:      bucketName,
+					RequestID:       resp.Header.Get("x-amz-request-id"),
+					HostID:          resp.Header.Get("x-amz-id-2"),
+					AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+				}
+			} else {
+				errResp = ErrorResponse{
+					Code:            "NoSuchKey",
+					Message:         "The specified key does not exist.",
+					BucketName:      bucketName,
+					Key:             objectName,
+					RequestID:       resp.Header.Get("x-amz-request-id"),
+					HostID:          resp.Header.Get("x-amz-id-2"),
+					AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+				}
+			}
+		case http.StatusForbidden:
+			errResp = ErrorResponse{
+				Code:            "AccessDenied",
+				Message:         "Access Denied.",
+				BucketName:      bucketName,
+				Key:             objectName,
+				RequestID:       resp.Header.Get("x-amz-request-id"),
+				HostID:          resp.Header.Get("x-amz-id-2"),
+				AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+			}
+		case http.StatusConflict:
+			errResp = ErrorResponse{
+				Code:            "Conflict",
+				Message:         "Bucket not empty.",
+				BucketName:      bucketName,
+				RequestID:       resp.Header.Get("x-amz-request-id"),
+				HostID:          resp.Header.Get("x-amz-id-2"),
+				AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+			}
+		default:
+			errResp = ErrorResponse{
+				Code:            resp.Status,
+				Message:         resp.Status,
+				BucketName:      bucketName,
+				RequestID:       resp.Header.Get("x-amz-request-id"),
+				HostID:          resp.Header.Get("x-amz-id-2"),
+				AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+			}
+		}
+	}
+
+	// AccessDenied without a signature mismatch code usually means
+	// that the bucket policy has certain restrictions where some API
+	// operations are not allowed. Handle this case so that top level
+	// callers can interpret this easily and fall back if needed to a
+	// lower functionality call. Read each individual API specific
+	// code for such fallbacks.
+	if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied" {
+		errResp.Code = "NotImplemented"
+		errResp.Message = "Operation is not allowed according to your bucket policy."
+	}
+	return errResp
+}
+
+// ErrEntityTooLarge - Input size is larger than supported maximum.
+func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
+	msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
+	return ErrorResponse{
+		Code:       "EntityTooLarge",
+		Message:    msg,
+		BucketName: bucketName,
+		Key:        objectName,
+	}
+}
+
+// ErrEntityTooSmall - Input size is smaller than supported minimum.
+func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
+	msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
+	return ErrorResponse{
+		Code:       "EntityTooSmall",
+		Message:    msg,
+		BucketName: bucketName,
+		Key:        objectName,
+	}
+}
+
+// ErrUnexpectedEOF - Unexpected end of file reached.
+func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
+	msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
+		strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
+	return ErrorResponse{
+		Code:       "UnexpectedEOF",
+		Message:    msg,
+		BucketName: bucketName,
+		Key:        objectName,
+	}
+}
+
+// ErrInvalidBucketName - Invalid bucket name response.
+func ErrInvalidBucketName(message string) error {
+	return ErrorResponse{
+		Code:      "InvalidBucketName",
+		Message:   message,
+		RequestID: "minio",
+	}
+}
+
+// ErrInvalidObjectName - Invalid object name response.
+func ErrInvalidObjectName(message string) error {
+	return ErrorResponse{
+		Code:      "NoSuchKey",
+		Message:   message,
+		RequestID: "minio",
+	}
+}
+
+// ErrInvalidParts - Invalid number of parts.
+func ErrInvalidParts(expectedParts, uploadedParts int) error {
+	msg := fmt.Sprintf("Unexpected number of parts found. Want %d, got %d.", expectedParts, uploadedParts)
+	return ErrorResponse{
+		Code:      "InvalidParts",
+		Message:   msg,
+		RequestID: "minio",
+	}
+}
+
+// ErrInvalidObjectPrefix - Invalid object prefix response is
+// similar to object name response.
+var ErrInvalidObjectPrefix = ErrInvalidObjectName
+
+// ErrInvalidArgument - Invalid argument response.
+func ErrInvalidArgument(message string) error {
+	return ErrorResponse{
+		Code:      "InvalidArgument",
+		Message:   message,
+		RequestID: "minio",
+	}
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go
new file mode 100644
index 000000000..265a58eea
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-object-file.go
@@ -0,0 +1,104 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// FGetObject - download contents of an object to a local file.
+func (c Client) FGetObject(bucketName, objectName, filePath string) error {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	// Verify if destination already exists.
+	st, err := os.Stat(filePath)
+	if err == nil {
+		// If the destination exists and is a directory.
+		if st.IsDir() {
+			return ErrInvalidArgument("fileName is a directory.")
+		}
+	}
+
+	// Proceed if file does not exist. Return for all other errors.
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	// Extract top level directory.
+	objectDir, _ := filepath.Split(filePath)
+	if objectDir != "" {
+		// Create any missing top level directories.
+		if err := os.MkdirAll(objectDir, 0700); err != nil {
+			return err
+		}
+	}
+
+	// Gather the object's metadata; its ETag is used to name the part file.
+	objectStat, err := c.StatObject(bucketName, objectName)
+	if err != nil {
+		return err
+	}
+
+	// Write to a temporary file "<filePath><ETag>.part.minio" before saving.
+	filePartPath := filePath + objectStat.ETag + ".part.minio"
+
+	// If it exists, open in append mode. If not, create it as a part file.
+	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+	if err != nil {
+		return err
+	}
+
+	// Issue Stat to get the current offset.
+	st, err = filePart.Stat()
+	if err != nil {
+		return err
+	}
+
+	// Seek to the current position for the incoming reader.
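+	// Passing the part file's current size as the offset (with length 0,
+	// meaning "until the end of the object") makes the download resumable:
+	// a partial ".part.minio" file left behind by an earlier, interrupted
+	// run is appended to rather than re-downloaded.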
+ objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0) + if err != nil { + return err + } + + // Write to the part file. + if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { + return err + } + + // Close the file before rename, this is specifically needed for Windows users. + if err = filePart.Close(); err != nil { + return err + } + + // Safely completed. Now commit by renaming to actual filename. + if err = os.Rename(filePartPath, filePath); err != nil { + return err + } + + // Return. + return nil +} diff --git a/vendor/github.com/minio/minio-go/api-get.go b/vendor/github.com/minio/minio-go/api-get.go new file mode 100644 index 000000000..56d44c9f5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-get.go @@ -0,0 +1,537 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "errors" + "fmt" + "io" + "math" + "net/http" + "net/url" + "strings" + "sync" + "time" +) + +// GetBucketACL - Get the permissions on an existing bucket. +// +// Returned values are: +// +// private - Owner gets full access. +// public-read - Owner gets full access, others get read access. +// public-read-write - Owner gets full access, others get full access +// too. +// authenticated-read - Owner gets full access, authenticated users +// get read access. +func (c Client) GetBucketACL(bucketName string) (BucketACL, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return "", err + } + + // Set acl query. + urlValues := make(url.Values) + urlValues.Set("acl", "") + + // Instantiate a new request. + req, err := c.newRequest("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + if err != nil { + return "", err + } + + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return "", err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode access control policy. + policy := accessControlPolicy{} + err = xmlDecoder(resp.Body, &policy) + if err != nil { + return "", err + } + + // We need to avoid following de-serialization check for Google + // Cloud Storage. On Google Cloud Storage "private" canned ACL's + // policy do not have grant list. Treat it as a valid case, check + // for all other vendors. + if !isGoogleEndpoint(c.endpointURL) { + if policy.AccessControlList.Grant == nil { + errorResponse := ErrorResponse{ + Code: "InternalError", + Message: "Access control Grant list is empty. " + reportIssue, + BucketName: bucketName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + return "", errorResponse + } + } + + // Boolean cues to indentify right canned acls. 
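+	// The canned ACL is inferred from the grant list below: AllUsers/READ
+	// implies public-read, AllUsers/WRITE implies public-read-write, and
+	// AuthenticatedUsers/READ implies authenticated-read; none of these
+	// implies private.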
+ var publicRead, publicWrite, authenticatedRead bool + + // Handle grants. + grants := policy.AccessControlList.Grant + for _, g := range grants { + if g.Grantee.URI == "" && g.Permission == "FULL_CONTROL" { + continue + } + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { + authenticatedRead = true + break + } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { + publicWrite = true + } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { + publicRead = true + } + } + + // Verify if acl is authenticated read. + if authenticatedRead { + return BucketACL("authenticated-read"), nil + } + // Verify if acl is private. + if !publicWrite && !publicRead { + return BucketACL("private"), nil + } + // Verify if acl is public-read. + if !publicWrite && publicRead { + return BucketACL("public-read"), nil + } + // Verify if acl is public-read-write. + if publicRead && publicWrite { + return BucketACL("public-read-write"), nil + } + + return "", ErrorResponse{ + Code: "NoSuchBucketPolicy", + Message: "The specified bucket does not have a bucket policy.", + BucketName: bucketName, + RequestID: "minio", + } +} + +// GetObject - returns an seekable, readable object. +func (c Client) GetObject(bucketName, objectName string) (*Object, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return nil, err + } + if err := isValidObjectName(objectName); err != nil { + return nil, err + } + // Send an explicit info to get the actual object size. + objectInfo, err := c.StatObject(bucketName, objectName) + if err != nil { + return nil, err + } + + // Create request channel. + reqCh := make(chan readRequest) + // Create response channel. + resCh := make(chan readResponse) + // Create done channel. + doneCh := make(chan struct{}) + + // This routine feeds partial object data as and when the caller + // reads. + go func() { + defer close(reqCh) + defer close(resCh) + + // Loop through the incoming control messages and read data. + for { + select { + // When the done channel is closed exit our routine. + case <-doneCh: + return + // Request message. + case req := <-reqCh: + // Get shortest length. + // NOTE: Last remaining bytes are usually smaller than + // req.Buffer size. Use that as the final length. + length := math.Min(float64(len(req.Buffer)), float64(objectInfo.Size-req.Offset)) + httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length)) + if err != nil { + resCh <- readResponse{ + Error: err, + } + return + } + size, err := io.ReadFull(httpReader, req.Buffer) + if err == io.ErrUnexpectedEOF { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + resCh <- readResponse{ + Size: int(size), + Error: err, + } + } + } + }() + // Return the readerAt backed by routine. + return newObject(reqCh, resCh, doneCh, objectInfo), nil +} + +// Read response message container to reply back for the request. +type readResponse struct { + Size int + Error error +} + +// Read request message container to communicate with internal +// go-routine. +type readRequest struct { + Buffer []byte + Offset int64 // readAt offset. +} + +// Object represents an open object. It implements Read, ReadAt, +// Seeker, Close for a HTTP stream. +type Object struct { + // Mutex. + mutex *sync.Mutex + + // User allocated and defined. 
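+	// reqCh and resCh communicate with the feeder goroutine spawned by
+	// GetObject; closing doneCh asks that goroutine to exit.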
+ reqCh chan<- readRequest + resCh <-chan readResponse + doneCh chan<- struct{} + currOffset int64 + objectInfo ObjectInfo + + // Keeps track of closed call. + isClosed bool + + // Previous error saved for future calls. + prevErr error +} + +// Read reads up to len(p) bytes into p. It returns the number of +// bytes read (0 <= n <= len(p)) and any error encountered. Returns +// io.EOF upon end of file. +func (o *Object) Read(b []byte) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // Previous prevErr is which was saved in previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + + // If current offset has reached Size limit, return EOF. + if o.currOffset >= o.objectInfo.Size { + return 0, io.EOF + } + + // Send current information over control channel to indicate we + // are ready. + reqMsg := readRequest{} + + // Send the offset and pointer to the buffer over the channel. + reqMsg.Buffer = b + reqMsg.Offset = o.currOffset + + // Send read request over the control channel. + o.reqCh <- reqMsg + + // Get data over the response channel. + dataMsg := <-o.resCh + + // Bytes read. + bytesRead := int64(dataMsg.Size) + + // Update current offset. + o.currOffset += bytesRead + + if dataMsg.Error == nil { + // If currOffset read is equal to objectSize + // We have reached end of file, we return io.EOF. + if o.currOffset >= o.objectInfo.Size { + return dataMsg.Size, io.EOF + } + return dataMsg.Size, nil + } + + // Save any error. + o.prevErr = dataMsg.Error + return dataMsg.Size, dataMsg.Error +} + +// Stat returns the ObjectInfo structure describing object. +func (o *Object) Stat() (ObjectInfo, error) { + if o == nil { + return ObjectInfo{}, ErrInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil || o.isClosed { + return ObjectInfo{}, o.prevErr + } + + return o.objectInfo, nil +} + +// ReadAt reads len(b) bytes from the File starting at byte offset +// off. It returns the number of bytes read and the error, if any. +// ReadAt always returns a non-nil error when n < len(b). At end of +// file, that error is io.EOF. +func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is which was saved in previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + + // If offset is negative and offset is greater than or equal to + // object size we return EOF. + if offset < 0 || offset >= o.objectInfo.Size { + return 0, io.EOF + } + + // Send current information over control channel to indicate we + // are ready. + reqMsg := readRequest{} + + // Send the offset and pointer to the buffer over the channel. + reqMsg.Buffer = b + reqMsg.Offset = offset + + // Send read request over the control channel. + o.reqCh <- reqMsg + + // Get data over the response channel. + dataMsg := <-o.resCh + + // Bytes read. + bytesRead := int64(dataMsg.Size) + + if dataMsg.Error == nil { + // If offset+bytes read is equal to objectSize + // we have reached end of file, we return io.EOF. + if offset+bytesRead == o.objectInfo.Size { + return dataMsg.Size, io.EOF + } + return dataMsg.Size, nil + } + + // Save any error. 
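+	// Subsequent Read/ReadAt/Stat calls short-circuit on this saved error.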
+ o.prevErr = dataMsg.Error + return dataMsg.Size, dataMsg.Error +} + +// Seek sets the offset for the next Read or Write to offset, +// interpreted according to whence: 0 means relative to the +// origin of the file, 1 means relative to the current offset, +// and 2 means relative to the end. +// Seek returns the new offset and an error, if any. +// +// Seeking to a negative offset is an error. Seeking to any positive +// offset is legal, subsequent io operations succeed until the +// underlying object is not closed. +func (o *Object) Seek(offset int64, whence int) (n int64, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil { + // At EOF seeking is legal, for any other errors we return. + if o.prevErr != io.EOF { + return 0, o.prevErr + } + } + + // Negative offset is valid for whence of '2'. + if offset < 0 && whence != 2 { + return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence)) + } + switch whence { + default: + return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) + case 0: + if offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset = offset + case 1: + if o.currOffset+offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset += offset + case 2: + // Seeking to positive offset is valid for whence '2', but + // since we are backing a Reader we have reached 'EOF' if + // offset is positive. + if offset > 0 { + return 0, io.EOF + } + // Seeking to negative position not allowed for whence. + if o.objectInfo.Size+offset < 0 { + return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) + } + o.currOffset += offset + } + // Return the effective offset. + return o.currOffset, nil +} + +// Close - The behavior of Close after the first call returns error +// for subsequent Close() calls. +func (o *Object) Close() (err error) { + if o == nil { + return ErrInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // if already closed return an error. + if o.isClosed { + return o.prevErr + } + + // Close successfully. + close(o.doneCh) + + // Save for future operations. + errMsg := "Object is already closed. Bad file descriptor." + o.prevErr = errors.New(errMsg) + // Save here that we closed done channel successfully. + o.isClosed = true + return nil +} + +// newObject instantiates a new *minio.Object* +func newObject(reqCh chan<- readRequest, resCh <-chan readResponse, doneCh chan<- struct{}, objectInfo ObjectInfo) *Object { + return &Object{ + mutex: &sync.Mutex{}, + reqCh: reqCh, + resCh: resCh, + doneCh: doneCh, + objectInfo: objectInfo, + } +} + +// getObject - retrieve object from Object Storage. +// +// Additionally this function also takes range arguments to download the specified +// range bytes of an object. Setting offset and length = 0 will download the full object. +// +// For more information about the HTTP Range header. +// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. +func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) { + // Validate input arguments. + if err := isValidBucketName(bucketName); err != nil { + return nil, ObjectInfo{}, err + } + if err := isValidObjectName(objectName); err != nil { + return nil, ObjectInfo{}, err + } + + customHeader := make(http.Header) + // Set ranges if length and offset are valid. 
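+	// Three Range forms are produced here: "bytes=o-(o+l-1)" for an offset
+	// plus length, "bytes=o-" for everything from an offset onwards, and
+	// "bytes=-l" (via a negative length) for the last l bytes of the object.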
+ if length > 0 && offset >= 0 { + customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } else if offset > 0 && length == 0 { + customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } else if length < 0 && offset == 0 { + customHeader.Set("Range", fmt.Sprintf("bytes=%d", length)) + } + + // Instantiate a new request. + req, err := c.newRequest("GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + }) + if err != nil { + return nil, ObjectInfo{}, err + } + // Execute the request. + resp, err := c.do(req) + if err != nil { + return nil, ObjectInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Trim off the odd double quotes from ETag in the beginning and end. + md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + md5sum = strings.TrimSuffix(md5sum, "\"") + + // Parse the date. + date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + if err != nil { + msg := "Last-Modified time format not recognized. " + reportIssue + return nil, ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: msg, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + } + // Get content-type. + contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + var objectStat ObjectInfo + objectStat.ETag = md5sum + objectStat.Key = objectName + objectStat.Size = resp.ContentLength + objectStat.LastModified = date + objectStat.ContentType = contentType + + // do not close body here, caller will close + return resp.Body, objectStat, nil +} diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go new file mode 100644 index 000000000..534ac4eb4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-list.go @@ -0,0 +1,539 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "net/http" + "net/url" + "strings" +) + +// ListBuckets list all buckets owned by this authenticated user. +// +// This call requires explicit authentication, no anonymous requests are +// allowed for listing buckets. +// +// api := client.New(....) +// for message := range api.ListBuckets() { +// fmt.Println(message) +// } +// +func (c Client) ListBuckets() ([]BucketInfo, error) { + // Instantiate a new request. + req, err := c.newRequest("GET", requestMetadata{}) + if err != nil { + return nil, err + } + // Initiate the request. 
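+	// A successful response carries a ListAllMyBucketsResult XML document,
+	// decoded below into a slice of BucketInfo.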
+ resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, "", "") + } + } + listAllMyBucketsResult := listAllMyBucketsResult{} + err = xmlDecoder(resp.Body, &listAllMyBucketsResult) + if err != nil { + return nil, err + } + return listAllMyBucketsResult.Buckets.Bucket, nil +} + +// ListObjects - (List Objects) - List some objects or all recursively. +// +// ListObjects lists all objects matching the objectPrefix from +// the specified bucket. If recursion is enabled it would list +// all subdirectories and all its contents. +// +// Your input parameters are just bucketName, objectPrefix, recursive +// and a done channel for pro-actively closing the internal go +// routine. If you enable recursive as 'true' this function will +// return back all the objects in a given bucket name and object +// prefix. +// +// api := client.New(....) +// // Create a done channel. +// doneCh := make(chan struct{}) +// defer close(doneCh) +// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) { +// fmt.Println(message) +// } +// +func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1000) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + // Validate incoming object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + // Save marker for next request. + var marker string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + // Save the marker. + marker = object.Key + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + object := ObjectInfo{} + object.Key = obj.Prefix + object.Size = 0 + select { + // Send object prefixes. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // If next marker present, save it for next request. + if result.NextMarker != "" { + marker = result.NextMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +/// Bucket Read Operations. + +// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. 
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// Request parameters :-
+// ---------
+// ?marker - Specifies the key to start with when listing objects in a bucket.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) {
+	// Validate bucket name.
+	if err := isValidBucketName(bucketName); err != nil {
+		return listBucketResult{}, err
+	}
+	// Validate object prefix.
+	if err := isValidObjectPrefix(objectPrefix); err != nil {
+		return listBucketResult{}, err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	// Set object prefix.
+	if objectPrefix != "" {
+		urlValues.Set("prefix", urlEncodePath(objectPrefix))
+	}
+	// Set object marker.
+	if objectMarker != "" {
+		urlValues.Set("marker", urlEncodePath(objectMarker))
+	}
+	// Set delimiter.
+	if delimiter != "" {
+		urlValues.Set("delimiter", delimiter)
+	}
+
+	// maxkeys should default to 1000 or less.
+	if maxkeys == 0 || maxkeys > 1000 {
+		maxkeys = 1000
+	}
+	// Set max keys.
+	urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+
+	// Initialize a new request.
+	req, err := c.newRequest("GET", requestMetadata{
+		bucketName:  bucketName,
+		queryValues: urlValues,
+	})
+	if err != nil {
+		return listBucketResult{}, err
+	}
+	// Execute the list objects request.
+	resp, err := c.do(req)
+	defer closeResponse(resp)
+	if err != nil {
+		return listBucketResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return listBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	// Decode listBucketResult XML.
+	listBucketResult := listBucketResult{}
+	err = xmlDecoder(resp.Body, &listBucketResult)
+	if err != nil {
+		return listBucketResult, err
+	}
+	return listBucketResult, nil
+}
+
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
+//
+// ListIncompleteUploads lists all incomplete objects matching the
+// objectPrefix from the specified bucket. If recursion is enabled
+// it would list all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel to pro-actively close the internal go routine.
+// If you enable recursive as 'true' this function will return back all
+// the multipart objects in a given bucket name.
+//
+//   api := client.New(....)
+//   // Create a done channel.
+//   doneCh := make(chan struct{})
+//   defer close(doneCh)
+//   // Recursively list all incomplete uploads in 'mytestbucket'.
+//   recursive := true
+//   for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
+//       fmt.Println(message)
+//   }
+//
+func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
+	// Turn on size aggregation of individual parts.
+	isAggregateSize := true
+	return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
+}
+
+// listIncompleteUploads lists all incomplete uploads.
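+// It paginates with key-marker and upload-id-marker and, when aggregateSize
+// is set, sums the sizes of the already-uploaded parts into each entry.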
+func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { + // Allocate channel for multipart uploads. + objectMultipartStatCh := make(chan ObjectMultipartInfo, 1000) + // Delimiter is set to "/" by default. + delimiter := "/" + if recursive { + // If recursive do not delimit. + delimiter = "" + } + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + defer close(objectMultipartStatCh) + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + return objectMultipartStatCh + } + // Validate incoming object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + defer close(objectMultipartStatCh) + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + return objectMultipartStatCh + } + go func(objectMultipartStatCh chan<- ObjectMultipartInfo) { + defer close(objectMultipartStatCh) + // object and upload ID marker for future requests. + var objectMarker string + var uploadIDMarker string + for { + // list all multipart uploads. + result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000) + if err != nil { + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + return + } + // Save objectMarker and uploadIDMarker for next request. + objectMarker = result.NextKeyMarker + uploadIDMarker = result.NextUploadIDMarker + // Send all multipart uploads. + for _, obj := range result.Uploads { + // Calculate total size of the uploaded parts if 'aggregateSize' is enabled. + if aggregateSize { + // Get total multipart size. + obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID) + if err != nil { + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + } + } + select { + // Send individual uploads here. + case objectMultipartStatCh <- obj: + // If done channel return here. + case <-doneCh: + return + } + } + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + object := ObjectMultipartInfo{} + object.Key = obj.Prefix + object.Size = 0 + select { + // Send delimited prefixes here. + case objectMultipartStatCh <- object: + // If done channel return here. + case <-doneCh: + return + } + } + // Listing ends if result not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectMultipartStatCh) + // return. + return objectMultipartStatCh +} + +// listMultipartUploads - (List Multipart Uploads). +// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. +// request parameters. :- +// --------- +// ?key-marker - Specifies the multipart upload after which listing should begin. +// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. +func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) { + // Get resources properly escaped and lined up before using them in http request. 
+ urlValues := make(url.Values) + // Set uploads. + urlValues.Set("uploads", "") + // Set object key marker. + if keyMarker != "" { + urlValues.Set("key-marker", urlEncodePath(keyMarker)) + } + // Set upload id marker. + if uploadIDMarker != "" { + urlValues.Set("upload-id-marker", uploadIDMarker) + } + // Set prefix marker. + if prefix != "" { + urlValues.Set("prefix", urlEncodePath(prefix)) + } + // Set delimiter. + if delimiter != "" { + urlValues.Set("delimiter", delimiter) + } + + // maxUploads should be 1000 or less. + if maxUploads == 0 || maxUploads > 1000 { + maxUploads = 1000 + } + // Set max-uploads. + urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) + + // Instantiate a new request. + req, err := c.newRequest("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + if err != nil { + return listMultipartUploadsResult{}, err + } + // Execute list multipart uploads request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return listMultipartUploadsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return listMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + // Decode response body. + listMultipartUploadsResult := listMultipartUploadsResult{} + err = xmlDecoder(resp.Body, &listMultipartUploadsResult) + if err != nil { + return listMultipartUploadsResult, err + } + return listMultipartUploadsResult, nil +} + +// listObjectParts list all object parts recursively. +func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]objectPart, err error) { + // Part number marker for the next batch of request. + var nextPartNumberMarker int + partsInfo = make(map[int]objectPart) + for { + // Get list of uploaded parts a maximum of 1000 per request. + listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000) + if err != nil { + return nil, err + } + // Append to parts info. + for _, part := range listObjPartsResult.ObjectParts { + // Trim off the odd double quotes from ETag in the beginning and end. + part.ETag = strings.TrimPrefix(part.ETag, "\"") + part.ETag = strings.TrimSuffix(part.ETag, "\"") + partsInfo[part.PartNumber] = part + } + // Keep part number marker, for the next iteration. + nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker + // Listing ends result is not truncated, return right here. + if !listObjPartsResult.IsTruncated { + break + } + } + + // Return all the parts. + return partsInfo, nil +} + +// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name. +func (c Client) findUploadID(bucketName, objectName string) (uploadID string, err error) { + // Make list incomplete uploads recursive. + isRecursive := true + // Turn off size aggregation of individual parts, in this request. + isAggregateSize := false + // latestUpload to track the latest multipart info for objectName. + var latestUpload ObjectMultipartInfo + // Create done channel to cleanup the routine. + doneCh := make(chan struct{}) + defer close(doneCh) + // List all incomplete uploads. + for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) { + if mpUpload.Err != nil { + return "", mpUpload.Err + } + if objectName == mpUpload.Key { + if mpUpload.Initiated.Sub(latestUpload.Initiated) > 0 { + latestUpload = mpUpload + } + } + } + // Return the latest upload id. 
+	return latestUpload.UploadID, nil
+}
+
+// getTotalMultipartSize - calculate total uploaded size for a given multipart object.
+func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
+	// Iterate over all parts and aggregate the size.
+	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+	if err != nil {
+		return 0, err
+	}
+	for _, partInfo := range partsInfo {
+		size += partInfo.Size
+	}
+	return size, nil
+}
+
+// listObjectPartsQuery (List Parts query)
+//     - lists some or all (up to 1000) parts that have been uploaded
+//     for a specific multipart upload
+//
+// You can use the request parameters as selection criteria to return
+// a subset of the uploads in a bucket, request parameters :-
+// ---------
+// ?part-number-marker - Specifies the part after which listing should
+// begin.
+// ?max-parts - Maximum parts to be listed per request.
+func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
+	// Get resources properly escaped and lined up before using them in http request.
+	urlValues := make(url.Values)
+	// Set part number marker.
+	urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
+	// Set upload id.
+	urlValues.Set("uploadId", uploadID)
+
+	// maxParts should be 1000 or less.
+	if maxParts == 0 || maxParts > 1000 {
+		maxParts = 1000
+	}
+	// Set max parts.
+	urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
+
+	req, err := c.newRequest("GET", requestMetadata{
+		bucketName:  bucketName,
+		objectName:  objectName,
+		queryValues: urlValues,
+	})
+	if err != nil {
+		return listObjectPartsResult{}, err
+	}
+	// Execute list object parts.
+	resp, err := c.do(req)
+	defer closeResponse(resp)
+	if err != nil {
+		return listObjectPartsResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return listObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	// Decode list object parts XML.
+	listObjectPartsResult := listObjectPartsResult{}
+	err = xmlDecoder(resp.Body, &listObjectPartsResult)
+	if err != nil {
+		return listObjectPartsResult, err
+	}
+	return listObjectPartsResult, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
new file mode 100644
index 000000000..0f350d22e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -0,0 +1,144 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"errors"
+	"time"
+)
+
+// presignURL - Returns a presigned URL for an input 'method'.
+// The maximum expiry is 7 days (604800 seconds) and the minimum is 1 second.
+func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration) (url string, err error) {
+	// Input validation.
+ if err := isValidBucketName(bucketName); err != nil { + return "", err + } + if err := isValidObjectName(objectName); err != nil { + return "", err + } + if err := isValidExpiry(expires); err != nil { + return "", err + } + + if method == "" { + return "", ErrInvalidArgument("method cannot be empty.") + } + + expireSeconds := int64(expires / time.Second) + // Instantiate a new request. + // Since expires is set newRequest will presign the request. + req, err := c.newRequest(method, requestMetadata{ + presignURL: true, + bucketName: bucketName, + objectName: objectName, + expires: expireSeconds, + }) + if err != nil { + return "", err + } + return req.URL.String(), nil +} + +// PresignedGetObject - Returns a presigned URL to access an object without credentials. +// Expires maximum is 7days - ie. 604800 and minimum is 1. +func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration) (url string, err error) { + return c.presignURL("GET", bucketName, objectName, expires) +} + +// PresignedPutObject - Returns a presigned URL to upload an object without credentials. +// Expires maximum is 7days - ie. 604800 and minimum is 1. +func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (url string, err error) { + return c.presignURL("PUT", bucketName, objectName, expires) +} + +// PresignedPostPolicy - Returns POST form data to upload an object at a location. +func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { + // Validate input arguments. + if p.expiration.IsZero() { + return nil, errors.New("Expiration time must be specified") + } + if _, ok := p.formData["key"]; !ok { + return nil, errors.New("object key must be specified") + } + if _, ok := p.formData["bucket"]; !ok { + return nil, errors.New("bucket name must be specified") + } + + bucketName := p.formData["bucket"] + // Fetch the bucket location. + location, err := c.getBucketLocation(bucketName) + if err != nil { + return nil, err + } + + // Keep time. + t := time.Now().UTC() + // For signature version '2' handle here. + if c.signature.isV2() { + policyBase64 := p.base64() + p.formData["policy"] = policyBase64 + // For Google endpoint set this value to be 'GoogleAccessId'. + if isGoogleEndpoint(c.endpointURL) { + p.formData["GoogleAccessId"] = c.accessKeyID + } else { + // For all other endpoints set this value to be 'AWSAccessKeyId'. + p.formData["AWSAccessKeyId"] = c.accessKeyID + } + // Sign the policy. + p.formData["signature"] = postPresignSignatureV2(policyBase64, c.secretAccessKey) + return p.formData, nil + } + + // Add date policy. + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-date", + value: t.Format(iso8601DateFormat), + }); err != nil { + return nil, err + } + + // Add algorithm policy. + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-algorithm", + value: signV4Algorithm, + }); err != nil { + return nil, err + } + + // Add a credential policy. + credential := getCredential(c.accessKeyID, location, t) + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-credential", + value: credential, + }); err != nil { + return nil, err + } + + // Get base64 encoded policy. + policyBase64 := p.base64() + // Fill in the form data. 
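+	// The base64 policy together with the V4 algorithm, credential, date and signature fields forms the complete POST form.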
+ p.formData["policy"] = policyBase64 + p.formData["x-amz-algorithm"] = signV4Algorithm + p.formData["x-amz-credential"] = credential + p.formData["x-amz-date"] = t.Format(iso8601DateFormat) + p.formData["x-amz-signature"] = postPresignSignatureV4(policyBase64, t, c.secretAccessKey, location) + return p.formData, nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go new file mode 100644 index 000000000..f9431136c --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-bucket.go @@ -0,0 +1,214 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "encoding/hex" + "encoding/xml" + "io/ioutil" + "net/http" + "net/url" +) + +/// Bucket operations + +// MakeBucket makes a new bucket. +// +// Optional arguments are acl and location - by default all buckets are created +// with ``private`` acl and in US Standard region. +// +// ACL valid values - http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// +// private - owner gets full access [default]. +// public-read - owner gets full access, all others get read access. +// public-read-write - owner gets full access, all others get full access too. +// authenticated-read - owner gets full access, authenticated users get read access. +// +// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html +// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations +func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) error { + // Validate the input arguments. + if err := isValidBucketName(bucketName); err != nil { + return err + } + if !acl.isValidBucketACL() { + return ErrInvalidArgument("Unrecognized ACL " + acl.String()) + } + + // If location is empty, treat is a default region 'us-east-1'. + if location == "" { + location = "us-east-1" + } + + // Instantiate the request. + req, err := c.makeBucketRequest(bucketName, acl, location) + if err != nil { + return err + } + + // Execute the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Save the location into cache on a successfull makeBucket response. + c.bucketLocCache.Set(bucketName, location) + + // Return. + return nil +} + +// makeBucketRequest constructs request for makeBucket. +func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location string) (*http.Request, error) { + // Validate input arguments. + if err := isValidBucketName(bucketName); err != nil { + return nil, err + } + if !acl.isValidBucketACL() { + return nil, ErrInvalidArgument("Unrecognized ACL " + acl.String()) + } + + // Set get bucket location always as path style. 
+ targetURL := *c.endpointURL + if bucketName != "" { + // If endpoint supports virtual host style use that always. + // Currently only S3 and Google Cloud Storage would support this. + if isVirtualHostSupported(c.endpointURL, bucketName) { + targetURL.Host = bucketName + "." + c.endpointURL.Host + targetURL.Path = "/" + } else { + // If not fall back to using path style. + targetURL.Path = "/" + bucketName + } + } + + // get a new HTTP request for the method. + req, err := http.NewRequest("PUT", targetURL.String(), nil) + if err != nil { + return nil, err + } + + // by default bucket acl is set to private. + req.Header.Set("x-amz-acl", "private") + if acl != "" { + req.Header.Set("x-amz-acl", string(acl)) + } + + // set UserAgent for the request. + c.setUserAgent(req) + + // set sha256 sum for signature calculation only with signature version '4'. + if c.signature.isV4() { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) + } + + // If location is not 'us-east-1' create bucket location config. + if location != "us-east-1" && location != "" { + createBucketConfig := createBucketConfiguration{} + createBucketConfig.Location = location + var createBucketConfigBytes []byte + createBucketConfigBytes, err = xml.Marshal(createBucketConfig) + if err != nil { + return nil, err + } + createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes) + req.Body = ioutil.NopCloser(createBucketConfigBuffer) + req.ContentLength = int64(createBucketConfigBuffer.Len()) + if c.signature.isV4() { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBuffer.Bytes()))) + } + } + + // Sign the request. + if c.signature.isV4() { + // Signature calculated for MakeBucket request should be for 'us-east-1', + // regardless of the bucket's location constraint. + req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") + } else if c.signature.isV2() { + req = signV2(*req, c.accessKeyID, c.secretAccessKey) + } + + // Return signed request. + return req, nil +} + +// SetBucketACL set the permissions on an existing bucket using access control lists (ACL). +// +// For example +// +// private - owner gets full access [default]. +// public-read - owner gets full access, all others get read access. +// public-read-write - owner gets full access, all others get full access too. +// authenticated-read - owner gets full access, authenticated users get read access. +func (c Client) SetBucketACL(bucketName string, acl BucketACL) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + if !acl.isValidBucketACL() { + return ErrInvalidArgument("Unrecognized ACL " + acl.String()) + } + + // Set acl query. + urlValues := make(url.Values) + urlValues.Set("acl", "") + + // Add misc headers. + customHeader := make(http.Header) + + if acl != "" { + customHeader.Set("x-amz-acl", acl.String()) + } else { + customHeader.Set("x-amz-acl", "private") + } + + // Instantiate a new request. + req, err := c.newRequest("PUT", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + customHeader: customHeader, + }) + if err != nil { + return err + } + + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + // if error return. 
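+		// Any non-200 status is converted into a typed ErrorResponse.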
+ if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // return + return nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go new file mode 100644 index 000000000..beab6d6cc --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-common.go @@ -0,0 +1,240 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "crypto/md5" + "crypto/sha256" + "hash" + "io" + "math" + "os" +) + +// Verify if reader is *os.File +func isFile(reader io.Reader) (ok bool) { + _, ok = reader.(*os.File) + return +} + +// Verify if reader is *minio.Object +func isObject(reader io.Reader) (ok bool) { + _, ok = reader.(*Object) + return +} + +// Verify if reader is a generic ReaderAt +func isReadAt(reader io.Reader) (ok bool) { + _, ok = reader.(io.ReaderAt) + return +} + +// shouldUploadPart - verify if part should be uploaded. +func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool { + // If part not found should upload the part. + uploadedPart, found := objectParts[objPart.PartNumber] + if !found { + return true + } + // if size mismatches should upload the part. + if objPart.Size != uploadedPart.Size { + return true + } + // if md5sum mismatches should upload the part. + if objPart.ETag == uploadedPart.ETag { + return true + } + return false +} + +// optimalPartInfo - calculate the optimal part info for a given +// object size. +// +// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible +// object storage it will have the following parameters as constants. +// +// maxPartsCount - 10000 +// minPartSize - 5MiB +// maxMultipartPutObjectSize - 5TiB +// +func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { + // object size is '-1' set it to 5TiB. + if objectSize == -1 { + objectSize = maxMultipartPutObjectSize + } + // object size is larger than supported maximum. + if objectSize > maxMultipartPutObjectSize { + err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") + return + } + // Use floats for part size for all calculations to avoid + // overflows during float64 to int64 conversions. + partSizeFlt := math.Ceil(float64(objectSize / maxPartsCount)) + partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize + // Total parts count. + totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt)) + // Part size. + partSize = int64(partSizeFlt) + // Last part size. + lastPartSize = objectSize - int64(totalPartsCount-1)*partSize + return totalPartsCount, partSize, lastPartSize, nil +} + +// hashCopyBuffer is identical to hashCopyN except that it stages +// through the provided buffer (if one is required) rather than +// allocating a temporary one. If buf is nil, one is allocated for 5MiB. 
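+// The sha256Sum return value is non-nil only when signature V4 is in use.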
+func (c Client) hashCopyBuffer(writer io.Writer, reader io.Reader, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) { + // MD5 and SHA256 hasher. + var hashMD5, hashSHA256 hash.Hash + // MD5 and SHA256 hasher. + hashMD5 = md5.New() + hashWriter := io.MultiWriter(writer, hashMD5) + if c.signature.isV4() { + hashSHA256 = sha256.New() + hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256) + } + + // Allocate buf if not initialized. + if buf == nil { + buf = make([]byte, optimalReadBufferSize) + } + + // Using io.CopyBuffer to copy in large buffers, default buffer + // for io.Copy of 32KiB is too small. + size, err = io.CopyBuffer(hashWriter, reader, buf) + if err != nil { + return nil, nil, 0, err + } + + // Finalize md5 sum and sha256 sum. + md5Sum = hashMD5.Sum(nil) + if c.signature.isV4() { + sha256Sum = hashSHA256.Sum(nil) + } + return md5Sum, sha256Sum, size, err +} + +// hashCopyN - Calculates Md5sum and SHA256sum for up to partSize amount of bytes. +func (c Client) hashCopyN(writer io.Writer, reader io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) { + // MD5 and SHA256 hasher. + var hashMD5, hashSHA256 hash.Hash + // MD5 and SHA256 hasher. + hashMD5 = md5.New() + hashWriter := io.MultiWriter(writer, hashMD5) + if c.signature.isV4() { + hashSHA256 = sha256.New() + hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256) + } + + // Copies to input at writer. + size, err = io.CopyN(hashWriter, reader, partSize) + if err != nil { + // If not EOF return error right here. + if err != io.EOF { + return nil, nil, 0, err + } + } + + // Finalize md5shum and sha256 sum. + md5Sum = hashMD5.Sum(nil) + if c.signature.isV4() { + sha256Sum = hashSHA256.Sum(nil) + } + return md5Sum, sha256Sum, size, err +} + +// getUploadID - fetch upload id if already present for an object name +// or initiate a new request to fetch a new upload id. +func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return "", false, err + } + if err := isValidObjectName(objectName); err != nil { + return "", false, err + } + + // Set content Type to default if empty string. + if contentType == "" { + contentType = "application/octet-stream" + } + + // Find upload id for previous upload for an object. + uploadID, err = c.findUploadID(bucketName, objectName) + if err != nil { + return "", false, err + } + if uploadID == "" { + // Initiate multipart upload for an object. + initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType) + if err != nil { + return "", false, err + } + // Save the new upload id. + uploadID = initMultipartUploadResult.UploadID + // Indicate that this is a new upload id. + isNew = true + } + return uploadID, isNew, nil +} + +// computeHashBuffer - Calculates MD5 and SHA256 for an input read +// Seeker is identical to computeHash except that it stages +// through the provided buffer (if one is required) rather than +// allocating a temporary one. If buf is nil, it uses a temporary +// buffer. +func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) { + // MD5 and SHA256 hasher. + var hashMD5, hashSHA256 hash.Hash + // MD5 and SHA256 hasher. 
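+	// As elsewhere, the SHA256 hasher is initialized only for signature V4.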
+ hashMD5 = md5.New() + hashWriter := io.MultiWriter(hashMD5) + if c.signature.isV4() { + hashSHA256 = sha256.New() + hashWriter = io.MultiWriter(hashMD5, hashSHA256) + } + + // If no buffer is provided, no need to allocate just use io.Copy. + if buf == nil { + size, err = io.Copy(hashWriter, reader) + if err != nil { + return nil, nil, 0, err + } + } else { + size, err = io.CopyBuffer(hashWriter, reader, buf) + if err != nil { + return nil, nil, 0, err + } + } + + // Seek back reader to the beginning location. + if _, err := reader.Seek(0, 0); err != nil { + return nil, nil, 0, err + } + + // Finalize md5shum and sha256 sum. + md5Sum = hashMD5.Sum(nil) + if c.signature.isV4() { + sha256Sum = hashSHA256.Sum(nil) + } + return md5Sum, sha256Sum, size, nil +} + +// computeHash - Calculates MD5 and SHA256 for an input read Seeker. +func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) { + return c.computeHashBuffer(reader, nil) +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go new file mode 100644 index 000000000..b7fa86fd8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-file.go @@ -0,0 +1,240 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "sort" +) + +// FPutObject - Create an object in a bucket, with contents from file at filePath. +func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + + // Open the referenced file. + fileReader, err := os.Open(filePath) + // If any error fail quickly here. + if err != nil { + return 0, err + } + defer fileReader.Close() + + // Save the file stat. + fileStat, err := fileReader.Stat() + if err != nil { + return 0, err + } + + // Save the file size. + fileSize := fileStat.Size() + + // Check for largest object size allowed. + if fileSize > int64(maxMultipartPutObjectSize) { + return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName) + } + + // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs. + // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers. + if isGoogleEndpoint(c.endpointURL) { + if fileSize > int64(maxSinglePutObjectSize) { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize), + Key: objectName, + BucketName: bucketName, + } + } + // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size. 
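+		// putObjectNoChecksum issues a single PUT without computing MD5/SHA256 sums.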
+ return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil) + } + + // NOTE: S3 doesn't allow anonymous multipart requests. + if isAmazonEndpoint(c.endpointURL) && c.anonymous { + if fileSize > int64(maxSinglePutObjectSize) { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize), + Key: objectName, + BucketName: bucketName, + } + } + // Do not compute MD5 for anonymous requests to Amazon + // S3. Uploads up to 5GiB in size. + return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil) + } + + // Small object upload is initiated for uploads for input data size smaller than 5MiB. + if fileSize < minPartSize { + return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil) + } + // Upload all large objects as multipart. + n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType, nil) + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == "NotImplemented" { + // If size of file is greater than '5GiB' fail. + if fileSize > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil) + } + return n, err + } + return n, nil +} + +// putObjectMultipartFromFile - Creates object from contents of *os.File +// +// NOTE: This function is meant to be used for readers with local +// file as in *os.File. This function resumes by skipping all the +// necessary parts which were already uploaded by verifying them +// against MD5SUM of each individual parts. This function also +// effectively utilizes file system capabilities of reading from +// specific sections and not having to create temporary files. +func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, contentType string, progress io.Reader) (int64, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + + // Get upload id for an object, initiates a new multipart request + // if it cannot find any previously partially uploaded object. + uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType) + if err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var completeMultipartUpload completeMultipartUpload + + // A map of all uploaded parts. + var partsInfo = make(map[int]objectPart) + + // If this session is a continuation of a previous session fetch all + // previously uploaded parts info. + if !isNew { + // Fetch previously upload parts and maximum part size. + partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID) + if err != nil { + return 0, err + } + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := optimalPartInfo(fileSize) + if err != nil { + return 0, err + } + + // Part number always starts with '1'. 
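+	// (S3 part numbers are 1-based; valid values run from 1 through 10000.)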
+ partNumber := 1 + + for partNumber <= totalPartsCount { + // Get a section reader on a particular offset. + sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize) + + // Calculates MD5 and SHA256 sum for a section reader. + var md5Sum, sha256Sum []byte + var prtSize int64 + md5Sum, sha256Sum, prtSize, err = c.computeHash(sectionReader) + if err != nil { + return 0, err + } + + var reader io.Reader + // Update progress reader appropriately to the latest offset + // as we read from the source. + reader = newHook(sectionReader, progress) + + // Verify if part should be uploaded. + if shouldUploadPart(objectPart{ + ETag: hex.EncodeToString(md5Sum), + PartNumber: partNumber, + Size: prtSize, + }, partsInfo) { + // Proceed to upload the part. + var objPart objectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), partNumber, + md5Sum, sha256Sum, prtSize) + if err != nil { + return totalUploadedSize, err + } + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + } else { + // Update the progress reader for the skipped part. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil { + return totalUploadedSize, err + } + } + } + + // Save successfully uploaded size. + totalUploadedSize += prtSize + + // Increment part number. + partNumber++ + } + + // Verify if we uploaded all data. + if totalUploadedSize != fileSize { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName) + } + + // Loop over uploaded parts to save them in a Parts array before completing the multipart request. + for _, part := range partsInfo { + var complPart completePart + complPart.ETag = part.ETag + complPart.PartNumber = part.PartNumber + completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) + } + + // Verify if totalPartsCount is not equal to total list of parts. + if totalPartsCount != len(completeMultipartUpload.Parts) { + return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts)) + } + + // Sort all completed parts. + sort.Sort(completedParts(completeMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go new file mode 100644 index 000000000..ee0019165 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -0,0 +1,379 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "bytes" + "encoding/hex" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" +) + +// Comprehensive put object operation involving multipart resumable uploads. +// +// Following code handles these types of readers. +// +// - *os.File +// - *minio.Object +// - Any reader which has a method 'ReadAt()' +// +// If we exhaust all the known types, code proceeds to use stream as +// is where each part is re-downloaded, checksummed and verified +// before upload. +func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) { + if size > 0 && size >= minPartSize { + // Verify if reader is *os.File, then use file system functionalities. + if isFile(reader) { + return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress) + } + // Verify if reader is *minio.Object or io.ReaderAt. + // NOTE: Verification of object is kept for a specific purpose + // while it is going to be duck typed similar to io.ReaderAt. + // It is to indicate that *minio.Object implements io.ReaderAt. + // and such a functionality is used in the subsequent code + // path. + if isObject(reader) || isReadAt(reader) { + return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress) + } + } + // For any other data size and reader type we do generic multipart + // approach by staging data in temporary files and uploading them. + return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress) +} + +// putObjectStream uploads files bigger than 5MiB, and also supports +// special case where size is unknown i.e '-1'. +func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // A map of all previously uploaded parts. + var partsInfo = make(map[int]objectPart) + + // getUploadID for an object, initiates a new multipart request + // if it cannot find any previously partially uploaded object. + uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType) + if err != nil { + return 0, err + } + + // If This session is a continuation of a previous session fetch all + // previously uploaded parts info. + if !isNew { + // Fetch previously uploaded parts and maximum part size. + partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID) + if err != nil { + return 0, err + } + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := optimalPartInfo(size) + if err != nil { + return 0, err + } + + // Part number always starts with '1'. + partNumber := 1 + + // Initialize a temporary buffer. + tmpBuffer := new(bytes.Buffer) + + for partNumber <= totalPartsCount { + // Calculates MD5 and SHA256 sum while copying partSize bytes + // into tmpBuffer. 
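+		// An io.EOF from hashCopyN marks the final, possibly short, part of a stream.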
+ md5Sum, sha256Sum, prtSize, rErr := c.hashCopyN(tmpBuffer, reader, partSize) + if rErr != nil { + if rErr != io.EOF { + return 0, rErr + } + } + + var reader io.Reader + // Update progress reader appropriately to the latest offset + // as we read from the source. + reader = newHook(tmpBuffer, progress) + + // Verify if part should be uploaded. + if shouldUploadPart(objectPart{ + ETag: hex.EncodeToString(md5Sum), + PartNumber: partNumber, + Size: prtSize, + }, partsInfo) { + // Proceed to upload the part. + var objPart objectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), partNumber, + md5Sum, sha256Sum, prtSize) + if err != nil { + // Reset the temporary buffer upon any error. + tmpBuffer.Reset() + return totalUploadedSize, err + } + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + } else { + // Update the progress reader for the skipped part. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil { + return totalUploadedSize, err + } + } + } + + // Reset the temporary buffer. + tmpBuffer.Reset() + + // Save successfully uploaded size. + totalUploadedSize += prtSize + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if size < 0 && rErr == io.EOF { + break + } + + // Increment part number. + partNumber++ + } + + // Verify if we uploaded all the data. + if size > 0 { + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + + // Loop over uploaded parts to save them in a Parts array before completing the multipart request. + for _, part := range partsInfo { + var complPart completePart + complPart.ETag = part.ETag + complPart.PartNumber = part.PartNumber + complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart) + } + + if size > 0 { + // Verify if totalPartsCount is not equal to total list of parts. + if totalPartsCount != len(complMultipartUpload.Parts) { + return totalUploadedSize, ErrInvalidParts(partNumber, len(complMultipartUpload.Parts)) + } + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. +func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return initiateMultipartUploadResult{}, err + } + if err := isValidObjectName(objectName); err != nil { + return initiateMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploads", "") + + if contentType == "" { + contentType = "application/octet-stream" + } + + // Set ContentType header. + customHeader := make(http.Header) + customHeader.Set("Content-Type", contentType) + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + } + + // Instantiate the request. + req, err := c.newRequest("POST", reqMetadata) + if err != nil { + return initiateMultipartUploadResult{}, err + } + + // Execute the request. 
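+	// A successful response carries the upload ID used by every subsequent uploadPart call.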
+ resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode xml for new multipart upload. + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +// uploadPart - Uploads a part in a multipart upload. +func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.ReadCloser, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return objectPart{}, err + } + if err := isValidObjectName(objectName); err != nil { + return objectPart{}, err + } + if size > maxPartSize { + return objectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName) + } + if size <= -1 { + return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName) + } + if partNumber <= 0 { + return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") + } + if uploadID == "" { + return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.") + } + + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: reader, + contentLength: size, + contentMD5Bytes: md5Sum, + contentSHA256Bytes: sha256Sum, + } + + // Instantiate a request. + req, err := c.newRequest("PUT", reqMetadata) + if err != nil { + return objectPart{}, err + } + // Execute the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return objectPart{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return objectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Once successfully uploaded, return completed part. + objPart := objectPart{} + objPart.Size = size + objPart.PartNumber = partNumber + // Trim off the odd double quotes from ETag in the beginning and end. + objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"") + return objPart, nil +} + +// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. +func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return completeMultipartUploadResult{}, err + } + if err := isValidObjectName(objectName); err != nil { + return completeMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // Marshal complete multipart body. + completeMultipartUploadBytes, err := xml.Marshal(complete) + if err != nil { + return completeMultipartUploadResult{}, err + } + + // Instantiate all the complete multipart buffer. 
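+	// The marshalled XML is SHA256-summed so the request body can be signed with signature V4.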
+ completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: ioutil.NopCloser(completeMultipartUploadBuffer), + contentLength: int64(completeMultipartUploadBuffer.Len()), + contentSHA256Bytes: sum256(completeMultipartUploadBuffer.Bytes()), + } + + // Instantiate the request. + req, err := c.newRequest("POST", reqMetadata) + if err != nil { + return completeMultipartUploadResult{}, err + } + + // Execute the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return completeMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode completed multipart upload response on success. + completeMultipartUploadResult := completeMultipartUploadResult{} + err = xmlDecoder(resp.Body, &completeMultipartUploadResult) + if err != nil { + return completeMultipartUploadResult, err + } + return completeMultipartUploadResult, nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go new file mode 100644 index 000000000..de3b348f8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-progress.go @@ -0,0 +1,105 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import "io" + +// PutObjectWithProgress - With progress. +func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + if reader == nil { + return 0, ErrInvalidArgument("Input reader is invalid, cannot be nil.") + } + + // Size of the object. + var size int64 + + // Get reader size. + size, err = getReaderSize(reader) + if err != nil { + return 0, err + } + + // Check for largest object size allowed. + if size > int64(maxMultipartPutObjectSize) { + return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) + } + + // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. + // So we fall back to single PUT operation with the maximum limit of 5GiB. + if isGoogleEndpoint(c.endpointURL) { + if size <= -1 { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.", + Key: objectName, + BucketName: bucketName, + } + } + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size. 
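+		// Falls through to a single checksum-free PUT, mirroring the FPutObject path above.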
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress) + } + + // NOTE: S3 doesn't allow anonymous multipart requests. + if isAmazonEndpoint(c.endpointURL) && c.anonymous { + if size <= -1 { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: "Content-Length cannot be negative for anonymous requests.", + Key: objectName, + BucketName: bucketName, + } + } + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Do not compute MD5 for anonymous requests to Amazon + // S3. Uploads up to 5GiB in size. + return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress) + } + + // putSmall object. + if size < minPartSize && size > 0 { + return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress) + } + // For all sizes greater than 5MiB do multipart. + n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType, progress) + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == "NotImplemented" { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress) + } + return n, err + } + return n, nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go new file mode 100644 index 000000000..ddb1ab3dc --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-readat.go @@ -0,0 +1,208 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "io" + "io/ioutil" + "sort" +) + +// shouldUploadPartReadAt - verify if part should be uploaded. +func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) bool { + // If part not found part should be uploaded. + uploadedPart, found := objectParts[objPart.PartNumber] + if !found { + return true + } + // if size mismatches part should be uploaded. + if uploadedPart.Size != objPart.Size { + return true + } + return false +} + +// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports reader +// of type which implements io.ReaderAt interface (ReadAt method). +// +// NOTE: This function is meant to be used for all readers which +// implement io.ReaderAt which allows us for resuming multipart +// uploads but reading at an offset, which would avoid re-read the +// data which was already uploaded. 
Internally this function uses +// temporary files for staging all the data, these temporary files are +// cleaned automatically when the caller i.e http client closes the +// stream after uploading all the contents successfully. +func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string, progress io.Reader) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + + // Get upload id for an object, initiates a new multipart request + // if it cannot find any previously partially uploaded object. + uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType) + if err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // A map of all uploaded parts. + var partsInfo = make(map[int]objectPart) + + // Fetch all parts info previously uploaded. + if !isNew { + partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID) + if err != nil { + return 0, err + } + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) + if err != nil { + return 0, err + } + + // Used for readability, lastPartNumber is always + // totalPartsCount. + lastPartNumber := totalPartsCount + + // partNumber always starts with '1'. + partNumber := 1 + + // Initialize a temporary buffer. + tmpBuffer := new(bytes.Buffer) + + // Read defaults to reading at 5MiB buffer. + readBuffer := make([]byte, optimalReadBufferSize) + + // Upload all the missing parts. + for partNumber <= lastPartNumber { + // Verify object if its uploaded. + verifyObjPart := objectPart{ + PartNumber: partNumber, + Size: partSize, + } + // Special case if we see a last part number, save last part + // size as the proper part size. + if partNumber == lastPartNumber { + verifyObjPart = objectPart{ + PartNumber: lastPartNumber, + Size: lastPartSize, + } + } + + // Verify if part should be uploaded. + if !shouldUploadPartReadAt(verifyObjPart, partsInfo) { + // Increment part number when not uploaded. + partNumber++ + if progress != nil { + // Update the progress reader for the skipped part. + if _, err = io.CopyN(ioutil.Discard, progress, verifyObjPart.Size); err != nil { + return 0, err + } + } + continue + } + + // If partNumber was not uploaded we calculate the missing + // part offset and size. For all other part numbers we + // calculate offset based on multiples of partSize. + readOffset := int64(partNumber-1) * partSize + missingPartSize := partSize + + // As a special case if partNumber is lastPartNumber, we + // calculate the offset based on the last part size. + if partNumber == lastPartNumber { + readOffset = (size - lastPartSize) + missingPartSize = lastPartSize + } + + // Get a section reader on a particular offset. + sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize) + + // Calculates MD5 and SHA256 sum for a section reader. + var md5Sum, sha256Sum []byte + var prtSize int64 + md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readBuffer) + if err != nil { + return 0, err + } + + var reader io.Reader + // Update progress reader appropriately to the latest offset + // as we read from the source. 
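+		// newHook reports read progress as the buffered part is re-read for upload.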
+ reader = newHook(tmpBuffer, progress) + + // Proceed to upload the part. + var objPart objectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), + partNumber, md5Sum, sha256Sum, prtSize) + if err != nil { + // Reset the buffer upon any error. + tmpBuffer.Reset() + return 0, err + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Increment part number here after successful part upload. + partNumber++ + + // Reset the buffer. + tmpBuffer.Reset() + } + + // Loop over uploaded parts to save them in a Parts array before completing the multipart request. + for _, part := range partsInfo { + var complPart completePart + complPart.ETag = part.ETag + complPart.PartNumber = part.PartNumber + totalUploadedSize += part.Size + complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart) + } + + // Verify if we uploaded all the data. + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + + // Verify if totalPartsCount is not equal to total list of parts. + if totalPartsCount != len(complMultipartUpload.Parts) { + return totalUploadedSize, ErrInvalidParts(totalPartsCount, len(complMultipartUpload.Parts)) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go new file mode 100644 index 000000000..a09e658f4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -0,0 +1,287 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "os" + "reflect" + "runtime" + "strings" +) + +// getReaderSize - Determine the size of Reader if available. +func getReaderSize(reader io.Reader) (size int64, err error) { + var result []reflect.Value + size = -1 + if reader != nil { + // Verify if there is a method by name 'Size'. + lenFn := reflect.ValueOf(reader).MethodByName("Size") + if lenFn.IsValid() { + if lenFn.Kind() == reflect.Func { + // Call the 'Size' function and save its return value. + result = lenFn.Call([]reflect.Value{}) + if result != nil && len(result) == 1 { + lenValue := result[0] + if lenValue.IsValid() { + switch lenValue.Kind() { + case reflect.Int: + fallthrough + case reflect.Int8: + fallthrough + case reflect.Int16: + fallthrough + case reflect.Int32: + fallthrough + case reflect.Int64: + size = lenValue.Int() + } + } + } + } + } else { + // Fallback to Stat() method, two possible Stat() structs + // exist. 
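+			// Namely *os.File and *minio.Object, both handled below.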
+ switch v := reader.(type) { + case *os.File: + var st os.FileInfo + st, err = v.Stat() + if err != nil { + // Handle this case specially for "windows", + // certain files for example 'Stdin', 'Stdout' and + // 'Stderr' it is not allowed to fetch file information. + if runtime.GOOS == "windows" { + if strings.Contains(err.Error(), "GetFileInformationByHandle") { + return -1, nil + } + } + return + } + // Ignore if input is a directory, throw an error. + if st.Mode().IsDir() { + return -1, ErrInvalidArgument("Input file cannot be a directory.") + } + // Ignore 'Stdin', 'Stdout' and 'Stderr', since they + // represent *os.File type but internally do not + // implement Seekable calls. Ignore them and treat + // them like a stream with unknown length. + switch st.Name() { + case "stdin": + fallthrough + case "stdout": + fallthrough + case "stderr": + return + } + size = st.Size() + case *Object: + var st ObjectInfo + st, err = v.Stat() + if err != nil { + return + } + size = st.Size + } + } + } + // Returns the size here. + return size, err +} + +// completedParts is a collection of parts sortable by their part numbers. +// used for sorting the uploaded parts before completing the multipart request. +type completedParts []completePart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } + +// PutObject creates an object in a bucket. +// +// You must have WRITE permissions on a bucket to create an object. +// +// - For size smaller than 5MiB PutObject automatically does a single atomic Put operation. +// - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation. +// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF. +// Maximum object size that can be uploaded through this operation will be 5TiB. +// +// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. +// So we fall back to single PUT operation with the maximum limit of 5GiB. +// +// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation. +func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) { + return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil) +} + +// putObjectNoChecksum special function used Google Cloud Storage. This special function +// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. +func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + + // Update progress reader appropriately to the latest offset as we + // read from the source. + reader = newHook(reader, progress) + + // This function does not calculate sha256 and md5sum for payload. + // Execute put object. 
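+	// nil md5Sum and sha256Sum leave the request without precomputed checksums.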
+ st, err := c.putObjectDo(bucketName, objectName, ioutil.NopCloser(reader), nil, nil, size, contentType) + if err != nil { + return 0, err + } + if st.Size != size { + return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) + } + return size, nil +} + +// putObjectSingle is a special function for uploading single put object request. +// This special function is used as a fallback when multipart upload fails. +func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // If size is a stream, upload up to 5GiB. + if size <= -1 { + size = maxSinglePutObjectSize + } + var md5Sum, sha256Sum []byte + var readCloser io.ReadCloser + if size <= minPartSize { + // Initialize a new temporary buffer. + tmpBuffer := new(bytes.Buffer) + md5Sum, sha256Sum, size, err = c.hashCopyN(tmpBuffer, reader, size) + readCloser = ioutil.NopCloser(tmpBuffer) + } else { + // Initialize a new temporary file. + var tmpFile *tempFile + tmpFile, err = newTempFile("single$-putobject-single") + if err != nil { + return 0, err + } + md5Sum, sha256Sum, size, err = c.hashCopyN(tmpFile, reader, size) + // Seek back to beginning of the temporary file. + if _, err = tmpFile.Seek(0, 0); err != nil { + return 0, err + } + readCloser = tmpFile + } + // Return error if its not io.EOF. + if err != nil { + if err != io.EOF { + return 0, err + } + } + // Progress the reader to the size. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil { + return size, err + } + } + // Execute put object. + st, err := c.putObjectDo(bucketName, objectName, readCloser, md5Sum, sha256Sum, size, contentType) + if err != nil { + return 0, err + } + if st.Size != size { + return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) + } + return size, nil +} + +// putObjectDo - executes the put object http operation. +// NOTE: You must have WRITE permissions on a bucket to add an object to it. +func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := isValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + + if size <= -1 { + return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName) + } + + if size > maxSinglePutObjectSize { + return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + + if strings.TrimSpace(contentType) == "" { + contentType = "application/octet-stream" + } + + // Set headers. + customHeader := make(http.Header) + customHeader.Set("Content-Type", contentType) + + // Populate request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Bytes: md5Sum, + contentSHA256Bytes: sha256Sum, + } + // Initiate new request. + req, err := c.newRequest("PUT", reqMetadata) + if err != nil { + return ObjectInfo{}, err + } + // Execute the request. 
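+	// The returned ObjectInfo is populated from the response ETag and the known size.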
+ resp, err := c.do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ var metadata ObjectInfo
+ // Trim off the odd double quotes from ETag in the beginning and end.
+ metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
+ // A success here means data was written to server successfully.
+ metadata.Size = size
+
+ // Return here.
+ return metadata, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
new file mode 100644
index 000000000..8f59c15e6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-remove.go
@@ -0,0 +1,167 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net/http"
+ "net/url"
+)
+
+// RemoveBucket deletes the named bucket.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket must be deleted before successfully attempting this request.
+func (c Client) RemoveBucket(bucketName string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ // Instantiate a new request.
+ req, err := c.newRequest("DELETE", requestMetadata{
+ bucketName: bucketName,
+ })
+ if err != nil {
+ return err
+ }
+ // Initiate the request.
+ resp, err := c.do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Remove the location from cache on a successful delete.
+ c.bucketLocCache.Delete(bucketName)
+
+ return nil
+}
+
+// RemoveObject removes an object from a bucket.
+func (c Client) RemoveObject(bucketName, objectName string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+ // Instantiate the request.
+ req, err := c.newRequest("DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ })
+ if err != nil {
+ return err
+ }
+ // Initiate the request.
+ resp, err := c.do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ // DeleteObject always responds with http '204' even for
+ // objects which do not exist. So no need to handle them
+ // specifically.
+ return nil
+}
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
+// Requires explicit authentication; no anonymous requests are allowed for the multipart API.
+func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil { + return err + } + if err := isValidObjectName(objectName); err != nil { + return err + } + // Find multipart upload id of the object to be aborted. + uploadID, err := c.findUploadID(bucketName, objectName) + if err != nil { + return err + } + if uploadID != "" { + // Upload id found, abort the incomplete multipart upload. + err := c.abortMultipartUpload(bucketName, objectName, uploadID) + if err != nil { + return err + } + } + return nil +} + +// abortMultipartUpload aborts a multipart upload for the given +// uploadID, all previously uploaded parts are deleted. +func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + if err := isValidObjectName(objectName); err != nil { + return err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // Instantiate a new DELETE request. + req, err := c.newRequest("DELETE", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + }) + if err != nil { + return err + } + + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + // Abort has no response body, handle it for any errors. + var errorResponse ErrorResponse + switch resp.StatusCode { + case http.StatusNotFound: + // This is needed specifically for abort and it cannot + // be converged into default case. + errorResponse = ErrorResponse{ + Code: "NoSuchUpload", + Message: "The specified multipart upload does not exist.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + default: + return httpRespToErrorResponse(resp, bucketName, objectName) + } + return errorResponse + } + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/api-s3-definitions.go b/vendor/github.com/minio/minio-go/api-s3-definitions.go new file mode 100644 index 000000000..de562e475 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-s3-definitions.go @@ -0,0 +1,197 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/xml" + "time" +) + +// listAllMyBucketsResult container for listBuckets response. +type listAllMyBucketsResult struct { + // Container for one or more buckets. + Buckets struct { + Bucket []BucketInfo + } + Owner owner +} + +// owner container for bucket owner information. +type owner struct { + DisplayName string + ID string +} + +// commonPrefix container for prefix response. +type commonPrefix struct { + Prefix string +} + +// listBucketResult container for listObjects response. 
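+// Decoded from an XML body roughly of the form (standard S3 ListObjects
+// response; element values here are illustrative):
+//
+//   <ListBucketResult>
+//     <Name>bucket</Name>
+//     <MaxKeys>1000</MaxKeys>
+//     <IsTruncated>false</IsTruncated>
+//     <Contents>...</Contents>
+//     <CommonPrefixes><Prefix>photos/</Prefix></CommonPrefixes>
+//   </ListBucketResult>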
+type listBucketResult struct { + // A response can contain CommonPrefixes only if you have + // specified a delimiter. + CommonPrefixes []commonPrefix + // Metadata about each object returned. + Contents []ObjectInfo + Delimiter string + + // Encoding type used to encode object keys in the response. + EncodingType string + + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + Marker string + MaxKeys int64 + Name string + + // When response is truncated (the IsTruncated element value in + // the response is true), you can use the key name in this field + // as marker in the subsequent request to get next set of objects. + // Object storage lists objects in alphabetical order Note: This + // element is returned only if you have delimiter request + // parameter specified. If response does not include the NextMaker + // and it is truncated, you can use the value of the last Key in + // the response as the marker in the subsequent request to get the + // next set of object keys. + NextMarker string + Prefix string +} + +// listMultipartUploadsResult container for ListMultipartUploads response +type listMultipartUploadsResult struct { + Bucket string + KeyMarker string + UploadIDMarker string `xml:"UploadIdMarker"` + NextKeyMarker string + NextUploadIDMarker string `xml:"NextUploadIdMarker"` + EncodingType string + MaxUploads int64 + IsTruncated bool + Uploads []ObjectMultipartInfo `xml:"Upload"` + Prefix string + Delimiter string + // A response can contain CommonPrefixes only if you specify a delimiter. + CommonPrefixes []commonPrefix +} + +// initiator container for who initiated multipart upload. +type initiator struct { + ID string + DisplayName string +} + +// objectPart container for particular part of an object. +type objectPart struct { + // Part number identifies the part. + PartNumber int + + // Date and time the part was uploaded. + LastModified time.Time + + // Entity tag returned when the part was uploaded, usually md5sum + // of the part. + ETag string + + // Size of the uploaded part data. + Size int64 +} + +// listObjectPartsResult container for ListObjectParts response. +type listObjectPartsResult struct { + Bucket string + Key string + UploadID string `xml:"UploadId"` + + Initiator initiator + Owner owner + + StorageClass string + PartNumberMarker int + NextPartNumberMarker int + MaxParts int + + // Indicates whether the returned list of parts is truncated. + IsTruncated bool + ObjectParts []objectPart `xml:"Part"` + + EncodingType string +} + +// initiateMultipartUploadResult container for InitiateMultiPartUpload +// response. +type initiateMultipartUploadResult struct { + Bucket string + Key string + UploadID string `xml:"UploadId"` +} + +// completeMultipartUploadResult container for completed multipart +// upload response. +type completeMultipartUploadResult struct { + Location string + Bucket string + Key string + ETag string +} + +// completePart sub container lists individual part numbers and their +// md5sum, part of completeMultipartUpload. +type completePart struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` + + // Part number identifies the part. + PartNumber int + ETag string +} + +// completeMultipartUpload container for completing multipart upload. 
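+// Marshals to a request body roughly of the form (part numbers and ETags
+// here are illustrative):
+//
+//   <CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+//     <Part><PartNumber>1</PartNumber><ETag>"etag-1"</ETag></Part>
+//     <Part><PartNumber>2</PartNumber><ETag>"etag-2"</ETag></Part>
+//   </CompleteMultipartUpload>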
+type completeMultipartUpload struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` + Parts []completePart `xml:"Part"` +} + +// createBucketConfiguration container for bucket configuration. +type createBucketConfiguration struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` + Location string `xml:"LocationConstraint"` +} + +// grant container for the grantee and his or her permissions. +type grant struct { + // grantee container for DisplayName and ID of the person being + // granted permissions. + Grantee struct { + ID string + DisplayName string + EmailAddress string + Type string + URI string + } + Permission string +} + +// accessControlPolicy contains the elements providing ACL permissions +// for a bucket. +type accessControlPolicy struct { + // accessControlList container for ACL information. + AccessControlList struct { + Grant []grant + } + Owner owner +} diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go new file mode 100644 index 000000000..20f66e8fc --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-stat.go @@ -0,0 +1,125 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + "strconv" + "strings" + "time" +) + +// BucketExists verify if bucket exists and you have permission to access it. +func (c Client) BucketExists(bucketName string) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + // Instantiate a new request. + req, err := c.newRequest("HEAD", requestMetadata{ + bucketName: bucketName, + }) + if err != nil { + return err + } + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// StatObject verifies if object exists and you have permission to access. +func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := isValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + // Instantiate a new request. + req, err := c.newRequest("HEAD", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + }) + if err != nil { + return ObjectInfo{}, err + } + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Trim off the odd double quotes from ETag in the beginning and end. 
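+ // e.g. an ETag header of "d41d8cd98f00b204e9800998ecf8427e" (the quotes
+ // are part of the wire format) becomes the bare hex string.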
+ md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ md5sum = strings.TrimSuffix(md5sum, "\"")
+
+ // Parse content length.
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Content-Length is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ // Parse Last-Modified as http time format.
+ date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
+ if err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Last-Modified time format is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ // Fetch content type if any is present.
+ contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+ // Save object metadata info.
+ var objectStat ObjectInfo
+ objectStat.ETag = md5sum
+ objectStat.Key = objectName
+ objectStat.Size = size
+ objectStat.LastModified = date
+ objectStat.ContentType = contentType
+ return objectStat, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
new file mode 100644
index 000000000..b39934605
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -0,0 +1,533 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "regexp"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// Client implements Amazon S3 compatible methods.
+type Client struct {
+ /// Standard options.
+
+ // AccessKeyID required for authorized requests.
+ accessKeyID string
+ // SecretAccessKey required for authorized requests.
+ secretAccessKey string
+ // Choose a signature type if necessary.
+ signature SignatureType
+ // Set to 'true' if Client has no access and secret keys.
+ anonymous bool
+
+ // User supplied.
+ appInfo struct {
+ appName string
+ appVersion string
+ }
+ endpointURL *url.URL
+
+ // Needs allocation.
+ httpClient *http.Client
+ bucketLocCache *bucketLocationCache
+
+ // Advanced functionality
+ isTraceEnabled bool
+ traceOutput io.Writer
+}
+
+// Global constants.
+const (
+ libraryName = "minio-go"
+ libraryVersion = "0.2.5"
+)
+
+// User Agent should always follow the below style.
+// Please open an issue to discuss any new changes here.
+//
+// Minio (OS; ARCH) LIB/VER APP/VER
+const (
+ libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+ libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// NewV2 - instantiate minio client with Amazon S3 signature version
+// '2' compatibility.
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+ clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+ if err != nil {
+ return nil, err
+ }
+ // Set to use signature version '2'.
+ clnt.signature = SignatureV2
+ return clnt, nil
+}
+
+// NewV4 - instantiate minio client with Amazon S3 signature version
+// '4' compatibility.
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+ clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+ if err != nil {
+ return nil, err
+ }
+ // Set to use signature version '4'.
+ clnt.signature = SignatureV4
+ return clnt, nil
+}
+
+// New - instantiate minio client, automatically selecting the
+// appropriate signature version for the endpoint.
+func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+ clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+ if err != nil {
+ return nil, err
+ }
+ // Google cloud storage should be set to signature V2, force it if
+ // not.
+ if isGoogleEndpoint(clnt.endpointURL) {
+ clnt.signature = SignatureV2
+ }
+ // If Amazon S3, set to signature v4.
+ if isAmazonEndpoint(clnt.endpointURL) {
+ clnt.signature = SignatureV4
+ }
+ return clnt, nil
+}
+
+func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
+ // Construct the endpoint URL.
+ endpointURL, err := getEndpointURL(endpoint, insecure)
+ if err != nil {
+ return nil, err
+ }
+
+ // Instantiate a new Client.
+ clnt := new(Client)
+ clnt.accessKeyID = accessKeyID
+ clnt.secretAccessKey = secretAccessKey
+ if clnt.accessKeyID == "" || clnt.secretAccessKey == "" {
+ clnt.anonymous = true
+ }
+
+ // Save endpoint URL for future use.
+ clnt.endpointURL = endpointURL
+
+ // Instantiate http client and bucket location cache.
+ clnt.httpClient = &http.Client{}
+ clnt.bucketLocCache = newBucketLocationCache()
+
+ // Return.
+ return clnt, nil
+}
+
+// SetAppInfo - add application details to user agent.
+func (c *Client) SetAppInfo(appName string, appVersion string) {
+ // If app name and version are not set, we do not set a new
+ // user agent.
+ if appName != "" && appVersion != "" {
+ c.appInfo = struct {
+ appName string
+ appVersion string
+ }{}
+ c.appInfo.appName = appName
+ c.appInfo.appVersion = appVersion
+ }
+}
+
+// SetCustomTransport - set new custom transport.
+func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
+ // Set this to override the default transport
+ // ``http.DefaultTransport``.
+ //
+ // This transport is usually needed for debugging OR to add your
+ // own custom TLS certificates on the client transport, for custom
+ // CAs and certs which are not part of a standard certificate
+ // authority. Follow this example:
+ //
+ // tr := &http.Transport{
+ // TLSClientConfig: &tls.Config{RootCAs: pool},
+ // DisableCompression: true,
+ // }
+ // api.SetCustomTransport(tr)
+ //
+ if c.httpClient != nil {
+ c.httpClient.Transport = customHTTPTransport
+ }
+}
+
+// TraceOn - enable HTTP tracing.
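+// A minimal usage sketch, assuming a constructed client c:
+//
+//   c.TraceOn(os.Stderr)
+//   defer c.TraceOff()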
+func (c *Client) TraceOn(outputStream io.Writer) error {
+ // If outputStream is nil then default to os.Stdout.
+ if outputStream == nil {
+ outputStream = os.Stdout
+ }
+ // Sets a new output stream.
+ c.traceOutput = outputStream
+
+ // Enable tracing.
+ c.isTraceEnabled = true
+ return nil
+}
+
+// TraceOff - disable HTTP tracing.
+func (c *Client) TraceOff() {
+ // Disable tracing.
+ c.isTraceEnabled = false
+}
+
+// requestMetadata - is a container for all the values to make a
+// request.
+type requestMetadata struct {
+ // If set newRequest presigns the URL.
+ presignURL bool
+
+ // User supplied.
+ bucketName string
+ objectName string
+ queryValues url.Values
+ customHeader http.Header
+ expires int64
+
+ // Generated by our internal code.
+ bucketLocation string
+ contentBody io.ReadCloser
+ contentLength int64
+ contentSHA256Bytes []byte
+ contentMD5Bytes []byte
+}
+
+// Filter out the signature value from the Authorization header.
+func (c Client) filterSignature(req *http.Request) {
+ // For anonymous requests, no need to filter.
+ if c.anonymous {
+ return
+ }
+ // Handle if Signature V2.
+ if c.signature.isV2() {
+ // Set a temporary redacted auth
+ req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
+ return
+ }
+
+ /// Signature V4 authorization header.
+
+ // Save the original auth.
+ origAuth := req.Header.Get("Authorization")
+ // Strip out accessKeyID from:
+ // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+ regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
+ newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+ // Strip out 256-bit signature from: Signature=<256-bit signature>
+ regSign := regexp.MustCompile("Signature=([0-9a-f]+)")
+ newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+
+ // Set a temporary redacted auth
+ req.Header.Set("Authorization", newAuth)
+ return
+}
+
+// dumpHTTP - dump HTTP request and response.
+func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
+ // Starts http dump.
+ _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
+ if err != nil {
+ return err
+ }
+
+ // Filter out the Signature field from the Authorization header.
+ c.filterSignature(req)
+
+ // Only display request header.
+ reqTrace, err := httputil.DumpRequestOut(req, false)
+ if err != nil {
+ return err
+ }
+
+ // Write request to trace output.
+ _, err = fmt.Fprint(c.traceOutput, string(reqTrace))
+ if err != nil {
+ return err
+ }
+
+ // Only display response header.
+ var respTrace []byte
+
+ // For errors we make sure to dump the response body as well.
+ if resp.StatusCode != http.StatusOK &&
+ resp.StatusCode != http.StatusPartialContent &&
+ resp.StatusCode != http.StatusNoContent {
+ respTrace, err = httputil.DumpResponse(resp, true)
+ if err != nil {
+ return err
+ }
+ } else {
+ // WORKAROUND for https://github.com/golang/go/issues/13942.
+ // httputil.DumpResponse does not print response headers for
+ // all successful calls which have response ContentLength set
+ // to zero. Keep this workaround until the above bug is fixed.
+ if resp.ContentLength == 0 {
+ var buffer bytes.Buffer
+ if err := resp.Header.Write(&buffer); err != nil {
+ return err
+ }
+ respTrace = buffer.Bytes()
+ respTrace = append(respTrace, []byte("\r\n")...)
+ } else {
+ respTrace, err = httputil.DumpResponse(resp, false)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ // Write response to trace output.
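+ // The trailing CRLF is trimmed below so the END marker is not
+ // preceded by a blank line.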
+ _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) + if err != nil { + return err + } + + // Ends the http dump. + _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") + if err != nil { + return err + } + + // Returns success. + return nil +} + +// do - execute http request. +func (c Client) do(req *http.Request) (*http.Response, error) { + // execute the request. + resp, err := c.httpClient.Do(req) + if err != nil { + return resp, err + } + // If trace is enabled, dump http request and response. + if c.isTraceEnabled { + err = c.dumpHTTP(req, resp) + if err != nil { + return nil, err + } + } + return resp, nil +} + +// newRequest - instantiate a new HTTP request for a given method. +func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) { + // If no method is supplied default to 'POST'. + if method == "" { + method = "POST" + } + + // Gather location only if bucketName is present. + location := "us-east-1" // Default all other requests to "us-east-1". + if metadata.bucketName != "" { + location, err = c.getBucketLocation(metadata.bucketName) + if err != nil { + return nil, err + } + } + + // Save location. + metadata.bucketLocation = location + + // Construct a new target URL. + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues) + if err != nil { + return nil, err + } + + // Initialize a new HTTP request for the method. + req, err = http.NewRequest(method, targetURL.String(), nil) + if err != nil { + return nil, err + } + + // Generate presign url if needed, return right here. + if metadata.expires != 0 && metadata.presignURL { + if c.anonymous { + return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.") + } + if c.signature.isV2() { + // Presign URL with signature v2. + req = preSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires) + } else { + // Presign URL with signature v4. + req = preSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires) + } + return req, nil + } + + // Set content body if available. + if metadata.contentBody != nil { + req.Body = metadata.contentBody + } + + // set UserAgent for the request. + c.setUserAgent(req) + + // Set all headers. + for k, v := range metadata.customHeader { + req.Header.Set(k, v[0]) + } + + // set incoming content-length. + if metadata.contentLength > 0 { + req.ContentLength = metadata.contentLength + } + + // Set sha256 sum only for non anonymous credentials. + if !c.anonymous { + // set sha256 sum for signature calculation only with + // signature version '4'. + if c.signature.isV4() { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) + if metadata.contentSHA256Bytes != nil { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSHA256Bytes)) + } + } + } + + // set md5Sum for content protection. + if metadata.contentMD5Bytes != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) + } + + // Sign the request for all authenticated requests. + if !c.anonymous { + if c.signature.isV2() { + // Add signature version '2' authorization header. + req = signV2(*req, c.accessKeyID, c.secretAccessKey) + } else if c.signature.isV4() { + // Add signature version '4' authorization header. + req = signV4(*req, c.accessKeyID, c.secretAccessKey, location) + } + } + + // Return request. + return req, nil +} + +// set User agent. 
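+// e.g. "Minio (linux; amd64) minio-go/0.2.5", with "APP/VER" appended
+// once SetAppInfo has been called.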
+func (c Client) setUserAgent(req *http.Request) { + req.Header.Set("User-Agent", libraryUserAgent) + if c.appInfo.appName != "" && c.appInfo.appVersion != "" { + req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) + } +} + +// makeTargetURL make a new target url. +func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) { + // Save host. + host := c.endpointURL.Host + // For Amazon S3 endpoint, try to fetch location based endpoint. + if isAmazonEndpoint(c.endpointURL) { + // Fetch new host based on the bucket location. + host = getS3Endpoint(bucketLocation) + } + // Save scheme. + scheme := c.endpointURL.Scheme + + urlStr := scheme + "://" + host + "/" + // Make URL only if bucketName is available, otherwise use the + // endpoint URL. + if bucketName != "" { + // Save if target url will have buckets which suppport virtual host. + isVirtualHostStyle := isVirtualHostSupported(c.endpointURL, bucketName) + + // If endpoint supports virtual host style use that always. + // Currently only S3 and Google Cloud Storage would support + // virtual host style. + if isVirtualHostStyle { + urlStr = scheme + "://" + bucketName + "." + host + "/" + if objectName != "" { + urlStr = urlStr + urlEncodePath(objectName) + } + } else { + // If not fall back to using path style. + urlStr = urlStr + bucketName + if objectName != "" { + urlStr = urlStr + "/" + urlEncodePath(objectName) + } + } + } + // If there are any query values, add them to the end. + if len(queryValues) > 0 { + urlStr = urlStr + "?" + queryValues.Encode() + } + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + return u, nil +} + +// CloudStorageClient - Cloud Storage Client interface. +type CloudStorageClient interface { + // Bucket Read/Write/Stat operations. + MakeBucket(bucketName string, cannedACL BucketACL, location string) error + BucketExists(bucketName string) error + RemoveBucket(bucketName string) error + SetBucketACL(bucketName string, cannedACL BucketACL) error + GetBucketACL(bucketName string) (BucketACL, error) + + ListBuckets() ([]BucketInfo, error) + ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo + ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo + + // Object Read/Write/Stat operations. + GetObject(bucketName, objectName string) (reader *Object, err error) + PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) + StatObject(bucketName, objectName string) (ObjectInfo, error) + RemoveObject(bucketName, objectName string) error + RemoveIncompleteUpload(bucketName, objectName string) error + + // File to Object API. + FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) + FGetObject(bucketName, objectName, filePath string) error + + // PutObjectWithProgress for progress. + PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) + + // Presigned operations. + PresignedGetObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error) + PresignedPutObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error) + PresignedPostPolicy(*PostPolicy) (formData map[string]string, err error) + + // Application info. + SetAppInfo(appName, appVersion string) + + // Set custom transport. 
+ SetCustomTransport(customTransport http.RoundTripper) + + // HTTP tracing methods. + TraceOn(traceOutput io.Writer) error + TraceOff() +} diff --git a/vendor/github.com/minio/minio-go/api_functional_v2_test.go b/vendor/github.com/minio/minio-go/api_functional_v2_test.go new file mode 100644 index 000000000..1c7da6826 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api_functional_v2_test.go @@ -0,0 +1,945 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio_test + +import ( + "bytes" + crand "crypto/rand" + "errors" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "testing" + "time" + + "github.com/minio/minio-go" +) + +func TestGetObjectClosedTwiceV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate data more than 32K + buf := make([]byte, rand.Intn(1<<20)+32*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano())) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", + len(buf), st.Size) + } + if err := r.Close(); err != nil { + t.Fatal("Error:", err) + } + if err := r.Close(); err == nil { + t.Fatal("Error: object is already closed, should return error") + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests removing partially uploaded objects. 
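+// The writer side of an io.Pipe is closed mid-stream with a sentinel
+// error, and the test asserts that PutObject surfaces exactly that error.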
+func TestRemovePartiallyUploadedV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping function tests for short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.NewV2( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + reader, writer := io.Pipe() + go func() { + i := 0 + for i < 25 { + _, err = io.CopyN(writer, crand.Reader, 128*1024) + if err != nil { + t.Fatal("Error:", err, bucketName) + } + i++ + } + writer.CloseWithError(errors.New("Proactively closed to be verified later.")) + }() + + objectName := bucketName + "-resumable" + _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream") + if err == nil { + t.Fatal("Error: PutObject should fail.") + } + if err.Error() != "Proactively closed to be verified later." { + t.Fatal("Error:", err) + } + err = c.RemoveIncompleteUpload(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests resumable put object cloud to cloud. +func TestResumbalePutObjectV2(t *testing.T) { + // By passing 'go test -short' skips these tests. + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Create a temporary file. + file, err := ioutil.TempFile(os.TempDir(), "resumable") + if err != nil { + t.Fatal("Error:", err) + } + + // Copy 11MiB worth of random data. + n, err := io.CopyN(file, crand.Reader, 11*1024*1024) + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + t.Fatal("Error:", err) + } + + // New object name. + objectName := bucketName + "-resumable" + + // Upload the file. + n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream") + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + // Get the uploaded object. + reader, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err) + } + + // Upload now cloud to cloud. 
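+ // 'reader' is the *minio.Object returned by GetObject above, so the
+ // data is streamed back out through this client (cloud to cloud).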
+ n, err = c.PutObject(bucketName, objectName+"-put", reader, "application/octest-stream") + if err != nil { + t.Fatal("Error:", err) + } + + // Get object info. + objInfo, err := reader.Stat() + if err != nil { + t.Fatal("Error:", err) + } + if n != objInfo.Size { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", objInfo.Size, n) + } + + // Remove all temp files, objects and bucket. + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveObject(bucketName, objectName+"-put") + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + err = os.Remove(file.Name()) + if err != nil { + t.Fatal("Error:", err) + } + +} + +// Tests resumable file based put object multipart upload. +func TestResumableFPutObjectV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + file, err := ioutil.TempFile(os.TempDir(), "resumable") + if err != nil { + t.Fatal("Error:", err) + } + + n, err := io.CopyN(file, crand.Reader, 11*1024*1024) + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + objectName := bucketName + "-resumable" + + n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream") + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + // Close the file pro-actively for windows. + file.Close() + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + err = os.Remove(file.Name()) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests resumable put object multipart upload. +func TestResumablePutObjectV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // make a new bucket. 
+ err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // generate 11MB + buf := make([]byte, 11*1024*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + objectName := bucketName + "-resumable" + reader := bytes.NewReader(buf) + n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests get object ReaderSeeker interface methods. +func TestGetObjectReadSeekFunctionalV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate data more than 32K + buf := make([]byte, rand.Intn(1<<20)+32*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano())) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", + len(buf), st.Size) + } + + offset := int64(2048) + n, err = r.Seek(offset, 0) + if err != nil { + t.Fatal("Error:", err, offset) + } + if n != offset { + t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n", + offset, n) + } + n, err = r.Seek(0, 1) + if err != nil { + t.Fatal("Error:", err) + } + if n != offset { + t.Fatalf("Error: number of current seek does not match, want %v, got %v\n", + offset, n) + } + _, err = r.Seek(offset, 2) + if err == nil { + t.Fatal("Error: seek on positive offset for whence '2' should error out") + } + n, err = r.Seek(-offset, 2) + if err != nil { + t.Fatal("Error:", err) + } + if n != 0 { + t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n) + } + var buffer bytes.Buffer + if _, err = io.CopyN(&buffer, r, st.Size); err != nil { + t.Fatal("Error:", err) + } + if !bytes.Equal(buf, buffer.Bytes()) { + t.Fatal("Error: Incorrect read bytes v/s original buffer.") + } + err = c.RemoveObject(bucketName, 
objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests get object ReaderAt interface methods. +func TestGetObjectReadAtFunctionalV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate data more than 32K + buf := make([]byte, rand.Intn(1<<20)+32*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano())) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", + len(buf), st.Size) + } + + offset := int64(2048) + + // Read directly + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + m, err := r.ReadAt(buf2, offset) + if err != nil { + t.Fatal("Error:", err, st.Size, len(buf2), offset) + } + if m != len(buf2) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2)) + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + t.Fatal("Error: Incorrect read between two ReadAt from same offset.") + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + t.Fatal("Error:", err, st.Size, len(buf3), offset) + } + if m != len(buf3) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3)) + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + t.Fatal("Error: Incorrect read between two ReadAt from same offset.") + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + t.Fatal("Error:", err, st.Size, len(buf4), offset) + } + if m != len(buf4) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4)) + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + t.Fatal("Error: Incorrect read between two ReadAt from same offset.") + } + + buf5 := make([]byte, n) + // Read the whole object. 
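+ // buf5 matches the object size exactly, so ReadAt may return io.EOF
+ // together with a full read; the checks below treat that as success.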
+ m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + t.Fatal("Error:", err, len(buf5)) + } + } + if m != len(buf5) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5)) + } + if !bytes.Equal(buf, buf5) { + t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.") + } + + buf6 := make([]byte, n+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + t.Fatal("Error:", err, len(buf6)) + } + } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests comprehensive list of all methods. +func TestFunctionalV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano())) + file, err := os.Create(fileName) + if err != nil { + t.Fatal("Error:", err) + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + t.Fatal("Error:", err) + } + } + file.Close() + + // Verify if bucket exits and you have access. + err = c.BucketExists(bucketName) + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Make the bucket 'public read/write'. + err = c.SetBucketACL(bucketName, "public-read-write") + if err != nil { + t.Fatal("Error:", err) + } + + // Get the previously set acl. + acl, err := c.GetBucketACL(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + // ACL must be 'public read/write'. + if acl != minio.BucketACL("public-read-write") { + t.Fatal("Error:", acl) + } + + // List all buckets. + buckets, err := c.ListBuckets() + if len(buckets) == 0 { + t.Fatal("Error: list buckets cannot be empty", buckets) + } + if err != nil { + t.Fatal("Error:", err) + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. 
+ if !bucketFound { + t.Fatal("Error: bucket ", bucketName, "not found") + } + + objectName := bucketName + "unique" + + // Generate data + buf := make([]byte, rand.Intn(1<<19)) + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error: ", err) + } + + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "") + if err != nil { + t.Fatal("Error: ", err) + } + if n != int64(len(buf)) { + t.Fatal("Error: bad length ", n, len(buf)) + } + + n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName+"-nolength") + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. + for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + t.Fatal("Error: object " + objectName + " not found.") + } + + incompObjNotFound := true + for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + t.Fatal("Error: unexpected dangling incomplete upload found.") + } + + newReader, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + t.Fatal("Error: ", err) + } + + if !bytes.Equal(newReadBytes, buf) { + t.Fatal("Error: bytes mismatch.") + } + + err = c.FGetObject(bucketName, objectName, fileName+"-f") + if err != nil { + t.Fatal("Error: ", err) + } + + presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second) + if err != nil { + t.Fatal("Error: ", err) + } + + resp, err := http.Get(presignedGetURL) + if err != nil { + t.Fatal("Error: ", err) + } + if resp.StatusCode != http.StatusOK { + t.Fatal("Error: ", resp.Status) + } + newPresignedBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal("Error: ", err) + } + if !bytes.Equal(newPresignedBytes, buf) { + t.Fatal("Error: bytes mismatch.") + } + + presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + t.Fatal("Error: ", err) + } + buf = make([]byte, rand.Intn(1<<20)) + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error: ", err) + } + req, err := http.NewRequest("PUT", presignedPutURL, bytes.NewReader(buf)) + if err != nil { + t.Fatal("Error: ", err) + } + httpClient := &http.Client{} + resp, err = httpClient.Do(req) + if err != nil { + t.Fatal("Error: ", err) + } + + newReader, err = c.GetObject(bucketName, objectName+"-presigned") + if err != nil { + t.Fatal("Error: ", err) + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + t.Fatal("Error: ", err) + } + + if !bytes.Equal(newReadBytes, buf) { + t.Fatal("Error: bytes mismatch.") + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveObject(bucketName, objectName+"-f") + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveObject(bucketName, objectName+"-nolength") + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveObject(bucketName, objectName+"-presigned") + if err != nil { + 
t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + err = c.RemoveBucket(bucketName) + if err == nil { + t.Fatal("Error:") + } + if err.Error() != "The specified bucket does not exist" { + t.Fatal("Error: ", err) + } + if err = os.Remove(fileName); err != nil { + t.Fatal("Error: ", err) + } + if err = os.Remove(fileName + "-f"); err != nil { + t.Fatal("Error: ", err) + } +} diff --git a/vendor/github.com/minio/minio-go/api_functional_v4_test.go b/vendor/github.com/minio/minio-go/api_functional_v4_test.go new file mode 100644 index 000000000..ba7b7614b --- /dev/null +++ b/vendor/github.com/minio/minio-go/api_functional_v4_test.go @@ -0,0 +1,1026 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio_test + +import ( + "bytes" + crand "crypto/rand" + "errors" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "testing" + "time" + + "github.com/minio/minio-go" +) + +const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" +const ( + letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return string(b[0:30]) +} + +// Tests various bucket supported formats. +func TestMakeBucketRegions(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(bucketName, "private", "eu-central-1"); err != nil { + t.Fatal("Error:", err, bucketName) + } + + if err = c.RemoveBucket(bucketName); err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + if err = c.MakeBucket(bucketName+".withperiod", "private", "us-west-2"); err != nil { + t.Fatal("Error:", err, bucketName+".withperiod") + } + + // Remove the newly created bucket. + if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil { + t.Fatal("Error:", err, bucketName+".withperiod") + } +} + +// Test get object reader to not throw error on being closed twice. 
+func TestGetObjectClosedTwice(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate data more than 32K + buf := make([]byte, rand.Intn(1<<20)+32*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano())) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", + len(buf), st.Size) + } + if err := r.Close(); err != nil { + t.Fatal("Error:", err) + } + if err := r.Close(); err == nil { + t.Fatal("Error: object is already closed, should return error") + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests removing partially uploaded objects. +func TestRemovePartiallyUploaded(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + reader, writer := io.Pipe() + go func() { + i := 0 + for i < 25 { + _, err = io.CopyN(writer, crand.Reader, 128*1024) + if err != nil { + t.Fatal("Error:", err, bucketName) + } + i++ + } + err = writer.CloseWithError(errors.New("Proactively closed to be verified later.")) + if err != nil { + t.Fatal("Error:", err) + } + }() + + objectName := bucketName + "-resumable" + _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream") + if err == nil { + t.Fatal("Error: PutObject should fail.") + } + if err.Error() != "Proactively closed to be verified later."
{ + t.Fatal("Error:", err) + } + err = c.RemoveIncompleteUpload(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests resumable put object cloud to cloud. +func TestResumbalePutObject(t *testing.T) { + // By passing 'go test -short' skips these tests. + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Create a temporary file. + file, err := ioutil.TempFile(os.TempDir(), "resumable") + if err != nil { + t.Fatal("Error:", err) + } + + // Copy 11MiB worth of random data. + n, err := io.CopyN(file, crand.Reader, 11*1024*1024) + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + t.Fatal("Error:", err) + } + + // New object name. + objectName := bucketName + "-resumable" + + // Upload the file. + n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream") + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + // Get the uploaded object. + reader, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err) + } + + // Upload now cloud to cloud. + n, err = c.PutObject(bucketName, objectName+"-put", reader, "application/octest-stream") + if err != nil { + t.Fatal("Error:", err) + } + + // Get object info. + objInfo, err := reader.Stat() + if err != nil { + t.Fatal("Error:", err) + } + if n != objInfo.Size { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", objInfo.Size, n) + } + + // Remove all temp files, objects and bucket. + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveObject(bucketName, objectName+"-put") + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + err = os.Remove(file.Name()) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests resumable file based put object multipart upload. +func TestResumableFPutObject(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. 
+ // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + file, err := ioutil.TempFile(os.TempDir(), "resumable") + if err != nil { + t.Fatal("Error:", err) + } + + n, err := io.CopyN(file, crand.Reader, 11*1024*1024) + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + // Close the file pro-actively for windows. + err = file.Close() + if err != nil { + t.Fatal("Error:", err) + } + + objectName := bucketName + "-resumable" + + n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream") + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + err = os.Remove(file.Name()) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests resumable put object multipart upload. +func TestResumablePutObject(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate 11MB + buf := make([]byte, 11*1024*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + objectName := bucketName + "-resumable" + reader := bytes.NewReader(buf) + n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests get object ReaderSeeker interface methods. +func TestGetObjectReadSeekFunctional(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate data more than 32K + buf := make([]byte, rand.Intn(1<<20)+32*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano())) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", + len(buf), st.Size) + } + + offset := int64(2048) + n, err = r.Seek(offset, 0) + if err != nil { + t.Fatal("Error:", err, offset) + } + if n != offset { + t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n", + offset, n) + } + n, err = r.Seek(0, 1) + if err != nil { + t.Fatal("Error:", err) + } + if n != offset { + t.Fatalf("Error: number of current seek does not match, want %v, got %v\n", + offset, n) + } + _, err = r.Seek(offset, 2) + if err == nil { + t.Fatal("Error: seek on positive offset for whence '2' should error out") + } + n, err = r.Seek(-offset, 2) + if err != nil { + t.Fatal("Error:", err) + } + if n != 0 { + t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n) + } + var buffer bytes.Buffer + if _, err = io.CopyN(&buffer, r, st.Size); err != nil { + t.Fatal("Error:", err) + } + if !bytes.Equal(buf, buffer.Bytes()) { + t.Fatal("Error: Incorrect read bytes v/s original buffer.") + } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests get object ReaderAt interface methods. +func TestGetObjectReadAtFunctional(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. 
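+ // (Note: MakeBucket in this vintage of minio-go takes (bucketName, acl, + // region), as in the calls throughout these tests; passing an empty region + // string is assumed to default to "us-east-1".)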
+ err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate data more than 32K + buf := make([]byte, rand.Intn(1<<20)+32*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano())) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", + len(buf), st.Size) + } + + offset := int64(2048) + + // Read directly + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + m, err := r.ReadAt(buf2, offset) + if err != nil { + t.Fatal("Error:", err, st.Size, len(buf2), offset) + } + if m != len(buf2) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", len(buf2), m) + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + t.Fatal("Error: Incorrect read between two ReadAt from same offset.") + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + t.Fatal("Error:", err, st.Size, len(buf3), offset) + } + if m != len(buf3) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", len(buf3), m) + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + t.Fatal("Error: Incorrect read between two ReadAt from same offset.") + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + t.Fatal("Error:", err, st.Size, len(buf4), offset) + } + if m != len(buf4) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", len(buf4), m) + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + t.Fatal("Error: Incorrect read between two ReadAt from same offset.") + } + + buf5 := make([]byte, n) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + t.Fatal("Error:", err, len(buf5)) + } + } + if m != len(buf5) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", len(buf5), m) + } + if !bytes.Equal(buf, buf5) { + t.Fatal("Error: data read in GetObject does not match what was previously uploaded.") + } + + buf6 := make([]byte, n+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + t.Fatal("Error:", err, len(buf6)) + } + } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests comprehensive list of all methods. +func TestFunctional(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent.
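+ // (Note: SetAppInfo is assumed to append "name/version" to the library's + // default User-Agent header on all subsequent requests.)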
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano())) + file, err := os.Create(fileName) + if err != nil { + t.Fatal("Error:", err) + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + t.Fatal("Error:", err) + } + } + file.Close() + + // Verify if bucket exits and you have access. + err = c.BucketExists(bucketName) + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Make the bucket 'public read/write'. + err = c.SetBucketACL(bucketName, "public-read-write") + if err != nil { + t.Fatal("Error:", err) + } + + // Get the previously set acl. + acl, err := c.GetBucketACL(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + // ACL must be 'public read/write'. + if acl != minio.BucketACL("public-read-write") { + t.Fatal("Error:", acl) + } + + // List all buckets. + buckets, err := c.ListBuckets() + if len(buckets) == 0 { + t.Fatal("Error: list buckets cannot be empty", buckets) + } + if err != nil { + t.Fatal("Error:", err) + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + t.Fatal("Error: bucket ", bucketName, "not found") + } + + objectName := bucketName + "unique" + + // Generate data + buf := make([]byte, rand.Intn(1<<19)) + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error: ", err) + } + + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "") + if err != nil { + t.Fatal("Error: ", err) + } + if n != int64(len(buf)) { + t.Fatal("Error: bad length ", n, len(buf)) + } + + n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName+"-nolength") + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. 
+ for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + t.Fatal("Error: object " + objectName + " not found.") + } + + incompObjNotFound := true + for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + t.Fatal("Error: unexpected dangling incomplete upload found.") + } + + newReader, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + t.Fatal("Error: ", err) + } + + if !bytes.Equal(newReadBytes, buf) { + t.Fatal("Error: bytes mismatch.") + } + + err = c.FGetObject(bucketName, objectName, fileName+"-f") + if err != nil { + t.Fatal("Error: ", err) + } + + presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second) + if err != nil { + t.Fatal("Error: ", err) + } + + resp, err := http.Get(presignedGetURL) + if err != nil { + t.Fatal("Error: ", err) + } + if resp.StatusCode != http.StatusOK { + t.Fatal("Error: ", resp.Status) + } + newPresignedBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal("Error: ", err) + } + if !bytes.Equal(newPresignedBytes, buf) { + t.Fatal("Error: bytes mismatch.") + } + + presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + t.Fatal("Error: ", err) + } + buf = make([]byte, rand.Intn(1<<20)) + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error: ", err) + } + req, err := http.NewRequest("PUT", presignedPutURL, bytes.NewReader(buf)) + if err != nil { + t.Fatal("Error: ", err) + } + httpClient := &http.Client{} + resp, err = httpClient.Do(req) + if err != nil { + t.Fatal("Error: ", err) + } + + newReader, err = c.GetObject(bucketName, objectName+"-presigned") + if err != nil { + t.Fatal("Error: ", err) + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + t.Fatal("Error: ", err) + } + + if !bytes.Equal(newReadBytes, buf) { + t.Fatal("Error: bytes mismatch.") + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveObject(bucketName, objectName+"-f") + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveObject(bucketName, objectName+"-nolength") + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveObject(bucketName, objectName+"-presigned") + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + err = c.RemoveBucket(bucketName) + if err == nil { + t.Fatal("Error: RemoveBucket should fail on a bucket that was already removed.") + } + if err.Error() != "The specified bucket does not exist" { + t.Fatal("Error: ", err) + } + if err = os.Remove(fileName); err != nil { + t.Fatal("Error: ", err) + } + if err = os.Remove(fileName + "-f"); err != nil { + t.Fatal("Error: ", err) + } +} diff --git a/vendor/github.com/minio/minio-go/api_unit_test.go b/vendor/github.com/minio/minio-go/api_unit_test.go new file mode 100644 index 000000000..2afc666d8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api_unit_test.go @@ -0,0 +1,583 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "testing" +) + +type customReader struct{} + +func (c *customReader) Read(p []byte) (n int, err error) { + return 0, nil +} + +func (c *customReader) Size() (n int64) { + return 10 +} + +// Tests getReaderSize() for various Reader types. +func TestGetReaderSize(t *testing.T) { + var reader io.Reader + size, err := getReaderSize(reader) + if err != nil { + t.Fatal("Error:", err) + } + if size != -1 { + t.Fatal("Reader shouldn't have any length.") + } + + bytesReader := bytes.NewReader([]byte("Hello World")) + size, err = getReaderSize(bytesReader) + if err != nil { + t.Fatal("Error:", err) + } + if size != int64(len("Hello World")) { + t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World")) + } + + size, err = getReaderSize(new(customReader)) + if err != nil { + t.Fatal("Error:", err) + } + if size != int64(10) { + t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10) + } + + stringsReader := strings.NewReader("Hello World") + size, err = getReaderSize(stringsReader) + if err != nil { + t.Fatal("Error:", err) + } + if size != int64(len("Hello World")) { + t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World")) + } + + // Create request channel. + reqCh := make(chan readRequest) + // Create response channel. + resCh := make(chan readResponse) + // Create done channel. + doneCh := make(chan struct{}) + // objectInfo. + objectInfo := ObjectInfo{Size: 10} + objectReader := newObject(reqCh, resCh, doneCh, objectInfo) + defer objectReader.Close() + + size, err = getReaderSize(objectReader) + if err != nil { + t.Fatal("Error:", err) + } + if size != int64(10) { + t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10) + } + + fileReader, err := ioutil.TempFile(os.TempDir(), "prefix") + if err != nil { + t.Fatal("Error:", err) + } + defer fileReader.Close() + defer os.RemoveAll(fileReader.Name()) + + size, err = getReaderSize(fileReader) + if err != nil { + t.Fatal("Error:", err) + } + if size == -1 { + t.Fatal("Reader length for file cannot be -1.") + } + + // Verify for standard input, output and error file descriptors. + size, err = getReaderSize(os.Stdin) + if err != nil { + t.Fatal("Error:", err) + } + if size != -1 { + t.Fatal("Stdin should have length of -1.") + } + size, err = getReaderSize(os.Stdout) + if err != nil { + t.Fatal("Error:", err) + } + if size != -1 { + t.Fatal("Stdout should have length of -1.") + } + size, err = getReaderSize(os.Stderr) + if err != nil { + t.Fatal("Error:", err) + } + if size != -1 { + t.Fatal("Stderr should have length of -1.") + } + file, err := os.Open(os.TempDir()) + if err != nil { + t.Fatal("Error:", err) + } + defer file.Close() + _, err = getReaderSize(file) + if err == nil { + t.Fatal("Input file as directory should throw an error.") + } +} + +// Tests valid hosts for location. 
+func TestValidBucketLocation(t *testing.T) { + s3Hosts := []struct { + bucketLocation string + endpoint string + }{ + {"us-east-1", "s3.amazonaws.com"}, + {"unknown", "s3.amazonaws.com"}, + {"ap-southeast-1", "s3-ap-southeast-1.amazonaws.com"}, + } + for _, s3Host := range s3Hosts { + endpoint := getS3Endpoint(s3Host.bucketLocation) + if endpoint != s3Host.endpoint { + t.Fatal("Error: invalid bucket location", endpoint) + } + } +} + +// Tests valid bucket names. +func TestBucketNames(t *testing.T) { + buckets := []struct { + name string + valid error + }{ + {".mybucket", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")}, + {"mybucket.", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")}, + {"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters.")}, + {"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")}, + {"", ErrInvalidBucketName("Bucket name cannot be empty.")}, + {"my..bucket", ErrInvalidBucketName("Bucket name cannot have successive periods.")}, + {"my.bucket.com", nil}, + {"my-bucket", nil}, + {"123my-bucket", nil}, + } + + for _, b := range buckets { + err := isValidBucketName(b.name) + if err != b.valid { + t.Fatal("Error:", err) + } + } +} + +// Tests temp file. +func TestTempFile(t *testing.T) { + tmpFile, err := newTempFile("testing") + if err != nil { + t.Fatal("Error:", err) + } + fileName := tmpFile.Name() + // Closing temporary file purges the file. + err = tmpFile.Close() + if err != nil { + t.Fatal("Error:", err) + } + st, err := os.Stat(fileName) + if err != nil && !os.IsNotExist(err) { + t.Fatal("Error:", err) + } + if err == nil && st != nil { + t.Fatal("Error: file should be deleted and should not exist.") + } +} + +// Tests url encoding. +func TestEncodeURL2Path(t *testing.T) { + type urlStrings struct { + objName string + encodedObjName string + } + + bucketName := "bucketName" + want := []urlStrings{ + { + objName: "本語", + encodedObjName: "%E6%9C%AC%E8%AA%9E", + }, + { + objName: "本語.1", + encodedObjName: "%E6%9C%AC%E8%AA%9E.1", + }, + { + objName: ">123>3123123", + encodedObjName: "%3E123%3E3123123", + }, + { + objName: "test 1 2.txt", + encodedObjName: "test%201%202.txt", + }, + { + objName: "test++ 1.txt", + encodedObjName: "test%2B%2B%201.txt", + }, + } + + for _, o := range want { + u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName)) + if err != nil { + t.Fatal("Error:", err) + } + urlPath := "/" + bucketName + "/" + o.encodedObjName + if urlPath != encodeURL2Path(u) { + t.Fatal("Error") + } + } +} + +// Tests error response structure. +func TestErrorResponse(t *testing.T) { + var err error + err = ErrorResponse{ + Code: "Testing", + } + errResp := ToErrorResponse(err) + if errResp.Code != "Testing" { + t.Fatal("Type conversion failed, we have an empty struct.") + } + + // Test http response decoding. + var httpResponse *http.Response + // Set empty variables + httpResponse = nil + var bucketName, objectName string + + // Should fail with invalid argument. + err = httpRespToErrorResponse(httpResponse, bucketName, objectName) + errResp = ToErrorResponse(err) + if errResp.Code != "InvalidArgument" { + t.Fatal("Empty response input should return invalid argument.") + } +} + +// Tests signature calculation. 
+func TestSignatureCalculation(t *testing.T) { + req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil) + if err != nil { + t.Fatal("Error:", err) + } + req = signV4(*req, "", "", "us-east-1") + if req.Header.Get("Authorization") != "" { + t.Fatal("Error: anonymous credentials should not have Authorization header.") + } + + req = preSignV4(*req, "", "", "us-east-1", 0) + if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") { + t.Fatal("Error: anonymous credentials should not have Signature query resource.") + } + + req = signV2(*req, "", "") + if req.Header.Get("Authorization") != "" { + t.Fatal("Error: anonymous credentials should not have Authorization header.") + } + + req = preSignV2(*req, "", "", 0) + if strings.Contains(req.URL.RawQuery, "Signature") { + t.Fatal("Error: anonymous credentials should not have Signature query resource.") + } + + req = signV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1") + if req.Header.Get("Authorization") == "" { + t.Fatal("Error: normal credentials should have Authorization header.") + } + + req = preSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0) + if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") { + t.Fatal("Error: normal credentials should have Signature query resource.") + } + + req = signV2(*req, "ACCESS-KEY", "SECRET-KEY") + if req.Header.Get("Authorization") == "" { + t.Fatal("Error: normal credentials should have Authorization header.") + } + + req = preSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0) + if !strings.Contains(req.URL.RawQuery, "Signature") { + t.Fatal("Error: normal credentials should have Signature query resource.") + } +} + +// Tests signature type. +func TestSignatureType(t *testing.T) { + clnt := Client{} + if !clnt.signature.isV4() { + t.Fatal("Error") + } + clnt.signature = SignatureV2 + if !clnt.signature.isV2() { + t.Fatal("Error") + } + if clnt.signature.isV4() { + t.Fatal("Error") + } + clnt.signature = SignatureV4 + if !clnt.signature.isV4() { + t.Fatal("Error") + } +} + +// Tests bucket acl types. +func TestBucketACLTypes(t *testing.T) { + want := map[string]bool{ + "private": true, + "public-read": true, + "public-read-write": true, + "authenticated-read": true, + "invalid": false, + } + for acl, ok := range want { + if BucketACL(acl).isValidBucketACL() != ok { + t.Fatal("Error") + } + } +} + +// Tests optimal part size.
+func TestPartSize(t *testing.T) { + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5000000000000000000) + if err == nil { + t.Fatal("Error: should fail") + } + totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5497558138880) + if err != nil { + t.Fatal("Error: ", err) + } + if totalPartsCount != 9987 { + t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount) + } + if partSize != 550502400 { + t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize) + } + if lastPartSize != 241172480 { + t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize) + } + totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5000000000) + if err != nil { + t.Fatal("Error:", err) + } + if partSize != minPartSize { + t.Fatalf("Error: expecting part size of %v: got %v instead", minPartSize, partSize) + } + totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(-1) + if err != nil { + t.Fatal("Error:", err) + } + if totalPartsCount != 9987 { + t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount) + } + if partSize != 550502400 { + t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize) + } + if lastPartSize != 241172480 { + t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize) + } +} + +// Tests url encoding. +func TestURLEncoding(t *testing.T) { + type urlStrings struct { + name string + encodedName string + } + + want := []urlStrings{ + { + name: "bigfile-1._%", + encodedName: "bigfile-1._%25", + }, + { + name: "本語", + encodedName: "%E6%9C%AC%E8%AA%9E", + }, + { + name: "本語.1", + encodedName: "%E6%9C%AC%E8%AA%9E.1", + }, + { + name: ">123>3123123", + encodedName: "%3E123%3E3123123", + }, + { + name: "test 1 2.txt", + encodedName: "test%201%202.txt", + }, + { + name: "test++ 1.txt", + encodedName: "test%2B%2B%201.txt", + }, + } + + for _, u := range want { + if u.encodedName != urlEncodePath(u.name) { + t.Fatal("Error") + } + } +} + +// Tests constructing valid endpoint url. +func TestGetEndpointURL(t *testing.T) { + if _, err := getEndpointURL("s3.amazonaws.com", false); err != nil { + t.Fatal("Error:", err) + } + if _, err := getEndpointURL("192.168.1.1", false); err != nil { + t.Fatal("Error:", err) + } + if _, err := getEndpointURL("13333.123123.-", false); err == nil { + t.Fatal("Error") + } + if _, err := getEndpointURL("s3.aamzza.-", false); err == nil { + t.Fatal("Error") + } + if _, err := getEndpointURL("s3.amazonaws.com:443", false); err == nil { + t.Fatal("Error") + } +} + +// Tests valid ip address. +func TestValidIPAddr(t *testing.T) { + type validIP struct { + ip string + valid bool + } + + want := []validIP{ + { + ip: "192.168.1.1", + valid: true, + }, + { + ip: "192.1.8", + valid: false, + }, + { + ip: "..192.", + valid: false, + }, + { + ip: "192.168.1.1.1", + valid: false, + }, + } + for _, w := range want { + valid := isValidIP(w.ip) + if valid != w.valid { + t.Fatal("Error") + } + } +} + +// Tests valid endpoint domain. 
+func TestValidEndpointDomain(t *testing.T) { + type validEndpoint struct { + endpointDomain string + valid bool + } + + want := []validEndpoint{ + { + endpointDomain: "s3.amazonaws.com", + valid: true, + }, + { + endpointDomain: "s3.amazonaws.com_", + valid: false, + }, + { + endpointDomain: "%$$$", + valid: false, + }, + { + endpointDomain: "s3.amz.test.com", + valid: true, + }, + { + endpointDomain: "s3.%%", + valid: false, + }, + { + endpointDomain: "localhost", + valid: true, + }, + { + endpointDomain: "-localhost", + valid: false, + }, + { + endpointDomain: "", + valid: false, + }, + { + endpointDomain: "\n \t", + valid: false, + }, + { + endpointDomain: " ", + valid: false, + }, + } + for _, w := range want { + valid := isValidDomain(w.endpointDomain) + if valid != w.valid { + t.Fatal("Error:", w.endpointDomain) + } + } +} + +// Tests valid endpoint url. +func TestValidEndpointURL(t *testing.T) { + type validURL struct { + url string + valid bool + } + want := []validURL{ + { + url: "https://s3.amazonaws.com", + valid: true, + }, + { + url: "https://s3.amazonaws.com/bucket/object", + valid: false, + }, + { + url: "192.168.1.1", + valid: false, + }, + } + for _, w := range want { + u, err := url.Parse(w.url) + if err != nil { + t.Fatal("Error:", err) + } + valid := false + if err := isValidEndpointURL(u); err == nil { + valid = true + } + if valid != w.valid { + t.Fatal("Error") + } + } +} diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml new file mode 100644 index 000000000..5b8824d45 --- /dev/null +++ b/vendor/github.com/minio/minio-go/appveyor.yml @@ -0,0 +1,36 @@ +# version format +version: "{build}" + +# Operating system (build VM template) +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\minio\minio-go + +# environment variables +environment: + GOPATH: c:\gopath + GO15VENDOREXPERIMENT: 1 + +# scripts that run after cloning repository +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - go get -u github.com/golang/lint/golint + - go get -u golang.org/x/tools/cmd/vet + - go get -u github.com/remyoudompheng/go-misc/deadcode + +# to run your custom scripts instead of automatic MSBuild +build_script: + - go vet ./... + - gofmt -s -l . + - golint github.com/minio/minio-go... + - deadcode + - go test -short -v + - go test -short -race -v + +# to disable automatic tests +test: off + +# to disable deployment +deploy: off diff --git a/vendor/github.com/minio/minio-go/bucket-acl.go b/vendor/github.com/minio/minio-go/bucket-acl.go new file mode 100644 index 000000000..d8eda0f54 --- /dev/null +++ b/vendor/github.com/minio/minio-go/bucket-acl.go @@ -0,0 +1,75 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// BucketACL - Bucket level access control. +type BucketACL string + +// Different types of ACLs currently supported for buckets.
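+// (Illustrative usage, mirroring the client methods exercised in the +// functional tests above: err := c.SetBucketACL("mybucket", "public-read-write") +// followed by acl, err := c.GetBucketACL("mybucket") would yield +// acl == BucketACL("public-read-write").)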
+const ( + bucketPrivate = BucketACL("private") + bucketReadOnly = BucketACL("public-read") + bucketPublic = BucketACL("public-read-write") + bucketAuthenticated = BucketACL("authenticated-read") +) + +// Stringify acl. +func (b BucketACL) String() string { + if string(b) == "" { + return "private" + } + return string(b) +} + +// isValidBucketACL - Is provided acl string supported. +func (b BucketACL) isValidBucketACL() bool { + switch { + case b.isPrivate(): + fallthrough + case b.isReadOnly(): + fallthrough + case b.isPublic(): + fallthrough + case b.isAuthenticated(): + return true + case b.String() == "private": + // By default it's "private" + return true + default: + return false + } +} + +// isPrivate - Is acl Private. +func (b BucketACL) isPrivate() bool { + return b == bucketPrivate +} + +// isReadOnly - Is acl public-read. +func (b BucketACL) isReadOnly() bool { + return b == bucketReadOnly +} + +// isPublic - Is acl public-read-write. +func (b BucketACL) isPublic() bool { + return b == bucketPublic +} + +// isAuthenticated - Is acl AuthenticatedRead. +func (b BucketACL) isAuthenticated() bool { + return b == bucketAuthenticated +} diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go new file mode 100644 index 000000000..4af161c14 --- /dev/null +++ b/vendor/github.com/minio/minio-go/bucket-cache.go @@ -0,0 +1,154 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/hex" + "net/http" + "net/url" + "path/filepath" + "sync" +) + +// bucketLocationCache - Provides a simple mechanism to hold bucket +// locations in memory. +type bucketLocationCache struct { + // mutex is used for handling the concurrent + // read/write requests for cache. + sync.RWMutex + + // items holds the cached bucket locations. + items map[string]string +} + +// newBucketLocationCache - Provides a new bucket location cache to be +// used internally with the client object. +func newBucketLocationCache() *bucketLocationCache { + return &bucketLocationCache{ + items: make(map[string]string), + } +} + +// Get - Returns a value of a given key if it exists. +func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { + r.RLock() + defer r.RUnlock() + location, ok = r.items[bucketName] + return +} + +// Set - Will persist a value into cache. +func (r *bucketLocationCache) Set(bucketName string, location string) { + r.Lock() + defer r.Unlock() + r.items[bucketName] = location +} + +// Delete - Deletes a bucket name from cache. +func (r *bucketLocationCache) Delete(bucketName string) { + r.Lock() + defer r.Unlock() + delete(r.items, bucketName) +} + +// getBucketLocation - Get location for the bucketName from location map cache. +func (c Client) getBucketLocation(bucketName string) (string, error) { + // For anonymous requests, default to "us-east-1" and let other calls + // move forward.
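+ // (Flow sketch: consult the in-memory cache first; on a miss, issue a GET + // "?location" request, built by getBucketLocationRequest below, and memoize + // the answer. The embedded RWMutex makes Get/Set safe for concurrent callers.)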
+ if c.anonymous { + return "us-east-1", nil + } + if location, ok := c.bucketLocCache.Get(bucketName); ok { + return location, nil + } + + // Initialize a new request. + req, err := c.getBucketLocationRequest(bucketName) + if err != nil { + return "", err + } + + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return "", err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Extract location. + var locationConstraint string + err = xmlDecoder(resp.Body, &locationConstraint) + if err != nil { + return "", err + } + + location := locationConstraint + // If location is empty, default it to 'us-east-1'. + if location == "" { + location = "us-east-1" + } + + // A location of 'EU' is an alias; convert it to the meaningful 'eu-west-1'. + if location == "EU" { + location = "eu-west-1" + } + + // Save the location into cache. + c.bucketLocCache.Set(bucketName, location) + + // Return. + return location, nil +} + +// getBucketLocationRequest - Wrapper creates a new getBucketLocation request. +func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) { + // Set location query. + urlValues := make(url.Values) + urlValues.Set("location", "") + + // Set get bucket location always as path style. + targetURL := c.endpointURL + targetURL.Path = filepath.Join(bucketName, "") + targetURL.RawQuery = urlValues.Encode() + + // Get a new HTTP request for the method. + req, err := http.NewRequest("GET", targetURL.String(), nil) + if err != nil { + return nil, err + } + + // Set UserAgent for the request. + c.setUserAgent(req) + + // Set sha256 sum for signature calculation only with signature version '4'. + if c.signature.isV4() { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) + } + + // Sign the request. + if c.signature.isV4() { + req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") + } else if c.signature.isV2() { + req = signV2(*req, c.accessKeyID, c.secretAccessKey) + } + return req, nil +} diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go new file mode 100644 index 000000000..b0aa009d8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/constants.go @@ -0,0 +1,42 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +/// Multipart upload defaults. + +// minPartSize - minimum part size 5MiB per object after which +// putObject behaves internally as multipart. +const minPartSize = 1024 * 1024 * 5 + +// maxPartsCount - maximum number of parts for a single multipart session. +const maxPartsCount = 10000 + +// maxPartSize - maximum part size 5GiB for a single multipart upload +// operation. +const maxPartSize = 1024 * 1024 * 1024 * 5
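+ +// (Worked example, assuming the optimalPartInfo computation exercised by +// TestPartSize in api_unit_test.go: for a maximum-size 5TiB object, +// 5497558138880 / 10000 parts ≈ 549755814 bytes; rounding up to the next +// multiple of minPartSize gives 105 * 5242880 = 550502400 bytes per part, +// 9987 parts in total, and a final part of 241172480 bytes.)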
+ +// maxSinglePutObjectSize - maximum size 5GiB of object per PUT +// operation. +const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 + +// maxMultipartPutObjectSize - maximum size 5TiB of object for +// Multipart operation. +const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 + +// optimalReadBufferSize - optimal buffer 5MiB used for reading +// through Read operation. +const optimalReadBufferSize = 1024 * 1024 * 5 diff --git a/vendor/github.com/minio/minio-go/hook-reader.go b/vendor/github.com/minio/minio-go/hook-reader.go new file mode 100644 index 000000000..043425f23 --- /dev/null +++ b/vendor/github.com/minio/minio-go/hook-reader.go @@ -0,0 +1,54 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import "io" + +// hookReader hooks an additional reader into the source stream. It is +// useful for implementing progress bars: the second reader is notified +// of the exact number of bytes read from the primary source on each +// Read operation. +type hookReader struct { + source io.Reader + hook io.Reader +} + +// Read implements io.Reader. It always reads from the source; the 'n' +// bytes read are then also reported through the hook. Returns an error +// for all non-io.EOF conditions. +func (hr *hookReader) Read(b []byte) (n int, err error) { + n, err = hr.source.Read(b) + if err != nil && err != io.EOF { + return n, err + } + // Progress the hook with the total read bytes from the source. + if _, herr := hr.hook.Read(b[:n]); herr != nil { + if herr != io.EOF { + return n, herr + } + } + return n, err +} + +// newHook returns an io.Reader which implements hookReader that +// reports the data read from the source to the hook. +func newHook(source, hook io.Reader) io.Reader { + if hook == nil { + return source + } + return &hookReader{source, hook} +} diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go new file mode 100644 index 000000000..2a675d770 --- /dev/null +++ b/vendor/github.com/minio/minio-go/post-policy.go @@ -0,0 +1,191 @@ +package minio + +import ( + "encoding/base64" + "fmt" + "strings" + "time" +) + +// expirationDateFormat is the date format for the expiration key in the policy JSON. +const expirationDateFormat = "2006-01-02T15:04:05.999Z" + +// policyCondition explanation: +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// +// Example: +// +// policyCondition { +// matchType: "eq", +// condition: "$Content-Type", +// value: "image/png", +// } +// +type policyCondition struct { + matchType string + condition string + value string +} + +// PostPolicy - Provides strict static type conversion and validation +// for Amazon S3's POST policy JSON string. +type PostPolicy struct { + // Expiration date and time of the POST policy. + expiration time.Time + // Collection of different policy conditions. + conditions []policyCondition + // ContentLengthRange minimum and maximum allowable size for the + // uploaded content.
+ contentLengthRange struct { + min int64 + max int64 + } + + // Post form data. + formData map[string]string +} + +// NewPostPolicy - Instantiate new post policy. +func NewPostPolicy() *PostPolicy { + p := &PostPolicy{} + p.conditions = make([]policyCondition, 0) + p.formData = make(map[string]string) + return p +} + +// SetExpires - Sets expiration time for the new policy. +func (p *PostPolicy) SetExpires(t time.Time) error { + if t.IsZero() { + return ErrInvalidArgument("No expiry time set.") + } + p.expiration = t + return nil +} + +// SetKey - Sets an object name for the policy based upload. +func (p *PostPolicy) SetKey(key string) error { + if strings.TrimSpace(key) == "" { + return ErrInvalidArgument("Object name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$key", + value: key, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = key + return nil +} + +// SetKeyStartsWith - Sets an object name that a policy based upload +// can start with. +func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { + if strings.TrimSpace(keyStartsWith) == "" { + return ErrInvalidArgument("Object prefix is empty.") + } + policyCond := policyCondition{ + matchType: "starts-with", + condition: "$key", + value: keyStartsWith, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = keyStartsWith + return nil +} + +// SetBucket - Sets the bucket into which objects will be uploaded. +func (p *PostPolicy) SetBucket(bucketName string) error { + if strings.TrimSpace(bucketName) == "" { + return ErrInvalidArgument("Bucket name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$bucket", + value: bucketName, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["bucket"] = bucketName + return nil +} + +// SetContentType - Sets content-type of the object for this policy +// based upload. +func (p *PostPolicy) SetContentType(contentType string) error { + if strings.TrimSpace(contentType) == "" { + return ErrInvalidArgument("No content type specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Type", + value: contentType, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Type"] = contentType + return nil +} + +// SetContentLengthRange - Set new min and max content length +// condition for all incoming uploads. +func (p *PostPolicy) SetContentLengthRange(min, max int64) error { + if min > max { + return ErrInvalidArgument("Minimum limit is larger than maximum limit.") + } + if min < 0 { + return ErrInvalidArgument("Minimum limit cannot be negative.") + } + if max < 0 { + return ErrInvalidArgument("Maximum limit cannot be negative.") + } + p.contentLengthRange.min = min + p.contentLengthRange.max = max + return nil +} + +// addNewPolicy - internal helper to validate adding new policies. +func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { + if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + return ErrInvalidArgument("Policy fields are empty.") + } + p.conditions = append(p.conditions, policyCond) + return nil +} + +// Stringer interface for printing the policy as a JSON formatted string. +func (p PostPolicy) String() string { + return string(p.marshalJSON()) +} + +// marshalJSON - Provides marshalled JSON in bytes.
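+// For illustration, a policy with a single "eq $key" condition and a +// content-length-range renders in the following shape (values are made up): +// {"expiration":"2016-01-24T10:00:00.000Z","conditions":[["eq","$key","obj"],["content-length-range", 1, 1024]]}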
+func (p PostPolicy) marshalJSON() []byte { + expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` + var conditionsStr string + conditions := []string{} + for _, po := range p.conditions { + conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value)) + } + if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { + conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]", + p.contentLengthRange.min, p.contentLengthRange.max)) + } + if len(conditions) > 0 { + conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]" + } + retStr := "{" + retStr = retStr + expirationStr + "," + retStr = retStr + conditionsStr + retStr = retStr + "}" + return []byte(retStr) +} + +// base64 - Produces base64 of PostPolicy's Marshalled json. +func (p PostPolicy) base64() string { + return base64.StdEncoding.EncodeToString(p.marshalJSON()) +} diff --git a/vendor/github.com/minio/minio-go/request-signature-v2.go b/vendor/github.com/minio/minio-go/request-signature-v2.go new file mode 100644 index 000000000..696828182 --- /dev/null +++ b/vendor/github.com/minio/minio-go/request-signature-v2.go @@ -0,0 +1,289 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" +) + +// Signature and API related constants. +const ( + signV2Algorithm = "AWS" +) + +// Encode input URL path to URL encoded path. +func encodeURL2Path(u *url.URL) (path string) { + // Encode URL path. + if strings.HasSuffix(u.Host, ".s3.amazonaws.com") { + path = "/" + strings.TrimSuffix(u.Host, ".s3.amazonaws.com") + path += u.Path + path = urlEncodePath(path) + return + } + if strings.HasSuffix(u.Host, ".storage.googleapis.com") { + path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com") + path += u.Path + path = urlEncodePath(path) + return + } + path = urlEncodePath(u.Path) + return +} + +// preSignV2 - presign the request in following style. +// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. +func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + d := time.Now().UTC() + // Add date if not present. + if date := req.Header.Get("Date"); date == "" { + req.Header.Set("Date", d.Format(http.TimeFormat)) + } + + // Get encoded URL path. + path := encodeURL2Path(req.URL) + + // Find epoch expires when the request will expire. + epochExpires := d.Unix() + expires + + // Get string to sign. 
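+ // (Illustrative: for a GET of /bucket/object expiring at epoch 1453600000, + // the string-to-sign built below is "GET\n\n\n1453600000\n/bucket/object".)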
+ stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Calculate signature. + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + + query := req.URL.Query() + // Handle specially for Google Cloud Storage. + if strings.Contains(req.URL.Host, ".storage.googleapis.com") { + query.Set("GoogleAccessId", accessKeyID) + } else { + query.Set("AWSAccessKeyId", accessKeyID) + } + + // Fill in Expires and Signature for presigned query. + query.Set("Expires", strconv.FormatInt(epochExpires, 10)) + query.Set("Signature", signature) + + // Encode query and save. + req.URL.RawQuery = query.Encode() + return &req +} + +// postPresignSignatureV2 - presigned signature for PostPolicy +// request. +func postPresignSignatureV2(policyBase64, secretAccessKey string) string { + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(policyBase64)) + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + return signature +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-MD5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// <HTTP-Request-URI, from the protocol name up to the query string> + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = <described below> + +// signV2 sign the request before Do() (AWS Signature Version 2). +func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + d := time.Now().UTC() + + // Add date if not present. + if date := req.Header.Get("Date"); date == "" { + req.Header.Set("Date", d.Format(http.TimeFormat)) + } + + // Calculate HMAC for secretAccessKey. + stringToSign := getStringToSignV2(req) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Prepare auth header. + authHeader := new(bytes.Buffer) + authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) + encoder := base64.NewEncoder(base64.StdEncoding, authHeader) + encoder.Write(hm.Sum(nil)) + encoder.Close() + + // Set Authorization header. + req.Header.Set("Authorization", authHeader.String()) + + return &req +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-MD5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func getStringToSignV2(req http.Request) string { + buf := new(bytes.Buffer) + // Write standard headers. + writeDefaultHeaders(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any.
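+ // (Illustrative, following the format above: a PUT of /bucket/object with + // Content-Type text/plain and one x-amz-acl header canonicalizes to + // "PUT\n\ntext/plain\nSun, 24 Jan 2016 00:00:00 GMT\nx-amz-acl:private\n/bucket/object".)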
+	writeCanonicalizedResource(buf, req)
+	return buf.String()
+}
+
+// writeDefaultHeaders - write all default necessary headers.
+func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) {
+	buf.WriteString(req.Method)
+	buf.WriteByte('\n')
+	buf.WriteString(req.Header.Get("Content-MD5"))
+	buf.WriteByte('\n')
+	buf.WriteString(req.Header.Get("Content-Type"))
+	buf.WriteByte('\n')
+	buf.WriteString(req.Header.Get("Date"))
+	buf.WriteByte('\n')
+}
+
+// writeCanonicalizedHeaders - write canonicalized headers.
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
+	var protoHeaders []string
+	vals := make(map[string][]string)
+	for k, vv := range req.Header {
+		// All the AMZ headers should be lowercase.
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-amz") {
+			protoHeaders = append(protoHeaders, lk)
+			vals[lk] = vv
+		}
+	}
+	sort.Strings(protoHeaders)
+	for _, k := range protoHeaders {
+		buf.WriteString(k)
+		buf.WriteByte(':')
+		for idx, v := range vals[k] {
+			if idx > 0 {
+				buf.WriteByte(',')
+			}
+			// TODO: "Unfold" long headers that span multiple lines
+			// (as allowed by RFC 2616, section 4.2) by replacing the
+			// folding white-space (including new-line) with a single
+			// space; for now values are written as-is.
+			buf.WriteString(v)
+		}
+		buf.WriteByte('\n')
+	}
+}
+
+// Must be sorted:
+var resourceList = []string{
+	"acl",
+	"location",
+	"logging",
+	"notification",
+	"partNumber",
+	"policy",
+	"requestPayment",
+	"response-cache-control",
+	"response-content-disposition",
+	"response-content-encoding",
+	"response-content-language",
+	"response-content-type",
+	"response-expires",
+	"torrent",
+	"uploadId",
+	"uploads",
+	"versionId",
+	"versioning",
+	"versions",
+	"website",
+}
+
+// From the Amazon docs:
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//	<HTTP-Request-URI, from the protocol name up to the query string> +
+//	[ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) error {
+	// Save request URL.
+	requestURL := req.URL
+
+	// Get encoded URL path.
+	path := encodeURL2Path(requestURL)
+	buf.WriteString(path)
+
+	sort.Strings(resourceList)
+	if requestURL.RawQuery != "" {
+		var n int
+		vals, _ := url.ParseQuery(requestURL.RawQuery)
+		// Verify if any sub-resource queries are present, if yes
+		// canonicalize them.
+		for _, resource := range resourceList {
+			if vv, ok := vals[resource]; ok && len(vv) > 0 {
+				n++
+				// First element
+				switch n {
+				case 1:
+					buf.WriteByte('?')
+				// The rest
+				default:
+					buf.WriteByte('&')
+				}
+				buf.WriteString(resource)
+				// Request parameters
+				if len(vv[0]) > 0 {
+					buf.WriteByte('=')
+					buf.WriteString(url.QueryEscape(vv[0]))
+				}
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/minio/minio-go/request-signature-v4.go b/vendor/github.com/minio/minio-go/request-signature-v4.go
new file mode 100644
index 000000000..dfd11e9e4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/request-signature-v4.go
@@ -0,0 +1,303 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "encoding/hex" + "net/http" + "sort" + "strconv" + "strings" + "time" +) + +// Signature and API related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" + yyyymmdd = "20060102" +) + +/// +/// Excerpts from @lsegal - +/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. +/// +/// User-Agent: +/// +/// This is ignored from signing because signing this causes +/// problems with generating pre-signed URLs (that are executed +/// by other agents) or when customers pass requests through +/// proxies, which may modify the user-agent. +/// +/// Content-Length: +/// +/// This is ignored from signing because generating a pre-signed +/// URL should not provide a content-length constraint, +/// specifically when vending a S3 pre-signed PUT URL. The +/// corollary to this is that when sending regular requests +/// (non-pre-signed), the signature contains a checksum of the +/// body, which implicitly validates the payload length (since +/// changing the number of bytes would change the checksum) +/// and therefore this header is not valuable in the signature. +/// +/// Content-Type: +/// +/// Signing this header causes quite a number of problems in +/// browser environments, where browsers like to modify and +/// normalize the content-type header in different ways. There is +/// more information on this in https://goo.gl/2E9gyy. Avoiding +/// this field simplifies logic and reduces the possibility of +/// future bugs. +/// +/// Authorization: +/// +/// Is skipped for obvious reasons +/// +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// getSigningKey hmac seed to calculate final signature. +func getSigningKey(secret, loc string, t time.Time) []byte { + date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) + location := sumHMAC(date, []byte(loc)) + service := sumHMAC(location, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getScope generate a string of a specific date, an AWS region, and a +// service. +func getScope(location string, t time.Time) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + location, + "s3", + "aws4_request", + }, "/") + return scope +} + +// getCredential generate a credential string. +func getCredential(accessKeyID, location string, t time.Time) string { + scope := getScope(location, t) + return accessKeyID + "/" + scope +} + +// getHashedPayload get the hexadecimal value of the SHA256 hash of +// the request payload. +func getHashedPayload(req http.Request) string { + hashedPayload := req.Header.Get("X-Amz-Content-Sha256") + if hashedPayload == "" { + // Presign does not have a payload, use S3 recommended value. 
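+		// "UNSIGNED-PAYLOAD" tells S3 to skip verifying the body checksum,
+		// since the body is not known when the presigned URL is generated.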
+ hashedPayload = "UNSIGNED-PAYLOAD" + } + return hashedPayload +} + +// getCanonicalHeaders generate a list of request headers for +// signature. +func getCanonicalHeaders(req http.Request) string { + var headers []string + vals := make(map[string][]string) + for k, vv := range req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + headers = append(headers, "host") + sort.Strings(headers) + + var buf bytes.Buffer + // Save all the headers in canonical form
+	// <header>:<value> pairs, newline
+	// separated for each header.
+	for _, k := range headers {
+		buf.WriteString(k)
+		buf.WriteByte(':')
+		switch {
+		case k == "host":
+			buf.WriteString(req.URL.Host)
+			fallthrough
+		default:
+			for idx, v := range vals[k] {
+				if idx > 0 {
+					buf.WriteByte(',')
+				}
+				buf.WriteString(v)
+			}
+			buf.WriteByte('\n')
+		}
+	}
+	return buf.String()
+}
+
+// getSignedHeaders generate all signed request headers,
+// i.e. a lexically sorted, semicolon-separated list of lowercase
+// request header names.
+func getSignedHeaders(req http.Request) string {
+	var headers []string
+	for k := range req.Header {
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+			continue // Skip ignored headers.
+		}
+		headers = append(headers, strings.ToLower(k))
+	}
+	headers = append(headers, "host")
+	sort.Strings(headers)
+	return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generate a canonical request of style.
+//
+// canonicalRequest =
+//	<HTTPMethod>\n
+//	<CanonicalURI>\n
+//	<CanonicalQueryString>\n
+//	<CanonicalHeaders>\n
+//	<SignedHeaders>\n
+//	<HashedPayload>
+func getCanonicalRequest(req http.Request) string {
+	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+	canonicalRequest := strings.Join([]string{
+		req.Method,
+		urlEncodePath(req.URL.Path),
+		req.URL.RawQuery,
+		getCanonicalHeaders(req),
+		getSignedHeaders(req),
+		getHashedPayload(req),
+	}, "\n")
+	return canonicalRequest
+}
+
+// getStringToSignV4 a string based on selected query values.
+func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
+	stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+	stringToSign = stringToSign + getScope(location, t) + "\n"
+	stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
+	return stringToSign
+}
+
+// preSignV4 presign the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
+	// Presign is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	// Initial time.
+	t := time.Now().UTC()
+
+	// Get credential string.
+	credential := getCredential(accessKeyID, location, t)
+
+	// Get all signed headers.
+	signedHeaders := getSignedHeaders(req)
+
+	// Set URL query.
+	query := req.URL.Query()
+	query.Set("X-Amz-Algorithm", signV4Algorithm)
+	query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+	query.Set("X-Amz-SignedHeaders", signedHeaders)
+	query.Set("X-Amz-Credential", credential)
+	req.URL.RawQuery = query.Encode()
+
+	// Get canonical request.
+	canonicalRequest := getCanonicalRequest(req)
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(secretAccessKey, location, t)
+
+	// Calculate signature.
+	signature := getSignature(signingKey, stringToSign)
+
+	// Add signature header to RawQuery.
+	req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+	return &req
+}
+
+// postPresignSignatureV4 - presigned signature for PostPolicy
+// requests.
+func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+	// Get signing key.
+	signingkey := getSigningKey(secretAccessKey, location, t)
+	// Calculate signature.
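+	// For POST policy requests the string-to-sign is the base64-encoded
+	// policy document itself; no canonical request is constructed.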
+ signature := getSignature(signingkey, policyBase64) + return signature +} + +// signV4 sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. +func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + t := time.Now().UTC() + + // Set x-amz-date. + req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + + // Get canonical request. + canonicalRequest := getCanonicalRequest(req) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(t, location, canonicalRequest) + + // Get hmac signing key. + signingKey := getSigningKey(secretAccessKey, location, t) + + // Get credential string. + credential := getCredential(accessKeyID, location, t) + + // Get all signed headers. + signedHeaders := getSignedHeaders(req) + + // Calculate signature. + signature := getSignature(signingKey, stringToSign) + + // If regular request, construct the final authorization header. + parts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + + // Set authorization header. + auth := strings.Join(parts, ", ") + req.Header.Set("Authorization", auth) + + return &req +} diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go new file mode 100644 index 000000000..8c9ff5e88 --- /dev/null +++ b/vendor/github.com/minio/minio-go/s3-endpoints.go @@ -0,0 +1,40 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// awsS3EndpointMap Amazon S3 endpoint map. +var awsS3EndpointMap = map[string]string{ + "us-east-1": "s3.amazonaws.com", + "us-west-2": "s3-us-west-2.amazonaws.com", + "us-west-1": "s3-us-west-1.amazonaws.com", + "eu-west-1": "s3-eu-west-1.amazonaws.com", + "eu-central-1": "s3-eu-central-1.amazonaws.com", + "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com", + "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com", + "ap-northeast-2": "s3-ap-northeast-2.amazonaws.com", + "sa-east-1": "s3-sa-east-1.amazonaws.com", +} + +// getS3Endpoint get Amazon S3 endpoint based on the bucket location. +func getS3Endpoint(bucketLocation string) (s3Endpoint string) { + s3Endpoint, ok := awsS3EndpointMap[bucketLocation] + if !ok { + // Default to 's3.amazonaws.com' endpoint. + s3Endpoint = "s3.amazonaws.com" + } + return s3Endpoint +} diff --git a/vendor/github.com/minio/minio-go/signature-type.go b/vendor/github.com/minio/minio-go/signature-type.go new file mode 100644 index 000000000..cae74cd01 --- /dev/null +++ b/vendor/github.com/minio/minio-go/signature-type.go @@ -0,0 +1,37 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// SignatureType is type of Authorization requested for a given HTTP request. +type SignatureType int + +// Different types of supported signatures - default is Latest i.e SignatureV4. +const ( + Latest SignatureType = iota + SignatureV4 + SignatureV2 +) + +// isV2 - is signature SignatureV2? +func (s SignatureType) isV2() bool { + return s == SignatureV2 +} + +// isV4 - is signature SignatureV4? +func (s SignatureType) isV4() bool { + return s == SignatureV4 || s == Latest +} diff --git a/vendor/github.com/minio/minio-go/tempfile.go b/vendor/github.com/minio/minio-go/tempfile.go new file mode 100644 index 000000000..65c7b0da1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/tempfile.go @@ -0,0 +1,60 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "io/ioutil" + "os" + "sync" +) + +// tempFile - temporary file container. +type tempFile struct { + *os.File + mutex *sync.Mutex +} + +// newTempFile returns a new temporary file, once closed it automatically deletes itself. +func newTempFile(prefix string) (*tempFile, error) { + // use platform specific temp directory. + file, err := ioutil.TempFile(os.TempDir(), prefix) + if err != nil { + return nil, err + } + return &tempFile{ + File: file, + mutex: &sync.Mutex{}, + }, nil +} + +// Close - closer wrapper to close and remove temporary file. +func (t *tempFile) Close() error { + t.mutex.Lock() + defer t.mutex.Unlock() + if t.File != nil { + // Close the file. + if err := t.File.Close(); err != nil { + return err + } + // Remove file. + if err := os.Remove(t.File.Name()); err != nil { + return err + } + t.File = nil + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go new file mode 100644 index 000000000..63966c30c --- /dev/null +++ b/vendor/github.com/minio/minio-go/utils.go @@ -0,0 +1,305 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/xml"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// xmlDecoder provide decoded value in xml.
+func xmlDecoder(body io.Reader, v interface{}) error {
+	d := xml.NewDecoder(body)
+	return d.Decode(v)
+}
+
+// sum256 calculate sha256 sum for an input byte array.
+func sum256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// sumHMAC calculate hmac between two input byte array.
+func sumHMAC(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// getEndpointURL - construct a new endpoint.
+func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) {
+	if strings.Contains(endpoint, ":") {
+		host, _, err := net.SplitHostPort(endpoint)
+		if err != nil {
+			return nil, err
+		}
+		if !isValidIP(host) && !isValidDomain(host) {
+			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+			return nil, ErrInvalidArgument(msg)
+		}
+	} else {
+		if !isValidIP(endpoint) && !isValidDomain(endpoint) {
+			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+			return nil, ErrInvalidArgument(msg)
+		}
+	}
+	// if inSecure is true, use 'http' scheme.
+	scheme := "https"
+	if inSecure {
+		scheme = "http"
+	}
+
+	// Construct a secured endpoint URL.
+	endpointURLStr := scheme + "://" + endpoint
+	endpointURL, err := url.Parse(endpointURLStr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate incoming endpoint URL.
+	if err := isValidEndpointURL(endpointURL); err != nil {
+		return nil, err
+	}
+	return endpointURL, nil
+}
+
+// isValidDomain validates if input string is a valid domain name.
+func isValidDomain(host string) bool {
+	// See RFC 1035, RFC 3696.
+	host = strings.TrimSpace(host)
+	if len(host) == 0 || len(host) > 255 {
+		return false
+	}
+	// host cannot start or end with "-"
+	if host[len(host)-1:] == "-" || host[:1] == "-" {
+		return false
+	}
+	// host cannot start or end with "_"
+	if host[len(host)-1:] == "_" || host[:1] == "_" {
+		return false
+	}
+	// host cannot start or end with a "."
+	if host[len(host)-1:] == "." || host[:1] == "." {
+		return false
+	}
+	// All non alphanumeric characters are invalid.
+	if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
+		return false
+	}
+	// No need to regexp match, since the list is non-exhaustive.
+	// We let it be valid and fail later.
+	return true
+}
+
+// isValidIP parses input string for ip address validity.
+func isValidIP(ip string) bool {
+	return net.ParseIP(ip) != nil
+}
+
+// closeResponse close non nil response with any response Body.
+// Convenient wrapper to drain any remaining data on response body.
+//
+// Subsequently this allows golang http RoundTripper
+// to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+	// Callers should close resp.Body when done reading from it.
+	// If resp.Body is not closed, the Client's underlying RoundTripper
+	// (typically Transport) may not be able to re-use a persistent TCP
+	// connection to the server for a subsequent "keep-alive" request.
+	if resp != nil && resp.Body != nil {
+		// Drain any remaining Body and then close the connection.
+		// Without this, the connection cannot be re-used.
+		//  - http://stackoverflow.com/a/17961593/4465767
+		io.Copy(ioutil.Discard, resp.Body)
+		resp.Body.Close()
+	}
+}
+
+// isVirtualHostSupported - verify if host supports virtual hosted style.
+// Currently only Amazon S3 and Google Cloud Storage support this.
+func isVirtualHostSupported(endpointURL *url.URL) bool {
+	return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
+}
+
+// isAmazonEndpoint - match if it is exactly the Amazon S3 endpoint.
+func isAmazonEndpoint(endpointURL *url.URL) bool {
+	if endpointURL == nil {
+		return false
+	}
+	return endpointURL.Host == "s3.amazonaws.com"
+}
+
+// isGoogleEndpoint - match if it is exactly the Google Cloud Storage endpoint.
+func isGoogleEndpoint(endpointURL *url.URL) bool {
+	if endpointURL == nil {
+		return false
+	}
+	return endpointURL.Host == "storage.googleapis.com"
+}
+
+// isValidEndpointURL - verify endpoint url is valid.
+func isValidEndpointURL(endpointURL *url.URL) error {
+	if endpointURL == nil {
+		return ErrInvalidArgument("Endpoint url cannot be empty.")
+	}
+	if endpointURL.Path != "/" && endpointURL.Path != "" {
+		return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
+	}
+	if strings.Contains(endpointURL.Host, ".amazonaws.com") && !isAmazonEndpoint(endpointURL) {
+		return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+	}
+	if strings.Contains(endpointURL.Host, ".googleapis.com") && !isGoogleEndpoint(endpointURL) {
+		return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+	}
+	return nil
+}
+
+// isValidExpiry - verify expiry time is valid.
+func isValidExpiry(expires time.Duration) error {
+	expireSeconds := int64(expires / time.Second)
+	if expireSeconds < 1 {
+		return ErrInvalidArgument("Expires cannot be lesser than 1 second.")
+	}
+	if expireSeconds > 604800 {
+		return ErrInvalidArgument("Expires cannot be greater than 7 days.")
+	}
+	return nil
+}
+
+// We support '.' with bucket names but we fallback to using path
+// style requests instead for such buckets.
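+//
+// (Virtual-host style access to a dotted bucket name would not match
+// Amazon's wildcard TLS certificate for "*.s3.amazonaws.com", hence
+// the path style fallback.)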
+var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+
+// isValidBucketName - verify bucket name in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func isValidBucketName(bucketName string) error {
+	if strings.TrimSpace(bucketName) == "" {
+		return ErrInvalidBucketName("Bucket name cannot be empty.")
+	}
+	if len(bucketName) < 3 {
+		return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
+	}
+	if len(bucketName) > 63 {
+		return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
+	}
+	if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
+		return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
+	}
+	if match, _ := regexp.MatchString(`\.\.`, bucketName); match {
+		return ErrInvalidBucketName("Bucket name cannot have successive periods.")
+	}
+	if !validBucketName.MatchString(bucketName) {
+		return ErrInvalidBucketName("Bucket name contains invalid characters.")
+	}
+	return nil
+}
+
+// isValidObjectName - verify object name in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func isValidObjectName(objectName string) error {
+	if strings.TrimSpace(objectName) == "" {
+		return ErrInvalidObjectName("Object name cannot be empty.")
+	}
+	if len(objectName) > 1024 {
+		return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
+	}
+	if !utf8.ValidString(objectName) {
+		return ErrInvalidObjectName("Object name with non UTF-8 strings are not supported.")
+	}
+	return nil
+}
+
+// isValidObjectPrefix - verify if object prefix is valid.
+func isValidObjectPrefix(objectPrefix string) error {
+	if len(objectPrefix) > 1024 {
+		return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
+	}
+	if !utf8.ValidString(objectPrefix) {
+		return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.")
+	}
+	return nil
+}
+
+// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences.
+//
+// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8;
+// non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function is instead a direct replacement for the url.Encode() technique that supports
+// pretty much every UTF-8 character.
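+//
+// For example, a hypothetical object name "docs/文件.txt" is encoded as
+// "docs/%E6%96%87%E4%BB%B6.txt", while unreserved characters such as
+// "a-b_c.txt" pass through unchanged.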
+func urlEncodePath(pathName string) string { + // if object matches reserved string, no need to encode them + reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + if reservedNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 5ebef9803..7c2ce3c04 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -32,6 +32,16 @@ "revision": "ee1815431e497d3850809578c93ab6705f1a19f7", "revisionTime": "2015-08-19T22:15:06-07:00" }, + { + "path": "github.com/gorilla/rpc/v2", + "revision": "64e20900b8aa38bb0771dec71ba3bcc2b07fc8ec", + "revisionTime": "2015-11-05T07:45:51+08:00" + }, + { + "path": "github.com/gorilla/rpc/v2/json", + "revision": "64e20900b8aa38bb0771dec71ba3bcc2b07fc8ec", + "revisionTime": "2015-11-05T07:45:51+08:00" + }, { "path": "github.com/mattn/go-isatty", "revision": "7fcbc72f853b92b5720db4a6b8482be612daef24", @@ -42,6 +52,11 @@ "revision": "c4a07c7b68db77ccd119183fb1d01dd5972434ab", "revisionTime": "2015-11-18T20:00:48-08:00" }, + { + "path": "github.com/minio/minio-go", + "revision": "412df729f2c19ce60895770403f266cf5eac56f7", + "revisionTime": "2016-01-22T16:23:42-08:00" + }, { "path": "github.com/minio/minio-xl/pkg/atomic", "revision": "69c47f638917ab1cb9e24649c84ac38e6f1891b8", diff --git a/web-auth-handlers.go b/web-auth-handlers.go deleted file mode 100644 index 7129725ac..000000000 --- a/web-auth-handlers.go +++ /dev/null @@ -1,104 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/dgrijalva/jwt-go" -) - -func srvLogin(requestUser *User) (int, []byte) { - authBackend := InitJWT() - if authBackend.Authenticate(requestUser) { - token, err := authBackend.GenerateToken(requestUser.Username) - if err != nil { - return http.StatusInternalServerError, nil - } - response, err := json.Marshal(AuthToken{token}) - if err != nil { - return http.StatusInternalServerError, nil - } - return http.StatusOK, response - } - return http.StatusUnauthorized, nil -} - -func srvRefreshToken(req *http.Request) (int, []byte) { - authBackend := InitJWT() - tokenRequest, err := jwt.ParseFromRequest(req, func(token *jwt.Token) (interface{}, error) { - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - return authBackend.PublicKey, nil - }) - if err != nil { - return http.StatusInternalServerError, nil - } - if tokenRequest.Valid { - userName, ok := tokenRequest.Claims["sub"].(string) - if !ok { - return http.StatusUnauthorized, nil - } - token, err := authBackend.GenerateToken(userName) - if err != nil { - return http.StatusInternalServerError, nil - } - response, err := json.Marshal(AuthToken{token}) - if err != nil { - return http.StatusInternalServerError, nil - } - return http.StatusOK, response 
- } - return http.StatusUnauthorized, nil -} - -func srvLogout(req *http.Request) int { - authBackend := InitJWT() - tokenRequest, err := jwt.ParseFromRequest(req, func(token *jwt.Token) (interface{}, error) { - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - return authBackend.PublicKey, nil - }) - if err != nil { - return http.StatusInternalServerError - } - if tokenRequest.Valid { - tokenString := req.Header.Get("Authorization") - if err = authBackend.Logout(tokenString, tokenRequest); err != nil { - return http.StatusInternalServerError - } - return http.StatusOK - } - return http.StatusUnauthorized -} - -// LoginHandler - user login handler. -func (web WebAPI) LoginHandler(w http.ResponseWriter, r *http.Request) { - requestUser := new(User) - decoder := json.NewDecoder(r.Body) - if err := decoder.Decode(&requestUser); err != nil { - w.WriteHeader(http.StatusInternalServerError) - return - } - responseStatus, token := srvLogin(requestUser) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(responseStatus) - w.Write(token) -} - -// RefreshTokenHandler - refresh token handler. -func (web WebAPI) RefreshTokenHandler(w http.ResponseWriter, r *http.Request) { - responseStatus, token := srvRefreshToken(r) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(responseStatus) - w.Write(token) -} - -// LogoutHandler - user logout handler. -func (web WebAPI) LogoutHandler(w http.ResponseWriter, r *http.Request) { - responseStatus := srvLogout(r) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(responseStatus) -} diff --git a/web-definitions.go b/web-definitions.go index 49d8e84b0..48587dce2 100644 --- a/web-definitions.go +++ b/web-definitions.go @@ -1,12 +1,27 @@ package main -// AuthToken - auth token +// ListBucketsArgs - list bucket args. +type ListBucketsArgs struct{} + +// ListObjectsArgs - list object args. +type ListObjectsArgs struct { + BucketName string `json:"bucketName"` + Prefix string `json:"prefix"` +} + +// GetObjectURLArgs - get object url. +type GetObjectURLArgs struct { + BucketName string `json:"bucketName"` + ObjectName string `json:"objectName"` +} + +// AuthToken - auth token reply type AuthToken struct { Token string `json:"token" form:"token"` } -// User users. -type User struct { +// LoginArgs - login arguments. +type LoginArgs struct { Username string `json:"username" form:"username"` Password string `json:"password" form:"password"` } diff --git a/web-handlers.go b/web-handlers.go new file mode 100644 index 000000000..587f228b2 --- /dev/null +++ b/web-handlers.go @@ -0,0 +1,122 @@ +package main + +import ( + "fmt" + "net/http" + "time" + + "github.com/dgrijalva/jwt-go" + "github.com/minio/minio-go" +) + +func isAuthenticated(req *http.Request) bool { + authBackend := InitJWT() + tokenRequest, err := jwt.ParseFromRequest(req, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + return authBackend.PublicKey, nil + }) + if err != nil { + return false + } + return tokenRequest.Valid +} + +// ListBuckets - list buckets api. 
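+// Uses minio-go against the local server ("localhost:9000") to fetch
+// bucket metadata on behalf of the JWT-authenticated web user.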
+func (web *WebAPI) ListBuckets(r *http.Request, args *ListBucketsArgs, reply *[]minio.BucketInfo) error { + if !isAuthenticated(r) { + return errUnAuthorizedRequest + } + client, err := minio.New("localhost:9000", web.AccessKeyID, web.SecretAccessKey, true) + if err != nil { + return err + } + buckets, err := client.ListBuckets() + if err != nil { + return err + } + *reply = buckets + return nil +} + +// ListObjects - list objects api. +func (web *WebAPI) ListObjects(r *http.Request, args *ListObjectsArgs, reply *[]minio.ObjectInfo) error { + if !isAuthenticated(r) { + return errUnAuthorizedRequest + } + client, err := minio.New("localhost:9000", web.AccessKeyID, web.SecretAccessKey, true) + if err != nil { + return err + } + doneCh := make(chan struct{}) + defer close(doneCh) + + var objects []minio.ObjectInfo + for object := range client.ListObjects(args.BucketName, args.Prefix, false, doneCh) { + if object.Err != nil { + return object.Err + } + objects = append(objects, object) + } + *reply = objects + return nil +} + +// GetObjectURL - get object url. +func (web *WebAPI) GetObjectURL(r *http.Request, args *GetObjectURLArgs, reply *string) error { + if !isAuthenticated(r) { + return errUnAuthorizedRequest + } + client, err := minio.New("localhost:9000", web.AccessKeyID, web.SecretAccessKey, true) + if err != nil { + return err + } + urlStr, err := client.PresignedGetObject(args.BucketName, args.ObjectName, time.Duration(60*60)*time.Second) + if err != nil { + return err + } + *reply = urlStr + return nil +} + +// Login - user login handler. +func (web *WebAPI) Login(r *http.Request, args *LoginArgs, reply *AuthToken) error { + authBackend := InitJWT() + if authBackend.Authenticate(args, web.AccessKeyID, web.SecretAccessKey) { + token, err := authBackend.GenerateToken(args.Username) + if err != nil { + return err + } + reply.Token = token + return nil + } + return errUnAuthorizedRequest +} + +// RefreshToken - refresh token handler. +func (web *WebAPI) RefreshToken(r *http.Request, args *LoginArgs, reply *AuthToken) error { + if isAuthenticated(r) { + authBackend := InitJWT() + token, err := authBackend.GenerateToken(args.Username) + if err != nil { + return err + } + reply.Token = token + return nil + } + return errUnAuthorizedRequest +} + +// Logout - user logout. +func (web *WebAPI) Logout(r *http.Request, arg *string, reply *string) error { + if isAuthenticated(r) { + authBackend := InitJWT() + tokenString := r.Header.Get("Authorization") + if err := authBackend.Logout(tokenString); err != nil { + return err + } + return nil + } + return errUnAuthorizedRequest +}
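
The handlers above are plain gorilla/rpc/v2 services, so any JSON-RPC 1.0 client can drive them. Below is a minimal, hypothetical Go client sketch: the service name "Web", the "/rpc" mount path, the port, and the credentials are all assumptions (the actual registration lives in routers.go, which is not shown here); the Bearer Authorization header matches what jwt.ParseFromRequest expects in isAuthenticated.

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/rpc/v2/json"
)

// call posts a single JSON-RPC request to the web API and decodes the reply.
func call(endpoint, token, method string, args, reply interface{}) error {
	body, err := json.EncodeClientRequest(method, args)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", endpoint, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	if token != "" {
		// JWT issued by Web.Login; verified server side by isAuthenticated().
		req.Header.Set("Authorization", "Bearer "+token)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return json.DecodeClientResponse(resp.Body, reply)
}

func main() {
	// Hypothetical mount path; the real one is defined in routers.go.
	endpoint := "http://localhost:9001/rpc"

	// Log in first to obtain a JWT.
	var auth struct {
		Token string `json:"token"`
	}
	login := map[string]string{"username": "minio", "password": "minio123"} // placeholder credentials
	if err := call(endpoint, "", "Web.Login", login, &auth); err != nil {
		log.Fatalln("login:", err)
	}

	// List buckets with the token.
	var buckets []struct {
		Name string `json:"name"`
	}
	if err := call(endpoint, auth.Token, "Web.ListBuckets", struct{}{}, &buckets); err != nil {
		log.Fatalln("list buckets:", err)
	}
	for _, b := range buckets {
		fmt.Println(b.Name)
	}
}
```

On the wire each call is a single POST whose body looks like `{"method":"Web.ListBuckets","params":[{}],"id":...}`, which is what EncodeClientRequest produces for gorilla's JSON-RPC 1.0 codec.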