From c2031ca066a37d9d1a104810294383cecc07770e Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 30 Jun 2015 12:18:31 -0700 Subject: [PATCH 01/19] Add server and control command --- commands.go | 169 +++++++++------------------ pkg/featureflags/featureflag.go | 30 ----- pkg/featureflags/featureflag_test.go | 22 ---- pkg/featureflags/features.go | 6 - 4 files changed, 58 insertions(+), 169 deletions(-) delete mode 100644 pkg/featureflags/featureflag.go delete mode 100644 pkg/featureflags/featureflag_test.go delete mode 100644 pkg/featureflags/features.go diff --git a/commands.go b/commands.go index 27cd5efa2..748f17c3e 100644 --- a/commands.go +++ b/commands.go @@ -2,146 +2,93 @@ package main import ( "os/user" - "path/filepath" - "strings" - "time" - "github.com/dustin/go-humanize" "github.com/minio/cli" - "github.com/minio/minio/pkg/iodine" "github.com/minio/minio/pkg/server" ) -func appendUniq(slice []string, i string) []string { - for _, ele := range slice { - if ele == i { - return slice +func removeDuplicates(slice []string) []string { + newSlice := []string{} + seen := make(map[string]struct{}) + for _, val := range slice { + if _, ok := seen[val]; !ok { + newSlice = append(newSlice, val) + seen[val] = struct{}{} } } - return append(slice, i) + return newSlice } var commands = []cli.Command{ - modeCmd, + serverCmd, + controlCmd, } -var modeCommands = []cli.Command{ - donutCmd, -} - -var modeCmd = cli.Command{ - Name: "mode", - Subcommands: modeCommands, - Description: "Mode of execution", -} - -var donutCmd = cli.Command{ - Name: "donut", - Description: "[status: EXPERIMENTAL]. Path to donut volume.", - Action: runDonut, +var serverCmd = cli.Command{ + Name: "server", + Description: "Server mode", + Action: runServer, CustomHelpTemplate: `NAME: - minio mode {{.Name}} - {{.Description}} + minio {{.Name}} - {{.Description}} USAGE: - minio mode {{.Name}} PATH + minio {{.Name}} EXAMPLES: - 1. 
Create a donut volume under "/mnt/backup", with a cache limit of 64MB with 1hr expiration - $ minio mode {{.Name}} limit 64MB expire 1h paths /mnt/backup - - 2. Create a donut volume under collection of paths, put a cache limit of 512MB - $ minio mode {{.Name}} limit 512MB paths "" + 1. Start in server mode + $ minio server `, } -func runDonut(c *cli.Context) { - var err error +var controlCmd = cli.Command{ + Name: "control", + Description: "Control mode", + Action: runController, + CustomHelpTemplate: `NAME: + minio {{.Name}} - {{.Description}} - u, err := user.Current() +USAGE: + minio {{.Name}} + +EXAMPLES: + 1. Start in controller mode + $ minio control + +`, +} + +func runServer(c *cli.Context) { + _, err := user.Current() if err != nil { Fatalf("Unable to determine current user. Reason: %s\n", err) } if len(c.Args()) < 1 { - cli.ShowCommandHelpAndExit(c, "donut", 1) // last argument is exit code - } - var maxMemory uint64 - maxMemorySet := false - - var expiration time.Duration - expirationSet := false - - var paths []string - pathSet := false - - args := c.Args() - for len(args) > 0 { - switch args.First() { - case "limit": - { - if maxMemorySet { - Fatalln("Limit should be set only once") - } - args = args.Tail() - maxMemory, err = humanize.ParseBytes(args.First()) - if err != nil { - Fatalf("Invalid memory size [%s] passed. Reason: %s\n", args.First(), iodine.New(err, nil)) - } - if maxMemory < 1024*1024*10 { - Fatalf("Invalid memory size [%s] passed. Should be greater than 10M\n", args.First()) - } - args = args.Tail() - maxMemorySet = true - } - case "expire": - { - if expirationSet { - Fatalln("Expiration should be set only once") - } - args = args.Tail() - expiration, err = time.ParseDuration(args.First()) - if err != nil { - Fatalf("Invalid expiration time [%s] passed. 
Reason: %s\n", args.First(), iodine.New(err, nil)) - } - args = args.Tail() - expirationSet = true - } - case "paths": - if pathSet { - Fatalln("Path should be set only once") - } - // supporting multiple paths - args = args.Tail() - if strings.TrimSpace(args.First()) == "" { - p := filepath.Join(u.HomeDir, "minio-storage", "donut") - paths = appendUniq(paths, p) - } else { - for _, arg := range args { - paths = appendUniq(paths, strings.TrimSpace(arg)) - } - } - args = args.Tail() - pathSet = true - default: - { - cli.ShowCommandHelpAndExit(c, "donut", 1) // last argument is exit code - } - } - } - if maxMemorySet == false { - Fatalln("Memory limit must be set") - } - if pathSet == false { - Fatalln("Path must be set") + cli.ShowCommandHelpAndExit(c, "server", 1) // last argument is exit code } apiServerConfig := getAPIServerConfig(c) - donutDriver := server.Factory{ - Config: apiServerConfig, - Paths: paths, - MaxMemory: maxMemory, - Expiration: expiration, + s := server.Factory{ + Config: apiServerConfig, } - apiServer := donutDriver.GetStartServerFunc() + apiServer := s.GetStartServerFunc() + // webServer := getWebServerConfigFunc(c) + servers := []server.StartServerFunc{apiServer} //, webServer} + server.StartMinio(servers) +} + +func runController(c *cli.Context) { + _, err := user.Current() + if err != nil { + Fatalf("Unable to determine current user. 
Reason: %s\n", err) + } + if len(c.Args()) < 1 { + cli.ShowCommandHelpAndExit(c, "control", 1) // last argument is exit code + } + apiServerConfig := getAPIServerConfig(c) + s := server.Factory{ + Config: apiServerConfig, + } + apiServer := s.GetStartServerFunc() // webServer := getWebServerConfigFunc(c) servers := []server.StartServerFunc{apiServer} //, webServer} server.StartMinio(servers) diff --git a/pkg/featureflags/featureflag.go b/pkg/featureflags/featureflag.go deleted file mode 100644 index 9a7bd3904..000000000 --- a/pkg/featureflags/featureflag.go +++ /dev/null @@ -1,30 +0,0 @@ -package featureflags - -import ( - "sync" -) - -var features = make(map[string]bool) -var lock = &sync.RWMutex{} - -// Get feature will return true if the feature is enabled, otherwise false -func Get(feature string) bool { - lock.RLock() - defer lock.RUnlock() - res := features[feature] - return res -} - -// Enable a feature -func Enable(feature string) { - lock.Lock() - defer lock.Unlock() - features[feature] = true -} - -// Disable a feature -func Disable(feature string) { - lock.Lock() - defer lock.Unlock() - features[feature] = false -} diff --git a/pkg/featureflags/featureflag_test.go b/pkg/featureflags/featureflag_test.go deleted file mode 100644 index ff5de5d3a..000000000 --- a/pkg/featureflags/featureflag_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package featureflags - -import ( - "testing" -) - -func TestFeatureFlag(t *testing.T) { - foo := Get("foo") - if foo { - t.Fail() - } - Enable("foo") - foo = Get("foo") - if !foo { - t.Fail() - } - Disable("foo") - foo = Get("foo") - if foo { - t.Fail() - } -} diff --git a/pkg/featureflags/features.go b/pkg/featureflags/features.go deleted file mode 100644 index da85bc1e1..000000000 --- a/pkg/featureflags/features.go +++ /dev/null @@ -1,6 +0,0 @@ -package featureflags - -const ( - // MultipartPutObject ... 
- MultipartPutObject = "minio.multipart_put_object" -) From 72572d6c7120fb3a86b9b8a6bac97d8990ea3900 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 30 Jun 2015 14:42:29 -0700 Subject: [PATCH 02/19] Remove some api server code bringing in new cleanup --- pkg/api/api-bucket-handlers.go | 155 ++ ...{api_definitions.go => api-definitions.go} | 0 ...ic_handlers.go => api-generic-handlers.go} | 4 +- .../logging.go => api-logging-handlers.go} | 54 +- ...ect_handlers.go => api-object-handlers.go} | 229 +-- ...e_limiter.go => api-ratelimit-handlers.go} | 14 +- pkg/api/{api_response.go => api-response.go} | 0 pkg/api/{api_router.go => api-router.go} | 48 +- pkg/api/api_bucket_handlers.go | 320 --- pkg/api/api_test.go | 1793 ----------------- pkg/api/quota/bandwidth_cap.go | 152 -- pkg/api/quota/conn_limit.go | 89 - pkg/api/quota/errors.go | 127 -- pkg/api/quota/quota_handler.go | 96 - pkg/api/quota/request_limit.go | 65 - .../httpserver.go => api/server.go} | 60 +- pkg/server/server.go | 129 -- 17 files changed, 244 insertions(+), 3091 deletions(-) create mode 100644 pkg/api/api-bucket-handlers.go rename pkg/api/{api_definitions.go => api-definitions.go} (100%) rename pkg/api/{api_generic_handlers.go => api-generic-handlers.go} (98%) rename pkg/api/{logging/logging.go => api-logging-handlers.go} (67%) rename pkg/api/{api_object_handlers.go => api-object-handlers.go} (51%) rename pkg/api/{quota/rate_limiter.go => api-ratelimit-handlers.go} (78%) rename pkg/api/{api_response.go => api-response.go} (100%) rename pkg/api/{api_router.go => api-router.go} (64%) delete mode 100644 pkg/api/api_bucket_handlers.go delete mode 100644 pkg/api/api_test.go delete mode 100644 pkg/api/quota/bandwidth_cap.go delete mode 100644 pkg/api/quota/conn_limit.go delete mode 100644 pkg/api/quota/errors.go delete mode 100644 pkg/api/quota/quota_handler.go delete mode 100644 pkg/api/quota/request_limit.go rename pkg/{server/httpserver/httpserver.go => api/server.go} (67%) delete mode 100644 
pkg/server/server.go diff --git a/pkg/api/api-bucket-handlers.go b/pkg/api/api-bucket-handlers.go new file mode 100644 index 000000000..cec239e98 --- /dev/null +++ b/pkg/api/api-bucket-handlers.go @@ -0,0 +1,155 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package api + +import ( + "log" + "net/http" + + "github.com/gorilla/mux" +) + +func (server *minioAPI) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool { + vars := mux.Vars(req) + bucket := vars["bucket"] + log.Println(bucket) + return true +} + +// GET Bucket (List Multipart uploads) +// ------------------------- +// This operation lists in-progress multipart uploads. An in-progress +// multipart upload is a multipart upload that has been initiated, +// using the Initiate Multipart Upload request, but has not yet been completed or aborted. +// This operation returns at most 1,000 multipart uploads in the response. 
+// +func (server *minioAPI) listMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) { + acceptsContentType := getContentType(req) + log.Println(acceptsContentType) + + resources := getBucketMultipartResources(req.URL.Query()) + if resources.MaxUploads == 0 { + resources.MaxUploads = maxObjectList + } + + vars := mux.Vars(req) + bucket := vars["bucket"] + log.Println(bucket) +} + +// GET Bucket (List Objects) +// ------------------------- +// This implementation of the GET operation returns some or all (up to 1000) +// of the objects in a bucket. You can use the request parameters as selection +// criteria to return a subset of the objects in a bucket. +// +func (server *minioAPI) listObjectsHandler(w http.ResponseWriter, req *http.Request) { + acceptsContentType := getContentType(req) + // verify if bucket allows this operation + if !server.isValidOp(w, req, acceptsContentType) { + return + } + + if isRequestUploads(req.URL.Query()) { + server.listMultipartUploadsHandler(w, req) + return + } + + resources := getBucketResources(req.URL.Query()) + if resources.Maxkeys == 0 { + resources.Maxkeys = maxObjectList + } + + vars := mux.Vars(req) + bucket := vars["bucket"] + log.Println(bucket) + +} + +// GET Service +// ----------- +// This implementation of the GET operation returns a list of all buckets +// owned by the authenticated sender of the request. 
+func (server *minioAPI) listBucketsHandler(w http.ResponseWriter, req *http.Request) { + acceptsContentType := getContentType(req) + // uncomment this when we have webcli + // without access key credentials one cannot list buckets + // if _, err := stripAuth(req); err != nil { + // writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) + // return + // } + log.Println(acceptsContentType) +} + +// PUT Bucket +// ---------- +// This implementation of the PUT operation creates a new bucket for authenticated request +func (server *minioAPI) putBucketHandler(w http.ResponseWriter, req *http.Request) { + acceptsContentType := getContentType(req) + // uncomment this when we have webcli + // without access key credentials one cannot create a bucket + // if _, err := stripAuth(req); err != nil { + // writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) + // return + // } + if isRequestBucketACL(req.URL.Query()) { + server.putBucketACLHandler(w, req) + return + } + // read from 'x-amz-acl' + aclType := getACLType(req) + if aclType == unsupportedACLType { + writeErrorResponse(w, req, NotImplemented, acceptsContentType, req.URL.Path) + return + } + + vars := mux.Vars(req) + bucket := vars["bucket"] + log.Println(bucket) +} + +// PUT Bucket ACL +// ---------- +// This implementation of the PUT operation modifies the bucketACL for authenticated request +func (server *minioAPI) putBucketACLHandler(w http.ResponseWriter, req *http.Request) { + acceptsContentType := getContentType(req) + // read from 'x-amz-acl' + aclType := getACLType(req) + if aclType == unsupportedACLType { + writeErrorResponse(w, req, NotImplemented, acceptsContentType, req.URL.Path) + return + } + + vars := mux.Vars(req) + bucket := vars["bucket"] + log.Println(bucket) +} + +// HEAD Bucket +// ---------- +// This operation is useful to determine if a bucket exists. +// The operation returns a 200 OK if the bucket exists and you +// have permission to access it. 
Otherwise, the operation might +// return responses such as 404 Not Found and 403 Forbidden. +func (server *minioAPI) headBucketHandler(w http.ResponseWriter, req *http.Request) { + acceptsContentType := getContentType(req) + log.Println(acceptsContentType) + + vars := mux.Vars(req) + bucket := vars["bucket"] + log.Println(bucket) +} diff --git a/pkg/api/api_definitions.go b/pkg/api/api-definitions.go similarity index 100% rename from pkg/api/api_definitions.go rename to pkg/api/api-definitions.go diff --git a/pkg/api/api_generic_handlers.go b/pkg/api/api-generic-handlers.go similarity index 98% rename from pkg/api/api_generic_handlers.go rename to pkg/api/api-generic-handlers.go index 49c5021c5..2f0cf251e 100644 --- a/pkg/api/api_generic_handlers.go +++ b/pkg/api/api-generic-handlers.go @@ -98,7 +98,7 @@ func stripAuth(r *http.Request) (*auth, error) { return a, nil } -func getDate(req *http.Request) (time.Time, error) { +func parseDate(req *http.Request) (time.Time, error) { amzDate := req.Header.Get("X-Amz-Date") switch { case amzDate != "": @@ -154,7 +154,7 @@ func (h timeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { writeErrorResponse(w, r, RequestTimeTooSkewed, acceptsContentType, r.URL.Path) return } - date, err := getDate(r) + date, err := parseDate(r) if err != nil { // there is no way to knowing if this is a valid request, could be a attack reject such clients writeErrorResponse(w, r, RequestTimeTooSkewed, acceptsContentType, r.URL.Path) diff --git a/pkg/api/logging/logging.go b/pkg/api/api-logging-handlers.go similarity index 67% rename from pkg/api/logging/logging.go rename to pkg/api/api-logging-handlers.go index 825ef90c7..87e12b98d 100644 --- a/pkg/api/logging/logging.go +++ b/pkg/api/api-logging-handlers.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package logging +package api import ( "bytes" @@ -31,12 +31,12 @@ import ( ) type logHandler struct { - http.Handler - Logger chan<- []byte + handler http.Handler + logger chan<- []byte } -// LogMessage is a serializable json log message -type LogMessage struct { +// logMessage is a serializable json log message +type logMessage struct { StartTime time.Time Duration time.Duration StatusMessage string // human readable http status message @@ -49,38 +49,38 @@ type LogMessage struct { } } -// LogWriter is used to capture status for log messages -type LogWriter struct { - ResponseWriter http.ResponseWriter - LogMessage *LogMessage +// logWriter is used to capture status for log messages +type logWriter struct { + responseWriter http.ResponseWriter + logMessage *logMessage } // WriteHeader writes headers and stores status in LogMessage -func (w *LogWriter) WriteHeader(status int) { - w.LogMessage.StatusMessage = http.StatusText(status) - w.ResponseWriter.WriteHeader(status) +func (w *logWriter) WriteHeader(status int) { + w.logMessage.StatusMessage = http.StatusText(status) + w.responseWriter.WriteHeader(status) } // Header Dummy wrapper for LogWriter -func (w *LogWriter) Header() http.Header { - return w.ResponseWriter.Header() +func (w *logWriter) Header() http.Header { + return w.responseWriter.Header() } // Write Dummy wrapper for LogWriter -func (w *LogWriter) Write(data []byte) (int, error) { - return w.ResponseWriter.Write(data) +func (w *logWriter) Write(data []byte) (int, error) { + return w.responseWriter.Write(data) } func (h *logHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - logMessage := &LogMessage{ + logMessage := &logMessage{ StartTime: time.Now().UTC(), } - logWriter := &LogWriter{ResponseWriter: w, LogMessage: logMessage} - h.Handler.ServeHTTP(logWriter, req) - h.Logger <- getLogMessage(logMessage, w, req) + logWriter := &logWriter{responseWriter: w, logMessage: logMessage} + h.handler.ServeHTTP(logWriter, req) + h.logger <- 
getLogMessage(logMessage, w, req) } -func getLogMessage(logMessage *LogMessage, w http.ResponseWriter, req *http.Request) []byte { +func getLogMessage(logMessage *logMessage, w http.ResponseWriter, req *http.Request) []byte { // store lower level details logMessage.HTTP.ResponseHeaders = w.Header() logMessage.HTTP.Request = req @@ -94,14 +94,14 @@ func getLogMessage(logMessage *LogMessage, w http.ResponseWriter, req *http.Requ return js } -// LogHandler logs requests -func LogHandler(h http.Handler) http.Handler { - logger, _ := FileLogger("access.log") - return &logHandler{Handler: h, Logger: logger} +// loggingHandler logs requests +func loggingHandler(h http.Handler) http.Handler { + logger, _ := fileLogger("access.log") + return &logHandler{handler: h, logger: logger} } -// FileLogger returns a channel that is used to write to the logger -func FileLogger(filename string) (chan<- []byte, error) { +// fileLogger returns a channel that is used to write to the logger +func fileLogger(filename string) (chan<- []byte, error) { ch := make(chan []byte) file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) if err != nil { diff --git a/pkg/api/api_object_handlers.go b/pkg/api/api-object-handlers.go similarity index 51% rename from pkg/api/api_object_handlers.go rename to pkg/api/api-object-handlers.go index 41f4e0aea..409c4d571 100644 --- a/pkg/api/api_object_handlers.go +++ b/pkg/api/api-object-handlers.go @@ -25,7 +25,6 @@ import ( "github.com/gorilla/mux" "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/drivers" "github.com/minio/minio/pkg/utils/log" ) @@ -48,47 +47,8 @@ func (server *minioAPI) getObjectHandler(w http.ResponseWriter, req *http.Reques vars := mux.Vars(req) bucket = vars["bucket"] object = vars["object"] + log.Println(bucket, object) - metadata, err := server.driver.GetObjectMetadata(bucket, object) - switch iodine.ToError(err).(type) { - case nil: // success - { - httpRange, err := getRequestedRange(req, 
metadata.Size) - if err != nil { - writeErrorResponse(w, req, InvalidRange, acceptsContentType, req.URL.Path) - return - } - switch httpRange.start == 0 && httpRange.length == 0 { - case true: - setObjectHeaders(w, metadata) - if _, err := server.driver.GetObject(w, bucket, object); err != nil { - // unable to write headers, we've already printed data. Just close the connection. - log.Error.Println(iodine.New(err, nil)) - } - case false: - metadata.Size = httpRange.length - setRangeObjectHeaders(w, metadata, httpRange) - w.WriteHeader(http.StatusPartialContent) - if _, err := server.driver.GetPartialObject(w, bucket, object, httpRange.start, httpRange.length); err != nil { - // unable to write headers, we've already printed data. Just close the connection. - log.Error.Println(iodine.New(err, nil)) - } - } - } - case drivers.ObjectNotFound: - { - writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) - } - case drivers.ObjectNameInvalid: - { - writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } } // HEAD Object @@ -105,34 +65,7 @@ func (server *minioAPI) headObjectHandler(w http.ResponseWriter, req *http.Reque vars := mux.Vars(req) bucket = vars["bucket"] object = vars["object"] - - metadata, err := server.driver.GetObjectMetadata(bucket, object) - switch iodine.ToError(err).(type) { - case nil: - { - setObjectHeaders(w, metadata) - w.WriteHeader(http.StatusOK) - } - case drivers.ObjectNotFound: - { - error := getErrorCode(NoSuchKey) - w.Header().Set("Server", "Minio") - w.WriteHeader(error.HTTPStatusCode) - } - case drivers.ObjectNameInvalid: - { - error := getErrorCode(NoSuchKey) - w.Header().Set("Server", "Minio") - w.WriteHeader(error.HTTPStatusCode) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - error := getErrorCode(InternalError) - w.Header().Set("Server", 
"Minio") - w.WriteHeader(error.HTTPStatusCode) - } - } + log.Println(bucket, object) } // PUT Object @@ -182,36 +115,7 @@ func (server *minioAPI) putObjectHandler(w http.ResponseWriter, req *http.Reques writeErrorResponse(w, req, InvalidRequest, acceptsContentType, req.URL.Path) return } - calculatedMD5, err := server.driver.CreateObject(bucket, object, "", md5, sizeInt64, req.Body) - switch iodine.ToError(err).(type) { - case nil: - { - w.Header().Set("ETag", calculatedMD5) - writeSuccessResponse(w, acceptsContentType) - - } - case drivers.ObjectExists: - { - writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path) - } - case drivers.BadDigest: - { - writeErrorResponse(w, req, BadDigest, acceptsContentType, req.URL.Path) - } - case drivers.EntityTooLarge: - { - writeErrorResponse(w, req, EntityTooLarge, acceptsContentType, req.URL.Path) - } - case drivers.InvalidDigest: - { - writeErrorResponse(w, req, InvalidDigest, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } + log.Println(bucket, object, sizeInt64) } /// Multipart API @@ -233,27 +137,7 @@ func (server *minioAPI) newMultipartUploadHandler(w http.ResponseWriter, req *ht vars := mux.Vars(req) bucket = vars["bucket"] object = vars["object"] - uploadID, err := server.driver.NewMultipartUpload(bucket, object, "") - switch iodine.ToError(err).(type) { - case nil: - { - response := generateInitiateMultipartUploadResult(bucket, object, uploadID) - encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) - // write headers - setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) - // write body - w.Write(encodedSuccessResponse) - } - case drivers.ObjectExists: - { - writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - 
writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } + log.Println(bucket, object) } // Upload part @@ -293,6 +177,7 @@ func (server *minioAPI) putObjectPartHandler(w http.ResponseWriter, req *http.Re vars := mux.Vars(req) bucket := vars["bucket"] object := vars["object"] + log.Println(bucket, object, sizeInt64) uploadID := req.URL.Query().Get("uploadId") partIDString := req.URL.Query().Get("partNumber") @@ -301,40 +186,7 @@ func (server *minioAPI) putObjectPartHandler(w http.ResponseWriter, req *http.Re if err != nil { writeErrorResponse(w, req, InvalidPart, acceptsContentType, req.URL.Path) } - calculatedMD5, err := server.driver.CreateObjectPart(bucket, object, uploadID, partID, "", md5, sizeInt64, req.Body) - switch iodine.ToError(err).(type) { - case nil: - { - w.Header().Set("ETag", calculatedMD5) - writeSuccessResponse(w, acceptsContentType) - - } - case drivers.InvalidUploadID: - { - writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) - } - case drivers.ObjectExists: - { - writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path) - } - case drivers.BadDigest: - { - writeErrorResponse(w, req, BadDigest, acceptsContentType, req.URL.Path) - } - case drivers.EntityTooLarge: - { - writeErrorResponse(w, req, EntityTooLarge, acceptsContentType, req.URL.Path) - } - case drivers.InvalidDigest: - { - writeErrorResponse(w, req, InvalidDigest, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } + log.Println(uploadID, partID) } // Abort multipart upload @@ -349,25 +201,8 @@ func (server *minioAPI) abortMultipartUploadHandler(w http.ResponseWriter, req * bucket := vars["bucket"] object := vars["object"] - objectResourcesMetadata := getObjectResources(req.URL.Query()) - - err := server.driver.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID) - switch 
iodine.ToError(err).(type) { - case nil: - { - setCommonHeaders(w, getContentTypeString(acceptsContentType), 0) - w.WriteHeader(http.StatusNoContent) - } - case drivers.InvalidUploadID: - { - writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } + //objectResourcesMetadata := getObjectResources(req.URL.Query()) + log.Println(bucket, object) } // List object parts @@ -386,28 +221,7 @@ func (server *minioAPI) listObjectPartsHandler(w http.ResponseWriter, req *http. vars := mux.Vars(req) bucket := vars["bucket"] object := vars["object"] - - objectResourcesMetadata, err := server.driver.ListObjectParts(bucket, object, objectResourcesMetadata) - switch iodine.ToError(err).(type) { - case nil: - { - response := generateListPartsResult(objectResourcesMetadata) - encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) - // write headers - setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) - // write body - w.Write(encodedSuccessResponse) - } - case drivers.InvalidUploadID: - { - writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } + log.Println(bucket, object) } // Complete multipart upload @@ -434,34 +248,15 @@ func (server *minioAPI) completeMultipartUploadHandler(w http.ResponseWriter, re vars := mux.Vars(req) bucket := vars["bucket"] object := vars["object"] - objectResourcesMetadata := getObjectResources(req.URL.Query()) + log.Println(bucket, object) + + //objectResourcesMetadata := getObjectResources(req.URL.Query()) partMap := make(map[int]string) for _, part := range parts.Part { partMap[part.PartNumber] = part.ETag } - etag, err := 
server.driver.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, partMap) - switch iodine.ToError(err).(type) { - case nil: - { - response := generateCompleteMultpartUploadResult(bucket, object, "", etag) - encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) - // write headers - setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) - // write body - w.Write(encodedSuccessResponse) - } - case drivers.InvalidUploadID: - { - writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } } /// Delete API diff --git a/pkg/api/quota/rate_limiter.go b/pkg/api/api-ratelimit-handlers.go similarity index 78% rename from pkg/api/quota/rate_limiter.go rename to pkg/api/api-ratelimit-handlers.go index 5649538c1..03e31102c 100644 --- a/pkg/api/quota/rate_limiter.go +++ b/pkg/api/api-ratelimit-handlers.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package quota +package api import "net/http" @@ -24,26 +24,26 @@ type rateLimit struct { rateQueue chan bool } -func (c *rateLimit) Add() { +func (c rateLimit) Add() { c.rateQueue <- true // fill in the queue return } -func (c *rateLimit) Remove() { +func (c rateLimit) Remove() { <-c.rateQueue // invalidate the queue, after the request is served return } // ServeHTTP is an http.Handler ServeHTTP method -func (c *rateLimit) ServeHTTP(w http.ResponseWriter, req *http.Request) { +func (c rateLimit) ServeHTTP(w http.ResponseWriter, req *http.Request) { c.Add() // add c.handler.ServeHTTP(w, req) // serve c.Remove() // remove } -// RateLimit limits the number of concurrent http requests -func RateLimit(handle http.Handler, limit int) http.Handler { - return &rateLimit{ +// rateLimitHandler limits the number of concurrent http requests +func rateLimitHandler(handle http.Handler, limit int) http.Handler { + return rateLimit{ handler: handle, rateQueue: make(chan bool, limit), } diff --git a/pkg/api/api_response.go b/pkg/api/api-response.go similarity index 100% rename from pkg/api/api_response.go rename to pkg/api/api-response.go diff --git a/pkg/api/api_router.go b/pkg/api/api-router.go similarity index 64% rename from pkg/api/api_router.go rename to pkg/api/api-router.go index 7d6db5265..c0e77aeca 100644 --- a/pkg/api/api_router.go +++ b/pkg/api/api-router.go @@ -16,42 +16,15 @@ package api -import ( - "net/http" +import router "github.com/gorilla/mux" - router "github.com/gorilla/mux" - "github.com/minio/minio/pkg/api/logging" - "github.com/minio/minio/pkg/api/quota" - "github.com/minio/minio/pkg/storage/drivers" -) +type minioAPI struct{} -type minioAPI struct { - driver drivers.Driver -} - -// Config api configurable parameters -type Config struct { - RateLimit int - driver drivers.Driver -} - -// GetDriver - get a an existing set driver -func (c Config) GetDriver() drivers.Driver { - return c.driver -} - -// SetDriver - set a new driver -func (c *Config) 
SetDriver(driver drivers.Driver) { - c.driver = driver -} - -// HTTPHandler - http wrapper handler -func HTTPHandler(config Config) http.Handler { - var mux *router.Router +// Handler - api wrapper handler +func New(config Config) API { var api = minioAPI{} - api.driver = config.GetDriver() - mux = router.NewRouter() + mux := router.NewRouter() mux.HandleFunc("/", api.listBucketsHandler).Methods("GET") mux.HandleFunc("/{bucket}", api.listObjectsHandler).Methods("GET") mux.HandleFunc("/{bucket}", api.putBucketHandler).Methods("PUT") @@ -75,12 +48,7 @@ func HTTPHandler(config Config) http.Handler { handler = timeValidityHandler(handler) handler = ignoreResourcesHandler(handler) handler = validateAuthHeaderHandler(handler) - // handler = quota.BandwidthCap(h, 25*1024*1024, time.Duration(30*time.Minute)) - // handler = quota.BandwidthCap(h, 100*1024*1024, time.Duration(24*time.Hour)) - // handler = quota.RequestLimit(h, 100, time.Duration(30*time.Minute)) - // handler = quota.RequestLimit(h, 1000, time.Duration(24*time.Hour)) - // handler = quota.ConnectionLimit(handler, config.ConnectionLimit) - handler = quota.RateLimit(handler, config.RateLimit) - handler = logging.LogHandler(handler) - return handler + handler = rateLimitHandler(handler, config.RateLimit) + handler = loggingHandler(handler) + return API{config, handler} } diff --git a/pkg/api/api_bucket_handlers.go b/pkg/api/api_bucket_handlers.go deleted file mode 100644 index 38934c7bc..000000000 --- a/pkg/api/api_bucket_handlers.go +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package api - -import ( - "net/http" - - "github.com/gorilla/mux" - "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/drivers" - "github.com/minio/minio/pkg/utils/log" -) - -func (server *minioAPI) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool { - vars := mux.Vars(req) - bucket := vars["bucket"] - - bucketMetadata, err := server.driver.GetBucketMetadata(bucket) - switch iodine.ToError(err).(type) { - case drivers.BucketNotFound: - { - writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) - return false - } - case drivers.BucketNameInvalid: - { - writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) - return false - } - case nil: - if _, err := stripAuth(req); err != nil { - if bucketMetadata.ACL.IsPrivate() { - return true - //uncomment this when we have webcli - //writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) - //return false - } - if bucketMetadata.ACL.IsPublicRead() && req.Method == "PUT" { - return true - //uncomment this when we have webcli - //writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) - //return false - } - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } - return true -} - -// GET Bucket (List Multipart uploads) -// ------------------------- -// This operation lists in-progress multipart uploads. 
An in-progress -// multipart upload is a multipart upload that has been initiated, -// using the Initiate Multipart Upload request, but has not yet been completed or aborted. -// This operation returns at most 1,000 multipart uploads in the response. -// -func (server *minioAPI) listMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - resources := getBucketMultipartResources(req.URL.Query()) - if resources.MaxUploads == 0 { - resources.MaxUploads = maxObjectList - } - - vars := mux.Vars(req) - bucket := vars["bucket"] - - resources, err := server.driver.ListMultipartUploads(bucket, resources) - switch iodine.ToError(err).(type) { - case nil: // success - { - // generate response - response := generateListMultipartUploadsResult(bucket, resources) - encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) - // write headers - setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) - // write body - w.Write(encodedSuccessResponse) - } - case drivers.BucketNotFound: - { - writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } - -} - -// GET Bucket (List Objects) -// ------------------------- -// This implementation of the GET operation returns some or all (up to 1000) -// of the objects in a bucket. You can use the request parameters as selection -// criteria to return a subset of the objects in a bucket. 
-// -func (server *minioAPI) listObjectsHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - // verify if bucket allows this operation - if !server.isValidOp(w, req, acceptsContentType) { - return - } - - if isRequestUploads(req.URL.Query()) { - server.listMultipartUploadsHandler(w, req) - return - } - - resources := getBucketResources(req.URL.Query()) - if resources.Maxkeys == 0 { - resources.Maxkeys = maxObjectList - } - - vars := mux.Vars(req) - bucket := vars["bucket"] - - objects, resources, err := server.driver.ListObjects(bucket, resources) - switch iodine.ToError(err).(type) { - case nil: // success - { - // generate response - response := generateListObjectsResponse(bucket, objects, resources) - encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) - // write headers - setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) - // write body - w.Write(encodedSuccessResponse) - } - case drivers.ObjectNotFound: - { - writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) - } - case drivers.ObjectNameInvalid: - { - writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } -} - -// GET Service -// ----------- -// This implementation of the GET operation returns a list of all buckets -// owned by the authenticated sender of the request. 
-func (server *minioAPI) listBucketsHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - // uncomment this when we have webcli - // without access key credentials one cannot list buckets - // if _, err := stripAuth(req); err != nil { - // writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) - // return - // } - buckets, err := server.driver.ListBuckets() - switch iodine.ToError(err).(type) { - case nil: - { - // generate response - response := generateListBucketsResponse(buckets) - encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) - // write headers - setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) - // write response - w.Write(encodedSuccessResponse) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } -} - -// PUT Bucket -// ---------- -// This implementation of the PUT operation creates a new bucket for authenticated request -func (server *minioAPI) putBucketHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - // uncomment this when we have webcli - // without access key credentials one cannot create a bucket - // if _, err := stripAuth(req); err != nil { - // writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) - // return - // } - if isRequestBucketACL(req.URL.Query()) { - server.putBucketACLHandler(w, req) - return - } - // read from 'x-amz-acl' - aclType := getACLType(req) - if aclType == unsupportedACLType { - writeErrorResponse(w, req, NotImplemented, acceptsContentType, req.URL.Path) - return - } - - vars := mux.Vars(req) - bucket := vars["bucket"] - err := server.driver.CreateBucket(bucket, getACLTypeString(aclType)) - switch iodine.ToError(err).(type) { - case nil: - { - // Make sure to add Location information here only for bucket - w.Header().Set("Location", 
"/"+bucket) - writeSuccessResponse(w, acceptsContentType) - } - case drivers.TooManyBuckets: - { - writeErrorResponse(w, req, TooManyBuckets, acceptsContentType, req.URL.Path) - } - case drivers.BucketNameInvalid: - { - writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) - } - case drivers.BucketExists: - { - writeErrorResponse(w, req, BucketAlreadyExists, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } -} - -// PUT Bucket ACL -// ---------- -// This implementation of the PUT operation modifies the bucketACL for authenticated request -func (server *minioAPI) putBucketACLHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - // read from 'x-amz-acl' - aclType := getACLType(req) - if aclType == unsupportedACLType { - writeErrorResponse(w, req, NotImplemented, acceptsContentType, req.URL.Path) - return - } - - vars := mux.Vars(req) - bucket := vars["bucket"] - err := server.driver.SetBucketMetadata(bucket, getACLTypeString(aclType)) - switch iodine.ToError(err).(type) { - case nil: - { - writeSuccessResponse(w, acceptsContentType) - } - case drivers.BucketNameInvalid: - { - writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) - } - case drivers.BucketNotFound: - { - writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } - } -} - -// HEAD Bucket -// ---------- -// This operation is useful to determine if a bucket exists. -// The operation returns a 200 OK if the bucket exists and you -// have permission to access it. Otherwise, the operation might -// return responses such as 404 Not Found and 403 Forbidden. 
-func (server *minioAPI) headBucketHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - vars := mux.Vars(req) - bucket := vars["bucket"] - - _, err := server.driver.GetBucketMetadata(bucket) - switch iodine.ToError(err).(type) { - case nil: - { - writeSuccessResponse(w, acceptsContentType) - } - case drivers.BucketNotFound: - { - error := getErrorCode(NoSuchBucket) - w.WriteHeader(error.HTTPStatusCode) - } - case drivers.BucketNameInvalid: - { - error := getErrorCode(InvalidBucketName) - w.WriteHeader(error.HTTPStatusCode) - } - default: - { - log.Error.Println(iodine.New(err, nil)) - error := getErrorCode(InternalError) - w.WriteHeader(error.HTTPStatusCode) - } - } -} diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go deleted file mode 100644 index e53a1cbe6..000000000 --- a/pkg/api/api_test.go +++ /dev/null @@ -1,1793 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2014 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package api - -import ( - "bytes" - "io" - "io/ioutil" - "log" - "os" - "reflect" - "strconv" - "strings" - "testing" - "time" - - "encoding/xml" - "net/http" - "net/http/httptest" - - "github.com/minio/minio/pkg/storage/drivers" - "github.com/minio/minio/pkg/storage/drivers/cache" - "github.com/minio/minio/pkg/storage/drivers/donut" - "github.com/minio/minio/pkg/storage/drivers/mocks" - "github.com/stretchr/testify/mock" - - . 
"github.com/minio/check" -) - -func Test(t *testing.T) { TestingT(t) } - -type MySuite struct { - Driver drivers.Driver - MockDriver *mocks.Driver - initDriver func() (drivers.Driver, string) - Root string -} - -var _ = Suite(&MySuite{ - initDriver: func() (drivers.Driver, string) { - return startMockDriver(), "" - }, -}) - -var _ = Suite(&MySuite{ - initDriver: func() (drivers.Driver, string) { - root, _ := ioutil.TempDir(os.TempDir(), "minio-api") - var roots []string - roots = append(roots, root) - driver, _ := donut.NewDriver(roots) - driver, _ = cache.NewDriver(10000, 3*time.Hour, driver) - return driver, root - }, -}) - -func (s *MySuite) SetUpSuite(c *C) { - driver, root := s.initDriver() - if root != "" { - defer os.RemoveAll(root) - } - log.Println("Running API Suite:", reflect.TypeOf(driver)) -} - -func (s *MySuite) SetUpTest(c *C) { - driver, root := s.initDriver() - var typedDriver *mocks.Driver - switch driver := driver.(type) { - case *mocks.Driver: - { - typedDriver = driver - } - default: - { - typedDriver = startMockDriver() - } - } - s.Driver = driver - s.Root = root - s.MockDriver = typedDriver -} - -func (s *MySuite) TearDownTest(c *C) { - root := strings.TrimSpace(s.Root) - if root != "" { - os.RemoveAll(s.Root) - } - s.Driver = nil - s.Root = "" -} - -func setDummyAuthHeader(req *http.Request) { - authDummy := "AWS4-HMAC-SHA256 Credential=AC5NH40NQLTL4DUMMY/20130524/us-east-1/s3/aws4_request, SignedHeaders=date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=98ad721746da40c64f1a55b78f14c238d841ea1380cd77a1b5971af0ece108bd" - req.Header.Set("Authorization", authDummy) - req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) -} - -func setConfig(driver drivers.Driver) Config { - conf := Config{RateLimit: 16} - conf.SetDriver(driver) - return conf -} - -func (s *MySuite) TestNonExistantBucket(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := 
s.Driver - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - s.MockDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, drivers.BucketNotFound{Bucket: "bucket"}).Once() - request, err := http.NewRequest("HEAD", testServer.URL+"/bucket", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusNotFound) -} - -func (s *MySuite) TestEmptyObject(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - metadata := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "key", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "d41d8cd98f00b204e9800998ecf8427e", - Size: 0, - } - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - typedDriver.On("CreateObject", "bucket", "object", "", "", 0, mock.Anything).Return(metadata.Md5, nil).Once() - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Twice() - typedDriver.On("GetObjectMetadata", "bucket", "object").Return(metadata, nil).Once() - typedDriver.On("GetObject", mock.Anything, "bucket", "object").Return(int64(0), nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object").Return(metadata, nil).Once() - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - buffer := bytes.NewBufferString("") - driver.CreateBucket("bucket", "private") - driver.CreateObject("bucket", "object", "", "", 0, buffer) - - request, err := http.NewRequest("GET", testServer.URL+"/bucket/object", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - 
c.Assert(response.StatusCode, Equals, http.StatusOK) - - responseBody, err := ioutil.ReadAll(response.Body) - c.Assert(err, IsNil) - c.Assert(true, Equals, bytes.Equal(responseBody, buffer.Bytes())) - - resMetadata, err := driver.GetObjectMetadata("bucket", "object") - c.Assert(err, IsNil) - verifyHeaders(c, response.Header, resMetadata.Created, 0, "application/octet-stream", resMetadata.Md5) -} - -func (s *MySuite) TestBucket(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - metadata := drivers.BucketMetadata{ - Name: "bucket", - Created: time.Now().UTC(), - ACL: drivers.BucketACL("private"), - } - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - typedDriver.On("GetBucketMetadata", "bucket").Return(metadata, nil).Once() - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - driver.CreateBucket("bucket", "private") - - request, err := http.NewRequest("HEAD", testServer.URL+"/bucket", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) -} - -func (s *MySuite) TestObject(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - metadata := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "key", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "5eb63bbbe01eeed093cb22bb8f5acdc3", - Size: 11, - } - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - typedDriver.On("CreateObject", "bucket", "object", "", "", mock.Anything, mock.Anything).Return(metadata.Md5, nil).Once() - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Twice() 
- typedDriver.On("GetObjectMetadata", "bucket", "object").Return(metadata, nil).Twice() - typedDriver.SetGetObjectWriter("bucket", "object", []byte("hello world")) - typedDriver.On("GetObject", mock.Anything, "bucket", "object").Return(int64(0), nil).Once() - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - buffer := bytes.NewBufferString("hello world") - driver.CreateBucket("bucket", "private") - driver.CreateObject("bucket", "object", "", "", int64(buffer.Len()), buffer) - - request, err := http.NewRequest("GET", testServer.URL+"/bucket/object", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - responseBody, err := ioutil.ReadAll(response.Body) - c.Assert(err, IsNil) - c.Assert(responseBody, DeepEquals, []byte("hello world")) - - resMetadata, err := driver.GetObjectMetadata("bucket", "object") - c.Assert(err, IsNil) - verifyHeaders(c, response.Header, resMetadata.Created, len("hello world"), "application/octet-stream", metadata.Md5) -} - -func (s *MySuite) TestMultipleObjects(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - metadata1 := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "object1", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "6f11ac20bf1d3c85c586fa793fa03186", - Size: 9, - } - metadata2 := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "object2", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "c1c7f5decb9ff01edf1af096ebb8f4a4", - Size: 9, - } - metadata3 := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "object3", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "4e74ad3b92e2843e208a13ae1cf0d52c", - 
Size: 11, - } - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - buffer1 := bytes.NewBufferString("hello one") - buffer2 := bytes.NewBufferString("hello two") - buffer3 := bytes.NewBufferString("hello three") - - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - driver.CreateBucket("bucket", "private") - typedDriver.On("CreateObject", "bucket", "object1", "", "", mock.Anything, mock.Anything).Return(metadata1.Md5, nil).Once() - driver.CreateObject("bucket", "object1", "", "", int64(buffer1.Len()), buffer1) - typedDriver.On("CreateObject", "bucket", "object2", "", "", mock.Anything, mock.Anything).Return(metadata2.Md5, nil).Once() - driver.CreateObject("bucket", "object2", "", "", int64(buffer2.Len()), buffer2) - typedDriver.On("CreateObject", "bucket", "object3", "", "", mock.Anything, mock.Anything).Return(metadata3.Md5, nil).Once() - driver.CreateObject("bucket", "object3", "", "", int64(buffer3.Len()), buffer3) - - // test non-existant object - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object").Return(drivers.ObjectMetadata{}, drivers.ObjectNotFound{}).Once() - request, err := http.NewRequest("GET", testServer.URL+"/bucket/object", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - - verifyError(c, response, "NoSuchKey", "The specified key does not exist.", http.StatusNotFound) - //// test object 1 - - // get object - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object1").Return(metadata1, nil).Once() - typedDriver.SetGetObjectWriter("bucket", "object1", []byte("hello one")) - typedDriver.On("GetObject", mock.Anything, "bucket", "object1").Return(int64(0), nil).Once() - request, 
err = http.NewRequest("GET", testServer.URL+"/bucket/object1", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client = http.Client{} - response, err = client.Do(request) - c.Assert(err, IsNil) - - // get metadata - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object1").Return(metadata1, nil).Once() - metadata, err := driver.GetObjectMetadata("bucket", "object1") - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - // verify headers - verifyHeaders(c, response.Header, metadata.Created, len("hello one"), "application/octet-stream", metadata.Md5) - c.Assert(err, IsNil) - - // verify response data - responseBody, err := ioutil.ReadAll(response.Body) - c.Assert(err, IsNil) - c.Assert(true, Equals, bytes.Equal(responseBody, []byte("hello one"))) - - // test object 2 - // get object - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object2").Return(metadata2, nil).Once() - typedDriver.SetGetObjectWriter("bucket", "object2", []byte("hello two")) - typedDriver.On("GetObject", mock.Anything, "bucket", "object2").Return(int64(0), nil).Once() - request, err = http.NewRequest("GET", testServer.URL+"/bucket/object2", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client = http.Client{} - response, err = client.Do(request) - c.Assert(err, IsNil) - - // get metadata - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object2").Return(metadata2, nil).Once() - metadata, err = driver.GetObjectMetadata("bucket", "object2") - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - // verify headers - verifyHeaders(c, response.Header, metadata.Created, len("hello two"), "application/octet-stream", metadata.Md5) - c.Assert(err, IsNil) 
- - // verify response data - responseBody, err = ioutil.ReadAll(response.Body) - c.Assert(err, IsNil) - c.Assert(true, Equals, bytes.Equal(responseBody, []byte("hello two"))) - - // test object 3 - // get object - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object3").Return(metadata3, nil).Once() - typedDriver.SetGetObjectWriter("bucket", "object3", []byte("hello three")) - typedDriver.On("GetObject", mock.Anything, "bucket", "object3").Return(int64(0), nil).Once() - request, err = http.NewRequest("GET", testServer.URL+"/bucket/object3", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client = http.Client{} - response, err = client.Do(request) - c.Assert(err, IsNil) - - // get metadata - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object3").Return(metadata3, nil).Once() - metadata, err = driver.GetObjectMetadata("bucket", "object3") - - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - // verify headers - verifyHeaders(c, response.Header, metadata.Created, len("hello three"), "application/octet-stream", metadata.Md5) - c.Assert(err, IsNil) - - // verify object - responseBody, err = ioutil.ReadAll(response.Body) - c.Assert(err, IsNil) - c.Assert(true, Equals, bytes.Equal(responseBody, []byte("hello three"))) -} - -func (s *MySuite) TestNotImplemented(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - request, err := http.NewRequest("GET", testServer.URL+"/bucket/object?policy", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - 
c.Assert(response.StatusCode, Equals, http.StatusNotImplemented) - -} - -func (s *MySuite) TestHeader(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - typedDriver.AssertExpectations(c) - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - err := driver.CreateBucket("bucket", "private") - c.Assert(err, IsNil) - - bucketMetadata := drivers.BucketMetadata{ - Name: "bucket", - Created: time.Now().UTC(), - ACL: drivers.BucketACL("private"), - } - typedDriver.On("GetBucketMetadata", "bucket").Return(bucketMetadata, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object").Return(drivers.ObjectMetadata{}, drivers.ObjectNotFound{}).Once() - request, err := http.NewRequest("GET", testServer.URL+"/bucket/object", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - - verifyError(c, response, "NoSuchKey", "The specified key does not exist.", http.StatusNotFound) - - objectMetadata := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "object", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "6f5902ac237024bdd0c176cb93063dc4", - Size: 11, - } - - buffer := bytes.NewBufferString("hello world") - typedDriver.On("GetBucketMetadata", "foo").Return(bucketMetadata, nil).Once() - typedDriver.On("CreateObject", "bucket", "object", "", "", mock.Anything, mock.Anything).Return(objectMetadata.Md5, nil).Once() - driver.CreateObject("bucket", "object", "", "", int64(buffer.Len()), buffer) - - typedDriver.On("GetBucketMetadata", "bucket").Return(bucketMetadata, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object").Return(objectMetadata, nil).Once() - 
typedDriver.SetGetObjectWriter("", "", []byte("hello world")) - typedDriver.On("GetObject", mock.Anything, "bucket", "object").Return(int64(0), nil).Once() - request, err = http.NewRequest("GET", testServer.URL+"/bucket/object", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client = http.Client{} - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - typedDriver.On("GetBucketMetadata", "bucket").Return(bucketMetadata, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object").Return(objectMetadata, nil).Once() - metadata, err := driver.GetObjectMetadata("bucket", "object") - c.Assert(err, IsNil) - verifyHeaders(c, response.Header, metadata.Created, len("hello world"), "application/octet-stream", metadata.Md5) -} - -func (s *MySuite) TestPutBucket(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - typedDriver.On("ListBuckets").Return(make([]drivers.BucketMetadata, 0), nil).Once() - buckets, err := driver.ListBuckets() - c.Assert(len(buckets), Equals, 0) - c.Assert(err, IsNil) - - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/bucket", bytes.NewBufferString("")) - c.Assert(err, IsNil) - request.Header.Add("x-amz-acl", "private") - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - // check bucket exists - typedDriver.On("ListBuckets").Return([]drivers.BucketMetadata{{Name: "bucket"}}, nil).Once() - buckets, err = driver.ListBuckets() - c.Assert(len(buckets), Equals, 1) - c.Assert(err, IsNil) - c.Assert(buckets[0].Name, Equals, "bucket") -} 
- -func (s *MySuite) TestPutObject(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - resources := drivers.BucketResourcesMetadata{} - - resources.Maxkeys = 1000 - resources.Prefix = "" - - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("ListObjects", "bucket", mock.Anything).Return([]drivers.ObjectMetadata{}, - drivers.BucketResourcesMetadata{}, drivers.BucketNotFound{}).Once() - objects, resources, err := driver.ListObjects("bucket", resources) - c.Assert(len(objects), Equals, 0) - c.Assert(resources.IsTruncated, Equals, false) - c.Assert(err, Not(IsNil)) - - // breaks on fs driver,// breaks on fs driver, so we subtract one second - // date1 := time.Now().UTC() - date1 := time.Now().UTC().Add(-time.Second) - - // Put Bucket before - Put Object into a bucket - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/bucket", nil) - c.Assert(err, IsNil) - request.Header.Add("x-amz-acl", "private") - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - twoMetadata := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "two", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "6f5902ac237024bdd0c176cb93063dc4", - Size: 11, - } - - typedDriver.On("CreateObject", "bucket", "two", "", "", mock.Anything, mock.Anything).Return(twoMetadata.Md5, nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/bucket/two", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - 
c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - date2 := time.Now().UTC() - - resources.Maxkeys = 1000 - resources.Prefix = "" - - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Twice() - typedDriver.On("ListObjects", "bucket", mock.Anything).Return([]drivers.ObjectMetadata{{}}, drivers.BucketResourcesMetadata{}, nil).Once() - objects, resources, err = driver.ListObjects("bucket", resources) - c.Assert(len(objects), Equals, 1) - c.Assert(resources.IsTruncated, Equals, false) - c.Assert(err, IsNil) - - var writer bytes.Buffer - - typedDriver.On("GetObjectMetadata", "bucket", "two").Return(twoMetadata, nil).Once() - typedDriver.SetGetObjectWriter("bucket", "two", []byte("hello world")) - typedDriver.On("GetObject", mock.Anything, "bucket", "two").Return(int64(11), nil).Once() - driver.GetObject(&writer, "bucket", "two") - - c.Assert(bytes.Equal(writer.Bytes(), []byte("hello world")), Equals, true) - - metadata, err := driver.GetObjectMetadata("bucket", "two") - c.Assert(err, IsNil) - lastModified := metadata.Created - - c.Assert(date1.Before(lastModified), Equals, true) - c.Assert(lastModified.Before(date2), Equals, true) -} - -func (s *MySuite) TestListBuckets(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - typedDriver.On("ListBuckets").Return([]drivers.BucketMetadata{}, nil).Once() - request, err := http.NewRequest("GET", testServer.URL+"/", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - listResponse, err := readListBucket(response.Body) - c.Assert(err, IsNil) - c.Assert(len(listResponse.Buckets.Bucket), 
Equals, 0) - - typedDriver.On("CreateBucket", "foo", "private").Return(nil).Once() - err = driver.CreateBucket("foo", "private") - c.Assert(err, IsNil) - - bucketMetadata := []drivers.BucketMetadata{ - {Name: "foo", Created: time.Now().UTC()}, - } - typedDriver.On("ListBuckets").Return(bucketMetadata, nil).Once() - request, err = http.NewRequest("GET", testServer.URL+"/", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client = http.Client{} - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - listResponse, err = readListBucket(response.Body) - c.Assert(err, IsNil) - c.Assert(len(listResponse.Buckets.Bucket), Equals, 1) - c.Assert(listResponse.Buckets.Bucket[0].Name, Equals, "foo") - - typedDriver.On("CreateBucket", "bar", "private").Return(nil).Once() - err = driver.CreateBucket("bar", "private") - c.Assert(err, IsNil) - - bucketMetadata = []drivers.BucketMetadata{ - {Name: "bar", Created: time.Now().UTC()}, - bucketMetadata[0], - } - - typedDriver.On("ListBuckets").Return(bucketMetadata, nil).Once() - request, err = http.NewRequest("GET", testServer.URL+"/", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client = http.Client{} - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - listResponse, err = readListBucket(response.Body) - c.Assert(err, IsNil) - c.Assert(len(listResponse.Buckets.Bucket), Equals, 2) - - c.Assert(listResponse.Buckets.Bucket[0].Name, Equals, "bar") - c.Assert(listResponse.Buckets.Bucket[1].Name, Equals, "foo") -} - -func readListBucket(reader io.Reader) (ListBucketsResponse, error) { - var results ListBucketsResponse - decoder := xml.NewDecoder(reader) - err := decoder.Decode(&results) - return results, err -} - -func (s *MySuite) TestListObjects(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - // TODO Implement -} - -func 
(s *MySuite) TestNotBeAbleToCreateObjectInNonexistantBucket(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - objectMetadata := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "object1", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "6f5902ac237024bdd0c176cb93063dc4", - Size: 11, - } - - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, drivers.BucketNotFound{}).Once() - typedDriver.On("CreateObject", "bucket", "object1", "", "", mock.Anything, mock.Anything).Return(objectMetadata.Md5, nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/bucket/object1", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "NoSuchBucket", "The specified bucket does not exist.", http.StatusNotFound) -} - -func (s *MySuite) TestHeadOnObject(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - objectMetadata := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "object1", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "6f5902ac237024bdd0c176cb93063dc4", - Size: 11, - } - - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/bucket", nil) - c.Assert(err, IsNil) - request.Header.Add("x-amz-acl", "private") - setDummyAuthHeader(request) - - client := http.Client{} - response, err := 
client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("CreateObject", "bucket", "object1", "", "", mock.Anything, mock.Anything).Return(objectMetadata.Md5, nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/bucket/object1", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "object1").Return(objectMetadata, nil).Once() - request, err = http.NewRequest("HEAD", testServer.URL+"/bucket/object1", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) -} - -func (s *MySuite) TestHeadOnBucket(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/bucket", nil) - c.Assert(err, IsNil) - request.Header.Add("x-amz-acl", "private") - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - request, err = http.NewRequest("HEAD", testServer.URL+"/bucket", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = 
client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) -} - -func (s *MySuite) TestDateFormat(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/bucket", nil) - c.Assert(err, IsNil) - request.Header.Add("x-amz-acl", "private") - setDummyAuthHeader(request) - - // set an invalid date - request.Header.Set("Date", "asfasdfadf") - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "RequestTimeTooSkewed", - "The difference between the request time and the server's time is too large.", http.StatusForbidden) - - request.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) - response, err = client.Do(request) - c.Assert(response.StatusCode, Equals, http.StatusOK) -} - -func verifyHeaders(c *C, header http.Header, date time.Time, size int, contentType string, etag string) { - // Verify date - c.Assert(header.Get("Last-Modified"), Equals, date.Format(http.TimeFormat)) - - // verify size - c.Assert(header.Get("Content-Length"), Equals, strconv.Itoa(size)) - - // verify content type - c.Assert(header.Get("Content-Type"), Equals, contentType) - - // verify etag - c.Assert(header.Get("Etag"), Equals, "\""+etag+"\"") -} - -func (s *MySuite) TestXMLNameNotInBucketListJson(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - typedDriver.On("CreateBucket", "foo", 
"private").Return(nil).Once() - err := driver.CreateBucket("foo", "private") - c.Assert(err, IsNil) - - typedDriver.On("ListBuckets").Return([]drivers.BucketMetadata{{Name: "foo", Created: time.Now().UTC()}}, nil) - request, err := http.NewRequest("GET", testServer.URL+"/", nil) - c.Assert(err, IsNil) - request.Header.Add("Accept", "application/json") - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - byteResults, err := ioutil.ReadAll(response.Body) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(byteResults), "XML"), Equals, false) -} - -func (s *MySuite) TestXMLNameNotInObjectListJson(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - typedDriver.On("CreateBucket", "foo", "private").Return(nil).Once() - err := driver.CreateBucket("foo", "private") - c.Assert(err, IsNil) - - resources := drivers.BucketResourcesMetadata{} - resources.Maxkeys = 1000 - resources.Prefix = "" - - metadata := drivers.BucketMetadata{ - Name: "foo", - Created: time.Now().UTC(), - ACL: drivers.BucketACL("private"), - } - - typedDriver.On("GetBucketMetadata", "foo").Return(metadata, nil).Once() - typedDriver.On("ListObjects", "foo", resources).Return([]drivers.ObjectMetadata{}, drivers.BucketResourcesMetadata{}, nil).Once() - request, err := http.NewRequest("GET", testServer.URL+"/foo", nil) - c.Assert(err, IsNil) - request.Header.Add("Accept", "application/json") - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - byteResults, err := ioutil.ReadAll(response.Body) - c.Assert(err, IsNil) - 
c.Assert(strings.Contains(string(byteResults), "XML"), Equals, false) -} - -func (s *MySuite) TestContentTypePersists(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - typedDriver.On("CreateBucket", "bucket", "private").Return(nil).Once() - err := driver.CreateBucket("bucket", "private") - c.Assert(err, IsNil) - - metadata := drivers.BucketMetadata{ - Name: "bucket", - Created: time.Now().UTC(), - ACL: drivers.BucketACL("private"), - } - // test head - oneMetadata := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "one", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "d41d8cd98f00b204e9800998ecf8427e", - Size: 0, - } - - typedDriver.On("GetBucketMetadata", "bucket").Return(metadata, nil).Once() - typedDriver.On("CreateObject", "bucket", "one", "", "", mock.Anything, mock.Anything).Return(oneMetadata.Md5, nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/bucket/one", bytes.NewBufferString("hello world")) - delete(request.Header, "Content-Type") - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - typedDriver.On("GetBucketMetadata", "bucket").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "one").Return(oneMetadata, nil).Once() - request, err = http.NewRequest("HEAD", testServer.URL+"/bucket/one", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream") - - // test get object - typedDriver.SetGetObjectWriter("bucket", "once", []byte("")) - 
typedDriver.On("GetBucketMetadata", "bucket").Return(metadata, nil).Twice() - typedDriver.On("GetObjectMetadata", "bucket", "one").Return(oneMetadata, nil).Once() - typedDriver.On("GetObject", mock.Anything, "bucket", "one").Return(int64(0), nil).Once() - request, err = http.NewRequest("GET", testServer.URL+"/bucket/one", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - client = http.Client{} - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream") - - twoMetadata := drivers.ObjectMetadata{ - Bucket: "bucket", - Key: "one", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - // Fix MD5 - Md5: "d41d8cd98f00b204e9800998ecf8427e", - Size: 0, - } - - typedDriver.On("GetBucketMetadata", "bucket").Return(metadata, nil).Once() - typedDriver.On("CreateObject", "bucket", "two", "", "", mock.Anything, mock.Anything).Return(twoMetadata.Md5, nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/bucket/two", bytes.NewBufferString("hello world")) - delete(request.Header, "Content-Type") - request.Header.Add("Content-Type", "application/json") - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - typedDriver.On("GetBucketMetadata", "bucket").Return(metadata, nil).Once() - typedDriver.On("GetObjectMetadata", "bucket", "two").Return(twoMetadata, nil).Once() - request, err = http.NewRequest("HEAD", testServer.URL+"/bucket/two", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream") - - // test get object - typedDriver.On("GetBucketMetadata", "bucket").Return(metadata, nil).Twice() - typedDriver.On("GetObjectMetadata", "bucket", 
"two").Return(twoMetadata, nil).Once() - typedDriver.On("GetObject", mock.Anything, "bucket", "two").Return(int64(0), nil).Once() - request, err = http.NewRequest("GET", testServer.URL+"/bucket/two", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream") -} - -func (s *MySuite) TestPartialContent(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - - metadata := drivers.ObjectMetadata{ - Bucket: "foo", - Key: "bar", - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "6f5902ac237024bdd0c176cb93063dc4", // even for range requests, md5sum is returned for the full object - Size: 11, - } - - typedDriver.On("CreateBucket", "foo", "private").Return(nil).Once() - typedDriver.On("CreateObject", "foo", "bar", "", "", mock.Anything, mock.Anything).Return(metadata.Md5, nil).Once() - err := driver.CreateBucket("foo", "private") - c.Assert(err, IsNil) - - driver.CreateObject("foo", "bar", "", "", int64(len("hello world")), bytes.NewBufferString("hello world")) - - // prepare for GET on range request - typedDriver.SetGetObjectWriter("foo", "bar", []byte("hello world")) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "foo", "bar").Return(metadata, nil).Once() - typedDriver.On("GetPartialObject", mock.Anything, "foo", "bar", int64(6), int64(2)).Return(int64(2), nil).Once() - - // prepare request - request, err := http.NewRequest("GET", testServer.URL+"/foo/bar", nil) - c.Assert(err, IsNil) - request.Header.Add("Accept", "application/json") - request.Header.Add("Range", "bytes=6-7") - 
setDummyAuthHeader(request) - - client := http.Client{} - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusPartialContent) - partialObject, err := ioutil.ReadAll(response.Body) - c.Assert(err, IsNil) - - c.Assert(string(partialObject), Equals, "wo") -} - -func (s *MySuite) TestListObjectsHandlerErrors(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - default: - { - return - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - client := http.Client{} - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, drivers.BucketNameInvalid{}).Once() - request, err := http.NewRequest("GET", testServer.URL+"/foo", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err := client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "InvalidBucketName", "The specified bucket is not valid.", http.StatusBadRequest) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, drivers.BucketNotFound{}).Once() - request, err = http.NewRequest("GET", testServer.URL+"/foo", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - verifyError(c, response, "NoSuchBucket", "The specified bucket does not exist.", http.StatusNotFound) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("ListObjects", "foo", mock.Anything).Return(make([]drivers.ObjectMetadata, 0), drivers.BucketResourcesMetadata{}, drivers.ObjectNameInvalid{}).Once() - request, err = http.NewRequest("GET", testServer.URL+"/foo", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "NoSuchKey", "The specified key does 
not exist.", http.StatusNotFound) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("ListObjects", "foo", mock.Anything).Return(make([]drivers.ObjectMetadata, 0), drivers.BucketResourcesMetadata{}, drivers.ObjectNotFound{}).Once() - request, err = http.NewRequest("GET", testServer.URL+"/foo", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "NoSuchKey", "The specified key does not exist.", http.StatusNotFound) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, drivers.BackendCorrupted{}).Once() - typedDriver.On("ListObjects", "foo", mock.Anything).Return(make([]drivers.ObjectMetadata, 0), drivers.BucketResourcesMetadata{}, drivers.BackendCorrupted{}).Once() - request, err = http.NewRequest("GET", testServer.URL+"/foo", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "InternalError", "We encountered an internal error, please try again.", http.StatusInternalServerError) -} - -func (s *MySuite) TestListBucketsErrors(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - default: - { - return - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - client := http.Client{} - - metadata := drivers.BucketMetadata{ - Name: "foo", - Created: time.Now().UTC(), - ACL: drivers.BucketACL("private"), - } - - typedDriver.On("GetBucketMetadata", "foo").Return(metadata, nil).Once() - typedDriver.On("ListObjects", "foo", mock.Anything).Return(make([]drivers.ObjectMetadata, 0), - drivers.BucketResourcesMetadata{}, drivers.BackendCorrupted{}).Once() - request, err := http.NewRequest("GET", testServer.URL+"/foo", nil) - 
c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err := client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "InternalError", "We encountered an internal error, please try again.", http.StatusInternalServerError) -} - -func (s *MySuite) TestPutBucketErrors(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - default: - { - return - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - client := http.Client{} - - typedDriver.On("CreateBucket", "foo", "private").Return(drivers.BucketNameInvalid{}).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/foo", bytes.NewBufferString("")) - c.Assert(err, IsNil) - request.Header.Add("x-amz-acl", "private") - setDummyAuthHeader(request) - - response, err := client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "InvalidBucketName", "The specified bucket is not valid.", http.StatusBadRequest) - - typedDriver.On("CreateBucket", "foo", "private").Return(drivers.BucketExists{}).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo", bytes.NewBufferString("")) - c.Assert(err, IsNil) - request.Header.Add("x-amz-acl", "private") - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "BucketAlreadyExists", "The requested bucket name is not available.", http.StatusConflict) - - typedDriver.On("CreateBucket", "foo", "private").Return(drivers.BackendCorrupted{}).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo", bytes.NewBufferString("")) - c.Assert(err, IsNil) - request.Header.Add("x-amz-acl", "private") - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "InternalError", "We encountered an internal error, please try again.", 
http.StatusInternalServerError) - - typedDriver.On("CreateBucket", "foo", "unknown").Return(nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo", bytes.NewBufferString("")) - c.Assert(err, IsNil) - request.Header.Add("x-amz-acl", "unknown") - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "NotImplemented", "A header you provided implies functionality that is not implemented.", http.StatusNotImplemented) -} - -func (s *MySuite) TestGetObjectErrors(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - default: - { - return - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - client := http.Client{} - - metadata := drivers.BucketMetadata{ - Name: "foo", - Created: time.Now().UTC(), - ACL: drivers.BucketACL("private"), - } - typedDriver.On("GetBucketMetadata", "foo").Return(metadata, nil).Once() - typedDriver.On("GetObjectMetadata", "foo", "bar").Return(drivers.ObjectMetadata{}, drivers.ObjectNotFound{}).Once() - request, err := http.NewRequest("GET", testServer.URL+"/foo/bar", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err := client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "NoSuchKey", "The specified key does not exist.", http.StatusNotFound) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, drivers.BucketNotFound{}).Once() - request, err = http.NewRequest("GET", testServer.URL+"/foo/bar", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "NoSuchBucket", "The specified bucket does not exist.", http.StatusNotFound) - - typedDriver.On("GetBucketMetadata", "foo").Return(metadata, nil).Once() - 
typedDriver.On("GetObjectMetadata", "foo", "bar").Return(drivers.ObjectMetadata{}, drivers.ObjectNameInvalid{}).Once() - request, err = http.NewRequest("GET", testServer.URL+"/foo/bar", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "NoSuchKey", "The specified key does not exist.", http.StatusNotFound) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, drivers.BucketNameInvalid{}).Once() - request, err = http.NewRequest("GET", testServer.URL+"/foo/bar", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "InvalidBucketName", "The specified bucket is not valid.", http.StatusBadRequest) - - typedDriver.On("GetBucketMetadata", "foo").Return(metadata, nil).Once() - typedDriver.On("GetObjectMetadata", "foo", "bar").Return(drivers.ObjectMetadata{}, drivers.BackendCorrupted{}).Once() - request, err = http.NewRequest("GET", testServer.URL+"/foo/bar", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "InternalError", "We encountered an internal error, please try again.", http.StatusInternalServerError) -} - -func (s *MySuite) TestGetObjectRangeErrors(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - default: - { - return - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - client := http.Client{} - - metadata := drivers.ObjectMetadata{ - Bucket: "foo", - Key: "bar", - - ContentType: "application/octet-stream", - Created: time.Now().UTC(), - Md5: "e81c4e4f2b7b93b481e13a8553c2ae1b", - Size: 11, - } - - typedDriver.On("GetBucketMetadata", 
"foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "foo", "bar").Return(metadata, nil).Once() - request, err := http.NewRequest("GET", testServer.URL+"/foo/bar", nil) - request.Header.Add("Range", "bytes=7-6") - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err := client.Do(request) - c.Assert(err, IsNil) - verifyError(c, response, "InvalidRange", "The requested range cannot be satisfied.", http.StatusRequestedRangeNotSatisfiable) -} - -func (s *MySuite) TestObjectMultipartAbort(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - client := http.Client{} - - // create bucket - typedDriver.On("CreateBucket", "foo", "private").Return(nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/foo", bytes.NewBufferString("")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, 200) - - // Initiate multipart upload - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("NewMultipartUpload", "foo", "object", "").Return("uploadid", nil).Once() - request, err = http.NewRequest("POST", testServer.URL+"/foo/object?uploads", bytes.NewBufferString("")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - decoder := xml.NewDecoder(response.Body) - newResponse := &InitiateMultipartUploadResult{} - - err = decoder.Decode(newResponse) - c.Assert(err, IsNil) - c.Assert(len(newResponse.UploadID) > 0, Equals, true) - uploadID := newResponse.UploadID - - // put part one - typedDriver.On("GetBucketMetadata", 
"foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("CreateObjectPart", "foo", "object", "uploadid", 1, "", "", 11, mock.Anything).Return("5eb63bbbe01eeed093cb22bb8f5acdc3", nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo/object?uploadId="+uploadID+"&partNumber=1", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response1, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response1.StatusCode, Equals, http.StatusOK) - - // put part two - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("CreateObjectPart", "foo", "object", "uploadid", 2, "", "", 11, mock.Anything).Return("5eb63bbbe01eeed093cb22bb8f5acdc3", nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo/object?uploadId="+uploadID+"&partNumber=2", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response2, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response2.StatusCode, Equals, http.StatusOK) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("AbortMultipartUpload", "foo", "object", "uploadid").Return(nil).Once() - request, err = http.NewRequest("DELETE", testServer.URL+"/foo/object?uploadId="+uploadID, nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response3, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response3.StatusCode, Equals, http.StatusNoContent) -} - -func (s *MySuite) TestBucketMultipartList(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - client := http.Client{} - - // create bucket - typedDriver.On("CreateBucket", "foo", 
"private").Return(nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/foo", bytes.NewBufferString("")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, 200) - - // Initiate multipart upload - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("NewMultipartUpload", "foo", "object", "").Return("uploadid", nil).Once() - request, err = http.NewRequest("POST", testServer.URL+"/foo/object?uploads", bytes.NewBufferString("")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - decoder := xml.NewDecoder(response.Body) - newResponse := &InitiateMultipartUploadResult{} - - err = decoder.Decode(newResponse) - c.Assert(err, IsNil) - c.Assert(len(newResponse.UploadID) > 0, Equals, true) - uploadID := newResponse.UploadID - - // put part one - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("CreateObjectPart", "foo", "object", "uploadid", 1, "", "", 11, mock.Anything).Return("5eb63bbbe01eeed093cb22bb8f5acdc3", nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo/object?uploadId="+uploadID+"&partNumber=1", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response1, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response1.StatusCode, Equals, http.StatusOK) - - // put part two - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("CreateObjectPart", "foo", "object", "uploadid", 2, "", "", 11, mock.Anything).Return("5eb63bbbe01eeed093cb22bb8f5acdc3", nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo/object?uploadId="+uploadID+"&partNumber=2", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) 
- setDummyAuthHeader(request) - - response2, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response2.StatusCode, Equals, http.StatusOK) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("ListMultipartUploads", "foo", mock.Anything).Return(drivers.BucketMultipartResourcesMetadata{}, nil).Once() - request, err = http.NewRequest("GET", testServer.URL+"/foo?uploads", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response3, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response3.StatusCode, Equals, http.StatusOK) - - decoder = xml.NewDecoder(response3.Body) - newResponse3 := &ListMultipartUploadsResponse{} - err = decoder.Decode(newResponse3) - c.Assert(err, IsNil) - c.Assert(newResponse3.Bucket, Equals, "foo") -} - -func (s *MySuite) TestObjectMultipartList(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - client := http.Client{} - - // create bucket - typedDriver.On("CreateBucket", "foo", "private").Return(nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/foo", bytes.NewBufferString("")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, 200) - - // Initiate multipart upload - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("NewMultipartUpload", "foo", "object", "").Return("uploadid", nil).Once() - request, err = http.NewRequest("POST", testServer.URL+"/foo/object?uploads", bytes.NewBufferString("")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - 
decoder := xml.NewDecoder(response.Body) - newResponse := &InitiateMultipartUploadResult{} - - err = decoder.Decode(newResponse) - c.Assert(err, IsNil) - c.Assert(len(newResponse.UploadID) > 0, Equals, true) - uploadID := newResponse.UploadID - - // put part one - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("CreateObjectPart", "foo", "object", "uploadid", 1, "", "", 11, mock.Anything).Return("5eb63bbbe01eeed093cb22bb8f5acdc3", nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo/object?uploadId="+uploadID+"&partNumber=1", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response1, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response1.StatusCode, Equals, http.StatusOK) - - // put part two - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("CreateObjectPart", "foo", "object", "uploadid", 2, "", "", 11, mock.Anything).Return("5eb63bbbe01eeed093cb22bb8f5acdc3", nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo/object?uploadId="+uploadID+"&partNumber=2", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response2, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response2.StatusCode, Equals, http.StatusOK) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("ListObjectParts", "foo", "object", mock.Anything).Return(drivers.ObjectResourcesMetadata{}, nil).Once() - request, err = http.NewRequest("GET", testServer.URL+"/foo/object?uploadId="+uploadID, nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response3, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response3.StatusCode, Equals, http.StatusOK) - -} - -func (s *MySuite) TestObjectMultipart(c *C) { - switch driver := s.Driver.(type) { - case *mocks.Driver: - { - 
driver.AssertExpectations(c) - } - } - driver := s.Driver - typedDriver := s.MockDriver - - httpHandler := HTTPHandler(setConfig(driver)) - testServer := httptest.NewServer(httpHandler) - defer testServer.Close() - client := http.Client{} - - // create bucket - typedDriver.On("CreateBucket", "foo", "private").Return(nil).Once() - request, err := http.NewRequest("PUT", testServer.URL+"/foo", bytes.NewBufferString("")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, 200) - - // Initiate multipart upload - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("NewMultipartUpload", "foo", "object", "").Return("uploadid", nil).Once() - request, err = http.NewRequest("POST", testServer.URL+"/foo/object?uploads", bytes.NewBufferString("")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - decoder := xml.NewDecoder(response.Body) - newResponse := &InitiateMultipartUploadResult{} - - err = decoder.Decode(newResponse) - c.Assert(err, IsNil) - c.Assert(len(newResponse.UploadID) > 0, Equals, true) - uploadID := newResponse.UploadID - - // put part one - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("CreateObjectPart", "foo", "object", "uploadid", 1, "", "", 11, mock.Anything).Return("5eb63bbbe01eeed093cb22bb8f5acdc3", nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo/object?uploadId="+uploadID+"&partNumber=1", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response1, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response1.StatusCode, Equals, http.StatusOK) - - // put part two - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - 
typedDriver.On("CreateObjectPart", "foo", "object", "uploadid", 2, "", "", 11, mock.Anything).Return("5eb63bbbe01eeed093cb22bb8f5acdc3", nil).Once() - request, err = http.NewRequest("PUT", testServer.URL+"/foo/object?uploadId="+uploadID+"&partNumber=2", bytes.NewBufferString("hello world")) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response2, err := client.Do(request) - c.Assert(err, IsNil) - c.Assert(response2.StatusCode, Equals, http.StatusOK) - - // complete multipart upload - completeUploads := &CompleteMultipartUpload{ - Part: []Part{ - { - PartNumber: 1, - ETag: response1.Header.Get("ETag"), - }, - { - PartNumber: 2, - ETag: response2.Header.Get("ETag"), - }, - }, - } - - var completeBuffer bytes.Buffer - encoder := xml.NewEncoder(&completeBuffer) - encoder.Encode(completeUploads) - - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("CompleteMultipartUpload", "foo", "object", "uploadid", mock.Anything).Return("etag", nil).Once() - request, err = http.NewRequest("POST", testServer.URL+"/foo/object?uploadId="+uploadID, &completeBuffer) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - // get data - typedDriver.On("GetBucketMetadata", "foo").Return(drivers.BucketMetadata{}, nil).Once() - typedDriver.On("GetObjectMetadata", "foo", "object").Return(drivers.ObjectMetadata{Size: 22}, nil).Once() - typedDriver.On("GetObject", mock.Anything, "foo", "object").Return(int64(22), nil).Once() - typedDriver.SetGetObjectWriter("foo", "object", []byte("hello worldhello world")) - request, err = http.NewRequest("GET", testServer.URL+"/foo/object", nil) - c.Assert(err, IsNil) - setDummyAuthHeader(request) - - response, err = client.Do(request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - object, err := ioutil.ReadAll(response.Body) - c.Assert(err, 
IsNil) - c.Assert(string(object), Equals, ("hello worldhello world")) -} - -func verifyError(c *C, response *http.Response, code, description string, statusCode int) { - data, err := ioutil.ReadAll(response.Body) - c.Assert(err, IsNil) - errorResponse := ErrorResponse{} - err = xml.Unmarshal(data, &errorResponse) - c.Assert(err, IsNil) - c.Assert(errorResponse.Code, Equals, code) - c.Assert(errorResponse.Message, Equals, description) - c.Assert(response.StatusCode, Equals, statusCode) -} - -func startMockDriver() *mocks.Driver { - return &mocks.Driver{ - ObjectWriterData: make(map[string][]byte), - } -} diff --git a/pkg/api/quota/bandwidth_cap.go b/pkg/api/quota/bandwidth_cap.go deleted file mode 100644 index 33ddcac81..000000000 --- a/pkg/api/quota/bandwidth_cap.go +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package quota - -import ( - "errors" - "io" - "net" - "net/http" - "time" - - "sync" - - "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/utils/log" -) - -// bandwidthQuotaHandler -type bandwidthQuotaHandler struct { - handler http.Handler - quotas *quotaMap -} - -// ServeHTTP is an http.Handler ServeHTTP method -func (h *bandwidthQuotaHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - host, _, _ := net.SplitHostPort(req.RemoteAddr) - longIP := longIP{net.ParseIP(host)}.IptoUint32() - if h.quotas.WillExceedQuota(longIP, req.ContentLength) { - hosts, _ := net.LookupAddr(uint32ToIP(longIP).String()) - log.Debug.Printf("Offending Host: %s, BandwidthUsed: %d", hosts, h.quotas.GetQuotaUsed(longIP)) - writeErrorResponse(w, req, BandWidthInsufficientToProceed, req.URL.Path) - return - } - qr := "aReader{ - ReadCloser: req.Body, - quotas: h.quotas, - ip: longIP, - w: w, - req: req, - lock: &sync.RWMutex{}, - } - req.Body = qr - w = "aWriter{ - ResponseWriter: w, - quotas: h.quotas, - ip: longIP, - quotaReader: qr, - } - h.handler.ServeHTTP(w, req) -} - -// BandwidthCap sets a quote based upon bandwidth used -func BandwidthCap(h http.Handler, limit int64, duration time.Duration) http.Handler { - return &bandwidthQuotaHandler{ - handler: h, - quotas: "aMap{ - data: make(map[int64]map[uint32]int64), - limit: int64(limit), - duration: duration, - segmentSize: segmentSize(duration), - }, - } -} - -type quotaReader struct { - io.ReadCloser - quotas *quotaMap - ip uint32 - w http.ResponseWriter - req *http.Request - err bool - lock *sync.RWMutex -} - -func (q *quotaReader) Read(b []byte) (int, error) { - log.Println(q.quotas.GetQuotaUsed(q.ip)) - log.Println(q.quotas.limit) - q.lock.Lock() - defer q.lock.Unlock() - if q.err { - return 0, iodine.New(errors.New("Quota Met"), nil) - } - if q.err == false && q.quotas.IsQuotaMet(q.ip) { - defer q.lock.Unlock() - q.err = true - hosts, _ := net.LookupAddr(uint32ToIP(q.ip).String()) - 
log.Debug.Printf("Offending Host: %s, BandwidthUsed: %d", hosts, q.quotas.GetQuotaUsed(q.ip)) - writeErrorResponse(q.w, q.req, BandWidthQuotaExceeded, q.req.URL.Path) - return 0, iodine.New(errors.New("Quota Met"), nil) - } - n, err := q.ReadCloser.Read(b) - q.quotas.Add(q.ip, int64(n)) - return n, iodine.New(err, nil) -} - -func (q *quotaReader) Close() error { - return iodine.New(q.ReadCloser.Close(), nil) -} - -type quotaWriter struct { - ResponseWriter http.ResponseWriter - quotas *quotaMap - ip uint32 - quotaReader *quotaReader -} - -func (q *quotaWriter) Write(b []byte) (int, error) { - q.quotaReader.lock.RLock() - defer q.quotaReader.lock.RUnlock() - if q.quotas.IsQuotaMet(q.ip) { - return 0, iodine.New(errors.New("Quota Met"), nil) - } - q.quotas.Add(q.ip, int64(len(b))) - n, err := q.ResponseWriter.Write(b) - // remove from quota if a full write isn't performed - q.quotas.Add(q.ip, int64(n-len(b))) - return n, iodine.New(err, nil) -} -func (q *quotaWriter) Header() http.Header { - return q.ResponseWriter.Header() -} - -func (q *quotaWriter) WriteHeader(status int) { - q.quotaReader.lock.RLock() - defer q.quotaReader.lock.RUnlock() - if q.quotas.IsQuotaMet(q.ip) || q.quotaReader.err { - return - } - q.ResponseWriter.WriteHeader(status) -} - -func segmentSize(duration time.Duration) time.Duration { - var segmentSize time.Duration - for i := int64(1); i < duration.Nanoseconds(); i = i * 10 { - segmentSize = time.Duration(i) - } - return segmentSize -} diff --git a/pkg/api/quota/conn_limit.go b/pkg/api/quota/conn_limit.go deleted file mode 100644 index 03f905486..000000000 --- a/pkg/api/quota/conn_limit.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package quota - -import ( - "net" - "net/http" - "sync" - - "github.com/minio/minio/pkg/utils/log" -) - -// requestLimitHandler -type connLimit struct { - sync.RWMutex - handler http.Handler - connections map[uint32]int - limit int -} - -func (c *connLimit) IsLimitExceeded(ip uint32) bool { - if c.connections[ip] >= c.limit { - return true - } - return false -} - -func (c *connLimit) GetUsed(ip uint32) int { - return c.connections[ip] -} - -func (c *connLimit) Add(ip uint32) { - c.Lock() - defer c.Unlock() - count := c.connections[ip] - count = count + 1 - c.connections[ip] = count - return -} - -func (c *connLimit) Remove(ip uint32) { - c.Lock() - defer c.Unlock() - count, _ := c.connections[ip] - count = count - 1 - if count <= 0 { - delete(c.connections, ip) - return - } - c.connections[ip] = count -} - -// ServeHTTP is an http.Handler ServeHTTP method -func (c *connLimit) ServeHTTP(w http.ResponseWriter, req *http.Request) { - host, _, _ := net.SplitHostPort(req.RemoteAddr) - longIP := longIP{net.ParseIP(host)}.IptoUint32() - if c.IsLimitExceeded(longIP) { - hosts, _ := net.LookupAddr(uint32ToIP(longIP).String()) - log.Debug.Printf("Connection limit reached - Host: %s, Total Connections: %d\n", hosts, c.GetUsed(longIP)) - writeErrorResponse(w, req, ConnectionLimitExceeded, req.URL.Path) - return - } - c.Add(longIP) - defer c.Remove(longIP) - c.handler.ServeHTTP(w, req) -} - -// ConnectionLimit limits the number of concurrent connections -func ConnectionLimit(h http.Handler, limit int) http.Handler { - return &connLimit{ - handler: h, - 
connections: make(map[uint32]int), - limit: limit, - } -} diff --git a/pkg/api/quota/errors.go b/pkg/api/quota/errors.go deleted file mode 100644 index c4ad019e5..000000000 --- a/pkg/api/quota/errors.go +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package quota - -import ( - "bytes" - "encoding/xml" - "net/http" -) - -// copied from api, no cyclic deps allowed - -// Error structure -type Error struct { - Code string - Description string - HTTPStatusCode int -} - -// ErrorResponse - error response format -type ErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string - Message string - Resource string - RequestID string - HostID string -} - -// Quota standard errors non exhaustive list -const ( - RequestTimeTooSkewed = iota - BandWidthQuotaExceeded - BandWidthInsufficientToProceed - ConnectionLimitExceeded - SlowDown -) - -// Golang http doesn't implement these -const ( - StatusTooManyRequests = 429 -) - -func writeErrorResponse(w http.ResponseWriter, req *http.Request, errorType int, resource string) { - error := getErrorCode(errorType) - errorResponse := getErrorResponse(error, resource) - encodedErrorResponse := encodeErrorResponse(errorResponse) - // set headers - writeErrorHeaders(w) - w.WriteHeader(error.HTTPStatusCode) - // write body - w.Write(encodedErrorResponse) -} - -func writeErrorHeaders(w http.ResponseWriter) { - 
w.Header().Set("Server", "Minio") - w.Header().Set("Accept-Ranges", "bytes") - w.Header().Set("Content-Type", "application/xml") - w.Header().Set("Connection", "close") -} - -// Error code to Error structure map -var errorCodeResponse = map[int]Error{ - BandWidthQuotaExceeded: { - Code: "BandwidthQuotaExceeded", - Description: "Bandwidth Quota Exceeded.", - HTTPStatusCode: StatusTooManyRequests, - }, - BandWidthInsufficientToProceed: { - Code: "BandwidthQuotaWillBeExceeded", - Description: "Bandwidth quota will be exceeded with this request.", - HTTPStatusCode: StatusTooManyRequests, - }, - ConnectionLimitExceeded: { - Code: "ConnectionLimitExceeded", - Description: "Connections Limit Exceeded.", - HTTPStatusCode: StatusTooManyRequests, - }, - SlowDown: { - Code: "SlowDown", - Description: "Reduce your request rate.", - HTTPStatusCode: StatusTooManyRequests, - }, -} - -// Write error response headers -func encodeErrorResponse(response interface{}) []byte { - var bytesBuffer bytes.Buffer - encoder := xml.NewEncoder(&bytesBuffer) - encoder.Encode(response) - return bytesBuffer.Bytes() -} - -// errorCodeError provides errorCode to Error. It returns empty if the code provided is unknown -func getErrorCode(code int) Error { - return errorCodeResponse[code] -} - -// getErrorResponse gets in standard error and resource value and -// provides a encodable populated response values -func getErrorResponse(err Error, resource string) ErrorResponse { - var data = ErrorResponse{} - data.Code = err.Code - data.Message = err.Description - if resource != "" { - data.Resource = resource - } - // TODO implement this in future - data.RequestID = "3L137" - data.HostID = "3L137" - - return data -} diff --git a/pkg/api/quota/quota_handler.go b/pkg/api/quota/quota_handler.go deleted file mode 100644 index 4b56c1510..000000000 --- a/pkg/api/quota/quota_handler.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package quota - -import ( - "encoding/binary" - "net" - "sync" - "time" -) - -// map[minute][address] = current quota -type quotaMap struct { - sync.RWMutex - data map[int64]map[uint32]int64 - limit int64 - duration time.Duration - segmentSize time.Duration -} - -func (q *quotaMap) CanExpire() { - q.Lock() - defer q.Unlock() - currentMinute := time.Now().UTC().UnixNano() / q.segmentSize.Nanoseconds() - // divide by segmentSize, otherwise expiredQuotas will always be negative - expiredQuotas := currentMinute - (q.duration.Nanoseconds() / q.segmentSize.Nanoseconds()) - for time := range q.data { - if time < expiredQuotas { - delete(q.data, time) - } - } -} - -func (q *quotaMap) Add(ip uint32, size int64) { - q.CanExpire() - q.Lock() - defer q.Unlock() - currentMinute := time.Now().UTC().UnixNano() / q.segmentSize.Nanoseconds() - if _, ok := q.data[currentMinute]; !ok { - q.data[currentMinute] = make(map[uint32]int64) - } - currentData, _ := q.data[currentMinute][ip] - proposedDataSize := currentData + size - q.data[currentMinute][ip] = proposedDataSize -} - -func (q *quotaMap) IsQuotaMet(ip uint32) bool { - q.CanExpire() - if q.GetQuotaUsed(ip) >= q.limit { - return true - } - return false -} - -func (q *quotaMap) GetQuotaUsed(ip uint32) (total int64) { - q.CanExpire() - q.RLock() - defer q.RUnlock() - for _, segment := range q.data { - if used, ok := segment[ip]; ok { - total += used - } - } - return -} 
- -func (q *quotaMap) WillExceedQuota(ip uint32, size int64) (result bool) { - return q.GetQuotaUsed(ip)+size > q.limit -} - -type longIP struct { - net.IP -} - -// []byte to uint32 representation -func (p longIP) IptoUint32() (result uint32) { - ip := p.To4() - if ip == nil { - return 0 - } - return binary.BigEndian.Uint32(ip) -} diff --git a/pkg/api/quota/request_limit.go b/pkg/api/quota/request_limit.go deleted file mode 100644 index 976b017aa..000000000 --- a/pkg/api/quota/request_limit.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package quota - -import ( - "encoding/binary" - "net" - "net/http" - "time" - - "github.com/minio/minio/pkg/utils/log" -) - -// requestLimitHandler -type requestLimitHandler struct { - handler http.Handler - quotas *quotaMap -} - -//convert a uint32 to an ipv4 -func uint32ToIP(ip uint32) net.IP { - addr := net.IP{0, 0, 0, 0} - binary.BigEndian.PutUint32(addr, ip) - return addr -} - -// ServeHTTP is an http.Handler ServeHTTP method -func (h *requestLimitHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - host, _, _ := net.SplitHostPort(req.RemoteAddr) - longIP := longIP{net.ParseIP(host)}.IptoUint32() - if h.quotas.IsQuotaMet(longIP) { - hosts, _ := net.LookupAddr(uint32ToIP(longIP).String()) - log.Debug.Printf("Offending Host: %s, RequestUSED: %d\n", hosts, h.quotas.GetQuotaUsed(longIP)) - writeErrorResponse(w, req, SlowDown, req.URL.Path) - } - h.quotas.Add(longIP, 1) - h.handler.ServeHTTP(w, req) -} - -// RequestLimit sets a quote based upon number of requests allowed over a time period -func RequestLimit(h http.Handler, limit int64, duration time.Duration) http.Handler { - return &requestLimitHandler{ - handler: h, - quotas: "aMap{ - data: make(map[int64]map[uint32]int64), - limit: int64(limit), - duration: duration, - segmentSize: segmentSize(duration), - }, - } -} diff --git a/pkg/server/httpserver/httpserver.go b/pkg/api/server.go similarity index 67% rename from pkg/server/httpserver/httpserver.go rename to pkg/api/server.go index 31aac54b3..4aaf179b8 100644 --- a/pkg/server/httpserver/httpserver.go +++ b/pkg/api/server.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package httpserver +package api import ( "fmt" @@ -32,38 +32,27 @@ type Config struct { RateLimit int } -// Server - http server related -type Server struct { - config Config - handler http.Handler -} - // Start http server -func Start(handler http.Handler, config Config) (chan<- string, <-chan error, Server) { - ctrlChannel := make(chan string) - errorChannel := make(chan error) - server := Server{ - config: config, - handler: handler, - } - go start(ctrlChannel, errorChannel, server) - return ctrlChannel, errorChannel, server +func Start(a API) <-chan error { + errCh := make(chan error) + go start(errCh, a) + return errCh } -func start(ctrlChannel <-chan string, errorChannel chan<- error, server Server) { - defer close(errorChannel) +func start(errCh chan error, a API) { + defer close(errCh) var err error // Minio server config httpServer := &http.Server{ - Addr: server.config.Address, - Handler: server.handler, + Addr: a.config.Address, + Handler: a.handler, MaxHeaderBytes: 1 << 20, } - host, port, err := net.SplitHostPort(server.config.Address) + host, port, err := net.SplitHostPort(a.config.Address) if err != nil { - errorChannel <- err + errCh <- err return } @@ -74,7 +63,7 @@ func start(ctrlChannel <-chan string, errorChannel chan<- error, server Server) default: addrs, err := net.InterfaceAddrs() if err != nil { - errorChannel <- err + errCh <- err return } for _, addr := range addrs { @@ -92,14 +81,31 @@ func start(ctrlChannel <-chan string, errorChannel chan<- error, server Server) fmt.Printf("Starting minio server on: http://%s:%s\n", host, port) } err = httpServer.ListenAndServe() - case server.config.TLS == true: + case a.config.TLS == true: for _, host := range hosts { fmt.Printf("Starting minio server on: https://%s:%s\n", host, port) } - err = httpServer.ListenAndServeTLS(server.config.CertFile, server.config.KeyFile) + err = httpServer.ListenAndServeTLS(a.config.CertFile, a.config.KeyFile) } if err != nil { - errorChannel <- err + errCh <- err 
} - + errCh <- nil + return +} + +// API is used to build api server +type API struct { + config Config + handler http.Handler +} + +// StartServer APIFactory builds api server +func StartServer(conf Config) error { + for err := range Start(New(conf)) { + if err != nil { + return err + } + } + return nil } diff --git a/pkg/server/server.go b/pkg/server/server.go deleted file mode 100644 index 1913ce66a..000000000 --- a/pkg/server/server.go +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2014 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package server - -import ( - "errors" - "fmt" - "reflect" - "time" - - "github.com/minio/minio/pkg/api" - "github.com/minio/minio/pkg/api/web" - "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/server/httpserver" - "github.com/minio/minio/pkg/storage/drivers" - "github.com/minio/minio/pkg/storage/drivers/cache" - "github.com/minio/minio/pkg/storage/drivers/donut" - "github.com/minio/minio/pkg/utils/log" -) - -// WebFactory is used to build web cli server -type WebFactory struct { - httpserver.Config -} - -// GetStartServerFunc builds web cli server -func (f WebFactory) GetStartServerFunc() StartServerFunc { - return func() (chan<- string, <-chan error) { - ctrl, status, _ := httpserver.Start(web.HTTPHandler(), f.Config) - return ctrl, status - } -} - -// Factory is used to build api server -type Factory struct { - httpserver.Config - Paths []string - MaxMemory uint64 - Expiration time.Duration -} - -// GetStartServerFunc Factory builds api server -func (f Factory) GetStartServerFunc() StartServerFunc { - return func() (chan<- string, <-chan error) { - conf := api.Config{RateLimit: f.RateLimit} - var driver drivers.Driver - var err error - if len(f.Paths) != 0 { - driver, err = donut.NewDriver(f.Paths) - if err != nil { - log.Fatalln(err) - } - driver, err = cache.NewDriver(f.MaxMemory, f.Expiration, driver) - if err != nil { - log.Fatalln(err) - } - } - conf.SetDriver(driver) - ctrl, status, _ := httpserver.Start(api.HTTPHandler(conf), f.Config) - return ctrl, status - } -} - -// StartServerFunc describes a function that can be used to start a server with StartMinio -type StartServerFunc func() (chan<- string, <-chan error) - -// StartMinio starts minio server -func StartMinio(servers []StartServerFunc) { - var ctrlChannels []chan<- string - var errChannels []<-chan error - for _, server := range servers { - ctrlChannel, errChannel := server() - ctrlChannels = append(ctrlChannels, ctrlChannel) - errChannels = append(errChannels, errChannel) 
- } - cases := createSelectCases(errChannels) - for len(cases) > 0 { - chosen, value, recvOk := reflect.Select(cases) - switch recvOk { - case true: - // Status Message Received - switch true { - case value.Interface() != nil: - // For any error received cleanup all existing channels and fail - for _, ch := range ctrlChannels { - close(ch) - } - msg := fmt.Sprintf("%q", value.Interface()) - log.Fatal(iodine.New(errors.New(msg), nil)) - } - case false: - // Channel closed, remove from list - var aliveStatusChans []<-chan error - for i, ch := range errChannels { - if i != chosen { - aliveStatusChans = append(aliveStatusChans, ch) - } - } - // create new select cases without defunct channel - errChannels = aliveStatusChans - cases = createSelectCases(errChannels) - } - } -} - -func createSelectCases(channels []<-chan error) []reflect.SelectCase { - cases := make([]reflect.SelectCase, len(channels)) - for i, ch := range channels { - cases[i] = reflect.SelectCase{ - Dir: reflect.SelectRecv, - Chan: reflect.ValueOf(ch), - } - } - return cases -} From dc0df3dc0e00a3aa7ae54cc945af44cbf79f980f Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 30 Jun 2015 16:30:02 -0700 Subject: [PATCH 03/19] Breakaway from driver model, move cache into donut --- commands.go | 18 +- main.go | 19 +- pkg/api/api-response.go | 10 +- pkg/api/api-router.go | 2 +- pkg/api/headers.go | 6 +- pkg/api/resources.go | 8 +- .../cache => donut}/cache-multipart.go | 95 ++-- pkg/storage/{drivers/cache => donut}/cache.go | 191 +++---- pkg/storage/{drivers => donut}/date.go | 2 +- pkg/storage/donut/donut.go | 37 ++ pkg/storage/donut/errors.go | 171 ++++++ pkg/storage/{ => donut}/trove/trove.go | 0 pkg/storage/{ => donut}/trove/trove_test.go | 0 .../{drivers/driver.go => donut/utils.go} | 43 +- pkg/storage/drivers/LICENSE | 202 ------- pkg/storage/drivers/README.md | 2 - pkg/storage/drivers/api_testsuite.go | 535 ------------------ pkg/storage/drivers/cache/cache_test.go | 41 -- 
pkg/storage/drivers/donut/donut-multipart.go | 47 -- pkg/storage/drivers/donut/donut.go | 410 -------------- pkg/storage/drivers/donut/donut_test.go | 55 -- pkg/storage/drivers/dummy/README.md | 1 - pkg/storage/drivers/dummy/dummy.go | 108 ---- pkg/storage/drivers/errors.go | 228 -------- pkg/storage/drivers/mocks/Driver.go | 187 ------ 25 files changed, 346 insertions(+), 2072 deletions(-) rename pkg/storage/{drivers/cache => donut}/cache-multipart.go (73%) rename pkg/storage/{drivers/cache => donut}/cache.go (64%) rename pkg/storage/{drivers => donut}/date.go (99%) rename pkg/storage/{ => donut}/trove/trove.go (100%) rename pkg/storage/{ => donut}/trove/trove_test.go (100%) rename pkg/storage/{drivers/driver.go => donut/utils.go} (71%) delete mode 100644 pkg/storage/drivers/LICENSE delete mode 100644 pkg/storage/drivers/README.md delete mode 100644 pkg/storage/drivers/api_testsuite.go delete mode 100644 pkg/storage/drivers/cache/cache_test.go delete mode 100644 pkg/storage/drivers/donut/donut-multipart.go delete mode 100644 pkg/storage/drivers/donut/donut.go delete mode 100644 pkg/storage/drivers/donut/donut_test.go delete mode 100644 pkg/storage/drivers/dummy/README.md delete mode 100644 pkg/storage/drivers/dummy/dummy.go delete mode 100644 pkg/storage/drivers/errors.go delete mode 100644 pkg/storage/drivers/mocks/Driver.go diff --git a/commands.go b/commands.go index 748f17c3e..16822d70b 100644 --- a/commands.go +++ b/commands.go @@ -4,7 +4,7 @@ import ( "os/user" "github.com/minio/cli" - "github.com/minio/minio/pkg/server" + "github.com/minio/minio/pkg/api" ) func removeDuplicates(slice []string) []string { @@ -67,13 +67,9 @@ func runServer(c *cli.Context) { cli.ShowCommandHelpAndExit(c, "server", 1) // last argument is exit code } apiServerConfig := getAPIServerConfig(c) - s := server.Factory{ - Config: apiServerConfig, + if err := api.Start(apiServerConfig); err != nil { + Fatalln(err) } - apiServer := s.GetStartServerFunc() - // webServer := 
getWebServerConfigFunc(c) - servers := []server.StartServerFunc{apiServer} //, webServer} - server.StartMinio(servers) } func runController(c *cli.Context) { @@ -84,12 +80,4 @@ func runController(c *cli.Context) { if len(c.Args()) < 1 { cli.ShowCommandHelpAndExit(c, "control", 1) // last argument is exit code } - apiServerConfig := getAPIServerConfig(c) - s := server.Factory{ - Config: apiServerConfig, - } - apiServer := s.GetStartServerFunc() - // webServer := getWebServerConfigFunc(c) - servers := []server.StartServerFunc{apiServer} //, webServer} - server.StartMinio(servers) } diff --git a/main.go b/main.go index eeb5c171d..3fb676d72 100644 --- a/main.go +++ b/main.go @@ -26,8 +26,8 @@ import ( "github.com/dustin/go-humanize" "github.com/minio/cli" + "github.com/minio/minio/pkg/api" "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/server/httpserver" ) var globalDebugFlag = false @@ -78,7 +78,7 @@ func getAPIServerConfig(c *cli.Context) httpserver.Config { Fatalln("Both certificate and key are required to enable https.") } tls := (certFile != "" && keyFile != "") - return httpserver.Config{ + return api.Config{ Address: c.GlobalString("address"), TLS: tls, CertFile: certFile, @@ -87,21 +87,6 @@ func getAPIServerConfig(c *cli.Context) httpserver.Config { } } -/* -func getWebServerConfigFunc(c *cli.Context) server.StartServerFunc { - config := httpserver.Config{ - Address: c.GlobalString("address-mgmt"), - TLS: false, - CertFile: "", - KeyFile: "", - } - webDrivers := server.WebFactory{ - Config: config, - } - return webDrivers.GetStartServerFunc() -} -*/ - // Tries to get os/arch/platform specific information // Returns a map of current os/arch/platform/memstats func getSystemData() map[string]string { diff --git a/pkg/api/api-response.go b/pkg/api/api-response.go index 509e4818f..6ef01f260 100644 --- a/pkg/api/api-response.go +++ b/pkg/api/api-response.go @@ -20,7 +20,7 @@ import ( "net/http" "sort" - "github.com/minio/minio/pkg/storage/drivers" 
+ "github.com/minio/minio/pkg/storage/donut" ) // Reply date format @@ -34,7 +34,7 @@ const ( // // output: // populated struct that can be serialized to match xml and json api spec output -func generateListBucketsResponse(buckets []drivers.BucketMetadata) ListBucketsResponse { +func generateListBucketsResponse(buckets []donut.BucketMetadata) ListBucketsResponse { var listbuckets []*Bucket var data = ListBucketsResponse{} var owner = Owner{} @@ -70,7 +70,7 @@ func (b itemKey) Less(i, j int) bool { return b[i].Key < b[j].Key } // // output: // populated struct that can be serialized to match xml and json api spec output -func generateListObjectsResponse(bucket string, objects []drivers.ObjectMetadata, bucketResources drivers.BucketResourcesMetadata) ListObjectsResponse { +func generateListObjectsResponse(bucket string, objects []donut.ObjectMetadata, bucketResources donut.BucketResourcesMetadata) ListObjectsResponse { var contents []*Object var prefixes []*CommonPrefix var owner = Owner{} @@ -131,7 +131,7 @@ func generateCompleteMultpartUploadResult(bucket, key, location, etag string) Co } // generateListPartsResult -func generateListPartsResult(objectMetadata drivers.ObjectResourcesMetadata) ListPartsResponse { +func generateListPartsResult(objectMetadata donut.ObjectResourcesMetadata) ListPartsResponse { // TODO - support EncodingType in xml decoding listPartsResponse := ListPartsResponse{} listPartsResponse.Bucket = objectMetadata.Bucket @@ -161,7 +161,7 @@ func generateListPartsResult(objectMetadata drivers.ObjectResourcesMetadata) Lis } // generateListMultipartUploadsResult -func generateListMultipartUploadsResult(bucket string, metadata drivers.BucketMultipartResourcesMetadata) ListMultipartUploadsResponse { +func generateListMultipartUploadsResult(bucket string, metadata donut.BucketMultipartResourcesMetadata) ListMultipartUploadsResponse { listMultipartUploadsResponse := ListMultipartUploadsResponse{} listMultipartUploadsResponse.Bucket = bucket 
listMultipartUploadsResponse.Delimiter = metadata.Delimiter diff --git a/pkg/api/api-router.go b/pkg/api/api-router.go index c0e77aeca..b6750b28d 100644 --- a/pkg/api/api-router.go +++ b/pkg/api/api-router.go @@ -20,7 +20,7 @@ import router "github.com/gorilla/mux" type minioAPI struct{} -// Handler - api wrapper handler +// New api func New(config Config) API { var api = minioAPI{} diff --git a/pkg/api/headers.go b/pkg/api/headers.go index 934e03c3f..3e21af1fb 100644 --- a/pkg/api/headers.go +++ b/pkg/api/headers.go @@ -23,7 +23,7 @@ import ( "net/http" "strconv" - "github.com/minio/minio/pkg/storage/drivers" + "github.com/minio/minio/pkg/storage/donut" ) // No encoder interface exists, so we create one. @@ -62,7 +62,7 @@ func encodeErrorResponse(response interface{}, acceptsType contentType) []byte { } // Write object header -func setObjectHeaders(w http.ResponseWriter, metadata drivers.ObjectMetadata) { +func setObjectHeaders(w http.ResponseWriter, metadata donut.ObjectMetadata) { lastModified := metadata.Created.Format(http.TimeFormat) // common headers setCommonHeaders(w, metadata.ContentType, int(metadata.Size)) @@ -72,7 +72,7 @@ func setObjectHeaders(w http.ResponseWriter, metadata drivers.ObjectMetadata) { } // Write range object header -func setRangeObjectHeaders(w http.ResponseWriter, metadata drivers.ObjectMetadata, contentRange *httpRange) { +func setRangeObjectHeaders(w http.ResponseWriter, metadata donut.ObjectMetadata, contentRange *httpRange) { // set common headers setCommonHeaders(w, metadata.ContentType, int(metadata.Size)) // set object headers diff --git a/pkg/api/resources.go b/pkg/api/resources.go index 0b7076c0e..dea992b4e 100644 --- a/pkg/api/resources.go +++ b/pkg/api/resources.go @@ -20,11 +20,11 @@ import ( "net/url" "strconv" - "github.com/minio/minio/pkg/storage/drivers" + "github.com/minio/minio/pkg/storage/donut" ) // parse bucket url queries -func getBucketResources(values url.Values) (v drivers.BucketResourcesMetadata) { +func 
getBucketResources(values url.Values) (v donut.BucketResourcesMetadata) { v.Prefix = values.Get("prefix") v.Marker = values.Get("marker") v.Maxkeys, _ = strconv.Atoi(values.Get("max-keys")) @@ -34,7 +34,7 @@ func getBucketResources(values url.Values) (v drivers.BucketResourcesMetadata) { } // part bucket url queries for ?uploads -func getBucketMultipartResources(values url.Values) (v drivers.BucketMultipartResourcesMetadata) { +func getBucketMultipartResources(values url.Values) (v donut.BucketMultipartResourcesMetadata) { v.Prefix = values.Get("prefix") v.KeyMarker = values.Get("key-marker") v.MaxUploads, _ = strconv.Atoi(values.Get("max-uploads")) @@ -45,7 +45,7 @@ func getBucketMultipartResources(values url.Values) (v drivers.BucketMultipartRe } // parse object url queries -func getObjectResources(values url.Values) (v drivers.ObjectResourcesMetadata) { +func getObjectResources(values url.Values) (v donut.ObjectResourcesMetadata) { v.UploadID = values.Get("uploadId") v.PartNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker")) v.MaxParts, _ = strconv.Atoi(values.Get("max-parts")) diff --git a/pkg/storage/drivers/cache/cache-multipart.go b/pkg/storage/donut/cache-multipart.go similarity index 73% rename from pkg/storage/drivers/cache/cache-multipart.go rename to pkg/storage/donut/cache-multipart.go index 56c29bf24..93ab51ea2 100644 --- a/pkg/storage/drivers/cache/cache-multipart.go +++ b/pkg/storage/donut/cache-multipart.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package cache +package donut import ( "bytes" @@ -32,28 +32,27 @@ import ( "time" "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/drivers" ) -func (cache *cacheDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) { +func (cache donut) NewMultipartUpload(bucket, key, contentType string) (string, error) { cache.lock.RLock() - if !drivers.IsValidBucket(bucket) { + if !IsValidBucket(bucket) { cache.lock.RUnlock() - return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) + return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } - if !drivers.IsValidObjectName(key) { + if !IsValidObjectName(key) { cache.lock.RUnlock() - return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil) + return "", iodine.New(ObjectNameInvalid{Object: key}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { cache.lock.RUnlock() - return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return "", iodine.New(BucketNotFound{Bucket: bucket}, nil) } storedBucket := cache.storedBuckets[bucket] objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { cache.lock.RUnlock() - return "", iodine.New(drivers.ObjectExists{Bucket: bucket, Object: key}, nil) + return "", iodine.New(ObjectExists{Bucket: bucket, Object: key}, nil) } cache.lock.RUnlock() @@ -72,12 +71,12 @@ func (cache *cacheDriver) NewMultipartUpload(bucket, key, contentType string) (s return uploadID, nil } -func (cache *cacheDriver) AbortMultipartUpload(bucket, key, uploadID string) error { +func (cache donut) AbortMultipartUpload(bucket, key, uploadID string) error { cache.lock.RLock() storedBucket := cache.storedBuckets[bucket] if storedBucket.multiPartSession[key].uploadID != uploadID { cache.lock.RUnlock() - return iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil) + return iodine.New(InvalidUploadID{UploadID: uploadID}, nil) } cache.lock.RUnlock() @@ -90,13 +89,13 @@ func 
getMultipartKey(key string, uploadID string, partNumber int) string { return key + "?uploadId=" + uploadID + "&partNumber=" + strconv.Itoa(partNumber) } -func (cache *cacheDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { +func (cache donut) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { // Verify upload id cache.lock.RLock() storedBucket := cache.storedBuckets[bucket] if storedBucket.multiPartSession[key].uploadID != uploadID { cache.lock.RUnlock() - return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil) + return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil) } cache.lock.RUnlock() @@ -110,19 +109,19 @@ func (cache *cacheDriver) CreateObjectPart(bucket, key, uploadID string, partID } // createObject - PUT object to cache buffer -func (cache *cacheDriver) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { +func (cache donut) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { cache.lock.RLock() - if !drivers.IsValidBucket(bucket) { + if !IsValidBucket(bucket) { cache.lock.RUnlock() - return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) + return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } - if !drivers.IsValidObjectName(key) { + if !IsValidObjectName(key) { cache.lock.RUnlock() - return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil) + return "", iodine.New(ObjectNameInvalid{Object: key}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { cache.lock.RUnlock() - return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return "", iodine.New(BucketNotFound{Bucket: bucket}, nil) } storedBucket := 
cache.storedBuckets[bucket] // get object key @@ -141,7 +140,7 @@ func (cache *cacheDriver) createObjectPart(bucket, key, uploadID string, partID expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { // pro-actively close the connection - return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil) + return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } @@ -180,10 +179,10 @@ func (cache *cacheDriver) createObjectPart(bucket, key, uploadID string, partID // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { - return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) + return "", iodine.New(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) } } - newPart := drivers.PartMetadata{ + newPart := PartMetadata{ PartNumber: partID, LastModified: time.Now().UTC(), ETag: md5Sum, @@ -201,36 +200,36 @@ func (cache *cacheDriver) createObjectPart(bucket, key, uploadID string, partID return md5Sum, nil } -func (cache *cacheDriver) cleanupMultipartSession(bucket, key, uploadID string) { +func (cache donut) cleanupMultipartSession(bucket, key, uploadID string) { cache.lock.Lock() defer cache.lock.Unlock() delete(cache.storedBuckets[bucket].multiPartSession, key) } -func (cache *cacheDriver) cleanupMultiparts(bucket, key, uploadID string) { +func (cache donut) cleanupMultiparts(bucket, key, uploadID string) { for i := 1; i <= cache.storedBuckets[bucket].multiPartSession[key].totalParts; i++ { objectKey := bucket + "/" + getMultipartKey(key, uploadID, i) cache.multiPartObjects.Delete(objectKey) } } -func (cache *cacheDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) { - if 
!drivers.IsValidBucket(bucket) { - return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) +func (cache donut) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) { + if !IsValidBucket(bucket) { + return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } - if !drivers.IsValidObjectName(key) { - return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil) + if !IsValidObjectName(key) { + return "", iodine.New(ObjectNameInvalid{Object: key}, nil) } // Verify upload id cache.lock.RLock() if _, ok := cache.storedBuckets[bucket]; ok == false { cache.lock.RUnlock() - return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return "", iodine.New(BucketNotFound{Bucket: bucket}, nil) } storedBucket := cache.storedBuckets[bucket] if storedBucket.multiPartSession[key].uploadID != uploadID { cache.lock.RUnlock() - return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil) + return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil) } cache.lock.RUnlock() @@ -249,10 +248,10 @@ func (cache *cacheDriver) CompleteMultipartUpload(bucket, key, uploadID string, // complete multi part request header md5sum per part is hex encoded recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\"")) if err != nil { - return "", iodine.New(drivers.InvalidDigest{Md5: recvMD5}, nil) + return "", iodine.New(InvalidDigest{Md5: recvMD5}, nil) } if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) { - return "", iodine.New(drivers.BadDigest{Md5: recvMD5, Bucket: bucket, Key: getMultipartKey(key, uploadID, i)}, nil) + return "", iodine.New(BadDigest{Md5: recvMD5, Bucket: bucket, Key: getMultipartKey(key, uploadID, i)}, nil) } _, err = io.Copy(&fullObject, bytes.NewBuffer(object)) if err != nil { @@ -279,21 +278,21 @@ func (cache *cacheDriver) CompleteMultipartUpload(bucket, key, uploadID string, } // byKey is a sortable interface for UploadMetadata slice -type byKey []*drivers.UploadMetadata +type byKey 
[]*UploadMetadata func (a byKey) Len() int { return len(a) } func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key } -func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) { +func (cache donut) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) { // TODO handle delimiter cache.lock.RLock() defer cache.lock.RUnlock() if _, ok := cache.storedBuckets[bucket]; ok == false { - return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } storedBucket := cache.storedBuckets[bucket] - var uploads []*drivers.UploadMetadata + var uploads []*UploadMetadata for key, session := range storedBucket.multiPartSession { if strings.HasPrefix(key, resources.Prefix) { @@ -309,7 +308,7 @@ func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers. switch { case resources.KeyMarker != "" && resources.UploadIDMarker == "": if key > resources.KeyMarker { - upload := new(drivers.UploadMetadata) + upload := new(UploadMetadata) upload.Key = key upload.UploadID = session.uploadID upload.Initiated = session.initiated @@ -318,7 +317,7 @@ func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers. case resources.KeyMarker != "" && resources.UploadIDMarker != "": if session.uploadID > resources.UploadIDMarker { if key >= resources.KeyMarker { - upload := new(drivers.UploadMetadata) + upload := new(UploadMetadata) upload.Key = key upload.UploadID = session.uploadID upload.Initiated = session.initiated @@ -326,7 +325,7 @@ func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers. 
} } default: - upload := new(drivers.UploadMetadata) + upload := new(UploadMetadata) upload.Key = key upload.UploadID = session.uploadID upload.Initiated = session.initiated @@ -340,30 +339,30 @@ func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers. } // partNumber is a sortable interface for Part slice -type partNumber []*drivers.PartMetadata +type partNumber []*PartMetadata func (a partNumber) Len() int { return len(a) } func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } -func (cache *cacheDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) { +func (cache donut) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) { // Verify upload id cache.lock.RLock() defer cache.lock.RUnlock() if _, ok := cache.storedBuckets[bucket]; ok == false { - return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } storedBucket := cache.storedBuckets[bucket] if _, ok := storedBucket.multiPartSession[key]; ok == false { - return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: key}, nil) + return ObjectResourcesMetadata{}, iodine.New(ObjectNotFound{Bucket: bucket, Object: key}, nil) } if storedBucket.multiPartSession[key].uploadID != resources.UploadID { - return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.InvalidUploadID{UploadID: resources.UploadID}, nil) + return ObjectResourcesMetadata{}, iodine.New(InvalidUploadID{UploadID: resources.UploadID}, nil) } objectResourcesMetadata := resources objectResourcesMetadata.Bucket = bucket objectResourcesMetadata.Key = key - var parts []*drivers.PartMetadata + var parts []*PartMetadata var startPartNumber int switch { 
case objectResourcesMetadata.PartNumberMarker == 0: @@ -381,7 +380,7 @@ func (cache *cacheDriver) ListObjectParts(bucket, key string, resources drivers. } part, ok := storedBucket.partMetadata[bucket+"/"+getMultipartKey(key, resources.UploadID, i)] if !ok { - return drivers.ObjectResourcesMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil) + return ObjectResourcesMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil) } parts = append(parts, &part) } @@ -390,7 +389,7 @@ func (cache *cacheDriver) ListObjectParts(bucket, key string, resources drivers. return objectResourcesMetadata, nil } -func (cache *cacheDriver) expiredPart(a ...interface{}) { +func (cache donut) expiredPart(a ...interface{}) { key := a[0].(string) // loop through all buckets for _, storedBucket := range cache.storedBuckets { diff --git a/pkg/storage/drivers/cache/cache.go b/pkg/storage/donut/cache.go similarity index 64% rename from pkg/storage/drivers/cache/cache.go rename to pkg/storage/donut/cache.go index 4b4bb21ce..5175e7332 100644 --- a/pkg/storage/drivers/cache/cache.go +++ b/pkg/storage/donut/cache.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package cache +package donut import ( "bufio" @@ -29,42 +29,11 @@ import ( "sort" "strconv" "strings" - "sync" "time" "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/drivers" - "github.com/minio/minio/pkg/storage/trove" ) -// cacheDriver - local variables -type cacheDriver struct { - storedBuckets map[string]storedBucket - lock *sync.RWMutex - objects *trove.Cache - multiPartObjects *trove.Cache - maxSize uint64 - expiration time.Duration - - // stacked driver - driver drivers.Driver -} - -// storedBucket saved bucket -type storedBucket struct { - bucketMetadata drivers.BucketMetadata - objectMetadata map[string]drivers.ObjectMetadata - partMetadata map[string]drivers.PartMetadata - multiPartSession map[string]multiPartSession -} - -// multiPartSession multipart session -type multiPartSession struct { - totalParts int - uploadID string - initiated time.Time -} - // total Number of buckets allowed const ( totalBuckets = 100 @@ -88,36 +57,18 @@ func newProxyWriter(w io.Writer) *proxyWriter { return &proxyWriter{writer: w, writtenBytes: nil} } -// NewDriver instantiate a new cache driver -func NewDriver(maxSize uint64, expiration time.Duration, driver drivers.Driver) (drivers.Driver, error) { - cache := new(cacheDriver) - cache.storedBuckets = make(map[string]storedBucket) - cache.maxSize = maxSize - cache.expiration = expiration - cache.objects = trove.NewCache(maxSize, expiration) - cache.multiPartObjects = trove.NewCache(0, time.Duration(0)) - cache.lock = new(sync.RWMutex) - - cache.objects.OnExpired = cache.expiredObject - cache.multiPartObjects.OnExpired = cache.expiredPart - - // set up cache expiration - cache.objects.ExpireObjects(time.Second * 5) - return cache, nil -} - // GetObject - GET object from cache buffer -func (cache *cacheDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) { +func (cache donut) GetObject(w io.Writer, bucket string, object string) (int64, error) { cache.lock.RLock() defer 
cache.lock.RUnlock() - if !drivers.IsValidBucket(bucket) { - return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) + if !IsValidBucket(bucket) { + return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } - if !drivers.IsValidObjectName(object) { - return 0, iodine.New(drivers.ObjectNameInvalid{Object: object}, nil) + if !IsValidObjectName(object) { + return 0, iodine.New(ObjectNameInvalid{Object: object}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { - return 0, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return 0, iodine.New(BucketNotFound{Bucket: bucket}, nil) } objectKey := bucket + "/" + object data, ok := cache.objects.Get(objectKey) @@ -125,7 +76,7 @@ func (cache *cacheDriver) GetObject(w io.Writer, bucket string, object string) ( if cache.driver != nil { return cache.driver.GetObject(w, bucket, object) } - return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) + return 0, iodine.New(ObjectNotFound{Bucket: bucket, Object: object}, nil) } written, err := io.Copy(w, bytes.NewBuffer(data)) if err != nil { @@ -135,7 +86,7 @@ func (cache *cacheDriver) GetObject(w io.Writer, bucket string, object string) ( } // GetPartialObject - GET object from cache buffer range -func (cache *cacheDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { +func (cache donut) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { errParams := map[string]string{ "bucket": bucket, "object": object, @@ -144,14 +95,14 @@ func (cache *cacheDriver) GetPartialObject(w io.Writer, bucket, object string, s } cache.lock.RLock() defer cache.lock.RUnlock() - if !drivers.IsValidBucket(bucket) { - return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, errParams) + if !IsValidBucket(bucket) { + return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, errParams) } - if !drivers.IsValidObjectName(object) { - return 0, 
iodine.New(drivers.ObjectNameInvalid{Object: object}, errParams) + if !IsValidObjectName(object) { + return 0, iodine.New(ObjectNameInvalid{Object: object}, errParams) } if start < 0 { - return 0, iodine.New(drivers.InvalidRange{ + return 0, iodine.New(InvalidRange{ Start: start, Length: length, }, errParams) @@ -162,7 +113,7 @@ func (cache *cacheDriver) GetPartialObject(w io.Writer, bucket, object string, s if cache.driver != nil { return cache.driver.GetPartialObject(w, bucket, object, start, length) } - return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) + return 0, iodine.New(ObjectNotFound{Bucket: bucket, Object: object}, nil) } written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length) if err != nil { @@ -172,21 +123,21 @@ func (cache *cacheDriver) GetPartialObject(w io.Writer, bucket, object string, s } // GetBucketMetadata - -func (cache *cacheDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) { +func (cache donut) GetBucketMetadata(bucket string) (BucketMetadata, error) { cache.lock.RLock() - if !drivers.IsValidBucket(bucket) { + if !IsValidBucket(bucket) { cache.lock.RUnlock() - return drivers.BucketMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) + return BucketMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { if cache.driver == nil { cache.lock.RUnlock() - return drivers.BucketMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } bucketMetadata, err := cache.driver.GetBucketMetadata(bucket) if err != nil { cache.lock.RUnlock() - return drivers.BucketMetadata{}, iodine.New(err, nil) + return BucketMetadata{}, iodine.New(err, nil) } storedBucket := cache.storedBuckets[bucket] cache.lock.RUnlock() @@ -200,15 +151,15 @@ func (cache *cacheDriver) GetBucketMetadata(bucket string) (drivers.BucketMetada } // 
SetBucketMetadata - -func (cache *cacheDriver) SetBucketMetadata(bucket, acl string) error { +func (cache donut) SetBucketMetadata(bucket, acl string) error { cache.lock.RLock() - if !drivers.IsValidBucket(bucket) { + if !IsValidBucket(bucket) { cache.lock.RUnlock() - return iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) + return iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { cache.lock.RUnlock() - return iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return iodine.New(BucketNotFound{Bucket: bucket}, nil) } if strings.TrimSpace(acl) == "" { acl = "private" @@ -221,7 +172,7 @@ func (cache *cacheDriver) SetBucketMetadata(bucket, acl string) error { } } storedBucket := cache.storedBuckets[bucket] - storedBucket.bucketMetadata.ACL = drivers.BucketACL(acl) + storedBucket.bucketMetadata.ACL = BucketACL(acl) cache.storedBuckets[bucket] = storedBucket cache.lock.Unlock() return nil @@ -246,10 +197,10 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error { return iodine.New(errors.New("invalid argument"), nil) } -func (cache *cacheDriver) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { +func (cache donut) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { if size > int64(cache.maxSize) { - generic := drivers.GenericObjectError{Bucket: bucket, Object: key} - return "", iodine.New(drivers.EntityTooLarge{ + generic := GenericObjectError{Bucket: bucket, Object: key} + return "", iodine.New(EntityTooLarge{ GenericObjectError: generic, Size: strconv.FormatInt(size, 10), MaxSize: strconv.FormatUint(cache.maxSize, 10), @@ -262,26 +213,26 @@ func (cache *cacheDriver) CreateObject(bucket, key, contentType, expectedMD5Sum } // createObject - PUT object to cache buffer -func (cache *cacheDriver) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data 
io.Reader) (string, error) { +func (cache donut) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { cache.lock.RLock() - if !drivers.IsValidBucket(bucket) { + if !IsValidBucket(bucket) { cache.lock.RUnlock() - return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) + return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } - if !drivers.IsValidObjectName(key) { + if !IsValidObjectName(key) { cache.lock.RUnlock() - return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil) + return "", iodine.New(ObjectNameInvalid{Object: key}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { cache.lock.RUnlock() - return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return "", iodine.New(BucketNotFound{Bucket: bucket}, nil) } storedBucket := cache.storedBuckets[bucket] // get object key objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { cache.lock.RUnlock() - return "", iodine.New(drivers.ObjectExists{Bucket: bucket, Object: key}, nil) + return "", iodine.New(ObjectExists{Bucket: bucket, Object: key}, nil) } cache.lock.RUnlock() @@ -293,7 +244,7 @@ func (cache *cacheDriver) createObject(bucket, key, contentType, expectedMD5Sum expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { // pro-actively close the connection - return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil) + return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } @@ -328,18 +279,18 @@ func (cache *cacheDriver) createObject(bucket, key, contentType, expectedMD5Sum go debug.FreeOSMemory() cache.lock.Unlock() if !ok { - return "", iodine.New(drivers.InternalError{}, nil) + return "", iodine.New(InternalError{}, nil) } md5Sum := hex.EncodeToString(md5SumBytes) // Verify if the written object is equal to what is expected, 
only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { - return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) + return "", iodine.New(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) } } - newObject := drivers.ObjectMetadata{ + newObject := ObjectMetadata{ Bucket: bucket, Key: key, @@ -357,23 +308,23 @@ func (cache *cacheDriver) createObject(bucket, key, contentType, expectedMD5Sum } // CreateBucket - create bucket in cache -func (cache *cacheDriver) CreateBucket(bucketName, acl string) error { +func (cache donut) CreateBucket(bucketName, acl string) error { cache.lock.RLock() if len(cache.storedBuckets) == totalBuckets { cache.lock.RUnlock() - return iodine.New(drivers.TooManyBuckets{Bucket: bucketName}, nil) + return iodine.New(TooManyBuckets{Bucket: bucketName}, nil) } - if !drivers.IsValidBucket(bucketName) { + if !IsValidBucket(bucketName) { cache.lock.RUnlock() - return iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) + return iodine.New(BucketNameInvalid{Bucket: bucketName}, nil) } - if !drivers.IsValidBucketACL(acl) { + if !IsValidBucketACL(acl) { cache.lock.RUnlock() - return iodine.New(drivers.InvalidACL{ACL: acl}, nil) + return iodine.New(InvalidACL{ACL: acl}, nil) } if _, ok := cache.storedBuckets[bucketName]; ok == true { cache.lock.RUnlock() - return iodine.New(drivers.BucketExists{Bucket: bucketName}, nil) + return iodine.New(BucketExists{Bucket: bucketName}, nil) } cache.lock.RUnlock() @@ -387,13 +338,13 @@ func (cache *cacheDriver) CreateBucket(bucketName, acl string) error { } } var newBucket = storedBucket{} - newBucket.objectMetadata = make(map[string]drivers.ObjectMetadata) + newBucket.objectMetadata = make(map[string]ObjectMetadata) newBucket.multiPartSession = make(map[string]multiPartSession) - newBucket.partMetadata = make(map[string]drivers.PartMetadata) - 
newBucket.bucketMetadata = drivers.BucketMetadata{} + newBucket.partMetadata = make(map[string]PartMetadata) + newBucket.bucketMetadata = BucketMetadata{} newBucket.bucketMetadata.Name = bucketName newBucket.bucketMetadata.Created = time.Now().UTC() - newBucket.bucketMetadata.ACL = drivers.BucketACL(acl) + newBucket.bucketMetadata.ACL = BucketACL(acl) cache.lock.Lock() cache.storedBuckets[bucketName] = newBucket cache.lock.Unlock() @@ -418,7 +369,7 @@ func appendUniq(slice []string, i string) []string { return append(slice, i) } -func (cache *cacheDriver) filterDelimiterPrefix(keys []string, key, delim string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) { +func (cache donut) filterDelimiterPrefix(keys []string, key, delim string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) { switch true { case key == r.Prefix: keys = appendUniq(keys, key) @@ -431,7 +382,7 @@ func (cache *cacheDriver) filterDelimiterPrefix(keys []string, key, delim string return keys, r } -func (cache *cacheDriver) listObjects(keys []string, key string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) { +func (cache donut) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) { switch true { // Prefix absent, delimit object key based on delimiter case r.IsDelimiterSet(): @@ -460,19 +411,19 @@ func (cache *cacheDriver) listObjects(keys []string, key string, r drivers.Bucke } // ListObjects - list objects from cache -func (cache *cacheDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { +func (cache donut) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) { cache.lock.RLock() defer cache.lock.RUnlock() - if !drivers.IsValidBucket(bucket) { - return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, 
iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) + if !IsValidBucket(bucket) { + return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } - if !drivers.IsValidObjectName(resources.Prefix) { - return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.ObjectNameInvalid{Object: resources.Prefix}, nil) + if !IsValidObjectName(resources.Prefix) { + return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(ObjectNameInvalid{Object: resources.Prefix}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { - return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } - var results []drivers.ObjectMetadata + var results []ObjectMetadata var keys []string storedBucket := cache.storedBuckets[bucket] for key := range storedBucket.objectMetadata { @@ -508,17 +459,17 @@ func (cache *cacheDriver) ListObjects(bucket string, resources drivers.BucketRes } // byBucketName is a type for sorting bucket metadata by bucket name -type byBucketName []drivers.BucketMetadata +type byBucketName []BucketMetadata func (b byBucketName) Len() int { return len(b) } func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name } // ListBuckets - List buckets from cache -func (cache *cacheDriver) ListBuckets() ([]drivers.BucketMetadata, error) { +func (cache donut) ListBuckets() ([]BucketMetadata, error) { cache.lock.RLock() defer cache.lock.RUnlock() - var results []drivers.BucketMetadata + var results []BucketMetadata for _, bucket := range cache.storedBuckets { results = append(results, bucket.bucketMetadata) } @@ -527,20 +478,20 @@ func (cache *cacheDriver) ListBuckets() ([]drivers.BucketMetadata, error) { } // GetObjectMetadata - get object metadata 
from cache -func (cache *cacheDriver) GetObjectMetadata(bucket, key string) (drivers.ObjectMetadata, error) { +func (cache donut) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) { cache.lock.RLock() // check if bucket exists - if !drivers.IsValidBucket(bucket) { + if !IsValidBucket(bucket) { cache.lock.RUnlock() - return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) + return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } - if !drivers.IsValidObjectName(key) { + if !IsValidObjectName(key) { cache.lock.RUnlock() - return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: key}, nil) + return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { cache.lock.RUnlock() - return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) + return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } storedBucket := cache.storedBuckets[bucket] objectKey := bucket + "/" + key @@ -552,7 +503,7 @@ func (cache *cacheDriver) GetObjectMetadata(bucket, key string) (drivers.ObjectM objMetadata, err := cache.driver.GetObjectMetadata(bucket, key) cache.lock.RUnlock() if err != nil { - return drivers.ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, iodine.New(err, nil) } // update cache.lock.Lock() @@ -561,10 +512,10 @@ func (cache *cacheDriver) GetObjectMetadata(bucket, key string) (drivers.ObjectM return objMetadata, nil } cache.lock.RUnlock() - return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: key}, nil) + return ObjectMetadata{}, iodine.New(ObjectNotFound{Bucket: bucket, Object: key}, nil) } -func (cache *cacheDriver) expiredObject(a ...interface{}) { +func (cache donut) expiredObject(a ...interface{}) { cacheStats := cache.objects.Stats() log.Printf("CurrentSize: %d, CurrentItems: %d, TotalExpirations: %d", cacheStats.Bytes, 
cacheStats.Items, cacheStats.Expired) diff --git a/pkg/storage/drivers/date.go b/pkg/storage/donut/date.go similarity index 99% rename from pkg/storage/drivers/date.go rename to pkg/storage/donut/date.go index c47b83fd6..e91ff5aff 100644 --- a/pkg/storage/drivers/date.go +++ b/pkg/storage/donut/date.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package drivers +package donut import ( "errors" diff --git a/pkg/storage/donut/donut.go b/pkg/storage/donut/donut.go index 88be65fdc..0ac45925d 100644 --- a/pkg/storage/donut/donut.go +++ b/pkg/storage/donut/donut.go @@ -25,8 +25,10 @@ import ( "strconv" "strings" "sync" + "time" "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/storage/donut/trove" ) // donut struct internal data @@ -35,6 +37,32 @@ type donut struct { buckets map[string]bucket nodes map[string]node lock *sync.RWMutex + cache cache +} + +// cache - local variables +type cache struct { + storedBuckets map[string]storedBucket + lock *sync.RWMutex + objects *trove.Cache + multiPartObjects *trove.Cache + maxSize uint64 + expiration time.Duration +} + +// storedBucket saved bucket +type storedBucket struct { + bucketMetadata BucketMetadata + objectMetadata map[string]ObjectMetadata + partMetadata map[string]PartMetadata + multiPartSession map[string]multiPartSession +} + +// multiPartSession multipart session +type multiPartSession struct { + totalParts int + uploadID string + initiated time.Time } // config files used inside Donut @@ -82,6 +110,15 @@ func NewDonut(donutName string, nodeDiskMap map[string][]string) (Donut, error) return nil, iodine.New(err, nil) } } + d.cache.storedBuckets = make(map[string]storedBucket) + d.cache.objects = trove.NewCache(maxSize, expiration) + d.cache.multiPartObjects = trove.NewCache(0, time.Duration(0)) + + d.cache.objects.OnExpired = d.expiredObject + d.cache.multiPartObjects.OnExpired = d.expiredPart + + // set up cache expiration + d.cache.objects.ExpireObjects(time.Second * 5) return d, nil } 
diff --git a/pkg/storage/donut/errors.go b/pkg/storage/donut/errors.go index 32401f422..f199ff34c 100644 --- a/pkg/storage/donut/errors.go +++ b/pkg/storage/donut/errors.go @@ -16,6 +16,8 @@ package donut +import "fmt" + // InvalidArgument invalid argument type InvalidArgument struct{} @@ -138,3 +140,172 @@ type InvalidErasureTechnique struct { func (e InvalidErasureTechnique) Error() string { return "Invalid erasure technique: " + e.Technique } + +// InternalError - generic internal error +type InternalError struct { +} + +// BackendError - generic disk backend error +type BackendError struct { + Path string +} + +// BackendCorrupted - path has corrupted data +type BackendCorrupted BackendError + +// APINotImplemented - generic API not implemented error +type APINotImplemented struct { + API string +} + +// GenericBucketError - generic bucket error +type GenericBucketError struct { + Bucket string +} + +// GenericObjectError - generic object error +type GenericObjectError struct { + Bucket string + Object string +} + +// ImplementationError - generic implementation error +type ImplementationError struct { + Bucket string + Object string + Err error +} + +// DigestError - Generic Md5 error +type DigestError struct { + Bucket string + Key string + Md5 string +} + +/// ACL related errors + +// InvalidACL - acl invalid +type InvalidACL struct { + ACL string +} + +func (e InvalidACL) Error() string { + return "Requested ACL is " + e.ACL + " invalid" +} + +/// Bucket related errors + +// BucketNameInvalid - bucketname provided is invalid +type BucketNameInvalid GenericBucketError + +// TooManyBuckets - total buckets exceeded +type TooManyBuckets GenericBucketError + +/// Object related errors + +// EntityTooLarge - object size exceeds maximum limit +type EntityTooLarge struct { + GenericObjectError + Size string + MaxSize string +} + +// ObjectNameInvalid - object name provided is invalid +type ObjectNameInvalid GenericObjectError + +// InvalidDigest - md5 in request 
header invalid +type InvalidDigest DigestError + +// Return string an error formatted as the given text +func (e ImplementationError) Error() string { + error := "" + if e.Bucket != "" { + error = error + "Bucket: " + e.Bucket + " " + } + if e.Object != "" { + error = error + "Object: " + e.Object + " " + } + error = error + "Error: " + e.Err.Error() + return error +} + +// EmbedError - wrapper function for error object +func EmbedError(bucket, object string, err error) ImplementationError { + return ImplementationError{ + Bucket: bucket, + Object: object, + Err: err, + } +} + +// Return string an error formatted as the given text +func (e InternalError) Error() string { + return "Internal error occured" +} + +// Return string an error formatted as the given text +func (e APINotImplemented) Error() string { + return "Api not implemented: " + e.API +} + +// Return string an error formatted as the given text +func (e BucketNameInvalid) Error() string { + return "Bucket name invalid: " + e.Bucket +} + +// Return string an error formatted as the given text +func (e TooManyBuckets) Error() string { + return "Bucket limit exceeded beyond 100, cannot create bucket: " + e.Bucket +} + +// Return string an error formatted as the given text +func (e ObjectNameInvalid) Error() string { + return "Object name invalid: " + e.Bucket + "#" + e.Object +} + +// Return string an error formatted as the given text +func (e EntityTooLarge) Error() string { + return e.Bucket + "#" + e.Object + "with " + e.Size + "reached maximum allowed size limit " + e.MaxSize +} + +// Return string an error formatted as the given text +func (e BackendCorrupted) Error() string { + return "Backend corrupted: " + e.Path +} + +// Return string an error formatted as the given text +func (e InvalidDigest) Error() string { + return "Md5 provided " + e.Md5 + " is invalid" +} + +// OperationNotPermitted - operation not permitted +type OperationNotPermitted struct { + Op string + Reason string +} + +func (e 
OperationNotPermitted) Error() string { + return "Operation " + e.Op + " not permitted for reason: " + e.Reason +} + +// InvalidRange - invalid range +type InvalidRange struct { + Start int64 + Length int64 +} + +func (e InvalidRange) Error() string { + return fmt.Sprintf("Invalid range start:%d length:%d", e.Start, e.Length) +} + +/// Multipart related errors + +// InvalidUploadID invalid upload id +type InvalidUploadID struct { + UploadID string +} + +func (e InvalidUploadID) Error() string { + return "Invalid upload id " + e.UploadID +} diff --git a/pkg/storage/trove/trove.go b/pkg/storage/donut/trove/trove.go similarity index 100% rename from pkg/storage/trove/trove.go rename to pkg/storage/donut/trove/trove.go diff --git a/pkg/storage/trove/trove_test.go b/pkg/storage/donut/trove/trove_test.go similarity index 100% rename from pkg/storage/trove/trove_test.go rename to pkg/storage/donut/trove/trove_test.go diff --git a/pkg/storage/drivers/driver.go b/pkg/storage/donut/utils.go similarity index 71% rename from pkg/storage/drivers/driver.go rename to pkg/storage/donut/utils.go index 5a70aa28d..d90a45743 100644 --- a/pkg/storage/drivers/driver.go +++ b/pkg/storage/donut/utils.go @@ -1,53 +1,12 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package drivers +package donut import ( - "io" "regexp" "strings" "time" "unicode/utf8" ) -// Driver - generic API interface for various drivers - donut, file, memory -type Driver interface { - // Bucket Operations - ListBuckets() ([]BucketMetadata, error) - CreateBucket(bucket, acl string) error - GetBucketMetadata(bucket string) (BucketMetadata, error) - SetBucketMetadata(bucket, acl string) error - - // Object Operations - GetObject(w io.Writer, bucket, object string) (int64, error) - GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) - GetObjectMetadata(bucket, key string) (ObjectMetadata, error) - ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) - CreateObject(bucket, key, contentType, md5sum string, size int64, data io.Reader) (string, error) - - // Object Multipart Operations - ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) - NewMultipartUpload(bucket, key, contentType string) (string, error) - AbortMultipartUpload(bucket, key, UploadID string) error - CreateObjectPart(bucket, key, uploadID string, partID int, contentType string, md5sum string, size int64, data io.Reader) (string, error) - CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) - ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) -} - // BucketACL - bucket level access control type BucketACL string diff --git a/pkg/storage/drivers/LICENSE b/pkg/storage/drivers/LICENSE deleted file mode 100644 index 8f71f43fe..000000000 --- a/pkg/storage/drivers/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/pkg/storage/drivers/README.md b/pkg/storage/drivers/README.md deleted file mode 100644 index fb3fe38f7..000000000 --- a/pkg/storage/drivers/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# objectdriver -Object Storage Driver diff --git a/pkg/storage/drivers/api_testsuite.go b/pkg/storage/drivers/api_testsuite.go deleted file mode 100644 index b20cae176..000000000 --- a/pkg/storage/drivers/api_testsuite.go +++ /dev/null @@ -1,535 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package drivers - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "math/rand" - "reflect" - "strconv" - - "time" - - "github.com/minio/check" - "github.com/minio/minio/pkg/iodine" -) - -// APITestSuite - collection of API tests -func APITestSuite(c *check.C, create func() Driver) { - testCreateBucket(c, create) - testMultipleObjectCreation(c, create) - testPaging(c, create) - testObjectOverwriteFails(c, create) - testNonExistantBucketOperations(c, create) - testBucketMetadata(c, create) - testBucketRecreateFails(c, create) - testPutObjectInSubdir(c, create) - testListBuckets(c, create) - testListBucketsOrder(c, create) - testListObjectsTestsForNonExistantBucket(c, create) - testNonExistantObjectInBucket(c, create) - testGetDirectoryReturnsObjectNotFound(c, create) - testDefaultContentType(c, create) - testMultipartObjectCreation(c, create) - testMultipartObjectAbort(c, create) -} - -func testCreateBucket(c *check.C, create func() Driver) { - drivers := create() - err := drivers.CreateBucket("bucket", "") - c.Assert(err, check.IsNil) -} - -func testMultipartObjectCreation(c *check.C, create func() Driver) { - drivers := create() - switch { - case reflect.TypeOf(drivers).String() == "*donut.donutDriver": - return - } - err := drivers.CreateBucket("bucket", "") - c.Assert(err, check.IsNil) - uploadID, err := drivers.NewMultipartUpload("bucket", "key", "") - c.Assert(err, check.IsNil) - - parts := make(map[int]string) - finalHasher := md5.New() - for i := 1; i <= 10; i++ { - randomPerm := rand.Perm(10) - randomString 
:= "" - for _, num := range randomPerm { - randomString = randomString + strconv.Itoa(num) - } - - hasher := md5.New() - finalHasher.Write([]byte(randomString)) - hasher.Write([]byte(randomString)) - expectedmd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) - expectedmd5Sumhex := hex.EncodeToString(hasher.Sum(nil)) - - calculatedmd5sum, err := drivers.CreateObjectPart("bucket", "key", uploadID, i, "", expectedmd5Sum, int64(len(randomString)), - bytes.NewBufferString(randomString)) - c.Assert(err, check.IsNil) - c.Assert(calculatedmd5sum, check.Equals, expectedmd5Sumhex) - parts[i] = calculatedmd5sum - } - finalExpectedmd5SumHex := hex.EncodeToString(finalHasher.Sum(nil)) - calculatedFinalmd5Sum, err := drivers.CompleteMultipartUpload("bucket", "key", uploadID, parts) - c.Assert(err, check.IsNil) - c.Assert(calculatedFinalmd5Sum, check.Equals, finalExpectedmd5SumHex) -} - -func testMultipartObjectAbort(c *check.C, create func() Driver) { - drivers := create() - switch { - case reflect.TypeOf(drivers).String() == "*donut.donutDriver": - return - } - err := drivers.CreateBucket("bucket", "") - c.Assert(err, check.IsNil) - uploadID, err := drivers.NewMultipartUpload("bucket", "key", "") - c.Assert(err, check.IsNil) - - parts := make(map[int]string) - for i := 1; i <= 10; i++ { - randomPerm := rand.Perm(10) - randomString := "" - for _, num := range randomPerm { - randomString = randomString + strconv.Itoa(num) - } - - hasher := md5.New() - hasher.Write([]byte(randomString)) - expectedmd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) - expectedmd5Sumhex := hex.EncodeToString(hasher.Sum(nil)) - - calculatedmd5sum, err := drivers.CreateObjectPart("bucket", "key", uploadID, i, "", expectedmd5Sum, int64(len(randomString)), - bytes.NewBufferString(randomString)) - c.Assert(err, check.IsNil) - c.Assert(calculatedmd5sum, check.Equals, expectedmd5Sumhex) - parts[i] = calculatedmd5sum - } - err = drivers.AbortMultipartUpload("bucket", "key", uploadID) - 
c.Assert(err, check.IsNil) -} - -func testMultipleObjectCreation(c *check.C, create func() Driver) { - objects := make(map[string][]byte) - drivers := create() - err := drivers.CreateBucket("bucket", "") - c.Assert(err, check.IsNil) - for i := 0; i < 10; i++ { - randomPerm := rand.Perm(10) - randomString := "" - for _, num := range randomPerm { - randomString = randomString + strconv.Itoa(num) - } - - hasher := md5.New() - hasher.Write([]byte(randomString)) - expectedmd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) - expectedmd5Sumhex := hex.EncodeToString(hasher.Sum(nil)) - - key := "obj" + strconv.Itoa(i) - objects[key] = []byte(randomString) - calculatedmd5sum, err := drivers.CreateObject("bucket", key, "", expectedmd5Sum, int64(len(randomString)), - bytes.NewBufferString(randomString)) - c.Assert(err, check.IsNil) - c.Assert(calculatedmd5sum, check.Equals, expectedmd5Sumhex) - } - - // ensure no duplicate etags - etags := make(map[string]string) - for key, value := range objects { - var byteBuffer bytes.Buffer - _, err := drivers.GetObject(&byteBuffer, "bucket", key) - c.Assert(err, check.IsNil) - c.Assert(byteBuffer.Bytes(), check.DeepEquals, value) - - metadata, err := drivers.GetObjectMetadata("bucket", key) - c.Assert(err, check.IsNil) - c.Assert(metadata.Size, check.Equals, int64(len(value))) - - _, ok := etags[metadata.Md5] - c.Assert(ok, check.Equals, false) - etags[metadata.Md5] = metadata.Md5 - } -} - -func testPaging(c *check.C, create func() Driver) { - drivers := create() - drivers.CreateBucket("bucket", "") - resources := BucketResourcesMetadata{} - objects, resources, err := drivers.ListObjects("bucket", resources) - c.Assert(err, check.IsNil) - c.Assert(len(objects), check.Equals, 0) - c.Assert(resources.IsTruncated, check.Equals, false) - // check before paging occurs - for i := 0; i < 5; i++ { - key := "obj" + strconv.Itoa(i) - drivers.CreateObject("bucket", key, "", "", int64(len(key)), bytes.NewBufferString(key)) - 
resources.Maxkeys = 5 - resources.Prefix = "" - objects, resources, err = drivers.ListObjects("bucket", resources) - c.Assert(err, check.IsNil) - c.Assert(len(objects), check.Equals, i+1) - c.Assert(resources.IsTruncated, check.Equals, false) - } - // check after paging occurs pages work - for i := 6; i <= 10; i++ { - key := "obj" + strconv.Itoa(i) - drivers.CreateObject("bucket", key, "", "", int64(len(key)), bytes.NewBufferString(key)) - resources.Maxkeys = 5 - resources.Prefix = "" - objects, resources, err = drivers.ListObjects("bucket", resources) - c.Assert(err, check.IsNil) - c.Assert(len(objects), check.Equals, 5) - c.Assert(resources.IsTruncated, check.Equals, true) - } - // check paging with prefix at end returns less objects - { - drivers.CreateObject("bucket", "newPrefix", "", "", int64(len("prefix1")), bytes.NewBufferString("prefix1")) - drivers.CreateObject("bucket", "newPrefix2", "", "", int64(len("prefix2")), bytes.NewBufferString("prefix2")) - resources.Prefix = "new" - resources.Maxkeys = 5 - objects, resources, err = drivers.ListObjects("bucket", resources) - c.Assert(err, check.IsNil) - c.Assert(len(objects), check.Equals, 2) - } - - // check ordering of pages - { - resources.Prefix = "" - resources.Maxkeys = 1000 - objects, resources, err = drivers.ListObjects("bucket", resources) - c.Assert(err, check.IsNil) - c.Assert(objects[0].Key, check.Equals, "newPrefix") - c.Assert(objects[1].Key, check.Equals, "newPrefix2") - c.Assert(objects[2].Key, check.Equals, "obj0") - c.Assert(objects[3].Key, check.Equals, "obj1") - c.Assert(objects[4].Key, check.Equals, "obj10") - } - - // check delimited results with delimiter and prefix - { - drivers.CreateObject("bucket", "this/is/delimited", "", "", int64(len("prefix1")), bytes.NewBufferString("prefix1")) - drivers.CreateObject("bucket", "this/is/also/a/delimited/file", "", "", int64(len("prefix2")), bytes.NewBufferString("prefix2")) - var prefixes []string - resources.CommonPrefixes = prefixes // allocate 
new everytime - resources.Delimiter = "/" - resources.Prefix = "this/is/" - resources.Maxkeys = 10 - objects, resources, err = drivers.ListObjects("bucket", resources) - c.Assert(err, check.IsNil) - c.Assert(len(objects), check.Equals, 1) - c.Assert(resources.CommonPrefixes[0], check.Equals, "this/is/also/") - } - time.Sleep(time.Second) - - // check delimited results with delimiter without prefix - { - var prefixes []string - resources.CommonPrefixes = prefixes // allocate new everytime - resources.Delimiter = "/" - resources.Prefix = "" - resources.Maxkeys = 1000 - objects, resources, err = drivers.ListObjects("bucket", resources) - c.Assert(err, check.IsNil) - c.Assert(objects[0].Key, check.Equals, "newPrefix") - c.Assert(objects[1].Key, check.Equals, "newPrefix2") - c.Assert(objects[2].Key, check.Equals, "obj0") - c.Assert(objects[3].Key, check.Equals, "obj1") - c.Assert(objects[4].Key, check.Equals, "obj10") - c.Assert(resources.CommonPrefixes[0], check.Equals, "this/") - } - - // check results with Marker - { - var prefixes []string - resources.CommonPrefixes = prefixes // allocate new everytime - resources.Prefix = "" - resources.Marker = "newPrefix" - resources.Delimiter = "" - resources.Maxkeys = 3 - objects, resources, err = drivers.ListObjects("bucket", resources) - c.Assert(err, check.IsNil) - c.Assert(objects[0].Key, check.Equals, "newPrefix2") - c.Assert(objects[1].Key, check.Equals, "obj0") - c.Assert(objects[2].Key, check.Equals, "obj1") - } - // check ordering of results with prefix - { - resources.Prefix = "obj" - resources.Delimiter = "" - resources.Marker = "" - resources.Maxkeys = 1000 - objects, resources, err = drivers.ListObjects("bucket", resources) - c.Assert(err, check.IsNil) - c.Assert(objects[0].Key, check.Equals, "obj0") - c.Assert(objects[1].Key, check.Equals, "obj1") - c.Assert(objects[2].Key, check.Equals, "obj10") - c.Assert(objects[3].Key, check.Equals, "obj2") - c.Assert(objects[4].Key, check.Equals, "obj3") - } - // check 
ordering of results with prefix and no paging - { - resources.Prefix = "new" - resources.Marker = "" - resources.Maxkeys = 5 - objects, resources, err = drivers.ListObjects("bucket", resources) - c.Assert(err, check.IsNil) - c.Assert(objects[0].Key, check.Equals, "newPrefix") - c.Assert(objects[1].Key, check.Equals, "newPrefix2") - } -} - -func testObjectOverwriteFails(c *check.C, create func() Driver) { - drivers := create() - drivers.CreateBucket("bucket", "") - - hasher1 := md5.New() - hasher1.Write([]byte("one")) - md5Sum1 := base64.StdEncoding.EncodeToString(hasher1.Sum(nil)) - md5Sum1hex := hex.EncodeToString(hasher1.Sum(nil)) - md5Sum11, err := drivers.CreateObject("bucket", "object", "", md5Sum1, int64(len("one")), bytes.NewBufferString("one")) - c.Assert(err, check.IsNil) - c.Assert(md5Sum1hex, check.Equals, md5Sum11) - - hasher2 := md5.New() - hasher2.Write([]byte("three")) - md5Sum2 := base64.StdEncoding.EncodeToString(hasher2.Sum(nil)) - _, err = drivers.CreateObject("bucket", "object", "", md5Sum2, int64(len("three")), bytes.NewBufferString("three")) - c.Assert(err, check.Not(check.IsNil)) - - var bytesBuffer bytes.Buffer - length, err := drivers.GetObject(&bytesBuffer, "bucket", "object") - c.Assert(err, check.IsNil) - c.Assert(length, check.Equals, int64(len("one"))) - c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one") -} - -func testNonExistantBucketOperations(c *check.C, create func() Driver) { - drivers := create() - _, err := drivers.CreateObject("bucket", "object", "", "", int64(len("one")), bytes.NewBufferString("one")) - c.Assert(err, check.Not(check.IsNil)) -} - -func testBucketMetadata(c *check.C, create func() Driver) { - drivers := create() - err := drivers.CreateBucket("string", "") - c.Assert(err, check.IsNil) - - metadata, err := drivers.GetBucketMetadata("string") - c.Assert(err, check.IsNil) - c.Assert(metadata.ACL, check.Equals, BucketACL("private")) -} - -func testBucketRecreateFails(c *check.C, create func() Driver) { - 
drivers := create() - err := drivers.CreateBucket("string", "") - c.Assert(err, check.IsNil) - err = drivers.CreateBucket("string", "") - c.Assert(err, check.Not(check.IsNil)) -} - -func testPutObjectInSubdir(c *check.C, create func() Driver) { - drivers := create() - err := drivers.CreateBucket("bucket", "") - c.Assert(err, check.IsNil) - - hasher := md5.New() - hasher.Write([]byte("hello world")) - md5Sum1 := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) - md5Sum1hex := hex.EncodeToString(hasher.Sum(nil)) - md5Sum11, err := drivers.CreateObject("bucket", "dir1/dir2/object", "", md5Sum1, int64(len("hello world")), - bytes.NewBufferString("hello world")) - c.Assert(err, check.IsNil) - c.Assert(md5Sum11, check.Equals, md5Sum1hex) - - var bytesBuffer bytes.Buffer - length, err := drivers.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object") - c.Assert(err, check.IsNil) - c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world")) - c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length) -} - -func testListBuckets(c *check.C, create func() Driver) { - drivers := create() - - // test empty list - buckets, err := drivers.ListBuckets() - c.Assert(err, check.IsNil) - c.Assert(len(buckets), check.Equals, 0) - - // add one and test exists - err = drivers.CreateBucket("bucket1", "") - c.Assert(err, check.IsNil) - - buckets, err = drivers.ListBuckets() - c.Assert(len(buckets), check.Equals, 1) - c.Assert(err, check.IsNil) - - // add two and test exists - err = drivers.CreateBucket("bucket2", "") - c.Assert(err, check.IsNil) - - buckets, err = drivers.ListBuckets() - c.Assert(len(buckets), check.Equals, 2) - c.Assert(err, check.IsNil) - - // add three and test exists + prefix - err = drivers.CreateBucket("bucket22", "") - - buckets, err = drivers.ListBuckets() - c.Assert(len(buckets), check.Equals, 3) - c.Assert(err, check.IsNil) -} - -func testListBucketsOrder(c *check.C, create func() Driver) { - // if implementation contains a map, order of map keys 
will vary. - // this ensures they return in the same order each time - for i := 0; i < 10; i++ { - drivers := create() - // add one and test exists - drivers.CreateBucket("bucket1", "") - drivers.CreateBucket("bucket2", "") - - buckets, err := drivers.ListBuckets() - c.Assert(err, check.IsNil) - c.Assert(len(buckets), check.Equals, 2) - c.Assert(buckets[0].Name, check.Equals, "bucket1") - c.Assert(buckets[1].Name, check.Equals, "bucket2") - } -} - -func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Driver) { - drivers := create() - resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000} - objects, resources, err := drivers.ListObjects("bucket", resources) - c.Assert(err, check.Not(check.IsNil)) - c.Assert(resources.IsTruncated, check.Equals, false) - c.Assert(len(objects), check.Equals, 0) -} - -func testNonExistantObjectInBucket(c *check.C, create func() Driver) { - drivers := create() - err := drivers.CreateBucket("bucket", "") - c.Assert(err, check.IsNil) - - var byteBuffer bytes.Buffer - length, err := drivers.GetObject(&byteBuffer, "bucket", "dir1") - c.Assert(length, check.Equals, int64(0)) - c.Assert(err, check.Not(check.IsNil)) - c.Assert(len(byteBuffer.Bytes()), check.Equals, 0) - switch err := iodine.ToError(err).(type) { - case ObjectNotFound: - { - c.Assert(err, check.ErrorMatches, "Object not Found: bucket#dir1") - } - default: - { - c.Assert(err, check.Equals, "fails") - } - } -} - -func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Driver) { - drivers := create() - err := drivers.CreateBucket("bucket", "") - c.Assert(err, check.IsNil) - - _, err = drivers.CreateObject("bucket", "dir1/dir2/object", "", "", int64(len("hello world")), - bytes.NewBufferString("hello world")) - c.Assert(err, check.IsNil) - - var byteBuffer bytes.Buffer - length, err := drivers.GetObject(&byteBuffer, "bucket", "dir1") - c.Assert(length, check.Equals, int64(0)) - switch err := iodine.ToError(err).(type) { - case ObjectNotFound: 
- { - c.Assert(err.Bucket, check.Equals, "bucket") - c.Assert(err.Object, check.Equals, "dir1") - } - default: - { - // force a failure with a line number - c.Assert(err, check.Equals, "ObjectNotFound") - } - } - c.Assert(len(byteBuffer.Bytes()), check.Equals, 0) - - var byteBuffer2 bytes.Buffer - length, err = drivers.GetObject(&byteBuffer, "bucket", "dir1/") - c.Assert(length, check.Equals, int64(0)) - switch err := iodine.ToError(err).(type) { - case ObjectNotFound: - { - c.Assert(err.Bucket, check.Equals, "bucket") - c.Assert(err.Object, check.Equals, "dir1/") - } - default: - { - // force a failure with a line number - c.Assert(err, check.Equals, "ObjectNotFound") - } - } - c.Assert(len(byteBuffer2.Bytes()), check.Equals, 0) -} - -func testDefaultContentType(c *check.C, create func() Driver) { - drivers := create() - err := drivers.CreateBucket("bucket", "") - c.Assert(err, check.IsNil) - - // test empty - _, err = drivers.CreateObject("bucket", "one", "", "", int64(len("one")), bytes.NewBufferString("one")) - metadata, err := drivers.GetObjectMetadata("bucket", "one") - c.Assert(err, check.IsNil) - c.Assert(metadata.ContentType, check.Equals, "application/octet-stream") - - // test custom - drivers.CreateObject("bucket", "two", "application/text", "", int64(len("two")), bytes.NewBufferString("two")) - metadata, err = drivers.GetObjectMetadata("bucket", "two") - c.Assert(err, check.IsNil) - c.Assert(metadata.ContentType, check.Equals, "application/text") - - // test trim space - drivers.CreateObject("bucket", "three", "\tapplication/json ", "", int64(len("three")), bytes.NewBufferString("three")) - metadata, err = drivers.GetObjectMetadata("bucket", "three") - c.Assert(err, check.IsNil) - c.Assert(metadata.ContentType, check.Equals, "application/json") -} - -func testContentMd5Set(c *check.C, create func() Driver) { - drivers := create() - err := drivers.CreateBucket("bucket", "") - c.Assert(err, check.IsNil) - - // test md5 invalid - badmd5Sum := 
"NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA" - calculatedmd5sum, err := drivers.CreateObject("bucket", "one", "", badmd5Sum, int64(len("one")), bytes.NewBufferString("one")) - c.Assert(err, check.Not(check.IsNil)) - c.Assert(calculatedmd5sum, check.Not(check.Equals), badmd5Sum) - - goodmd5sum := "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA=" - calculatedmd5sum, err = drivers.CreateObject("bucket", "two", "", goodmd5sum, int64(len("one")), bytes.NewBufferString("one")) - c.Assert(err, check.IsNil) - c.Assert(calculatedmd5sum, check.Equals, goodmd5sum) -} diff --git a/pkg/storage/drivers/cache/cache_test.go b/pkg/storage/drivers/cache/cache_test.go deleted file mode 100644 index 30326b3f4..000000000 --- a/pkg/storage/drivers/cache/cache_test.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cache - -import ( - "testing" - "time" - - . 
"github.com/minio/check" - "github.com/minio/minio/pkg/storage/drivers" -) - -func Test(t *testing.T) { TestingT(t) } - -type MySuite struct{} - -var _ = Suite(&MySuite{}) - -func (s *MySuite) TestAPISuite(c *C) { - create := func() drivers.Driver { - var driver drivers.Driver - store, err := NewDriver(1000000, 3*time.Hour, driver) - c.Check(err, IsNil) - return store - } - drivers.APITestSuite(c, create) -} diff --git a/pkg/storage/drivers/donut/donut-multipart.go b/pkg/storage/drivers/donut/donut-multipart.go deleted file mode 100644 index a418aea95..000000000 --- a/pkg/storage/drivers/donut/donut-multipart.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package donut - -import ( - "io" - - "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/drivers" -) - -func (d donutDriver) NewMultipartUpload(bucketName, objectName, contentType string) (string, error) { - return "", iodine.New(drivers.APINotImplemented{API: "NewMultipartUpload"}, nil) -} - -func (d donutDriver) AbortMultipartUpload(bucketName, objectName, uploadID string) error { - return iodine.New(drivers.APINotImplemented{API: "AbortMultipartUpload"}, nil) -} - -func (d donutDriver) CreateObjectPart(bucketName, objectName, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { - return "", iodine.New(drivers.APINotImplemented{API: "CreateObjectPart"}, nil) -} - -func (d donutDriver) CompleteMultipartUpload(bucketName, objectName, uploadID string, parts map[int]string) (string, error) { - return "", iodine.New(drivers.APINotImplemented{API: "CompleteMultipartUpload"}, nil) -} - -func (d donutDriver) ListMultipartUploads(bucketName string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) { - return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.APINotImplemented{API: "ListMultipartUploads"}, nil) -} -func (d donutDriver) ListObjectParts(bucketName, objectName string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) { - return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.APINotImplemented{API: "ListObjectParts"}, nil) -} diff --git a/pkg/storage/drivers/donut/donut.go b/pkg/storage/drivers/donut/donut.go deleted file mode 100644 index b23fd925b..000000000 --- a/pkg/storage/drivers/donut/donut.go +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package donut - -import ( - "encoding/base64" - "encoding/hex" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - - "io/ioutil" - - "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/donut" - "github.com/minio/minio/pkg/storage/drivers" -) - -// donutDriver - creates a new single disk drivers driver using donut -type donutDriver struct { - donut donut.Donut - paths []string - lock *sync.RWMutex -} - -// This is a dummy nodeDiskMap which is going to be deprecated soon -// once the Management API is standardized, and we have way of adding -// and removing disks. 
This is useful for now to take inputs from CLI -func createNodeDiskMap(paths []string) map[string][]string { - if len(paths) == 1 { - nodes := make(map[string][]string) - nodes["localhost"] = make([]string, 16) - for i := 0; i < len(nodes["localhost"]); i++ { - diskPath := filepath.Join(paths[0], strconv.Itoa(i)) - if _, err := os.Stat(diskPath); err != nil { - if os.IsNotExist(err) { - os.MkdirAll(diskPath, 0700) - } - } - nodes["localhost"][i] = diskPath - } - return nodes - } - diskPaths := make([]string, len(paths)) - nodes := make(map[string][]string) - for i, p := range paths { - diskPath := filepath.Join(p, strconv.Itoa(i)) - if _, err := os.Stat(diskPath); err != nil { - if os.IsNotExist(err) { - os.MkdirAll(diskPath, 0700) - } - } - diskPaths[i] = diskPath - } - nodes["localhost"] = diskPaths - return nodes -} - -// NewDriver instantiate a donut driver -func NewDriver(paths []string) (drivers.Driver, error) { - driver := new(donutDriver) - driver.paths = paths - driver.lock = new(sync.RWMutex) - - // Soon to be user configurable, when Management API is available - // we should remove "default" to something which is passed down - // from configuration paramters - var err error - driver.donut, err = donut.NewDonut("default", createNodeDiskMap(driver.paths)) - return driver, iodine.New(err, nil) -} - -// byBucketName is a type for sorting bucket metadata by bucket name -type byBucketName []drivers.BucketMetadata - -func (b byBucketName) Len() int { return len(b) } -func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name } - -// ListBuckets returns a list of buckets -func (d donutDriver) ListBuckets() (results []drivers.BucketMetadata, err error) { - if d.donut == nil { - return nil, iodine.New(drivers.InternalError{}, nil) - } - buckets, err := d.donut.ListBuckets() - if err != nil { - return nil, iodine.New(err, nil) - } - for _, metadata := range buckets { - result := 
drivers.BucketMetadata{ - Name: metadata.Name, - Created: metadata.Created, - ACL: drivers.BucketACL(metadata.ACL), - } - results = append(results, result) - } - sort.Sort(byBucketName(results)) - return results, nil -} - -// CreateBucket creates a new bucket -func (d donutDriver) CreateBucket(bucketName, acl string) error { - d.lock.Lock() - defer d.lock.Unlock() - if d.donut == nil { - return iodine.New(drivers.InternalError{}, nil) - } - if !drivers.IsValidBucketACL(acl) { - return iodine.New(drivers.InvalidACL{ACL: acl}, nil) - } - if drivers.IsValidBucket(bucketName) { - if strings.TrimSpace(acl) == "" { - acl = "private" - } - if err := d.donut.MakeBucket(bucketName, acl); err != nil { - switch iodine.ToError(err).(type) { - case donut.BucketExists: - return iodine.New(drivers.BucketExists{Bucket: bucketName}, nil) - } - return iodine.New(err, nil) - } - return nil - } - return iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) -} - -// GetBucketMetadata retrieves an bucket's metadata -func (d donutDriver) GetBucketMetadata(bucketName string) (drivers.BucketMetadata, error) { - d.lock.RLock() - defer d.lock.RUnlock() - if d.donut == nil { - return drivers.BucketMetadata{}, iodine.New(drivers.InternalError{}, nil) - } - if !drivers.IsValidBucket(bucketName) { - return drivers.BucketMetadata{}, drivers.BucketNameInvalid{Bucket: bucketName} - } - metadata, err := d.donut.GetBucketMetadata(bucketName) - if err != nil { - return drivers.BucketMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil) - } - bucketMetadata := drivers.BucketMetadata{ - Name: metadata.Name, - Created: metadata.Created, - ACL: drivers.BucketACL(metadata.ACL), - } - return bucketMetadata, nil -} - -// SetBucketMetadata sets bucket's metadata -func (d donutDriver) SetBucketMetadata(bucketName, acl string) error { - d.lock.Lock() - defer d.lock.Unlock() - if d.donut == nil { - return iodine.New(drivers.InternalError{}, nil) - } - if 
!drivers.IsValidBucket(bucketName) { - return iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) - } - if strings.TrimSpace(acl) == "" { - acl = "private" - } - bucketMetadata := make(map[string]string) - bucketMetadata["acl"] = acl - err := d.donut.SetBucketMetadata(bucketName, bucketMetadata) - if err != nil { - return iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil) - } - return nil -} - -// GetObject retrieves an object and writes it to a writer -func (d donutDriver) GetObject(w io.Writer, bucketName, objectName string) (int64, error) { - if d.donut == nil { - return 0, iodine.New(drivers.InternalError{}, nil) - } - if !drivers.IsValidBucket(bucketName) { - return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) - } - if !drivers.IsValidObjectName(objectName) { - return 0, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil) - } - d.lock.RLock() - defer d.lock.RUnlock() - reader, size, err := d.donut.GetObject(bucketName, objectName) - if err != nil { - switch iodine.ToError(err).(type) { - case donut.BucketNotFound: - return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil) - case donut.ObjectNotFound: - return 0, iodine.New(drivers.ObjectNotFound{ - Bucket: bucketName, - Object: objectName, - }, nil) - default: - return 0, iodine.New(drivers.InternalError{}, nil) - } - } - written, err := io.CopyN(w, reader, size) - if err != nil { - return 0, iodine.New(err, nil) - } - return written, nil -} - -// GetPartialObject retrieves an object range and writes it to a writer -func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string, start, length int64) (int64, error) { - d.lock.RLock() - defer d.lock.RUnlock() - if d.donut == nil { - return 0, iodine.New(drivers.InternalError{}, nil) - } - errParams := map[string]string{ - "bucketName": bucketName, - "objectName": objectName, - "start": strconv.FormatInt(start, 10), - "length": strconv.FormatInt(length, 10), - } - if 
!drivers.IsValidBucket(bucketName) { - return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, errParams) - } - if !drivers.IsValidObjectName(objectName) { - return 0, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, errParams) - } - if start < 0 { - return 0, iodine.New(drivers.InvalidRange{ - Start: start, - Length: length, - }, errParams) - } - reader, size, err := d.donut.GetObject(bucketName, objectName) - if err != nil { - switch iodine.ToError(err).(type) { - case donut.BucketNotFound: - return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil) - case donut.ObjectNotFound: - return 0, iodine.New(drivers.ObjectNotFound{ - Bucket: bucketName, - Object: objectName, - }, nil) - default: - return 0, iodine.New(drivers.InternalError{}, nil) - } - } - defer reader.Close() - if start > size || (start+length-1) > size { - return 0, iodine.New(drivers.InvalidRange{ - Start: start, - Length: length, - }, errParams) - } - _, err = io.CopyN(ioutil.Discard, reader, start) - if err != nil { - return 0, iodine.New(err, errParams) - } - n, err := io.CopyN(w, reader, length) - if err != nil { - return 0, iodine.New(err, errParams) - } - return n, nil -} - -// GetObjectMetadata retrieves an object's metadata -func (d donutDriver) GetObjectMetadata(bucketName, objectName string) (drivers.ObjectMetadata, error) { - d.lock.RLock() - defer d.lock.RUnlock() - - errParams := map[string]string{ - "bucketName": bucketName, - "objectName": objectName, - } - if d.donut == nil { - return drivers.ObjectMetadata{}, iodine.New(drivers.InternalError{}, errParams) - } - if !drivers.IsValidBucket(bucketName) { - return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, errParams) - } - if !drivers.IsValidObjectName(objectName) { - return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, errParams) - } - metadata, err := d.donut.GetObjectMetadata(bucketName, objectName) - if err != nil { - return 
drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{ - Bucket: bucketName, - Object: objectName, - }, errParams) - } - objectMetadata := drivers.ObjectMetadata{ - Bucket: bucketName, - Key: objectName, - - ContentType: metadata.Metadata["contentType"], - Created: metadata.Created, - Md5: metadata.MD5Sum, - Size: metadata.Size, - } - return objectMetadata, nil -} - -type byObjectName []drivers.ObjectMetadata - -func (b byObjectName) Len() int { return len(b) } -func (b byObjectName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byObjectName) Less(i, j int) bool { return b[i].Key < b[j].Key } - -// ListObjects - returns list of objects -func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { - d.lock.RLock() - defer d.lock.RUnlock() - errParams := map[string]string{ - "bucketName": bucketName, - } - if d.donut == nil { - return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.InternalError{}, errParams) - } - if !drivers.IsValidBucket(bucketName) { - return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) - } - if !drivers.IsValidObjectName(resources.Prefix) { - return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: resources.Prefix}, nil) - } - listObjects, err := d.donut.ListObjects(bucketName, resources.Prefix, resources.Marker, resources.Delimiter, resources.Maxkeys) - if err != nil { - return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams) - } - resources.CommonPrefixes = listObjects.CommonPrefixes - resources.IsTruncated = listObjects.IsTruncated - var results []drivers.ObjectMetadata - for _, objMetadata := range listObjects.Objects { - metadata := drivers.ObjectMetadata{ - Key: objMetadata.Object, - Created: objMetadata.Created, - Size: objMetadata.Size, - } - results = append(results, metadata) - } - 
sort.Sort(byObjectName(results)) - if resources.IsTruncated && resources.IsDelimiterSet() { - resources.NextMarker = results[len(results)-1].Key - } - return results, resources, nil -} - -// CreateObject creates a new object -func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedMD5Sum string, size int64, reader io.Reader) (string, error) { - d.lock.Lock() - defer d.lock.Unlock() - errParams := map[string]string{ - "bucketName": bucketName, - "objectName": objectName, - "contentType": contentType, - } - if d.donut == nil { - return "", iodine.New(drivers.InternalError{}, errParams) - } - if !drivers.IsValidBucket(bucketName) { - return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) - } - if !drivers.IsValidObjectName(objectName) { - return "", iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil) - } - if strings.TrimSpace(contentType) == "" { - contentType = "application/octet-stream" - } - metadata := make(map[string]string) - metadata["contentType"] = strings.TrimSpace(contentType) - metadata["contentLength"] = strconv.FormatInt(size, 10) - if strings.TrimSpace(expectedMD5Sum) != "" { - expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) - if err != nil { - return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil) - } - expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) - } - objMetadata, err := d.donut.PutObject(bucketName, objectName, expectedMD5Sum, reader, metadata) - if err != nil { - switch iodine.ToError(err).(type) { - case donut.BadDigest: - return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucketName, Key: objectName}, nil) - } - return "", iodine.New(err, errParams) - } - newObject := drivers.ObjectMetadata{ - Bucket: bucketName, - Key: objectName, - - ContentType: objMetadata.Metadata["contentType"], - Created: objMetadata.Created, - Md5: objMetadata.MD5Sum, - Size: objMetadata.Size, - } - return newObject.Md5, nil -} 
diff --git a/pkg/storage/drivers/donut/donut_test.go b/pkg/storage/drivers/donut/donut_test.go deleted file mode 100644 index 1d823e6f0..000000000 --- a/pkg/storage/drivers/donut/donut_test.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package donut - -import ( - "io/ioutil" - "os" - "testing" - - . "github.com/minio/check" - "github.com/minio/minio/pkg/storage/drivers" -) - -func Test(t *testing.T) { TestingT(t) } - -type MySuite struct{} - -var _ = Suite(&MySuite{}) - -func (s *MySuite) TestAPISuite(c *C) { - var storageList []string - create := func() drivers.Driver { - var paths []string - p, err := ioutil.TempDir(os.TempDir(), "minio-donut-") - c.Check(err, IsNil) - storageList = append(storageList, p) - paths = append(paths, p) - store, err := NewDriver(paths) - c.Check(err, IsNil) - return store - } - drivers.APITestSuite(c, create) - removeRoots(c, storageList) -} - -func removeRoots(c *C, roots []string) { - for _, root := range roots { - err := os.RemoveAll(root) - c.Check(err, IsNil) - } -} diff --git a/pkg/storage/drivers/dummy/README.md b/pkg/storage/drivers/dummy/README.md deleted file mode 100644 index 952be3406..000000000 --- a/pkg/storage/drivers/dummy/README.md +++ /dev/null @@ -1 +0,0 @@ -This is a dummy driver which is a pass through driver, useful if some one wants to contribute code. 
\ No newline at end of file diff --git a/pkg/storage/drivers/dummy/dummy.go b/pkg/storage/drivers/dummy/dummy.go deleted file mode 100644 index cd09ff30d..000000000 --- a/pkg/storage/drivers/dummy/dummy.go +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dummy - -import ( - "io" - - "github.com/minio/minio/pkg/storage/drivers" -) - -// dummyDriver -type dummyDriver struct { - driver drivers.Driver -} - -// NewDriver provides a new dummy driver -func NewDriver(driver drivers.Driver) drivers.Driver { - return dummyDriver{driver: driver} -} - -// ListBuckets -func (dummy dummyDriver) ListBuckets() ([]drivers.BucketMetadata, error) { - return dummy.driver.ListBuckets() -} - -// CreateBucket -func (dummy dummyDriver) CreateBucket(bucket, acl string) error { - return dummy.driver.CreateBucket(bucket, acl) -} - -// GetBucketMetadata -func (dummy dummyDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) { - return dummy.driver.GetBucketMetadata(bucket) -} - -// SetBucketMetadata -func (dummy dummyDriver) SetBucketMetadata(bucket, acl string) error { - return dummy.driver.SetBucketMetadata(bucket, acl) -} - -// GetObject -func (dummy dummyDriver) GetObject(w io.Writer, bucket, object string) (int64, error) { - return dummy.driver.GetObject(w, bucket, object) -} - -// GetPartialObject -func (dummy dummyDriver) GetPartialObject(w io.Writer, 
bucket, object string, start int64, length int64) (int64, error) { - return dummy.driver.GetPartialObject(w, bucket, object, start, length) -} - -// GetObjectMetadata -func (dummy dummyDriver) GetObjectMetadata(bucket, object string) (drivers.ObjectMetadata, error) { - return dummy.driver.GetObjectMetadata(bucket, object) -} - -// ListObjects -func (dummy dummyDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { - return dummy.driver.ListObjects(bucket, resources) -} - -// CreateObject -func (dummy dummyDriver) CreateObject(bucket, key, contentType, md5sum string, size int64, data io.Reader) (string, error) { - return dummy.driver.CreateObject(bucket, key, contentType, md5sum, size, data) -} - -// NewMultipartUpload -func (dummy dummyDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) { - return dummy.driver.NewMultipartUpload(bucket, key, contentType) -} - -// CreateObjectPart -func (dummy dummyDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType string, md5sum string, size int64, data io.Reader) (string, error) { - return dummy.driver.CreateObjectPart(bucket, key, uploadID, partID, contentType, md5sum, size, data) -} - -// CompleteMultipartUpload -func (dummy dummyDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) { - return dummy.driver.CompleteMultipartUpload(bucket, key, uploadID, parts) -} - -// ListObjectParts -func (dummy dummyDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) { - return dummy.driver.ListObjectParts(bucket, key, resources) -} - -// ListMultipartUploads -func (dummy dummyDriver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) { - return dummy.driver.ListMultipartUploads(bucket, resources) 
-} - -// AbortMultipartUpload -func (dummy dummyDriver) AbortMultipartUpload(bucket, key, uploadID string) error { - return dummy.driver.AbortMultipartUpload(bucket, key, uploadID) -} diff --git a/pkg/storage/drivers/errors.go b/pkg/storage/drivers/errors.go deleted file mode 100644 index 442dd1be6..000000000 --- a/pkg/storage/drivers/errors.go +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package drivers - -import "fmt" - -// InternalError - generic internal error -type InternalError struct { -} - -// BackendError - generic disk backend error -type BackendError struct { - Path string -} - -// BackendCorrupted - path has corrupted data -type BackendCorrupted BackendError - -// APINotImplemented - generic API not implemented error -type APINotImplemented struct { - API string -} - -// GenericBucketError - generic bucket error -type GenericBucketError struct { - Bucket string -} - -// GenericObjectError - generic object error -type GenericObjectError struct { - Bucket string - Object string -} - -// ImplementationError - generic implementation error -type ImplementationError struct { - Bucket string - Object string - Err error -} - -// DigestError - Generic Md5 error -type DigestError struct { - Bucket string - Key string - Md5 string -} - -/// ACL related errors - -// InvalidACL - acl invalid -type InvalidACL struct { - ACL string -} - -func (e InvalidACL) 
Error() string { - return "Requested ACL is " + e.ACL + " invalid" -} - -/// Bucket related errors - -// BucketNameInvalid - bucketname provided is invalid -type BucketNameInvalid GenericBucketError - -// BucketExists - bucket already exists -type BucketExists GenericBucketError - -// BucketNotFound - requested bucket not found -type BucketNotFound GenericBucketError - -// TooManyBuckets - total buckets exceeded -type TooManyBuckets GenericBucketError - -/// Object related errors - -// ObjectNotFound - requested object not found -type ObjectNotFound GenericObjectError - -// ObjectExists - object already exists -type ObjectExists GenericObjectError - -// EntityTooLarge - object size exceeds maximum limit -type EntityTooLarge struct { - GenericObjectError - Size string - MaxSize string -} - -// ObjectNameInvalid - object name provided is invalid -type ObjectNameInvalid GenericObjectError - -// BadDigest - md5 mismatch from data received -type BadDigest DigestError - -// InvalidDigest - md5 in request header invalid -type InvalidDigest DigestError - -// Return string an error formatted as the given text -func (e ImplementationError) Error() string { - error := "" - if e.Bucket != "" { - error = error + "Bucket: " + e.Bucket + " " - } - if e.Object != "" { - error = error + "Object: " + e.Object + " " - } - error = error + "Error: " + e.Err.Error() - return error -} - -// EmbedError - wrapper function for error object -func EmbedError(bucket, object string, err error) ImplementationError { - return ImplementationError{ - Bucket: bucket, - Object: object, - Err: err, - } -} - -// Return string an error formatted as the given text -func (e InternalError) Error() string { - return "Internal error occured" -} - -// Return string an error formatted as the given text -func (e ObjectNotFound) Error() string { - return "Object not Found: " + e.Bucket + "#" + e.Object -} - -// Return string an error formatted as the given text -func (e APINotImplemented) Error() string { - 
return "Api not implemented: " + e.API -} - -// Return string an error formatted as the given text -func (e ObjectExists) Error() string { - return "Object exists: " + e.Bucket + "#" + e.Object -} - -// Return string an error formatted as the given text -func (e BucketNameInvalid) Error() string { - return "Bucket name invalid: " + e.Bucket -} - -// Return string an error formatted as the given text -func (e BucketExists) Error() string { - return "Bucket exists: " + e.Bucket -} - -// Return string an error formatted as the given text -func (e TooManyBuckets) Error() string { - return "Bucket limit exceeded beyond 100, cannot create bucket: " + e.Bucket -} - -// Return string an error formatted as the given text -func (e BucketNotFound) Error() string { - return "Bucket not Found: " + e.Bucket -} - -// Return string an error formatted as the given text -func (e ObjectNameInvalid) Error() string { - return "Object name invalid: " + e.Bucket + "#" + e.Object -} - -// Return string an error formatted as the given text -func (e EntityTooLarge) Error() string { - return e.Bucket + "#" + e.Object + "with " + e.Size + "reached maximum allowed size limit " + e.MaxSize -} - -// Return string an error formatted as the given text -func (e BackendCorrupted) Error() string { - return "Backend corrupted: " + e.Path -} - -// Return string an error formatted as the given text -func (e BadDigest) Error() string { - return "Md5 provided " + e.Md5 + " mismatches for: " + e.Bucket + "#" + e.Key -} - -// Return string an error formatted as the given text -func (e InvalidDigest) Error() string { - return "Md5 provided " + e.Md5 + " is invalid" -} - -// OperationNotPermitted - operation not permitted -type OperationNotPermitted struct { - Op string - Reason string -} - -func (e OperationNotPermitted) Error() string { - return "Operation " + e.Op + " not permitted for reason: " + e.Reason -} - -// InvalidRange - invalid range -type InvalidRange struct { - Start int64 - Length int64 -} - 
-func (e InvalidRange) Error() string { - return fmt.Sprintf("Invalid range start:%d length:%d", e.Start, e.Length) -} - -/// Multipart related errors - -// InvalidUploadID invalid upload id -type InvalidUploadID struct { - UploadID string -} - -func (e InvalidUploadID) Error() string { - return "Invalid upload id " + e.UploadID -} diff --git a/pkg/storage/drivers/mocks/Driver.go b/pkg/storage/drivers/mocks/Driver.go deleted file mode 100644 index fc70f5ef7..000000000 --- a/pkg/storage/drivers/mocks/Driver.go +++ /dev/null @@ -1,187 +0,0 @@ -package mocks - -import ( - "bytes" - "io" - - "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/drivers" - "github.com/stretchr/testify/mock" -) - -// Driver is a mock -type Driver struct { - mock.Mock - - ObjectWriterData map[string][]byte -} - -// ListBuckets is a mock -func (m *Driver) ListBuckets() ([]drivers.BucketMetadata, error) { - ret := m.Called() - - r0 := ret.Get(0).([]drivers.BucketMetadata) - r1 := ret.Error(1) - - return r0, r1 -} - -// CreateBucket is a mock -func (m *Driver) CreateBucket(bucket, acl string) error { - ret := m.Called(bucket, acl) - - r0 := ret.Error(0) - - return r0 -} - -// GetBucketMetadata is a mock -func (m *Driver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) { - ret := m.Called(bucket) - r0 := ret.Get(0).(drivers.BucketMetadata) - r1 := ret.Error(1) - - return r0, r1 -} - -// SetBucketMetadata is a mock -func (m *Driver) SetBucketMetadata(bucket, acl string) error { - ret := m.Called(bucket, acl) - - r0 := ret.Error(0) - - return r0 -} - -// SetGetObjectWriter is a mock -func (m *Driver) SetGetObjectWriter(bucket, object string, data []byte) { - m.ObjectWriterData[bucket+":"+object] = data -} - -// GetObject is a mock -func (m *Driver) GetObject(w io.Writer, bucket, object string) (int64, error) { - ret := m.Called(w, bucket, object) - r0 := ret.Get(0).(int64) - r1 := ret.Error(1) - if r1 == nil { - if obj, ok := 
m.ObjectWriterData[bucket+":"+object]; ok { - n, err := io.Copy(w, bytes.NewBuffer(obj)) - if err != nil { - panic(err) - } - r0 = n - } - } - return r0, r1 -} - -// GetPartialObject is a mock -func (m *Driver) GetPartialObject(w io.Writer, bucket, object string, start int64, length int64) (int64, error) { - ret := m.Called(w, bucket, object, start, length) - - r0 := ret.Get(0).(int64) - r1 := ret.Error(1) - - if r1 == nil { - if obj, ok := m.ObjectWriterData[bucket+":"+object]; ok { - source := bytes.NewBuffer(obj) - var nilSink bytes.Buffer - io.CopyN(&nilSink, source, start) - n, _ := io.CopyN(w, source, length) - r0 = n - } - } - r1 = iodine.New(r1, nil) - - return r0, r1 -} - -// GetObjectMetadata is a mock -func (m *Driver) GetObjectMetadata(bucket, object string) (drivers.ObjectMetadata, error) { - ret := m.Called(bucket, object) - - r0 := ret.Get(0).(drivers.ObjectMetadata) - r1 := ret.Error(1) - - return r0, r1 -} - -// ListObjects is a mock -func (m *Driver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { - ret := m.Called(bucket, resources) - - r0 := ret.Get(0).([]drivers.ObjectMetadata) - r1 := ret.Get(1).(drivers.BucketResourcesMetadata) - r2 := ret.Error(2) - - return r0, r1, r2 -} - -// CreateObject is a mock -func (m *Driver) CreateObject(bucket, key, contentType, md5sum string, size int64, data io.Reader) (string, error) { - ret := m.Called(bucket, key, contentType, md5sum, size, data) - - r0 := ret.Get(0).(string) - r1 := ret.Error(1) - - return r0, r1 -} - -// NewMultipartUpload is a mock -func (m *Driver) NewMultipartUpload(bucket, key, contentType string) (string, error) { - ret := m.Called(bucket, key, contentType) - - r0 := ret.Get(0).(string) - r1 := ret.Error(1) - - return r0, r1 -} - -// CreateObjectPart is a mock -func (m *Driver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType string, md5sum string, size int64, data 
io.Reader) (string, error) { - ret := m.Called(bucket, key, uploadID, partID, contentType, md5sum, size, data) - - r0 := ret.Get(0).(string) - r1 := ret.Error(1) - - return r0, r1 -} - -// CompleteMultipartUpload is a mock -func (m *Driver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) { - ret := m.Called(bucket, key, uploadID, parts) - - r0 := ret.Get(0).(string) - r1 := ret.Error(1) - - return r0, r1 -} - -// ListObjectParts is a mock -func (m *Driver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) { - ret := m.Called(bucket, key, resources) - - r0 := ret.Get(0).(drivers.ObjectResourcesMetadata) - r1 := ret.Error(1) - - return r0, r1 -} - -// ListMultipartUploads is a mock -func (m *Driver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) { - ret := m.Called(bucket, resources) - - r0 := ret.Get(0).(drivers.BucketMultipartResourcesMetadata) - r1 := ret.Error(1) - - return r0, r1 -} - -// AbortMultipartUpload is a mock -func (m *Driver) AbortMultipartUpload(bucket, key, uploadID string) error { - ret := m.Called(bucket, key, uploadID) - - r0 := ret.Error(0) - - return r0 -} From 335c7827eba5460e80381642c140eab32210b627 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 30 Jun 2015 17:08:18 -0700 Subject: [PATCH 04/19] More donut, cache, api cleanup --- commands.go | 2 +- main.go | 2 +- pkg/api/api-response.go | 6 +- pkg/api/headers.go | 6 +- pkg/storage/donut/bucket.go | 2 +- pkg/storage/donut/cache-multipart.go | 34 ++++--- pkg/storage/donut/cache.go | 142 ++++++++++++++++++++------- pkg/storage/donut/definitions.go | 2 +- pkg/storage/donut/donut.go | 43 +------- pkg/storage/donut/donut_test.go | 18 ++-- pkg/storage/donut/interfaces.go | 2 +- pkg/storage/donut/utils.go | 18 ---- 12 files changed, 148 insertions(+), 129 deletions(-) diff --git 
a/commands.go b/commands.go index 16822d70b..8d18cf80b 100644 --- a/commands.go +++ b/commands.go @@ -67,7 +67,7 @@ func runServer(c *cli.Context) { cli.ShowCommandHelpAndExit(c, "server", 1) // last argument is exit code } apiServerConfig := getAPIServerConfig(c) - if err := api.Start(apiServerConfig); err != nil { + if err := api.StartServer(apiServerConfig); err != nil { Fatalln(err) } } diff --git a/main.go b/main.go index 3fb676d72..82429440b 100644 --- a/main.go +++ b/main.go @@ -71,7 +71,7 @@ func init() { } } -func getAPIServerConfig(c *cli.Context) httpserver.Config { +func getAPIServerConfig(c *cli.Context) api.Config { certFile := c.GlobalString("cert") keyFile := c.GlobalString("key") if (certFile != "" && keyFile == "") || (certFile == "" && keyFile != "") { diff --git a/pkg/api/api-response.go b/pkg/api/api-response.go index 6ef01f260..5f6de4435 100644 --- a/pkg/api/api-response.go +++ b/pkg/api/api-response.go @@ -81,12 +81,12 @@ func generateListObjectsResponse(bucket string, objects []donut.ObjectMetadata, for _, object := range objects { var content = &Object{} - if object.Key == "" { + if object.Object == "" { continue } - content.Key = object.Key + content.Key = object.Object content.LastModified = object.Created.Format(iso8601Format) - content.ETag = "\"" + object.Md5 + "\"" + content.ETag = "\"" + object.MD5Sum + "\"" content.Size = object.Size content.StorageClass = "STANDARD" content.Owner = owner diff --git a/pkg/api/headers.go b/pkg/api/headers.go index 3e21af1fb..01f59b492 100644 --- a/pkg/api/headers.go +++ b/pkg/api/headers.go @@ -65,16 +65,16 @@ func encodeErrorResponse(response interface{}, acceptsType contentType) []byte { func setObjectHeaders(w http.ResponseWriter, metadata donut.ObjectMetadata) { lastModified := metadata.Created.Format(http.TimeFormat) // common headers - setCommonHeaders(w, metadata.ContentType, int(metadata.Size)) + setCommonHeaders(w, metadata.Metadata["contentType"], int(metadata.Size)) // object related 
headers - w.Header().Set("ETag", "\""+metadata.Md5+"\"") + w.Header().Set("ETag", "\""+metadata.MD5Sum+"\"") w.Header().Set("Last-Modified", lastModified) } // Write range object header func setRangeObjectHeaders(w http.ResponseWriter, metadata donut.ObjectMetadata, contentRange *httpRange) { // set common headers - setCommonHeaders(w, metadata.ContentType, int(metadata.Size)) + setCommonHeaders(w, metadata.Metadata["contentType"], int(metadata.Size)) // set object headers setObjectHeaders(w, metadata) // set content range diff --git a/pkg/storage/donut/bucket.go b/pkg/storage/donut/bucket.go index af5a1c7c8..1c798ffa9 100644 --- a/pkg/storage/donut/bucket.go +++ b/pkg/storage/donut/bucket.go @@ -73,7 +73,7 @@ func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bu metadata := BucketMetadata{} metadata.Version = bucketMetadataVersion metadata.Name = bucketName - metadata.ACL = aclType + metadata.ACL = BucketACL(aclType) metadata.Created = t metadata.Metadata = make(map[string]string) metadata.BucketObjects = make(map[string]interface{}) diff --git a/pkg/storage/donut/cache-multipart.go b/pkg/storage/donut/cache-multipart.go index 93ab51ea2..52eb4a035 100644 --- a/pkg/storage/donut/cache-multipart.go +++ b/pkg/storage/donut/cache-multipart.go @@ -34,7 +34,8 @@ import ( "github.com/minio/minio/pkg/iodine" ) -func (cache donut) NewMultipartUpload(bucket, key, contentType string) (string, error) { +// NewMultipartUpload - +func (cache Cache) NewMultipartUpload(bucket, key, contentType string) (string, error) { cache.lock.RLock() if !IsValidBucket(bucket) { cache.lock.RUnlock() @@ -52,7 +53,7 @@ func (cache donut) NewMultipartUpload(bucket, key, contentType string) (string, objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { cache.lock.RUnlock() - return "", iodine.New(ObjectExists{Bucket: bucket, Object: key}, nil) + return "", iodine.New(ObjectExists{Object: key}, nil) } cache.lock.RUnlock() @@ 
-71,7 +72,8 @@ func (cache donut) NewMultipartUpload(bucket, key, contentType string) (string, return uploadID, nil } -func (cache donut) AbortMultipartUpload(bucket, key, uploadID string) error { +// AbortMultipartUpload - +func (cache Cache) AbortMultipartUpload(bucket, key, uploadID string) error { cache.lock.RLock() storedBucket := cache.storedBuckets[bucket] if storedBucket.multiPartSession[key].uploadID != uploadID { @@ -89,7 +91,8 @@ func getMultipartKey(key string, uploadID string, partNumber int) string { return key + "?uploadId=" + uploadID + "&partNumber=" + strconv.Itoa(partNumber) } -func (cache donut) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { +// CreateObjectPart - +func (cache Cache) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { // Verify upload id cache.lock.RLock() storedBucket := cache.storedBuckets[bucket] @@ -109,7 +112,7 @@ func (cache donut) CreateObjectPart(bucket, key, uploadID string, partID int, co } // createObject - PUT object to cache buffer -func (cache donut) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { +func (cache Cache) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { cache.lock.RLock() if !IsValidBucket(bucket) { cache.lock.RUnlock() @@ -179,7 +182,7 @@ func (cache donut) createObjectPart(bucket, key, uploadID string, partID int, co // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { - return "", iodine.New(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) + return "", 
iodine.New(BadDigest{}, nil) } } newPart := PartMetadata{ @@ -200,20 +203,21 @@ func (cache donut) createObjectPart(bucket, key, uploadID string, partID int, co return md5Sum, nil } -func (cache donut) cleanupMultipartSession(bucket, key, uploadID string) { +func (cache Cache) cleanupMultipartSession(bucket, key, uploadID string) { cache.lock.Lock() defer cache.lock.Unlock() delete(cache.storedBuckets[bucket].multiPartSession, key) } -func (cache donut) cleanupMultiparts(bucket, key, uploadID string) { +func (cache Cache) cleanupMultiparts(bucket, key, uploadID string) { for i := 1; i <= cache.storedBuckets[bucket].multiPartSession[key].totalParts; i++ { objectKey := bucket + "/" + getMultipartKey(key, uploadID, i) cache.multiPartObjects.Delete(objectKey) } } -func (cache donut) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) { +// CompleteMultipartUpload - +func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) { if !IsValidBucket(bucket) { return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } @@ -251,7 +255,7 @@ func (cache donut) CompleteMultipartUpload(bucket, key, uploadID string, parts m return "", iodine.New(InvalidDigest{Md5: recvMD5}, nil) } if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) { - return "", iodine.New(BadDigest{Md5: recvMD5, Bucket: bucket, Key: getMultipartKey(key, uploadID, i)}, nil) + return "", iodine.New(BadDigest{}, nil) } _, err = io.Copy(&fullObject, bytes.NewBuffer(object)) if err != nil { @@ -284,7 +288,8 @@ func (a byKey) Len() int { return len(a) } func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key } -func (cache donut) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) { +// ListMultipartUploads - +func (cache Cache) ListMultipartUploads(bucket string, resources 
BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) { // TODO handle delimiter cache.lock.RLock() defer cache.lock.RUnlock() @@ -345,7 +350,8 @@ func (a partNumber) Len() int { return len(a) } func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } -func (cache donut) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) { +// ListObjectParts - +func (cache Cache) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) { // Verify upload id cache.lock.RLock() defer cache.lock.RUnlock() @@ -354,7 +360,7 @@ func (cache donut) ListObjectParts(bucket, key string, resources ObjectResources } storedBucket := cache.storedBuckets[bucket] if _, ok := storedBucket.multiPartSession[key]; ok == false { - return ObjectResourcesMetadata{}, iodine.New(ObjectNotFound{Bucket: bucket, Object: key}, nil) + return ObjectResourcesMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil) } if storedBucket.multiPartSession[key].uploadID != resources.UploadID { return ObjectResourcesMetadata{}, iodine.New(InvalidUploadID{UploadID: resources.UploadID}, nil) @@ -389,7 +395,7 @@ func (cache donut) ListObjectParts(bucket, key string, resources ObjectResources return objectResourcesMetadata, nil } -func (cache donut) expiredPart(a ...interface{}) { +func (cache Cache) expiredPart(a ...interface{}) { key := a[0].(string) // loop through all buckets for _, storedBucket := range cache.storedBuckets { diff --git a/pkg/storage/donut/cache.go b/pkg/storage/donut/cache.go index 5175e7332..ad8ab4929 100644 --- a/pkg/storage/donut/cache.go +++ b/pkg/storage/donut/cache.go @@ -24,14 +24,17 @@ import ( "encoding/hex" "errors" "io" + "io/ioutil" "log" "runtime/debug" "sort" "strconv" "strings" + "sync" "time" "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/storage/donut/trove" ) 
// total Number of buckets allowed @@ -39,6 +42,32 @@ const ( totalBuckets = 100 ) +// Cache - local variables +type Cache struct { + storedBuckets map[string]storedBucket + lock *sync.RWMutex + objects *trove.Cache + multiPartObjects *trove.Cache + maxSize uint64 + expiration time.Duration + donut Donut +} + +// storedBucket saved bucket +type storedBucket struct { + bucketMetadata BucketMetadata + objectMetadata map[string]ObjectMetadata + partMetadata map[string]PartMetadata + multiPartSession map[string]multiPartSession +} + +// multiPartSession multipart session +type multiPartSession struct { + totalParts int + uploadID string + initiated time.Time +} + type proxyWriter struct { writer io.Writer writtenBytes []byte @@ -57,8 +86,23 @@ func newProxyWriter(w io.Writer) *proxyWriter { return &proxyWriter{writer: w, writtenBytes: nil} } +// NewCache new cache +func NewCache(maxSize uint64, expiration time.Duration, donutName string, nodeDiskMap map[string][]string) Cache { + c := Cache{} + c.storedBuckets = make(map[string]storedBucket) + c.objects = trove.NewCache(maxSize, expiration) + c.multiPartObjects = trove.NewCache(0, time.Duration(0)) + c.objects.OnExpired = c.expiredObject + c.multiPartObjects.OnExpired = c.expiredPart + + // set up cache expiration + c.objects.ExpireObjects(time.Second * 5) + c.donut, _ = NewDonut(donutName, nodeDiskMap) + return c +} + // GetObject - GET object from cache buffer -func (cache donut) GetObject(w io.Writer, bucket string, object string) (int64, error) { +func (cache Cache) GetObject(w io.Writer, bucket string, object string) (int64, error) { cache.lock.RLock() defer cache.lock.RUnlock() if !IsValidBucket(bucket) { @@ -73,10 +117,18 @@ func (cache donut) GetObject(w io.Writer, bucket string, object string) (int64, objectKey := bucket + "/" + object data, ok := cache.objects.Get(objectKey) if !ok { - if cache.driver != nil { - return cache.driver.GetObject(w, bucket, object) + if cache.donut != nil { + reader, size, err := 
cache.donut.GetObject(bucket, object) + if err != nil { + return 0, iodine.New(err, nil) + } + written, err := io.CopyN(w, reader, size) + if err != nil { + return 0, iodine.New(err, nil) + } + return written, nil } - return 0, iodine.New(ObjectNotFound{Bucket: bucket, Object: object}, nil) + return 0, iodine.New(ObjectNotFound{Object: object}, nil) } written, err := io.Copy(w, bytes.NewBuffer(data)) if err != nil { @@ -86,7 +138,7 @@ func (cache donut) GetObject(w io.Writer, bucket string, object string) (int64, } // GetPartialObject - GET object from cache buffer range -func (cache donut) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { +func (cache Cache) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { errParams := map[string]string{ "bucket": bucket, "object": object, @@ -110,10 +162,21 @@ func (cache donut) GetPartialObject(w io.Writer, bucket, object string, start, l objectKey := bucket + "/" + object data, ok := cache.objects.Get(objectKey) if !ok { - if cache.driver != nil { - return cache.driver.GetPartialObject(w, bucket, object, start, length) + if cache.donut != nil { + reader, _, err := cache.donut.GetObject(bucket, object) + if err != nil { + return 0, iodine.New(err, nil) + } + if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil { + return 0, iodine.New(err, nil) + } + written, err := io.CopyN(w, reader, length) + if err != nil { + return 0, iodine.New(err, nil) + } + return written, nil } - return 0, iodine.New(ObjectNotFound{Bucket: bucket, Object: object}, nil) + return 0, iodine.New(ObjectNotFound{Object: object}, nil) } written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length) if err != nil { @@ -123,18 +186,18 @@ func (cache donut) GetPartialObject(w io.Writer, bucket, object string, start, l } // GetBucketMetadata - -func (cache donut) GetBucketMetadata(bucket string) (BucketMetadata, error) { +func (cache Cache) GetBucketMetadata(bucket 
string) (BucketMetadata, error) { cache.lock.RLock() if !IsValidBucket(bucket) { cache.lock.RUnlock() return BucketMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { - if cache.driver == nil { + if cache.donut == nil { cache.lock.RUnlock() return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } - bucketMetadata, err := cache.driver.GetBucketMetadata(bucket) + bucketMetadata, err := cache.donut.GetBucketMetadata(bucket) if err != nil { cache.lock.RUnlock() return BucketMetadata{}, iodine.New(err, nil) @@ -151,7 +214,7 @@ func (cache donut) GetBucketMetadata(bucket string) (BucketMetadata, error) { } // SetBucketMetadata - -func (cache donut) SetBucketMetadata(bucket, acl string) error { +func (cache Cache) SetBucketMetadata(bucket, acl string) error { cache.lock.RLock() if !IsValidBucket(bucket) { cache.lock.RUnlock() @@ -166,8 +229,10 @@ func (cache donut) SetBucketMetadata(bucket, acl string) error { } cache.lock.RUnlock() cache.lock.Lock() - if cache.driver != nil { - if err := cache.driver.SetBucketMetadata(bucket, acl); err != nil { + m := make(map[string]string) + m["acl"] = acl + if cache.donut != nil { + if err := cache.donut.SetBucketMetadata(bucket, m); err != nil { return iodine.New(err, nil) } } @@ -197,7 +262,8 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error { return iodine.New(errors.New("invalid argument"), nil) } -func (cache donut) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { +// CreateObject - +func (cache Cache) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { if size > int64(cache.maxSize) { generic := GenericObjectError{Bucket: bucket, Object: key} return "", iodine.New(EntityTooLarge{ @@ -213,7 +279,7 @@ func (cache donut) CreateObject(bucket, key, contentType, expectedMD5Sum string, } // createObject - PUT object to 
cache buffer -func (cache donut) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { +func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { cache.lock.RLock() if !IsValidBucket(bucket) { cache.lock.RUnlock() @@ -232,7 +298,7 @@ func (cache donut) createObject(bucket, key, contentType, expectedMD5Sum string, objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { cache.lock.RUnlock() - return "", iodine.New(ObjectExists{Bucket: bucket, Object: key}, nil) + return "", iodine.New(ObjectExists{Object: key}, nil) } cache.lock.RUnlock() @@ -286,29 +352,31 @@ func (cache donut) createObject(bucket, key, contentType, expectedMD5Sum string, // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { - return "", iodine.New(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) + return "", iodine.New(BadDigest{}, nil) } } + m := make(map[string]string) + m["contentType"] = contentType newObject := ObjectMetadata{ Bucket: bucket, - Key: key, + Object: key, - ContentType: contentType, - Created: time.Now().UTC(), - Md5: md5Sum, - Size: int64(totalLength), + Metadata: m, + Created: time.Now().UTC(), + MD5Sum: md5Sum, + Size: int64(totalLength), } cache.lock.Lock() storedBucket.objectMetadata[objectKey] = newObject cache.storedBuckets[bucket] = storedBucket cache.lock.Unlock() - return newObject.Md5, nil + return newObject.MD5Sum, nil } // CreateBucket - create bucket in cache -func (cache donut) CreateBucket(bucketName, acl string) error { +func (cache Cache) CreateBucket(bucketName, acl string) error { cache.lock.RLock() if len(cache.storedBuckets) == totalBuckets { cache.lock.RUnlock() @@ -332,8 +400,8 @@ func (cache donut) 
CreateBucket(bucketName, acl string) error { // default is private acl = "private" } - if cache.driver != nil { - if err := cache.driver.CreateBucket(bucketName, acl); err != nil { + if cache.donut != nil { + if err := cache.donut.MakeBucket(bucketName, BucketACL(acl)); err != nil { return iodine.New(err, nil) } } @@ -369,7 +437,7 @@ func appendUniq(slice []string, i string) []string { return append(slice, i) } -func (cache donut) filterDelimiterPrefix(keys []string, key, delim string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) { +func (cache Cache) filterDelimiterPrefix(keys []string, key, delim string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) { switch true { case key == r.Prefix: keys = appendUniq(keys, key) @@ -382,7 +450,7 @@ func (cache donut) filterDelimiterPrefix(keys []string, key, delim string, r Buc return keys, r } -func (cache donut) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) { +func (cache Cache) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) { switch true { // Prefix absent, delimit object key based on delimiter case r.IsDelimiterSet(): @@ -411,7 +479,7 @@ func (cache donut) listObjects(keys []string, key string, r BucketResourcesMetad } // ListObjects - list objects from cache -func (cache donut) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) { +func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) { cache.lock.RLock() defer cache.lock.RUnlock() if !IsValidBucket(bucket) { @@ -448,7 +516,7 @@ func (cache donut) ListObjects(bucket string, resources BucketResourcesMetadata) if len(results) == resources.Maxkeys { resources.IsTruncated = true if resources.IsTruncated && resources.IsDelimiterSet() { - resources.NextMarker = results[len(results)-1].Key + 
resources.NextMarker = results[len(results)-1].Object } return results, resources, nil } @@ -466,7 +534,7 @@ func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name } // ListBuckets - List buckets from cache -func (cache donut) ListBuckets() ([]BucketMetadata, error) { +func (cache Cache) ListBuckets() ([]BucketMetadata, error) { cache.lock.RLock() defer cache.lock.RUnlock() var results []BucketMetadata @@ -478,7 +546,7 @@ func (cache donut) ListBuckets() ([]BucketMetadata, error) { } // GetObjectMetadata - get object metadata from cache -func (cache donut) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) { +func (cache Cache) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) { cache.lock.RLock() // check if bucket exists if !IsValidBucket(bucket) { @@ -499,8 +567,8 @@ func (cache donut) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) cache.lock.RUnlock() return objMetadata, nil } - if cache.driver != nil { - objMetadata, err := cache.driver.GetObjectMetadata(bucket, key) + if cache.donut != nil { + objMetadata, err := cache.donut.GetObjectMetadata(bucket, key) cache.lock.RUnlock() if err != nil { return ObjectMetadata{}, iodine.New(err, nil) @@ -512,10 +580,10 @@ func (cache donut) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) return objMetadata, nil } cache.lock.RUnlock() - return ObjectMetadata{}, iodine.New(ObjectNotFound{Bucket: bucket, Object: key}, nil) + return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil) } -func (cache donut) expiredObject(a ...interface{}) { +func (cache Cache) expiredObject(a ...interface{}) { cacheStats := cache.objects.Stats() log.Printf("CurrentSize: %d, CurrentItems: %d, TotalExpirations: %d", cacheStats.Bytes, cacheStats.Items, cacheStats.Expired) diff --git a/pkg/storage/donut/definitions.go b/pkg/storage/donut/definitions.go index f94a54be8..333676fb1 100644 --- 
a/pkg/storage/donut/definitions.go +++ b/pkg/storage/donut/definitions.go @@ -59,7 +59,7 @@ type AllBuckets struct { type BucketMetadata struct { Version string `json:"version"` Name string `json:"name"` - ACL string `json:"acl"` + ACL BucketACL `json:"acl"` Created time.Time `json:"created"` Metadata map[string]string `json:"metadata"` BucketObjects map[string]interface{} `json:"objects"` diff --git a/pkg/storage/donut/donut.go b/pkg/storage/donut/donut.go index 0ac45925d..a34a0a5f0 100644 --- a/pkg/storage/donut/donut.go +++ b/pkg/storage/donut/donut.go @@ -25,10 +25,8 @@ import ( "strconv" "strings" "sync" - "time" "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/donut/trove" ) // donut struct internal data @@ -37,32 +35,6 @@ type donut struct { buckets map[string]bucket nodes map[string]node lock *sync.RWMutex - cache cache -} - -// cache - local variables -type cache struct { - storedBuckets map[string]storedBucket - lock *sync.RWMutex - objects *trove.Cache - multiPartObjects *trove.Cache - maxSize uint64 - expiration time.Duration -} - -// storedBucket saved bucket -type storedBucket struct { - bucketMetadata BucketMetadata - objectMetadata map[string]ObjectMetadata - partMetadata map[string]PartMetadata - multiPartSession map[string]multiPartSession -} - -// multiPartSession multipart session -type multiPartSession struct { - totalParts int - uploadID string - initiated time.Time } // config files used inside Donut @@ -110,26 +82,17 @@ func NewDonut(donutName string, nodeDiskMap map[string][]string) (Donut, error) return nil, iodine.New(err, nil) } } - d.cache.storedBuckets = make(map[string]storedBucket) - d.cache.objects = trove.NewCache(maxSize, expiration) - d.cache.multiPartObjects = trove.NewCache(0, time.Duration(0)) - - d.cache.objects.OnExpired = d.expiredObject - d.cache.multiPartObjects.OnExpired = d.expiredPart - - // set up cache expiration - d.cache.objects.ExpireObjects(time.Second * 5) return d, nil } // MakeBucket - 
make a new bucket -func (dt donut) MakeBucket(bucket, acl string) error { +func (dt donut) MakeBucket(bucket string, acl BucketACL) error { dt.lock.Lock() defer dt.lock.Unlock() if bucket == "" || strings.TrimSpace(bucket) == "" { return iodine.New(InvalidArgument{}, nil) } - return dt.makeDonutBucket(bucket, acl) + return dt.makeDonutBucket(bucket, acl.String()) } // GetBucketMetadata - get bucket metadata @@ -165,7 +128,7 @@ func (dt donut) SetBucketMetadata(bucketName string, bucketMetadata map[string]s if !ok { return iodine.New(InvalidArgument{}, nil) } - oldBucketMetadata.ACL = acl + oldBucketMetadata.ACL = BucketACL(acl) metadata.Buckets[bucketName] = oldBucketMetadata return dt.setDonutBucketMetadata(metadata) } diff --git a/pkg/storage/donut/donut_test.go b/pkg/storage/donut/donut_test.go index 2c9a09b6d..d1dad9c45 100644 --- a/pkg/storage/donut/donut_test.go +++ b/pkg/storage/donut/donut_test.go @@ -89,7 +89,7 @@ func (s *MySuite) TestEmptyBucket(c *C) { donut, err := NewDonut("test", createTestNodeDiskMap(root)) c.Assert(err, IsNil) - c.Assert(donut.MakeBucket("foo", "private"), IsNil) + c.Assert(donut.MakeBucket("foo", BucketACL("private")), IsNil) // check if bucket is empty listObjects, err := donut.ListObjects("foo", "", "", "", 1) c.Assert(err, IsNil) @@ -106,14 +106,14 @@ func (s *MySuite) TestMakeBucketAndList(c *C) { donut, err := NewDonut("test", createTestNodeDiskMap(root)) c.Assert(err, IsNil) // create bucket - err = donut.MakeBucket("foo", "private") + err = donut.MakeBucket("foo", BucketACL("private")) c.Assert(err, IsNil) // check bucket exists buckets, err := donut.ListBuckets() c.Assert(err, IsNil) c.Assert(len(buckets), Equals, 1) - c.Assert(buckets["foo"].ACL, Equals, "private") + c.Assert(buckets["foo"].ACL, Equals, BucketACL("private")) } // test re-create bucket @@ -123,10 +123,10 @@ func (s *MySuite) TestMakeBucketWithSameNameFails(c *C) { defer os.RemoveAll(root) donut, err := NewDonut("test", createTestNodeDiskMap(root)) 
c.Assert(err, IsNil) - err = donut.MakeBucket("foo", "private") + err = donut.MakeBucket("foo", BucketACL("private")) c.Assert(err, IsNil) - err = donut.MakeBucket("foo", "private") + err = donut.MakeBucket("foo", BucketACL("private")) c.Assert(err, Not(IsNil)) } @@ -138,10 +138,10 @@ func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) { donut, err := NewDonut("test", createTestNodeDiskMap(root)) c.Assert(err, IsNil) // add a second bucket - err = donut.MakeBucket("foo", "private") + err = donut.MakeBucket("foo", BucketACL("private")) c.Assert(err, IsNil) - err = donut.MakeBucket("bar", "private") + err = donut.MakeBucket("bar", BucketACL("private")) c.Assert(err, IsNil) buckets, err := donut.ListBuckets() @@ -152,7 +152,7 @@ func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) { _, ok = buckets["bar"] c.Assert(ok, Equals, true) - err = donut.MakeBucket("foobar", "private") + err = donut.MakeBucket("foobar", BucketACL("private")) c.Assert(err, IsNil) buckets, err = donut.ListBuckets() @@ -264,7 +264,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) { donut, err := NewDonut("test", createTestNodeDiskMap(root)) c.Assert(err, IsNil) - c.Assert(donut.MakeBucket("foo", "private"), IsNil) + c.Assert(donut.MakeBucket("foo", BucketACL("private")), IsNil) one := ioutil.NopCloser(bytes.NewReader([]byte("one"))) metadata := make(map[string]string) diff --git a/pkg/storage/donut/interfaces.go b/pkg/storage/donut/interfaces.go index 2a382b354..b1ea80318 100644 --- a/pkg/storage/donut/interfaces.go +++ b/pkg/storage/donut/interfaces.go @@ -32,7 +32,7 @@ type ObjectStorage interface { GetBucketMetadata(bucket string) (BucketMetadata, error) SetBucketMetadata(bucket string, metadata map[string]string) error ListBuckets() (map[string]BucketMetadata, error) - MakeBucket(bucket, acl string) error + MakeBucket(bucket string, acl BucketACL) error // Bucket operations ListObjects(bucket, prefix, marker, delim string, maxKeys int) (ListObjects, error) diff --git 
a/pkg/storage/donut/utils.go b/pkg/storage/donut/utils.go index d90a45743..ed2ce6604 100644 --- a/pkg/storage/donut/utils.go +++ b/pkg/storage/donut/utils.go @@ -36,24 +36,6 @@ func (b BucketACL) IsPublicReadWrite() bool { return b == BucketACL("public-read-write") } -// BucketMetadata - name and create date -type BucketMetadata struct { - Name string - Created time.Time - ACL BucketACL -} - -// ObjectMetadata - object key and its relevant metadata -type ObjectMetadata struct { - Bucket string - Key string - - ContentType string - Created time.Time - Md5 string - Size int64 -} - // FilterMode type type FilterMode int From 4addf7a9969cf821bc9e6b7458e0d6044a0dad6d Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 30 Jun 2015 20:15:48 -0700 Subject: [PATCH 05/19] Restructure API handlers, add JSON RPC simple HelloService right now. --- commands.go | 24 +++- main.go | 17 --- pkg/api/api-router.go | 54 ------- pkg/api/web/web.go | 135 ------------------ pkg/{ => server}/api/acl.go | 0 pkg/{ => server}/api/api-bucket-handlers.go | 35 ++--- pkg/{ => server}/api/api-definitions.go | 9 ++ pkg/{ => server}/api/api-generic-handlers.go | 14 +- pkg/{ => server}/api/api-logging-handlers.go | 4 +- pkg/{ => server}/api/api-object-handlers.go | 56 ++++---- .../api/api-ratelimit-handlers.go | 4 +- pkg/{ => server}/api/api-response.go | 0 pkg/{ => server}/api/contenttype.go | 0 pkg/{ => server}/api/errors.go | 0 pkg/{ => server}/api/headers.go | 0 pkg/{ => server}/api/range.go | 0 pkg/{ => server}/api/resources.go | 0 pkg/{ => server}/api/utils.go | 0 pkg/{api => server}/config/config.go | 0 pkg/{api => server}/config/config_test.go | 0 pkg/server/router.go | 77 ++++++++++ pkg/server/rpc/methods.go | 38 +++++ pkg/server/rpc/server.go | 30 ++++ pkg/{api => server}/server.go | 70 +++++---- 24 files changed, 273 insertions(+), 294 deletions(-) delete mode 100644 pkg/api/api-router.go delete mode 100644 pkg/api/web/web.go rename pkg/{ => server}/api/acl.go (100%) rename pkg/{ => 
server}/api/api-bucket-handlers.go (79%) rename pkg/{ => server}/api/api-definitions.go (97%) rename pkg/{ => server}/api/api-generic-handlers.go (94%) rename pkg/{ => server}/api/api-logging-handlers.go (97%) rename pkg/{ => server}/api/api-object-handlers.go (78%) rename pkg/{ => server}/api/api-ratelimit-handlers.go (90%) rename pkg/{ => server}/api/api-response.go (100%) rename pkg/{ => server}/api/contenttype.go (100%) rename pkg/{ => server}/api/errors.go (100%) rename pkg/{ => server}/api/headers.go (100%) rename pkg/{ => server}/api/range.go (100%) rename pkg/{ => server}/api/resources.go (100%) rename pkg/{ => server}/api/utils.go (100%) rename pkg/{api => server}/config/config.go (100%) rename pkg/{api => server}/config/config_test.go (100%) create mode 100644 pkg/server/router.go create mode 100644 pkg/server/rpc/methods.go create mode 100644 pkg/server/rpc/server.go rename pkg/{api => server}/server.go (63%) diff --git a/commands.go b/commands.go index 8d18cf80b..a613b766d 100644 --- a/commands.go +++ b/commands.go @@ -4,7 +4,8 @@ import ( "os/user" "github.com/minio/cli" - "github.com/minio/minio/pkg/api" + "github.com/minio/minio/pkg/server" + "github.com/minio/minio/pkg/server/api" ) func removeDuplicates(slice []string) []string { @@ -58,16 +59,29 @@ EXAMPLES: `, } +func getAPIServerConfig(c *cli.Context) api.Config { + certFile := c.GlobalString("cert") + keyFile := c.GlobalString("key") + if (certFile != "" && keyFile == "") || (certFile == "" && keyFile != "") { + Fatalln("Both certificate and key are required to enable https.") + } + tls := (certFile != "" && keyFile != "") + return api.Config{ + Address: c.GlobalString("address"), + TLS: tls, + CertFile: certFile, + KeyFile: keyFile, + RateLimit: c.GlobalInt("ratelimit"), + } +} + func runServer(c *cli.Context) { _, err := user.Current() if err != nil { Fatalf("Unable to determine current user. 
Reason: %s\n", err) } - if len(c.Args()) < 1 { - cli.ShowCommandHelpAndExit(c, "server", 1) // last argument is exit code - } apiServerConfig := getAPIServerConfig(c) - if err := api.StartServer(apiServerConfig); err != nil { + if err := server.StartServices(apiServerConfig); err != nil { Fatalln(err) } } diff --git a/main.go b/main.go index 82429440b..a372a4975 100644 --- a/main.go +++ b/main.go @@ -26,7 +26,6 @@ import ( "github.com/dustin/go-humanize" "github.com/minio/cli" - "github.com/minio/minio/pkg/api" "github.com/minio/minio/pkg/iodine" ) @@ -71,22 +70,6 @@ func init() { } } -func getAPIServerConfig(c *cli.Context) api.Config { - certFile := c.GlobalString("cert") - keyFile := c.GlobalString("key") - if (certFile != "" && keyFile == "") || (certFile == "" && keyFile != "") { - Fatalln("Both certificate and key are required to enable https.") - } - tls := (certFile != "" && keyFile != "") - return api.Config{ - Address: c.GlobalString("address"), - TLS: tls, - CertFile: certFile, - KeyFile: keyFile, - RateLimit: c.GlobalInt("ratelimit"), - } -} - // Tries to get os/arch/platform specific information // Returns a map of current os/arch/platform/memstats func getSystemData() map[string]string { diff --git a/pkg/api/api-router.go b/pkg/api/api-router.go deleted file mode 100644 index b6750b28d..000000000 --- a/pkg/api/api-router.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2014 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package api - -import router "github.com/gorilla/mux" - -type minioAPI struct{} - -// New api -func New(config Config) API { - var api = minioAPI{} - - mux := router.NewRouter() - mux.HandleFunc("/", api.listBucketsHandler).Methods("GET") - mux.HandleFunc("/{bucket}", api.listObjectsHandler).Methods("GET") - mux.HandleFunc("/{bucket}", api.putBucketHandler).Methods("PUT") - mux.HandleFunc("/{bucket}", api.headBucketHandler).Methods("HEAD") - mux.HandleFunc("/{bucket}/{object:.*}", api.headObjectHandler).Methods("HEAD") - mux.HandleFunc("/{bucket}/{object:.*}", api.putObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").Methods("PUT") - mux.HandleFunc("/{bucket}/{object:.*}", api.listObjectPartsHandler).Queries("uploadId", "{uploadId:.*}").Methods("GET") - mux.HandleFunc("/{bucket}/{object:.*}", api.completeMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}").Methods("POST") - mux.HandleFunc("/{bucket}/{object:.*}", api.newMultipartUploadHandler).Methods("POST") - mux.HandleFunc("/{bucket}/{object:.*}", api.abortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}").Methods("DELETE") - mux.HandleFunc("/{bucket}/{object:.*}", api.getObjectHandler).Methods("GET") - mux.HandleFunc("/{bucket}/{object:.*}", api.putObjectHandler).Methods("PUT") - - // not implemented yet - mux.HandleFunc("/{bucket}", api.deleteBucketHandler).Methods("DELETE") - - // unsupported API - mux.HandleFunc("/{bucket}/{object:.*}", api.deleteObjectHandler).Methods("DELETE") - - handler := validContentTypeHandler(mux) - handler = timeValidityHandler(handler) - handler = ignoreResourcesHandler(handler) - handler = validateAuthHeaderHandler(handler) - handler = rateLimitHandler(handler, config.RateLimit) - handler = loggingHandler(handler) - return API{config, handler} -} diff --git a/pkg/api/web/web.go b/pkg/api/web/web.go deleted 
file mode 100644 index 4e7ffeae9..000000000 --- a/pkg/api/web/web.go +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package web - -import ( - "bytes" - "encoding/json" - "net/http" - "path/filepath" - - "github.com/gorilla/mux" - "github.com/minio/minio/pkg/api/config" - "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/utils/crypto/keys" - "github.com/minio/minio/pkg/utils/log" -) - -const ( - defaultWeb = "polygon" -) - -type webAPI struct { - conf config.Config - webPath string -} - -// No encoder interface exists, so we create one. 
-type encoder interface { - Encode(v interface{}) error -} - -// HTTPHandler - http wrapper handler -func HTTPHandler() http.Handler { - mux := mux.NewRouter() - var api = webAPI{} - - if err := api.conf.SetupConfig(); err != nil { - log.Fatal(iodine.New(err, nil)) - } - - api.webPath = filepath.Join(api.conf.GetConfigPath(), defaultWeb) - mux.Handle("/{polygon:.*}", http.FileServer(http.Dir(api.webPath))).Methods("GET") - mux.HandleFunc("/access", api.accessHandler).Methods("POST") - return mux -} - -func writeResponse(w http.ResponseWriter, response interface{}) []byte { - var bytesBuffer bytes.Buffer - var encoder encoder - w.Header().Set("Content-Type", "application/json") - encoder = json.NewEncoder(&bytesBuffer) - w.Header().Set("Server", "Minio Management Console") - w.Header().Set("Connection", "close") - encoder.Encode(response) - return bytesBuffer.Bytes() -} - -func (web *webAPI) accessHandler(w http.ResponseWriter, req *http.Request) { - var err error - var accesskey, secretkey []byte - username := req.FormValue("username") - if len(username) <= 0 { - w.WriteHeader(http.StatusBadRequest) - return - } - - err = web.conf.ReadConfig() - if err != nil { - log.Error.Println(iodine.New(err, nil)) - w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte(err.Error())) - return - } - - if web.conf.IsUserExists(username) { - w.WriteHeader(http.StatusConflict) - return - } - - var user = config.User{} - user.Name = username - - accesskey, err = keys.GenerateRandomAlphaNumeric(keys.MinioAccessID) - if err != nil { - log.Error.Println(iodine.New(err, nil)) - w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte(err.Error())) - return - } - user.AccessKey = string(accesskey) - - secretkey, err = keys.GenerateRandomBase64(keys.MinioSecretID) - if err != nil { - log.Error.Println(iodine.New(err, nil)) - w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte(err.Error())) - return - } - user.SecretKey = string(secretkey) - - 
web.conf.AddUser(user) - err = web.conf.WriteConfig() - if err != nil { - log.Error.Println(iodine.New(err, nil)) - w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte(err.Error())) - return - } - - err = web.conf.ReadConfig() - if err != nil { - log.Error.Println(iodine.New(err, nil)) - w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte(err.Error())) - return - } - - // Get user back for sending it over HTTP reply - user = web.conf.GetUser(username) - w.Write(writeResponse(w, user)) -} diff --git a/pkg/api/acl.go b/pkg/server/api/acl.go similarity index 100% rename from pkg/api/acl.go rename to pkg/server/api/acl.go diff --git a/pkg/api/api-bucket-handlers.go b/pkg/server/api/api-bucket-handlers.go similarity index 79% rename from pkg/api/api-bucket-handlers.go rename to pkg/server/api/api-bucket-handlers.go index cec239e98..bae237e0b 100644 --- a/pkg/api/api-bucket-handlers.go +++ b/pkg/server/api/api-bucket-handlers.go @@ -23,21 +23,24 @@ import ( "github.com/gorilla/mux" ) -func (server *minioAPI) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool { +// MinioAPI - +type MinioAPI struct{} + +func (api MinioAPI) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool { vars := mux.Vars(req) bucket := vars["bucket"] log.Println(bucket) return true } -// GET Bucket (List Multipart uploads) +// ListMultipartUploadsHandler - GET Bucket (List Multipart uploads) // ------------------------- // This operation lists in-progress multipart uploads. An in-progress // multipart upload is a multipart upload that has been initiated, // using the Initiate Multipart Upload request, but has not yet been completed or aborted. // This operation returns at most 1,000 multipart uploads in the response. 
// -func (server *minioAPI) listMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) { +func (api MinioAPI) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) log.Println(acceptsContentType) @@ -51,21 +54,21 @@ func (server *minioAPI) listMultipartUploadsHandler(w http.ResponseWriter, req * log.Println(bucket) } -// GET Bucket (List Objects) +// ListObjectsHandler - GET Bucket (List Objects) // ------------------------- // This implementation of the GET operation returns some or all (up to 1000) // of the objects in a bucket. You can use the request parameters as selection // criteria to return a subset of the objects in a bucket. // -func (server *minioAPI) listObjectsHandler(w http.ResponseWriter, req *http.Request) { +func (api MinioAPI) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // verify if bucket allows this operation - if !server.isValidOp(w, req, acceptsContentType) { + if !api.isValidOp(w, req, acceptsContentType) { return } if isRequestUploads(req.URL.Query()) { - server.listMultipartUploadsHandler(w, req) + api.ListMultipartUploadsHandler(w, req) return } @@ -80,11 +83,11 @@ func (server *minioAPI) listObjectsHandler(w http.ResponseWriter, req *http.Requ } -// GET Service +// ListBucketsHandler - GET Service // ----------- // This implementation of the GET operation returns a list of all buckets // owned by the authenticated sender of the request. 
-func (server *minioAPI) listBucketsHandler(w http.ResponseWriter, req *http.Request) { +func (api MinioAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // uncomment this when we have webcli // without access key credentials one cannot list buckets @@ -95,10 +98,10 @@ func (server *minioAPI) listBucketsHandler(w http.ResponseWriter, req *http.Requ log.Println(acceptsContentType) } -// PUT Bucket +// PutBucketHandler - PUT Bucket // ---------- // This implementation of the PUT operation creates a new bucket for authenticated request -func (server *minioAPI) putBucketHandler(w http.ResponseWriter, req *http.Request) { +func (api MinioAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // uncomment this when we have webcli // without access key credentials one cannot create a bucket @@ -107,7 +110,7 @@ func (server *minioAPI) putBucketHandler(w http.ResponseWriter, req *http.Reques // return // } if isRequestBucketACL(req.URL.Query()) { - server.putBucketACLHandler(w, req) + api.PutBucketACLHandler(w, req) return } // read from 'x-amz-acl' @@ -122,10 +125,10 @@ func (server *minioAPI) putBucketHandler(w http.ResponseWriter, req *http.Reques log.Println(bucket) } -// PUT Bucket ACL +// PutBucketACLHandler - PUT Bucket ACL // ---------- // This implementation of the PUT operation modifies the bucketACL for authenticated request -func (server *minioAPI) putBucketACLHandler(w http.ResponseWriter, req *http.Request) { +func (api MinioAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // read from 'x-amz-acl' aclType := getACLType(req) @@ -139,13 +142,13 @@ func (server *minioAPI) putBucketACLHandler(w http.ResponseWriter, req *http.Req log.Println(bucket) } -// HEAD Bucket +// HeadBucketHandler - HEAD Bucket // ---------- // This operation is useful to determine if a bucket exists. 
// The operation returns a 200 OK if the bucket exists and you // have permission to access it. Otherwise, the operation might // return responses such as 404 Not Found and 403 Forbidden. -func (server *minioAPI) headBucketHandler(w http.ResponseWriter, req *http.Request) { +func (api MinioAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) log.Println(acceptsContentType) diff --git a/pkg/api/api-definitions.go b/pkg/server/api/api-definitions.go similarity index 97% rename from pkg/api/api-definitions.go rename to pkg/server/api/api-definitions.go index 526bfba6f..e4b2ffbdb 100644 --- a/pkg/api/api-definitions.go +++ b/pkg/server/api/api-definitions.go @@ -18,6 +18,15 @@ package api import "encoding/xml" +// Config - http server config +type Config struct { + Address string + TLS bool + CertFile string + KeyFile string + RateLimit int +} + // Limit number of objects in a given response const ( maxObjectList = 1000 diff --git a/pkg/api/api-generic-handlers.go b/pkg/server/api/api-generic-handlers.go similarity index 94% rename from pkg/api/api-generic-handlers.go rename to pkg/server/api/api-generic-handlers.go index 2f0cf251e..1cff82502 100644 --- a/pkg/api/api-generic-handlers.go +++ b/pkg/server/api/api-generic-handlers.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/minio/minio/pkg/api/config" + "github.com/minio/minio/pkg/server/config" "github.com/minio/minio/pkg/utils/crypto/keys" ) @@ -128,7 +128,8 @@ func parseDate(req *http.Request) (time.Time, error) { return time.Time{}, errors.New("invalid request") } -func validContentTypeHandler(h http.Handler) http.Handler { +// ValidContentTypeHandler - +func ValidContentTypeHandler(h http.Handler) http.Handler { return contentTypeHandler{h} } @@ -141,7 +142,8 @@ func (h contentTypeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.handler.ServeHTTP(w, r) } -func timeValidityHandler(h http.Handler) http.Handler { +// 
TimeValidityHandler - +func TimeValidityHandler(h http.Handler) http.Handler { return timeHandler{h} } @@ -170,9 +172,10 @@ func (h timeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.handler.ServeHTTP(w, r) } +// ValidateAuthHeaderHandler - // validate auth header handler is wrapper handler used for API request validation with authorization header. // Current authorization layer supports S3's standard HMAC based signature request. -func validateAuthHeaderHandler(h http.Handler) http.Handler { +func ValidateAuthHeaderHandler(h http.Handler) http.Handler { return validateAuthHandler{h} } @@ -206,10 +209,11 @@ func (h validateAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } +// IgnoreResourcesHandler - // Ignore resources handler is wrapper handler used for API request resource validation // Since we do not support all the S3 queries, it is necessary for us to throw back a // valid error message indicating such a feature is not implemented. -func ignoreResourcesHandler(h http.Handler) http.Handler { +func IgnoreResourcesHandler(h http.Handler) http.Handler { return resourceHandler{h} } diff --git a/pkg/api/api-logging-handlers.go b/pkg/server/api/api-logging-handlers.go similarity index 97% rename from pkg/api/api-logging-handlers.go rename to pkg/server/api/api-logging-handlers.go index 87e12b98d..0ee9ca2fe 100644 --- a/pkg/api/api-logging-handlers.go +++ b/pkg/server/api/api-logging-handlers.go @@ -94,8 +94,8 @@ func getLogMessage(logMessage *logMessage, w http.ResponseWriter, req *http.Requ return js } -// loggingHandler logs requests -func loggingHandler(h http.Handler) http.Handler { +// LoggingHandler logs requests +func LoggingHandler(h http.Handler) http.Handler { logger, _ := fileLogger("access.log") return &logHandler{handler: h, logger: logger} } diff --git a/pkg/api/api-object-handlers.go b/pkg/server/api/api-object-handlers.go similarity index 78% rename from pkg/api/api-object-handlers.go rename to 
pkg/server/api/api-object-handlers.go index 409c4d571..0ec281500 100644 --- a/pkg/api/api-object-handlers.go +++ b/pkg/server/api/api-object-handlers.go @@ -32,14 +32,14 @@ const ( maxPartsList = 1000 ) -// GET Object +// GetObjectHandler - GET Object // ---------- // This implementation of the GET operation retrieves object. To use GET, // you must have READ access to the object. -func (server *minioAPI) getObjectHandler(w http.ResponseWriter, req *http.Request) { +func (api MinioAPI) GetObjectHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // verify if this operation is allowed - if !server.isValidOp(w, req, acceptsContentType) { + if !api.isValidOp(w, req, acceptsContentType) { return } @@ -51,13 +51,13 @@ func (server *minioAPI) getObjectHandler(w http.ResponseWriter, req *http.Reques } -// HEAD Object +// HeadObjectHandler - HEAD Object // ----------- // The HEAD operation retrieves metadata from an object without returning the object itself. -func (server *minioAPI) headObjectHandler(w http.ResponseWriter, req *http.Request) { +func (api MinioAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // verify if this operation is allowed - if !server.isValidOp(w, req, acceptsContentType) { + if !api.isValidOp(w, req, acceptsContentType) { return } @@ -68,13 +68,13 @@ func (server *minioAPI) headObjectHandler(w http.ResponseWriter, req *http.Reque log.Println(bucket, object) } -// PUT Object +// PutObjectHandler - PUT Object // ---------- // This implementation of the PUT operation adds an object to a bucket. 
-func (server *minioAPI) putObjectHandler(w http.ResponseWriter, req *http.Request) { +func (api MinioAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // verify if this operation is allowed - if !server.isValidOp(w, req, acceptsContentType) { + if !api.isValidOp(w, req, acceptsContentType) { return } @@ -120,11 +120,11 @@ func (server *minioAPI) putObjectHandler(w http.ResponseWriter, req *http.Reques /// Multipart API -// New multipart upload -func (server *minioAPI) newMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { +// NewMultipartUploadHandler - New multipart upload +func (api MinioAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // handle ACL's here at bucket level - if !server.isValidOp(w, req, acceptsContentType) { + if !api.isValidOp(w, req, acceptsContentType) { return } @@ -140,11 +140,11 @@ func (server *minioAPI) newMultipartUploadHandler(w http.ResponseWriter, req *ht log.Println(bucket, object) } -// Upload part -func (server *minioAPI) putObjectPartHandler(w http.ResponseWriter, req *http.Request) { +// PutObjectPartHandler - Upload part +func (api MinioAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // handle ACL's here at bucket level - if !server.isValidOp(w, req, acceptsContentType) { + if !api.isValidOp(w, req, acceptsContentType) { return } @@ -189,11 +189,11 @@ func (server *minioAPI) putObjectPartHandler(w http.ResponseWriter, req *http.Re log.Println(uploadID, partID) } -// Abort multipart upload -func (server *minioAPI) abortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { +// AbortMultipartUploadHandler - Abort multipart upload +func (api MinioAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // handle ACL's here at bucket level - if 
!server.isValidOp(w, req, acceptsContentType) { + if !api.isValidOp(w, req, acceptsContentType) { return } @@ -205,11 +205,11 @@ func (server *minioAPI) abortMultipartUploadHandler(w http.ResponseWriter, req * log.Println(bucket, object) } -// List object parts -func (server *minioAPI) listObjectPartsHandler(w http.ResponseWriter, req *http.Request) { +// ListObjectPartsHandler - List object parts +func (api MinioAPI) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // handle ACL's here at bucket level - if !server.isValidOp(w, req, acceptsContentType) { + if !api.isValidOp(w, req, acceptsContentType) { return } @@ -224,11 +224,11 @@ func (server *minioAPI) listObjectPartsHandler(w http.ResponseWriter, req *http. log.Println(bucket, object) } -// Complete multipart upload -func (server *minioAPI) completeMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { +// CompleteMultipartUploadHandler - Complete multipart upload +func (api MinioAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // handle ACL's here at bucket level - if !server.isValidOp(w, req, acceptsContentType) { + if !api.isValidOp(w, req, acceptsContentType) { return } @@ -261,14 +261,14 @@ func (server *minioAPI) completeMultipartUploadHandler(w http.ResponseWriter, re /// Delete API -// Delete bucket -func (server *minioAPI) deleteBucketHandler(w http.ResponseWriter, req *http.Request) { +// DeleteBucketHandler - Delete bucket +func (api MinioAPI) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) { error := getErrorCode(NotImplemented) w.WriteHeader(error.HTTPStatusCode) } -// Delete object -func (server *minioAPI) deleteObjectHandler(w http.ResponseWriter, req *http.Request) { +// DeleteObjectHandler - Delete object +func (api MinioAPI) DeleteObjectHandler(w http.ResponseWriter, req *http.Request) { error := getErrorCode(NotImplemented) 
w.WriteHeader(error.HTTPStatusCode) } diff --git a/pkg/api/api-ratelimit-handlers.go b/pkg/server/api/api-ratelimit-handlers.go similarity index 90% rename from pkg/api/api-ratelimit-handlers.go rename to pkg/server/api/api-ratelimit-handlers.go index 03e31102c..ab35284de 100644 --- a/pkg/api/api-ratelimit-handlers.go +++ b/pkg/server/api/api-ratelimit-handlers.go @@ -41,8 +41,8 @@ func (c rateLimit) ServeHTTP(w http.ResponseWriter, req *http.Request) { c.Remove() // remove } -// rateLimitHandler limits the number of concurrent http requests -func rateLimitHandler(handle http.Handler, limit int) http.Handler { +// RateLimitHandler limits the number of concurrent http requests +func RateLimitHandler(handle http.Handler, limit int) http.Handler { return rateLimit{ handler: handle, rateQueue: make(chan bool, limit), diff --git a/pkg/api/api-response.go b/pkg/server/api/api-response.go similarity index 100% rename from pkg/api/api-response.go rename to pkg/server/api/api-response.go diff --git a/pkg/api/contenttype.go b/pkg/server/api/contenttype.go similarity index 100% rename from pkg/api/contenttype.go rename to pkg/server/api/contenttype.go diff --git a/pkg/api/errors.go b/pkg/server/api/errors.go similarity index 100% rename from pkg/api/errors.go rename to pkg/server/api/errors.go diff --git a/pkg/api/headers.go b/pkg/server/api/headers.go similarity index 100% rename from pkg/api/headers.go rename to pkg/server/api/headers.go diff --git a/pkg/api/range.go b/pkg/server/api/range.go similarity index 100% rename from pkg/api/range.go rename to pkg/server/api/range.go diff --git a/pkg/api/resources.go b/pkg/server/api/resources.go similarity index 100% rename from pkg/api/resources.go rename to pkg/server/api/resources.go diff --git a/pkg/api/utils.go b/pkg/server/api/utils.go similarity index 100% rename from pkg/api/utils.go rename to pkg/server/api/utils.go diff --git a/pkg/api/config/config.go b/pkg/server/config/config.go similarity index 100% rename from 
pkg/api/config/config.go rename to pkg/server/config/config.go diff --git a/pkg/api/config/config_test.go b/pkg/server/config/config_test.go similarity index 100% rename from pkg/api/config/config_test.go rename to pkg/server/config/config_test.go diff --git a/pkg/server/router.go b/pkg/server/router.go new file mode 100644 index 000000000..416f45da0 --- /dev/null +++ b/pkg/server/router.go @@ -0,0 +1,77 @@ +/* + * Minimalist Object Storage, (C) 2014 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package server + +import ( + "net/http" + + router "github.com/gorilla/mux" + jsonRPC "github.com/gorilla/rpc/v2" + "github.com/minio/minio/pkg/server/api" + "github.com/minio/minio/pkg/server/rpc" +) + +func registerAPI(mux *router.Router) http.Handler { + api := api.MinioAPI{} + + mux.HandleFunc("/", api.ListBucketsHandler).Methods("GET") + mux.HandleFunc("/{bucket}", api.ListObjectsHandler).Methods("GET") + mux.HandleFunc("/{bucket}", api.PutBucketHandler).Methods("PUT") + mux.HandleFunc("/{bucket}", api.HeadBucketHandler).Methods("HEAD") + mux.HandleFunc("/{bucket}/{object:.*}", api.HeadObjectHandler).Methods("HEAD") + mux.HandleFunc("/{bucket}/{object:.*}", api.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").Methods("PUT") + mux.HandleFunc("/{bucket}/{object:.*}", api.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}").Methods("GET") + mux.HandleFunc("/{bucket}/{object:.*}", api.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}").Methods("POST") + mux.HandleFunc("/{bucket}/{object:.*}", api.NewMultipartUploadHandler).Methods("POST") + mux.HandleFunc("/{bucket}/{object:.*}", api.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}").Methods("DELETE") + mux.HandleFunc("/{bucket}/{object:.*}", api.GetObjectHandler).Methods("GET") + mux.HandleFunc("/{bucket}/{object:.*}", api.PutObjectHandler).Methods("PUT") + + // not implemented yet + mux.HandleFunc("/{bucket}", api.DeleteBucketHandler).Methods("DELETE") + + // unsupported API + mux.HandleFunc("/{bucket}/{object:.*}", api.DeleteObjectHandler).Methods("DELETE") + + return mux +} + +func registerOthers(mux http.Handler, conf api.Config) http.Handler { + mux = api.ValidContentTypeHandler(mux) + mux = api.TimeValidityHandler(mux) + mux = api.IgnoreResourcesHandler(mux) + mux = api.ValidateAuthHeaderHandler(mux) + mux = api.RateLimitHandler(mux, conf.RateLimit) + mux = api.LoggingHandler(mux) + return mux +} + +func 
registerRPC(mux *router.Router, r *jsonRPC.Server) http.Handler { + mux.Handle("/rpc", r) + return mux +} + +// APIHandler api handler +func APIHandler(conf api.Config) http.Handler { + mux := router.NewRouter() + return registerOthers(registerAPI(mux), conf) +} + +// RPCHandler rpc handler +func RPCHandler() http.Handler { + return registerRPC(router.NewRouter(), rpc.HelloServiceHandler()) +} diff --git a/pkg/server/rpc/methods.go b/pkg/server/rpc/methods.go new file mode 100644 index 000000000..4c654264b --- /dev/null +++ b/pkg/server/rpc/methods.go @@ -0,0 +1,38 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rpc + +import "net/http" + +// HelloArgs - hello args +type HelloArgs struct { + Who string +} + +// HelloReply - hello reply +type HelloReply struct { + Message string +} + +// HelloService - +type HelloService struct{} + +// Say - +func (h *HelloService) Say(r *http.Request, args *HelloArgs, reply *HelloReply) error { + reply.Message = "Hello, " + args.Who + "!" + return nil +} diff --git a/pkg/server/rpc/server.go b/pkg/server/rpc/server.go new file mode 100644 index 000000000..2b98b5213 --- /dev/null +++ b/pkg/server/rpc/server.go @@ -0,0 +1,30 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rpc + +import ( + "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/json" +) + +// HelloServiceHandler - +func HelloServiceHandler() *rpc.Server { + s := rpc.NewServer() + s.RegisterCodec(json.NewCodec(), "application/json") + s.RegisterService(new(HelloService), "") + return s +} diff --git a/pkg/api/server.go b/pkg/server/server.go similarity index 63% rename from pkg/api/server.go rename to pkg/server/server.go index 4aaf179b8..bf76caa06 100644 --- a/pkg/api/server.go +++ b/pkg/server/server.go @@ -14,43 +14,29 @@ * limitations under the License. 
*/ -package api +package server import ( "fmt" "net" "net/http" "strings" + + "github.com/minio/minio/pkg/server/api" ) -// Config - http server config -type Config struct { - Address string - TLS bool - CertFile string - KeyFile string - RateLimit int -} - -// Start http server -func Start(a API) <-chan error { - errCh := make(chan error) - go start(errCh, a) - return errCh -} - -func start(errCh chan error, a API) { +func startAPI(errCh chan error, conf api.Config) { defer close(errCh) var err error // Minio server config httpServer := &http.Server{ - Addr: a.config.Address, - Handler: a.handler, + Addr: conf.Address, + Handler: APIHandler(conf), MaxHeaderBytes: 1 << 20, } - host, port, err := net.SplitHostPort(a.config.Address) + host, port, err := net.SplitHostPort(conf.Address) if err != nil { errCh <- err return @@ -81,11 +67,11 @@ func start(errCh chan error, a API) { fmt.Printf("Starting minio server on: http://%s:%s\n", host, port) } err = httpServer.ListenAndServe() - case a.config.TLS == true: + case conf.TLS == true: for _, host := range hosts { fmt.Printf("Starting minio server on: https://%s:%s\n", host, port) } - err = httpServer.ListenAndServeTLS(a.config.CertFile, a.config.KeyFile) + err = httpServer.ListenAndServeTLS(conf.CertFile, conf.KeyFile) } if err != nil { errCh <- err @@ -94,15 +80,39 @@ func start(errCh chan error, a API) { return } -// API is used to build api server -type API struct { - config Config - handler http.Handler +func startRPC(errCh chan error) { + defer close(errCh) + + rpcHandler := RPCHandler() + var err error + // Minio server config + httpServer := &http.Server{ + Addr: "127.0.0.1:9001", + Handler: rpcHandler, + MaxHeaderBytes: 1 << 20, + } + err = httpServer.ListenAndServe() + if err != nil { + errCh <- err + } + errCh <- nil + return } -// StartServer APIFactory builds api server -func StartServer(conf Config) error { - for err := range Start(New(conf)) { +// StartServices starts basic services for a server +func 
StartServices(conf api.Config) error { + apiErrCh := make(chan error) + rpcErrCh := make(chan error) + + go startAPI(apiErrCh, conf) + go startRPC(rpcErrCh) + + select { + case err := <-apiErrCh: + if err != nil { + return err + } + case err := <-rpcErrCh: if err != nil { return err } From 188785a8864eda3b7667454bec68525ef0116dcf Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 1 Jul 2015 00:16:17 -0700 Subject: [PATCH 06/19] Add and remove dependencies --- Godeps/Godeps.json | 16 +- .../src/github.com/gorilla/rpc/v2/LICENSE | 27 + .../src/github.com/gorilla/rpc/v2/README.md | 6 + .../gorilla/rpc/v2/compression_selector.go | 90 + .../src/github.com/gorilla/rpc/v2/doc.go | 81 + .../gorilla/rpc/v2/encoder_selector.go | 43 + .../github.com/gorilla/rpc/v2/json/client.go | 57 + .../src/github.com/gorilla/rpc/v2/json/doc.go | 58 + .../gorilla/rpc/v2/json/json_test.go | 117 + .../github.com/gorilla/rpc/v2/json/server.go | 155 + .../github.com/gorilla/rpc/v2/json2/client.go | 75 + .../github.com/gorilla/rpc/v2/json2/error.go | 39 + .../gorilla/rpc/v2/json2/json_test.go | 161 + .../github.com/gorilla/rpc/v2/json2/server.go | 190 ++ .../gorilla/rpc/v2/json2/testapp/README.md | 10 + .../gorilla/rpc/v2/json2/testapp/counter.go | 50 + .../gorilla/rpc/v2/json2/testapp/counter.js | 64 + .../gorilla/rpc/v2/json2/testapp/index.html | 32 + .../rpc/v2/json2/testapp/jquery.jsonrpc.js | 158 + .../src/github.com/gorilla/rpc/v2/map.go | 164 + .../github.com/gorilla/rpc/v2/protorpc/doc.go | 48 + .../gorilla/rpc/v2/protorpc/protorpc_test.go | 87 + .../gorilla/rpc/v2/protorpc/server.go | 147 + .../src/github.com/gorilla/rpc/v2/server.go | 158 + .../github.com/gorilla/rpc/v2/server_test.go | 54 + .../src/github.com/stretchr/objx/.gitignore | 22 - .../src/github.com/stretchr/objx/LICENSE.md | 23 - .../src/github.com/stretchr/objx/README.md | 3 - .../src/github.com/stretchr/objx/accessors.go | 179 - .../stretchr/objx/accessors_test.go | 145 - .../stretchr/objx/codegen/array-access.txt 
| 14 - .../stretchr/objx/codegen/index.html | 86 - .../stretchr/objx/codegen/template.txt | 286 -- .../stretchr/objx/codegen/types_list.txt | 20 - .../src/github.com/stretchr/objx/constants.go | 13 - .../github.com/stretchr/objx/conversions.go | 117 - .../stretchr/objx/conversions_test.go | 94 - .../src/github.com/stretchr/objx/doc.go | 72 - .../github.com/stretchr/objx/fixture_test.go | 98 - .../src/github.com/stretchr/objx/map.go | 222 -- .../github.com/stretchr/objx/map_for_test.go | 10 - .../src/github.com/stretchr/objx/map_test.go | 147 - .../src/github.com/stretchr/objx/mutations.go | 81 - .../stretchr/objx/mutations_test.go | 77 - .../src/github.com/stretchr/objx/security.go | 14 - .../github.com/stretchr/objx/security_test.go | 12 - .../stretchr/objx/simple_example_test.go | 41 - .../src/github.com/stretchr/objx/tests.go | 17 - .../github.com/stretchr/objx/tests_test.go | 24 - .../stretchr/objx/type_specific_codegen.go | 2881 ----------------- .../objx/type_specific_codegen_test.go | 2867 ---------------- .../src/github.com/stretchr/objx/value.go | 13 - .../github.com/stretchr/objx/value_test.go | 1 - .../stretchr/testify/assert/assertions.go | 805 ----- .../testify/assert/assertions_test.go | 768 ----- .../github.com/stretchr/testify/assert/doc.go | 150 - .../stretchr/testify/assert/errors.go | 10 - .../testify/assert/forward_assertions.go | 262 -- .../testify/assert/forward_assertions_test.go | 526 --- .../testify/assert/http_assertions.go | 157 - .../testify/assert/http_assertions_test.go | 86 - .../github.com/stretchr/testify/mock/doc.go | 43 - .../github.com/stretchr/testify/mock/mock.go | 510 --- .../stretchr/testify/mock/mock_test.go | 669 ---- 64 files changed, 2075 insertions(+), 11577 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/LICENSE create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/README.md create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/compression_selector.go create mode 
100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/doc.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/encoder_selector.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/client.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/doc.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/json_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/server.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/client.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/error.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/json_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/server.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/README.md create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/counter.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/counter.js create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/index.html create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/jquery.jsonrpc.js create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/map.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/doc.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/protorpc_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/server.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/server.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/rpc/v2/server_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md delete mode 100644 
Godeps/_workspace/src/github.com/stretchr/objx/README.md delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/accessors.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/constants.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/conversions.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/doc.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/mutations.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/security.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/security_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/tests.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go delete mode 100644 
Godeps/_workspace/src/github.com/stretchr/objx/value.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/value_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index ff5e9ac07..d71a60b9d 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -21,6 +21,10 @@ "ImportPath": "github.com/gorilla/mux", "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" }, + { + "ImportPath": "github.com/gorilla/rpc/v2", + "Rev": "f6dbf92d77c723632269bf29154cc91f2507693b" + }, { "ImportPath": "github.com/minio/check", "Rev": "67f8c16c6c27bb03c82e41c2be533ace00035ab4" @@ -29,18 +33,6 @@ "ImportPath": "github.com/minio/cli", "Comment": "1.2.0-112-g823349c", "Rev": "823349ce91e76834a4af0119d5bbc58fd4d2c6b0" - }, - { - "ImportPath": "github.com/stretchr/objx", - "Rev": "cbeaeb16a013161a98496fad62933b1d21786672" - }, - { - "ImportPath": "github.com/stretchr/testify/assert", - "Rev": "e4ec8152c15fc46bd5056ce65997a07c7d415325" - }, - { - "ImportPath": 
"github.com/stretchr/testify/mock", - "Rev": "e4ec8152c15fc46bd5056ce65997a07c7d415325" } ] } diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/LICENSE b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/LICENSE new file mode 100644 index 000000000..0e5fb8728 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/README.md b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/README.md new file mode 100644 index 000000000..8f9af9a8d --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/README.md @@ -0,0 +1,6 @@ +rpc +=== + +gorilla/rpc is a foundation for RPC over HTTP services, providing access to the exported methods of an object through HTTP requests. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/rpc diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/compression_selector.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/compression_selector.go new file mode 100644 index 000000000..bbf3fd1ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/compression_selector.go @@ -0,0 +1,90 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "compress/flate" + "compress/gzip" + "io" + "net/http" + "strings" + "unicode" +) + +// gzipWriter writes and closes the gzip writer. +type gzipWriter struct { + w *gzip.Writer +} + +func (gw *gzipWriter) Write(p []byte) (n int, err error) { + defer gw.w.Close() + return gw.w.Write(p) +} + +// gzipEncoder implements the gzip compressed http encoder. +type gzipEncoder struct { +} + +func (enc *gzipEncoder) Encode(w http.ResponseWriter) io.Writer { + w.Header().Set("Content-Encoding", "gzip") + return &gzipWriter{gzip.NewWriter(w)} +} + +// flateWriter writes and closes the flate writer. +type flateWriter struct { + w *flate.Writer +} + +func (fw *flateWriter) Write(p []byte) (n int, err error) { + defer fw.w.Close() + return fw.w.Write(p) +} + +// flateEncoder implements the flate compressed http encoder. 
+type flateEncoder struct { +} + +func (enc *flateEncoder) Encode(w http.ResponseWriter) io.Writer { + fw, err := flate.NewWriter(w, flate.DefaultCompression) + if err != nil { + return w + } + w.Header().Set("Content-Encoding", "deflate") + return &flateWriter{fw} +} + +// CompressionSelector generates the compressed http encoder. +type CompressionSelector struct { +} + +// acceptedEnc returns the first compression type in "Accept-Encoding" header +// field of the request. +func acceptedEnc(req *http.Request) string { + encHeader := req.Header.Get("Accept-Encoding") + if encHeader == "" { + return "" + } + encTypes := strings.FieldsFunc(encHeader, func(r rune) bool { + return unicode.IsSpace(r) || r == ',' + }) + for _, enc := range encTypes { + if enc == "gzip" || enc == "deflate" { + return enc + } + } + return "" +} + +// Select method selects the correct compression encoder based on http HEADER. +func (_ *CompressionSelector) Select(r *http.Request) Encoder { + switch acceptedEnc(r) { + case "gzip": + return &gzipEncoder{} + case "flate": + return &flateEncoder{} + } + return DefaultEncoder +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/doc.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/doc.go new file mode 100644 index 000000000..301d5dc06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/doc.go @@ -0,0 +1,81 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/rpc is a foundation for RPC over HTTP services, providing +access to the exported methods of an object through HTTP requests. + +This package derives from the standard net/rpc package but uses a single HTTP +request per call instead of persistent connections. Other differences +compared to net/rpc: + + - Multiple codecs can be registered in the same server. 
+ - A codec is chosen based on the "Content-Type" header from the request. + - Service methods also receive http.Request as parameter. + - This package can be used on Google App Engine. + +Let's setup a server and register a codec and service: + + import ( + "http" + "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/json" + ) + + func init() { + s := rpc.NewServer() + s.RegisterCodec(json.NewCodec(), "application/json") + s.RegisterService(new(HelloService), "") + http.Handle("/rpc", s) + } + +This server handles requests to the "/rpc" path using a JSON codec. +A codec is tied to a content type. In the example above, the JSON codec is +registered to serve requests with "application/json" as the value for the +"Content-Type" header. If the header includes a charset definition, it is +ignored; only the media-type part is taken into account. + +A service can be registered using a name. If the name is empty, like in the +example above, it will be inferred from the service type. + +That's all about the server setup. Now let's define a simple service: + + type HelloArgs struct { + Who string + } + + type HelloReply struct { + Message string + } + + type HelloService struct {} + + func (h *HelloService) Say(r *http.Request, args *HelloArgs, reply *HelloReply) error { + reply.Message = "Hello, " + args.Who + "!" + return nil + } + +The example above defines a service with a method "HelloService.Say" and +the arguments and reply related to that method. + +The service must be exported (begin with an upper case letter) or local +(defined in the package registering the service). + +When a service is registered, the server inspects the service methods +and make available the ones that follow these rules: + + - The method name is exported. + - The method has three arguments: *http.Request, *args, *reply. + - All three arguments are pointers. + - The second and third arguments are exported or local. + - The method has return type error. + +All other methods are ignored. 
+ +Gorilla has packages with common RPC codecs. Check out their documentation: + + JSON: http://gorilla-web.appspot.com/pkg/rpc/json +*/ +package rpc diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/encoder_selector.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/encoder_selector.go new file mode 100644 index 000000000..333361f3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/encoder_selector.go @@ -0,0 +1,43 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "io" + "net/http" +) + +// Encoder interface contains the encoder for http response. +// Eg. gzip, flate compressions. +type Encoder interface { + Encode(w http.ResponseWriter) io.Writer +} + +type encoder struct { +} + +func (_ *encoder) Encode(w http.ResponseWriter) io.Writer { + return w +} + +var DefaultEncoder = &encoder{} + +// EncoderSelector interface provides a way to select encoder using the http +// request. Typically people can use this to check HEADER of the request and +// figure out client capabilities. +// Eg. "Accept-Encoding" tells about supported compressions. +type EncoderSelector interface { + Select(r *http.Request) Encoder +} + +type encoderSelector struct { +} + +func (_ *encoderSelector) Select(_ *http.Request) Encoder { + return DefaultEncoder +} + +var DefaultEncoderSelector = &encoderSelector{} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/client.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/client.go new file mode 100644 index 000000000..1be931e0a --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/client.go @@ -0,0 +1,57 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012-2013 The Gorilla Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "encoding/json" + "io" + "math/rand" +) + +// ---------------------------------------------------------------------------- +// Request and Response +// ---------------------------------------------------------------------------- + +// clientRequest represents a JSON-RPC request sent by a client. +type clientRequest struct { + // A String containing the name of the method to be invoked. + Method string `json:"method"` + // Object to pass as request parameter to the method. + Params [1]interface{} `json:"params"` + // The request id. This can be of any type. It is used to match the + // response with the request that it is replying to. + Id uint64 `json:"id"` +} + +// clientResponse represents a JSON-RPC response returned to a client. +type clientResponse struct { + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` + Id uint64 `json:"id"` +} + +// EncodeClientRequest encodes parameters for a JSON-RPC client request. +func EncodeClientRequest(method string, args interface{}) ([]byte, error) { + c := &clientRequest{ + Method: method, + Params: [1]interface{}{args}, + Id: uint64(rand.Int63()), + } + return json.Marshal(c) +} + +// DecodeClientResponse decodes the response body of a client request into +// the interface reply. +func DecodeClientResponse(r io.Reader, reply interface{}) error { + var c clientResponse + if err := json.NewDecoder(r).Decode(&c); err != nil { + return err + } + if c.Error != nil { + return &Error{Data: c.Error} + } + return json.Unmarshal(*c.Result, reply) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/doc.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/doc.go new file mode 100644 index 000000000..3f92b9cb3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/doc.go @@ -0,0 +1,58 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/rpc/json provides a codec for JSON-RPC over HTTP services. + +To register the codec in a RPC server: + + import ( + "http" + "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/json" + ) + + func init() { + s := rpc.NewServer() + s.RegisterCodec(json.NewCodec(), "application/json") + // [...] + http.Handle("/rpc", s) + } + +A codec is tied to a content type. In the example above, the server will use +the JSON codec for requests with "application/json" as the value for the +"Content-Type" header. + +This package follows the JSON-RPC 1.0 specification: + + http://json-rpc.org/wiki/specification + +Request format is: + + method: + The name of the method to be invoked, as a string in dotted notation + as in "Service.Method". + params: + An array with a single object to pass as argument to the method. + id: + The request id, a uint. It is used to match the response with the + request that it is replying to. + +Response format is: + + result: + The Object that was returned by the invoked method, + or null in case there was an error invoking the method. + error: + An Error object if there was an error invoking the method, + or null if there was no error. + id: + The same id as the request it is responding to. + +Check the gorilla/rpc documentation for more details: + + http://gorilla-web.appspot.com/pkg/rpc +*/ +package json diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/json_test.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/json_test.go new file mode 100644 index 000000000..1f798f3a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/json_test.go @@ -0,0 +1,117 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "github.com/gorilla/rpc/v2" +) + +var ( + ErrResponseError = errors.New("response error") + ErrResponseJsonError = &Error{Data: map[string]interface{}{ + "stackstrace": map[string]interface{}{"0": "foo()"}, + "error": "a message", + }} +) + +type Service1Request struct { + A int + B int +} + +type Service1Response struct { + Result int +} + +type Service1 struct { +} + +func (t *Service1) Multiply(r *http.Request, req *Service1Request, res *Service1Response) error { + res.Result = req.A * req.B + return nil +} + +func (t *Service1) ResponseError(r *http.Request, req *Service1Request, res *Service1Response) error { + return ErrResponseError +} + +func (t *Service1) ResponseJsonError(r *http.Request, req *Service1Request, res *Service1Response) error { + return ErrResponseJsonError +} + +func execute(t *testing.T, s *rpc.Server, method string, req, res interface{}) error { + if !s.HasMethod(method) { + t.Fatal("Expected to be registered:", method) + } + + buf, _ := EncodeClientRequest(method, req) + body := bytes.NewBuffer(buf) + r, _ := http.NewRequest("POST", "http://localhost:8080/", body) + r.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + s.ServeHTTP(w, r) + + return DecodeClientResponse(w.Body, res) +} + +func executeRaw(t *testing.T, s *rpc.Server, req json.RawMessage) (int, *bytes.Buffer) { + r, _ := http.NewRequest("POST", "http://localhost:8080/", bytes.NewBuffer(req)) + r.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + s.ServeHTTP(w, r) + + return w.Code, w.Body +} + +func field(name string, blob json.RawMessage) (v interface{}, ok bool) { + var obj map[string]interface{} + if err := json.Unmarshal(blob, &obj); err != nil { + return nil, false + } + v, ok = 
obj[name] + return +} + +func TestService(t *testing.T) { + s := rpc.NewServer() + s.RegisterCodec(NewCodec(), "application/json") + s.RegisterService(new(Service1), "") + + var res Service1Response + if err := execute(t, s, "Service1.Multiply", &Service1Request{4, 2}, &res); err != nil { + t.Error("Expected err to be nil, but got", err) + } + if res.Result != 8 { + t.Error("Expected res.Result to be 8, but got", res.Result) + } + if err := execute(t, s, "Service1.ResponseError", &Service1Request{4, 2}, &res); err == nil { + t.Errorf("Expected to get %q, but got nil", ErrResponseError) + } else if err.Error() != ErrResponseError.Error() { + t.Errorf("Expected to get %q, but got %q", ErrResponseError, err) + } + if code, res := executeRaw(t, s, json.RawMessage(`{"method":"Service1.Multiply","params":null,"id":5}`)); code != 400 { + t.Error("Expected response code to be 400, but got", code) + } else if v, ok := field("result", res.Bytes()); !ok || v != nil { + t.Errorf("Expected ok to be true and v to be nil, but got %v and %v", ok, v) + } + if err := execute(t, s, "Service1.ResponseJsonError", &Service1Request{4, 2}, &res); err == nil { + t.Errorf("Expected to get %q, but got nil", ErrResponseError) + } else if jsonErr, ok := err.(*Error); !ok { + t.Error("Expected err to be of a *json.Error type") + } else if !reflect.DeepEqual(jsonErr.Data, ErrResponseJsonError.Data) { + t.Errorf("Expected jsonErr to be %q, but got %q", ErrResponseJsonError, jsonErr) + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/server.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/server.go new file mode 100644 index 000000000..8fafbe3ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json/server.go @@ -0,0 +1,155 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package json + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/gorilla/rpc/v2" +) + +var null = json.RawMessage([]byte("null")) + +// An Error is a wrapper for a JSON interface value. It can be used by either +// a service's handler func to write more complex JSON data to an error field +// of a server's response, or by a client to read it. +type Error struct { + Data interface{} +} + +func (e *Error) Error() string { + return fmt.Sprintf("%v", e.Data) +} + +// ---------------------------------------------------------------------------- +// Request and Response +// ---------------------------------------------------------------------------- + +// serverRequest represents a JSON-RPC request received by the server. +type serverRequest struct { + // A String containing the name of the method to be invoked. + Method string `json:"method"` + // An Array of objects to pass as arguments to the method. + Params *json.RawMessage `json:"params"` + // The request id. This can be of any type. It is used to match the + // response with the request that it is replying to. + Id *json.RawMessage `json:"id"` +} + +// serverResponse represents a JSON-RPC response returned by the server. +type serverResponse struct { + // The Object that was returned by the invoked method. This must be null + // in case there was an error invoking the method. + Result interface{} `json:"result"` + // An Error object if there was an error invoking the method. It must be + // null if there was no error. + Error interface{} `json:"error"` + // This must be the same id as the request it is responding to. + Id *json.RawMessage `json:"id"` +} + +// ---------------------------------------------------------------------------- +// Codec +// ---------------------------------------------------------------------------- + +// NewCodec returns a new JSON Codec. +func NewCodec() *Codec { + return &Codec{} +} + +// Codec creates a CodecRequest to process each request. 
+type Codec struct { +} + +// NewRequest returns a CodecRequest. +func (c *Codec) NewRequest(r *http.Request) rpc.CodecRequest { + return newCodecRequest(r) +} + +// ---------------------------------------------------------------------------- +// CodecRequest +// ---------------------------------------------------------------------------- + +// newCodecRequest returns a new CodecRequest. +func newCodecRequest(r *http.Request) rpc.CodecRequest { + // Decode the request body and check if RPC method is valid. + req := new(serverRequest) + err := json.NewDecoder(r.Body).Decode(req) + r.Body.Close() + return &CodecRequest{request: req, err: err} +} + +// CodecRequest decodes and encodes a single request. +type CodecRequest struct { + request *serverRequest + err error +} + +// Method returns the RPC method for the current request. +// +// The method uses a dotted notation as in "Service.Method". +func (c *CodecRequest) Method() (string, error) { + if c.err == nil { + return c.request.Method, nil + } + return "", c.err +} + +// ReadRequest fills the request object for the RPC method. +func (c *CodecRequest) ReadRequest(args interface{}) error { + if c.err == nil { + if c.request.Params != nil { + // JSON params is array value. RPC params is struct. + // Unmarshal into array containing the request struct. + params := [1]interface{}{args} + c.err = json.Unmarshal(*c.request.Params, ¶ms) + } else { + c.err = errors.New("rpc: method request ill-formed: missing params field") + } + } + return c.err +} + +// WriteResponse encodes the response and writes it to the ResponseWriter. +func (c *CodecRequest) WriteResponse(w http.ResponseWriter, reply interface{}) { + if c.request.Id != nil { + // Id is null for notifications and they don't have a response. 
+ res := &serverResponse{ + Result: reply, + Error: &null, + Id: c.request.Id, + } + c.writeServerResponse(w, 200, res) + } +} + +func (c *CodecRequest) WriteError(w http.ResponseWriter, _ int, err error) { + res := &serverResponse{ + Result: &null, + Id: c.request.Id, + } + if jsonErr, ok := err.(*Error); ok { + res.Error = jsonErr.Data + } else { + res.Error = err.Error() + } + c.writeServerResponse(w, 400, res) +} + +func (c *CodecRequest) writeServerResponse(w http.ResponseWriter, status int, res *serverResponse) { + b, err := json.Marshal(res) + if err == nil { + w.WriteHeader(status) + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Write(b) + } else { + // Not sure in which case will this happen. But seems harmless. + rpc.WriteError(w, 400, err.Error()) + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/client.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/client.go new file mode 100644 index 000000000..8f4c1f5ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/client.go @@ -0,0 +1,75 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json2 + +import ( + "encoding/json" + "io" + "math/rand" +) + +// ---------------------------------------------------------------------------- +// Request and Response +// ---------------------------------------------------------------------------- + +// clientRequest represents a JSON-RPC request sent by a client. +type clientRequest struct { + // JSON-RPC protocol. + Version string `json:"jsonrpc"` + + // A String containing the name of the method to be invoked. + Method string `json:"method"` + + // Object to pass as request parameter to the method. + Params interface{} `json:"params"` + + // The request id. This can be of any type. 
It is used to match the + // response with the request that it is replying to. + Id uint64 `json:"id"` +} + +// clientResponse represents a JSON-RPC response returned to a client. +type clientResponse struct { + Version string `json:"jsonrpc"` + Result *json.RawMessage `json:"result"` + Error *json.RawMessage `json:"error"` +} + +// EncodeClientRequest encodes parameters for a JSON-RPC client request. +func EncodeClientRequest(method string, args interface{}) ([]byte, error) { + c := &clientRequest{ + Version: "2.0", + Method: method, + Params: args, + Id: uint64(rand.Int63()), + } + return json.Marshal(c) +} + +// DecodeClientResponse decodes the response body of a client request into +// the interface reply. +func DecodeClientResponse(r io.Reader, reply interface{}) error { + var c clientResponse + if err := json.NewDecoder(r).Decode(&c); err != nil { + return err + } + if c.Error != nil { + jsonErr := &Error{} + if err := json.Unmarshal(*c.Error, jsonErr); err != nil { + return &Error{ + Code: E_SERVER, + Message: string(*c.Error), + } + } + return jsonErr + } + + if c.Result == nil { + return ErrNullResult + } + + return json.Unmarshal(*c.Result, reply) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/error.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/error.go new file mode 100644 index 000000000..9d2cbd9ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/error.go @@ -0,0 +1,39 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package json2 + +import ( + "errors" +) + +type ErrorCode int + +const ( + E_PARSE ErrorCode = -32700 + E_INVALID_REQ ErrorCode = -32600 + E_NO_METHOD ErrorCode = -32601 + E_BAD_PARAMS ErrorCode = -32602 + E_INTERNAL ErrorCode = -32603 + E_SERVER ErrorCode = -32000 +) + +var ErrNullResult = errors.New("result is null") + +type Error struct { + // A Number that indicates the error type that occurred. + Code ErrorCode `json:"code"` /* required */ + + // A String providing a short description of the error. + // The message SHOULD be limited to a concise single sentence. + Message string `json:"message"` /* required */ + + // A Primitive or Structured value that contains additional information about the error. + Data interface{} `json:"data"` /* optional */ +} + +func (e *Error) Error() string { + return e.Message +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/json_test.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/json_test.go new file mode 100644 index 000000000..f3ed2c64b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/json_test.go @@ -0,0 +1,161 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json2 + +import ( + "bytes" + "encoding/json" + "errors" + "net/http" + "testing" + + "github.com/gorilla/rpc/v2" +) + +// ResponseRecorder is an implementation of http.ResponseWriter that +// records its mutations for later inspection in tests. +type ResponseRecorder struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + Flushed bool +} + +// NewRecorder returns an initialized ResponseRecorder. 
+func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// Header returns the response headers. +func (rw *ResponseRecorder) Header() http.Header { + return rw.HeaderMap +} + +// Write always succeeds and writes to rw.Body, if not nil. +func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + if rw.Body != nil { + rw.Body.Write(buf) + } + if rw.Code == 0 { + rw.Code = http.StatusOK + } + return len(buf), nil +} + +// WriteHeader sets rw.Code. +func (rw *ResponseRecorder) WriteHeader(code int) { + rw.Code = code +} + +// Flush sets rw.Flushed to true. +func (rw *ResponseRecorder) Flush() { + rw.Flushed = true +} + +// ---------------------------------------------------------------------------- + +var ErrResponseError = errors.New("response error") + +type Service1Request struct { + A int + B int +} + +type Service1BadRequest struct { + V string `json:"jsonrpc"` + M string `json:"method"` + ID uint64 `json:"id"` +} + +type Service1Response struct { + Result int +} + +type Service1 struct { +} + +func (t *Service1) Multiply(r *http.Request, req *Service1Request, res *Service1Response) error { + res.Result = req.A * req.B + return nil +} + +func (t *Service1) ResponseError(r *http.Request, req *Service1Request, res *Service1Response) error { + return ErrResponseError +} + +func execute(t *testing.T, s *rpc.Server, method string, req, res interface{}) error { + if !s.HasMethod(method) { + t.Fatal("Expected to be registered:", method) + } + + buf, _ := EncodeClientRequest(method, req) + body := bytes.NewBuffer(buf) + r, _ := http.NewRequest("POST", "http://localhost:8080/", body) + r.Header.Set("Content-Type", "application/json") + + w := NewRecorder() + s.ServeHTTP(w, r) + + return 
DecodeClientResponse(w.Body, res) +} + +func executeRaw(t *testing.T, s *rpc.Server, req interface{}, res interface{}) error { + j, _ := json.Marshal(req) + r, _ := http.NewRequest("POST", "http://localhost:8080/", bytes.NewBuffer(j)) + r.Header.Set("Content-Type", "application/json") + + w := NewRecorder() + s.ServeHTTP(w, r) + + return DecodeClientResponse(w.Body, res) +} + +func TestService(t *testing.T) { + s := rpc.NewServer() + s.RegisterCodec(NewCodec(), "application/json") + s.RegisterService(new(Service1), "") + + var res Service1Response + if err := execute(t, s, "Service1.Multiply", &Service1Request{4, 2}, &res); err != nil { + t.Error("Expected err to be nil, but got:", err) + } + if res.Result != 8 { + t.Errorf("Wrong response: %v.", res.Result) + } + + if err := execute(t, s, "Service1.ResponseError", &Service1Request{4, 2}, &res); err == nil { + t.Errorf("Expected to get %q, but got nil", ErrResponseError) + } else if err.Error() != ErrResponseError.Error() { + t.Errorf("Expected to get %q, but got %q", ErrResponseError, err) + } + + if err := executeRaw(t, s, &Service1BadRequest{"2.0", "Service1.Multiply", 1}, &res); err == nil { + t.Errorf("Expected error but error in nil") + } +} + +func TestDecodeNullResult(t *testing.T) { + data := `{"jsonrpc": "2.0", "id": 12345, "result": null}` + reader := bytes.NewReader([]byte(data)) + var result interface{} + + err := DecodeClientResponse(reader, &result) + + if err != ErrNullResult { + t.Error("Expected err no be ErrNullResult, but got:", err) + } + + if result != nil { + t.Error("Expected result to be nil, but got:", result) + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/server.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/server.go new file mode 100644 index 000000000..f04f44a18 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/server.go @@ -0,0 +1,190 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json2 + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/rpc/v2" +) + +var null = json.RawMessage([]byte("null")) +var Version = "2.0" + +// ---------------------------------------------------------------------------- +// Request and Response +// ---------------------------------------------------------------------------- + +// serverRequest represents a JSON-RPC request received by the server. +type serverRequest struct { + // JSON-RPC protocol. + Version string `json:"jsonrpc"` + + // A String containing the name of the method to be invoked. + Method string `json:"method"` + + // A Structured value to pass as arguments to the method. + Params *json.RawMessage `json:"params"` + + // The request id. MUST be a string, number or null. + // Our implementation will not do type checking for id. + // It will be copied as it is. + Id *json.RawMessage `json:"id"` +} + +// serverResponse represents a JSON-RPC response returned by the server. +type serverResponse struct { + // JSON-RPC protocol. + Version string `json:"jsonrpc"` + + // The Object that was returned by the invoked method. This must be null + // in case there was an error invoking the method. + // As per spec the member will be omitted if there was an error. + Result interface{} `json:"result,omitempty"` + + // An Error object if there was an error invoking the method. It must be + // null if there was no error. + // As per spec the member will be omitted if there was no error. + Error *Error `json:"error,omitempty"` + + // This must be the same id as the request it is responding to. 
+ Id *json.RawMessage `json:"id"` +} + +// ---------------------------------------------------------------------------- +// Codec +// ---------------------------------------------------------------------------- + +// NewcustomCodec returns a new JSON Codec based on passed encoder selector. +func NewCustomCodec(encSel rpc.EncoderSelector) *Codec { + return &Codec{encSel: encSel} +} + +// NewCodec returns a new JSON Codec. +func NewCodec() *Codec { + return NewCustomCodec(rpc.DefaultEncoderSelector) +} + +// Codec creates a CodecRequest to process each request. +type Codec struct { + encSel rpc.EncoderSelector +} + +// NewRequest returns a CodecRequest. +func (c *Codec) NewRequest(r *http.Request) rpc.CodecRequest { + return newCodecRequest(r, c.encSel.Select(r)) +} + +// ---------------------------------------------------------------------------- +// CodecRequest +// ---------------------------------------------------------------------------- + +// newCodecRequest returns a new CodecRequest. +func newCodecRequest(r *http.Request, encoder rpc.Encoder) rpc.CodecRequest { + // Decode the request body and check if RPC method is valid. + req := new(serverRequest) + err := json.NewDecoder(r.Body).Decode(req) + if err != nil { + err = &Error{ + Code: E_PARSE, + Message: err.Error(), + Data: req, + } + } + if req.Version != Version { + err = &Error{ + Code: E_INVALID_REQ, + Message: "jsonrpc must be " + Version, + Data: req, + } + } + r.Body.Close() + return &CodecRequest{request: req, err: err, encoder: encoder} +} + +// CodecRequest decodes and encodes a single request. +type CodecRequest struct { + request *serverRequest + err error + encoder rpc.Encoder +} + +// Method returns the RPC method for the current request. +// +// The method uses a dotted notation as in "Service.Method". 
+func (c *CodecRequest) Method() (string, error) { + if c.err == nil { + return c.request.Method, nil + } + return "", c.err +} + +// ReadRe %v", *res) + return nil +} + +func main() { + address := flag.String("address", ":65534", "") + s := rpc.NewServer() + s.RegisterCodec(json2.NewCustomCodec(&rpc.CompressionSelector{}), "application/json") + s.RegisterService(new(Counter), "") + http.Handle("/", http.StripPrefix("/", http.FileServer(http.Dir("./")))) + http.Handle("/jsonrpc/", s) + log.Fatal(http.ListenAndServe(*address, nil)) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/counter.js b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/counter.js new file mode 100644 index 000000000..9ec18b82b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/counter.js @@ -0,0 +1,64 @@ +function log(m, label) { + msg = $("
  • " + m + "
  • "); + msg.find("span").addClass(label); + out = $("#output"); + out.append(msg); + out.animate({"scrollTop": out[0].scrollHeight}, "fast"); +} + +$(document).ready(function() { + $("#incr").click(function() { + req = { + method : "Counter.Incr", + params : {delta: 1}, + }; + log("<- " + JSON.stringify(req), "secondary label"); + $.jsonrpc(req); + }); + $("#get").click(function() { + req = { + method : "Counter.Get", + params : {}, + }; + log("<- " + JSON.stringify(req), "label"); + $.jsonrpc(req, { + success : function(result) { + $("#get").addClass("success"); + setTimeout(function() { + $("#get").removeClass("success"); + }, 2000); + log("-> " + JSON.stringify(result), "success label"); + }, + error : function(error) { + $("#get").addClass("alert"); + setTimeout(function() { + $("#get").removeClass("alert"); + }, 2000); + log("-> " + JSON.stringify(error), "alert label"); + }, + }); + }); + $("#nan").click(function() { + req = { + method : "Counter.Nan", + params : {}, + }; + log("<- " + JSON.stringify(req), "label"); + $.jsonrpc(req, { + success : function(result) { + $("#nan").addClass("success"); + setTimeout(function() { + $("#nan").removeClass("success"); + }, 2000); + log("-> " + JSON.stringify(result), "success label"); + }, + error : function(error) { + $("#nan").addClass("alert"); + setTimeout(function() { + $("#nan").removeClass("alert"); + }, 2000); + log("-> " + JSON.stringify(error), "alert label"); + }, + }); + }); +}); diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/index.html b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/index.html new file mode 100644 index 000000000..51a2fea02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/index.html @@ -0,0 +1,32 @@ + + + + + + + + + Counter + + + + +
    + + + +
    +
    +
    +
    +
      +
    +
    +
    + + + diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/jquery.jsonrpc.js b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/jquery.jsonrpc.js new file mode 100644 index 000000000..2a0cadfe5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/json2/testapp/jquery.jsonrpc.js @@ -0,0 +1,158 @@ +/* + * jQuery JSON-RPC Plugin + * + * @version: 0.3(2012-05-17) + * @author hagino3000 (Takashi Nishibayashi) + * @author alanjds (Alan Justino da Silva) + * + * A JSON-RPC 2.0 implementation for jQuery. + * JSON-RPC is a stateless, light-weight remote procedure call (RPC) protocol. + * Read more in the + * + * Requires json2.js if browser has not window.JSON. + * + * Usage: + * $.jsonrpc(data [, callbacks [, debug]]); + * + * where data = {url: '/rpc/', method:'simplefunc', params:['posi', 'tional']} + * or data = {url: '/rpc/', method:'complexfunc', params:{nam:'ed', par:'ams'}} + * and callbacks = {success: successFunc, error: errorFunc} + * + * Setting no callback produces a JSON-RPC Notification. + * 'data' accepts 'timeout' keyword too, who sets the $.ajax request timeout. + * Setting 'debug' to true prints responses to Firebug's console.info + * + * Examples: + * // A RPC call with named parameters + * $.jsonrpc({ + * url : '/rpc', + * method : 'createUser', + * params : {name : 'John Smith', userId : '1000'} + * }, { + * success : function(result) { + * //doSomething + * }, + * error : function(error) { + * //doSomething + * } + * }); + * + * // Once set defaultUrl, url option is no need + * $.jsonrpc.defaultUrl = '/rpc'; + * + * // A Notification + * $.jsonrpc({ + * method : 'notify', + * params : {action : 'logout', userId : '1000'} + * }); + * + * // A Notification using console to debug and with timeout set + * $.jsonrpc({ + * method : 'notify', + * params : {action : 'logout', userId : '1000'}, + * debug : true, + * timeout : 500, + * }); + * + * // Set DataFilter. 
It is useful for buggy API that returns sometimes not json but html (when 500, 403..). + * $.jsonrpc({ + * method : 'getUser', + * dataFilter : function(data, type) { + * try { + * return JSON.parse(data); + * } catch(e) { + * return {error : {message : 'Cannot parse response', data : data}}; + * } + * }, function(result){ doSomething... } + * }, { + * success : handleSuccess + * error : handleFailure + * }); + * + * This document is licensed as free software under the terms of the + * MIT License: http://www.opensource.org/licenses/mit-license.php + */ +(function($) { + + var rpcid = 1, + emptyFn = function() {}; + + $.jsonrpc = $.jsonrpc || function(data, callbacks, debug) { + debug = debug || false; + + var postdata = { + jsonrpc: '2.0', + method: data.method || '', + params: data.params || {} + }; + if (callbacks) { + postdata.id = data.id || rpcid++; + } else { + callbacks = emptyFn; + } + + if (typeof(callbacks) === 'function') { + callbacks = { + success: callbacks, + error: callbacks + }; + } + + var dataFilter = data.dataFilter; + + var ajaxopts = { + url: data.url || $.jsonrpc.defaultUrl, + contentType: 'application/json', + dataType: 'text', + dataFilter: function(data, type) { + if (dataFilter) { + return dataFilter(data); + } else { + if (data != "") return JSON.parse(data); + } + }, + type: 'POST', + processData: false, + data: JSON.stringify(postdata), + success: function(resp) { + if (resp && !resp.error) { + return callbacks.success && callbacks.success(resp.result); + } else if (resp && resp.error) { + return callbacks.error && callbacks.error(resp.error); + } else { + return callbacks.error && callbacks.error(resp); + } + }, + error: function(xhr, status, error) { + if (error === 'timeout') { + callbacks.error({ + status: status, + code: 0, + message: 'Request Timeout' + }); + return; + } + // If response code is 404, 400, 500, server returns error object + try { + var res = JSON.parse(xhr.responseText); + callbacks.error(res.error); + } catch 
(e) { + callbacks.error({ + status: status, + code: 0, + message: error + }); + } + } + }; + if (data.timeout) { + ajaxopts['timeout'] = data.timeout; + } + + $.ajax(ajaxopts); + + return $; + } + $.jsonrpc.defaultUrl = $.jsonrpc.defaultUrl || '/jsonrpc/'; + +})(jQuery); diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/map.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/map.go new file mode 100644 index 000000000..dda42161c --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/map.go @@ -0,0 +1,164 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "fmt" + "net/http" + "reflect" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +var ( + // Precompute the reflect.Type of error and http.Request + typeOfError = reflect.TypeOf((*error)(nil)).Elem() + typeOfRequest = reflect.TypeOf((*http.Request)(nil)).Elem() +) + +// ---------------------------------------------------------------------------- +// service +// ---------------------------------------------------------------------------- + +type service struct { + name string // name of service + rcvr reflect.Value // receiver of methods for the service + rcvrType reflect.Type // type of the receiver + methods map[string]*serviceMethod // registered methods +} + +type serviceMethod struct { + method reflect.Method // receiver method + argsType reflect.Type // type of the request argument + replyType reflect.Type // type of the response argument +} + +// ---------------------------------------------------------------------------- +// serviceMap +// ---------------------------------------------------------------------------- + +// serviceMap is a registry for services. 
+type serviceMap struct { + mutex sync.Mutex + services map[string]*service +} + +// register adds a new service using reflection to extract its methods. +func (m *serviceMap) register(rcvr interface{}, name string) error { + // Setup service. + s := &service{ + name: name, + rcvr: reflect.ValueOf(rcvr), + rcvrType: reflect.TypeOf(rcvr), + methods: make(map[string]*serviceMethod), + } + if name == "" { + s.name = reflect.Indirect(s.rcvr).Type().Name() + if !isExported(s.name) { + return fmt.Errorf("rpc: type %q is not exported", s.name) + } + } + if s.name == "" { + return fmt.Errorf("rpc: no service name for type %q", + s.rcvrType.String()) + } + // Setup methods. + for i := 0; i < s.rcvrType.NumMethod(); i++ { + method := s.rcvrType.Method(i) + mtype := method.Type + // Method must be exported. + if method.PkgPath != "" { + continue + } + // Method needs four ins: receiver, *http.Request, *args, *reply. + if mtype.NumIn() != 4 { + continue + } + // First argument must be a pointer and must be http.Request. + reqType := mtype.In(1) + if reqType.Kind() != reflect.Ptr || reqType.Elem() != typeOfRequest { + continue + } + // Second argument must be a pointer and must be exported. + args := mtype.In(2) + if args.Kind() != reflect.Ptr || !isExportedOrBuiltin(args) { + continue + } + // Third argument must be a pointer and must be exported. + reply := mtype.In(3) + if reply.Kind() != reflect.Ptr || !isExportedOrBuiltin(reply) { + continue + } + // Method needs one out: error. + if mtype.NumOut() != 1 { + continue + } + if returnType := mtype.Out(0); returnType != typeOfError { + continue + } + s.methods[method.Name] = &serviceMethod{ + method: method, + argsType: args.Elem(), + replyType: reply.Elem(), + } + } + if len(s.methods) == 0 { + return fmt.Errorf("rpc: %q has no exported methods of suitable type", + s.name) + } + // Add to the map. 
+ m.mutex.Lock() + defer m.mutex.Unlock() + if m.services == nil { + m.services = make(map[string]*service) + } else if _, ok := m.services[s.name]; ok { + return fmt.Errorf("rpc: service already defined: %q", s.name) + } + m.services[s.name] = s + return nil +} + +// get returns a registered service given a method name. +// +// The method name uses a dotted notation as in "Service.Method". +func (m *serviceMap) get(method string) (*service, *serviceMethod, error) { + parts := strings.Split(method, ".") + if len(parts) != 2 { + err := fmt.Errorf("rpc: service/method request ill-formed: %q", method) + return nil, nil, err + } + m.mutex.Lock() + service := m.services[parts[0]] + m.mutex.Unlock() + if service == nil { + err := fmt.Errorf("rpc: can't find service %q", method) + return nil, nil, err + } + serviceMethod := service.methods[parts[1]] + if serviceMethod == nil { + err := fmt.Errorf("rpc: can't find method %q", method) + return nil, nil, err + } + return service, serviceMethod, nil +} + +// isExported returns true of a string is an exported (upper case) name. +func isExported(name string) bool { + rune, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(rune) +} + +// isExportedOrBuiltin returns true if a type is exported or a builtin. +func isExportedOrBuiltin(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + // PkgPath will be non-empty even for an exported type, + // so we need to check the type name as well. + return isExported(t.Name()) || t.PkgPath() == "" +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/doc.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/doc.go new file mode 100644 index 000000000..482e6037b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/doc.go @@ -0,0 +1,48 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/rpc/protorpc provides a codec for ProtoRPC over HTTP services. + +To register the codec in a RPC server: + + import ( + "http" + "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/protorpc" + ) + + func init() { + s := rpc.NewServer() + s.RegisterCodec(protorpc.NewCodec(), "application/json") + // [...] + http.Handle("/rpc", s) + } + +A codec is tied to a content type. In the example above, the server +will use the ProtoRPC codec for requests with "application/json" as +the value for the "Content-Type" header. + +This package implement ProtoRPC, based on the JSON-RPC transport, it +differs in that it uses HTTP as its envelope. + +Example: +POST /Service.Method +Request: +{ + "requestField1": "value1", + "requestField2": "value2", +} +Response: +{ + "responseField1": "value1", + "responseField2": "value2", +} + +Check the gorilla/rpc documentation for more details: + + http://gorilla-web.appspot.com/pkg/rpc +*/ +package protorpc diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/protorpc_test.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/protorpc_test.go new file mode 100644 index 000000000..08f3c8b8d --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/protorpc_test.go @@ -0,0 +1,87 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protorpc + +import ( + "bytes" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gorilla/rpc/v2" +) + +var ErrResponseError = errors.New("response error") + +type Service1Request struct { + A int + B int +} + +type Service1BadRequest struct { +} + +type Service1Response struct { + Result int + ErrorMessage string `json:"error_message"` +} + +type Service1 struct { +} + +func (t *Service1) Multiply(r *http.Request, req *Service1Request, res *Service1Response) error { + res.Result = req.A * req.B + return nil +} + +func (t *Service1) ResponseError(r *http.Request, req *Service1Request, res *Service1Response) error { + return ErrResponseError +} + +func execute(t *testing.T, s *rpc.Server, method string, req, res interface{}) (int, error) { + if !s.HasMethod(method) { + t.Fatal("Expected to be registered:", method) + } + + buf, _ := json.Marshal(req) + body := bytes.NewBuffer(buf) + r, _ := http.NewRequest("POST", "http://localhost:8080/"+method, body) + r.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + s.ServeHTTP(w, r) + + err := json.NewDecoder(w.Body).Decode(res) + return w.Code, err +} + +func TestService(t *testing.T) { + s := rpc.NewServer() + s.RegisterCodec(NewCodec(), "application/json") + s.RegisterService(new(Service1), "") + + var res Service1Response + if _, err := execute(t, s, "Service1.Multiply", &Service1Request{4, 2}, &res); err != nil { + t.Error("Expected err to be nil, but got:", err) + } + if res.Result != 8 { + t.Error("Expected res.Result to be 8, but got:", res.Result) + } + if res.ErrorMessage != "" { + t.Error("Expected error_message to be empty, but got:", res.ErrorMessage) + } + if code, err := execute(t, s, "Service1.ResponseError", &Service1Request{4, 2}, &res); err != nil || code != 400 { + t.Errorf("Expected code to be 400 and error to be nil, but got %v (%v)", code, err) + } + if res.ErrorMessage == "" { + t.Errorf("Expected error_message to be %q, 
but got %q", ErrResponseError, res.ErrorMessage) + } + if code, _ := execute(t, s, "Service1.Multiply", nil, &res); code != 400 { + t.Error("Expected http response code 400, but got", code) + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/server.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/server.go new file mode 100644 index 000000000..eada0a164 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/protorpc/server.go @@ -0,0 +1,147 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protorpc + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/gorilla/rpc/v2" +) + +var null = json.RawMessage([]byte("null")) + +// ---------------------------------------------------------------------------- +// Request and Response +// ---------------------------------------------------------------------------- + +// serverRequest represents a ProtoRPC request received by the server. +type serverRequest struct { + // A String containing the name of the method to be invoked. + Method string `json:"method"` + // An Array of objects to pass as arguments to the method. + Params *json.RawMessage `json:"params"` + // The request id. This can be of any type. It is used to match the + // response with the request that it is replying to. + Id *json.RawMessage `json:"id"` +} + +// serverResponse represents a ProtoRPC response returned by the server. +type serverResponse struct { + // The Object that was returned by the invoked method. This must be null + // in case there was an error invoking the method. + Result interface{} `json:"result"` + // An Error object if there was an error invoking the method. It must be + // null if there was no error. 
+ Error interface{} `json:"error"` + // This must be the same id as the request it is responding to. + Id *json.RawMessage `json:"id"` +} + +// ---------------------------------------------------------------------------- +// Codec +// ---------------------------------------------------------------------------- + +// NewCodec returns a new ProtoRPC Codec. +func NewCodec() *Codec { + return &Codec{} +} + +// Codec creates a CodecRequest to process each request. +type Codec struct { +} + +// NewRequest returns a CodecRequest. +func (c *Codec) NewRequest(r *http.Request) rpc.CodecRequest { + return newCodecRequest(r) +} + +// ---------------------------------------------------------------------------- +// CodecRequest +// ---------------------------------------------------------------------------- + +// newCodecRequest returns a new CodecRequest. +func newCodecRequest(r *http.Request) rpc.CodecRequest { + // Decode the request body and check if RPC method is valid. + req := new(serverRequest) + path := r.URL.Path + index := strings.LastIndex(path, "/") + if index < 0 { + return &CodecRequest{request: req, err: fmt.Errorf("rpc: no method: %s", path)} + } + req.Method = path[index+1:] + err := json.NewDecoder(r.Body).Decode(&req.Params) + r.Body.Close() + var errr error + if err != io.EOF { + errr = err + } + return &CodecRequest{request: req, err: errr} +} + +// CodecRequest decodes and encodes a single request. +type CodecRequest struct { + request *serverRequest + err error +} + +// Method returns the RPC method for the current request. +// +// The method uses a dotted notation as in "Service.Method". +func (c *CodecRequest) Method() (string, error) { + if c.err == nil { + return c.request.Method, nil + } + return "", c.err +} + +// ReadRequest fills the request object for the RPC method. 
+func (c *CodecRequest) ReadRequest(args interface{}) error { + if c.err == nil { + if c.request.Params != nil { + c.err = json.Unmarshal(*c.request.Params, args) + } else { + c.err = errors.New("rpc: method request ill-formed: missing params field") + } + } + return c.err +} + +// WriteResponse encodes the response and writes it to the ResponseWriter. +func (c *CodecRequest) WriteResponse(w http.ResponseWriter, reply interface{}) { + res := &serverResponse{ + Result: reply, + Error: &null, + Id: c.request.Id, + } + c.writeServerResponse(w, 200, res) +} + +func (c *CodecRequest) WriteError(w http.ResponseWriter, status int, err error) { + res := &serverResponse{ + Result: &struct { + ErrorMessage interface{} `json:"error_message"` + }{err.Error()}, + Id: c.request.Id, + } + c.writeServerResponse(w, status, res) +} + +func (c *CodecRequest) writeServerResponse(w http.ResponseWriter, status int, res *serverResponse) { + b, err := json.Marshal(res.Result) + if err == nil { + w.WriteHeader(status) + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Write(b) + } else { + // Not sure in which case will this happen. But seems harmless. + rpc.WriteError(w, 400, err.Error()) + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/server.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/server.go new file mode 100644 index 000000000..b552cb3c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/server.go @@ -0,0 +1,158 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package rpc + +import ( + "fmt" + "net/http" + "reflect" + "strings" +) + +// ---------------------------------------------------------------------------- +// Codec +// ---------------------------------------------------------------------------- + +// Codec creates a CodecRequest to process each request. +type Codec interface { + NewRequest(*http.Request) CodecRequest +} + +// CodecRequest decodes a request and encodes a response using a specific +// serialization scheme. +type CodecRequest interface { + // Reads the request and returns the RPC method name. + Method() (string, error) + // Reads the request filling the RPC method args. + ReadRequest(interface{}) error + // Writes the response using the RPC method reply. + WriteResponse(http.ResponseWriter, interface{}) + // Writes an error produced by the server. + WriteError(w http.ResponseWriter, status int, err error) +} + +// ---------------------------------------------------------------------------- +// Server +// ---------------------------------------------------------------------------- + +// NewServer returns a new RPC server. +func NewServer() *Server { + return &Server{ + codecs: make(map[string]Codec), + services: new(serviceMap), + } +} + +// Server serves registered RPC services using registered codecs. +type Server struct { + codecs map[string]Codec + services *serviceMap +} + +// RegisterCodec adds a new codec to the server. +// +// Codecs are defined to process a given serialization scheme, e.g., JSON or +// XML. A codec is chosen based on the "Content-Type" header from the request, +// excluding the charset definition. +func (s *Server) RegisterCodec(codec Codec, contentType string) { + s.codecs[strings.ToLower(contentType)] = codec +} + +// RegisterService adds a new service to the server. +// +// The name parameter is optional: if empty it will be inferred from +// the receiver type name. 
+// +// Methods from the receiver will be extracted if these rules are satisfied: +// +// - The receiver is exported (begins with an upper case letter) or local +// (defined in the package registering the service). +// - The method name is exported. +// - The method has three arguments: *http.Request, *args, *reply. +// - All three arguments are pointers. +// - The second and third arguments are exported or local. +// - The method has return type error. +// +// All other methods are ignored. +func (s *Server) RegisterService(receiver interface{}, name string) error { + return s.services.register(receiver, name) +} + +// HasMethod returns true if the given method is registered. +// +// The method uses a dotted notation as in "Service.Method". +func (s *Server) HasMethod(method string) bool { + if _, _, err := s.services.get(method); err == nil { + return true + } + return false +} + +// ServeHTTP +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + WriteError(w, 405, "rpc: POST method required, received "+r.Method) + return + } + contentType := r.Header.Get("Content-Type") + idx := strings.Index(contentType, ";") + if idx != -1 { + contentType = contentType[:idx] + } + codec := s.codecs[strings.ToLower(contentType)] + if codec == nil { + WriteError(w, 415, "rpc: unrecognized Content-Type: "+contentType) + return + } + // Create a new codec request. + codecReq := codec.NewRequest(r) + // Get service method to be called. + method, errMethod := codecReq.Method() + if errMethod != nil { + codecReq.WriteError(w, 400, errMethod) + return + } + serviceSpec, methodSpec, errGet := s.services.get(method) + if errGet != nil { + codecReq.WriteError(w, 400, errGet) + return + } + // Decode the args. + args := reflect.New(methodSpec.argsType) + if errRead := codecReq.ReadRequest(args.Interface()); errRead != nil { + codecReq.WriteError(w, 400, errRead) + return + } + // Call the service method. 
+ reply := reflect.New(methodSpec.replyType) + errValue := methodSpec.method.Func.Call([]reflect.Value{ + serviceSpec.rcvr, + reflect.ValueOf(r), + args, + reply, + }) + // Cast the result to error if needed. + var errResult error + errInter := errValue[0].Interface() + if errInter != nil { + errResult = errInter.(error) + } + // Prevents Internet Explorer from MIME-sniffing a response away + // from the declared content-type + w.Header().Set("x-content-type-options", "nosniff") + // Encode the response. + if errResult == nil { + codecReq.WriteResponse(w, reply.Interface()) + } else { + codecReq.WriteError(w, 400, errResult) + } +} + +func WriteError(w http.ResponseWriter, status int, msg string) { + w.WriteHeader(status) + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprint(w, msg) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/rpc/v2/server_test.go b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/server_test.go new file mode 100644 index 000000000..d2cddfca2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/rpc/v2/server_test.go @@ -0,0 +1,54 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "net/http" + "testing" +) + +type Service1Request struct { + A int + B int +} + +type Service1Response struct { + Result int +} + +type Service1 struct { +} + +func (t *Service1) Multiply(r *http.Request, req *Service1Request, res *Service1Response) error { + res.Result = req.A * req.B + return nil +} + +type Service2 struct { +} + +func TestRegisterService(t *testing.T) { + var err error + s := NewServer() + service1 := new(Service1) + service2 := new(Service2) + + // Inferred name. 
+ err = s.RegisterService(service1, "") + if err != nil || !s.HasMethod("Service1.Multiply") { + t.Errorf("Expected to be registered: Service1.Multiply") + } + // Provided name. + err = s.RegisterService(service1, "Foo") + if err != nil || !s.HasMethod("Foo.Multiply") { + t.Errorf("Expected to be registered: Foo.Multiply") + } + // No methods. + err = s.RegisterService(service2, "") + if err == nil { + t.Errorf("Expected error on service2") + } +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore b/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md b/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md deleted file mode 100644 index 219994581..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md +++ /dev/null @@ -1,23 +0,0 @@ -objx - by Mat Ryer and Tyler Bunnell - -The MIT License (MIT) - -Copyright (c) 2014 Stretchr, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/README.md b/Godeps/_workspace/src/github.com/stretchr/objx/README.md deleted file mode 100644 index 4aa180687..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# objx - - * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx) diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go b/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go deleted file mode 100644 index 721bcac79..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go +++ /dev/null @@ -1,179 +0,0 @@ -package objx - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// arrayAccesRegexString is the regex used to extract the array number -// from the access path -const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` - -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) - -// Get gets the value using the specified selector and -// returns it inside a new Obj object. -// -// If it cannot find the value, Get will return a nil -// value inside an instance of Obj. -// -// Get can only operate directly on map[string]interface{} and []interface. 
-// -// Example -// -// To access the title of the third chapter of the second book, do: -// -// o.Get("books[1].chapters[2].title") -func (m Map) Get(selector string) *Value { - rawObj := access(m, selector, nil, false, false) - return &Value{data: rawObj} -} - -// Set sets the value using the specified selector and -// returns the object on which Set was called. -// -// Set can only operate directly on map[string]interface{} and []interface -// -// Example -// -// To set the title of the third chapter of the second book, do: -// -// o.Set("books[1].chapters[2].title","Time to Go") -func (m Map) Set(selector string, value interface{}) Map { - access(m, selector, value, true, false) - return m -} - -// access accesses the object using the selector and performs the -// appropriate action. -func access(current, selector, value interface{}, isSet, panics bool) interface{} { - - switch selector.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - - if array, ok := current.([]interface{}); ok { - index := intFromInterface(selector) - - if index >= len(array) { - if panics { - panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) - } - return nil - } - - return array[index] - } - - return nil - - case string: - - selStr := selector.(string) - selSegs := strings.SplitN(selStr, PathSeparator, 2) - thisSel := selSegs[0] - index := -1 - var err error - - // https://github.com/stretchr/objx/issues/12 - if strings.Contains(thisSel, "[") { - - arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) - - if len(arrayMatches) > 0 { - - // Get the key into the map - thisSel = arrayMatches[1] - - // Get the index into the array at the key - index, err = strconv.Atoi(arrayMatches[2]) - - if err != nil { - // This should never happen. If it does, something has gone - // seriously wrong. Panic. - panic("objx: Array index is not an integer. 
Must use array[int].") - } - - } - } - - if curMap, ok := current.(Map); ok { - current = map[string]interface{}(curMap) - } - - // get the object in question - switch current.(type) { - case map[string]interface{}: - curMSI := current.(map[string]interface{}) - if len(selSegs) <= 1 && isSet { - curMSI[thisSel] = value - return nil - } else { - current = curMSI[thisSel] - } - default: - current = nil - } - - if current == nil && panics { - panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) - } - - // do we need to access the item of an array? - if index > -1 { - if array, ok := current.([]interface{}); ok { - if index < len(array) { - current = array[index] - } else { - if panics { - panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) - } - current = nil - } - } - } - - if len(selSegs) > 1 { - current = access(current, selSegs[1], value, isSet, panics) - } - - } - - return current - -} - -// intFromInterface converts an interface object to the largest -// representation of an unsigned integer using a type switch and -// assertions -func intFromInterface(selector interface{}) int { - var value int - switch selector.(type) { - case int: - value = selector.(int) - case int8: - value = int(selector.(int8)) - case int16: - value = int(selector.(int16)) - case int32: - value = int(selector.(int32)) - case int64: - value = int(selector.(int64)) - case uint: - value = int(selector.(uint)) - case uint8: - value = int(selector.(uint8)) - case uint16: - value = int(selector.(uint16)) - case uint32: - value = int(selector.(uint32)) - case uint64: - value = int(selector.(uint64)) - default: - panic("objx: array access argument is not an integer type (this should never happen)") - } - - return value -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go deleted file mode 100644 index ce5d8e4aa..000000000 --- 
a/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestAccessorsAccessGetSingleField(t *testing.T) { - - current := map[string]interface{}{"name": "Tyler"} - assert.Equal(t, "Tyler", access(current, "name", nil, false, true)) - -} -func TestAccessorsAccessGetDeep(t *testing.T) { - - current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} - assert.Equal(t, "Tyler", access(current, "name.first", nil, false, true)) - assert.Equal(t, "Bunnell", access(current, "name.last", nil, false, true)) - -} -func TestAccessorsAccessGetDeepDeep(t *testing.T) { - - current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} - assert.Equal(t, 4, access(current, "one.two.three.four", nil, false, true)) - -} -func TestAccessorsAccessGetInsideArray(t *testing.T) { - - current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} - assert.Equal(t, "Tyler", access(current, "names[0].first", nil, false, true)) - assert.Equal(t, "Bunnell", access(current, "names[0].last", nil, false, true)) - assert.Equal(t, "Capitol", access(current, "names[1].first", nil, false, true)) - assert.Equal(t, "Bollocks", access(current, "names[1].last", nil, false, true)) - - assert.Panics(t, func() { - access(current, "names[2]", nil, false, true) - }) - assert.Nil(t, access(current, "names[2]", nil, false, false)) - -} - -func TestAccessorsAccessGetFromArrayWithInt(t *testing.T) { - - current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} - one := access(current, 0, nil, false, false) - two := access(current, 1, nil, false, false) - three 
:= access(current, 2, nil, false, false) - - assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) - assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) - assert.Nil(t, three) - -} - -func TestAccessorsGet(t *testing.T) { - - current := New(map[string]interface{}{"name": "Tyler"}) - assert.Equal(t, "Tyler", current.Get("name").data) - -} - -func TestAccessorsAccessSetSingleField(t *testing.T) { - - current := map[string]interface{}{"name": "Tyler"} - access(current, "name", "Mat", true, false) - assert.Equal(t, current["name"], "Mat") - - access(current, "age", 29, true, true) - assert.Equal(t, current["age"], 29) - -} - -func TestAccessorsAccessSetSingleFieldNotExisting(t *testing.T) { - - current := map[string]interface{}{} - access(current, "name", "Mat", true, false) - assert.Equal(t, current["name"], "Mat") - -} - -func TestAccessorsAccessSetDeep(t *testing.T) { - - current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} - - access(current, "name.first", "Mat", true, true) - access(current, "name.last", "Ryer", true, true) - - assert.Equal(t, "Mat", access(current, "name.first", nil, false, true)) - assert.Equal(t, "Ryer", access(current, "name.last", nil, false, true)) - -} -func TestAccessorsAccessSetDeepDeep(t *testing.T) { - - current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} - - access(current, "one.two.three.four", 5, true, true) - - assert.Equal(t, 5, access(current, "one.two.three.four", nil, false, true)) - -} -func TestAccessorsAccessSetArray(t *testing.T) { - - current := map[string]interface{}{"names": []interface{}{"Tyler"}} - - access(current, "names[0]", "Mat", true, true) - - assert.Equal(t, "Mat", access(current, "names[0]", nil, false, true)) - -} -func TestAccessorsAccessSetInsideArray(t *testing.T) { - - current := map[string]interface{}{"names": 
[]interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} - - access(current, "names[0].first", "Mat", true, true) - access(current, "names[0].last", "Ryer", true, true) - access(current, "names[1].first", "Captain", true, true) - access(current, "names[1].last", "Underpants", true, true) - - assert.Equal(t, "Mat", access(current, "names[0].first", nil, false, true)) - assert.Equal(t, "Ryer", access(current, "names[0].last", nil, false, true)) - assert.Equal(t, "Captain", access(current, "names[1].first", nil, false, true)) - assert.Equal(t, "Underpants", access(current, "names[1].last", nil, false, true)) - -} - -func TestAccessorsAccessSetFromArrayWithInt(t *testing.T) { - - current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} - one := access(current, 0, nil, false, false) - two := access(current, 1, nil, false, false) - three := access(current, 2, nil, false, false) - - assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) - assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) - assert.Nil(t, three) - -} - -func TestAccessorsSet(t *testing.T) { - - current := New(map[string]interface{}{"name": "Tyler"}) - current.Set("name", "Mat") - assert.Equal(t, "Mat", current.Get("name").data) - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt deleted file mode 100644 index 306023475..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt +++ /dev/null @@ -1,14 +0,0 @@ - case []{1}: - a := object.([]{1}) - if isSet { - a[index] = value.({1}) - } else { - if index >= len(a) { - if panics { - panic(fmt.Sprintf("objx: Index %d is out of range because the []{1} only contains %d items.", index, len(a))) - } - return nil - } else { - 
return a[index] - } - } diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html deleted file mode 100644 index 379ffc3c0..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html +++ /dev/null @@ -1,86 +0,0 @@ - - - - Codegen - - - - - -

    - Template -

    -

    - Use {x} as a placeholder for each argument. -

    - - -

    - Arguments (comma separated) -

    -

    - One block per line -

    - - -

    - Output -

    - - - - - - - - diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt deleted file mode 100644 index b396900b8..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt +++ /dev/null @@ -1,286 +0,0 @@ -/* - {4} ({1} and []{1}) - -------------------------------------------------- -*/ - -// {4} gets the value as a {1}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) {4}(optionalDefault ...{1}) {1} { - if s, ok := v.data.({1}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return {3} -} - -// Must{4} gets the value as a {1}. -// -// Panics if the object is not a {1}. -func (v *Value) Must{4}() {1} { - return v.data.({1}) -} - -// {4}Slice gets the value as a []{1}, returns the optionalDefault -// value or nil if the value is not a []{1}. -func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} { - if s, ok := v.data.([]{1}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// Must{4}Slice gets the value as a []{1}. -// -// Panics if the object is not a []{1}. -func (v *Value) Must{4}Slice() []{1} { - return v.data.([]{1}) -} - -// Is{4} gets whether the object contained is a {1} or not. -func (v *Value) Is{4}() bool { - _, ok := v.data.({1}) - return ok -} - -// Is{4}Slice gets whether the object contained is a []{1} or not. -func (v *Value) Is{4}Slice() bool { - _, ok := v.data.([]{1}) - return ok -} - -// Each{4} calls the specified callback for each object -// in the []{1}. -// -// Panics if the object is the wrong type. 
-func (v *Value) Each{4}(callback func(int, {1}) bool) *Value { - - for index, val := range v.Must{4}Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// Where{4} uses the specified decider function to select items -// from the []{1}. The object contained in the result will contain -// only the selected items. -func (v *Value) Where{4}(decider func(int, {1}) bool) *Value { - - var selected []{1} - - v.Each{4}(func(index int, val {1}) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data:selected} - -} - -// Group{4} uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]{1}. -func (v *Value) Group{4}(grouper func(int, {1}) string) *Value { - - groups := make(map[string][]{1}) - - v.Each{4}(func(index int, val {1}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]{1}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data:groups} - -} - -// Replace{4} uses the specified function to replace each {1}s -// by iterating each item. The data in the returned result will be a -// []{1} containing the replaced items. -func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value { - - arr := v.Must{4}Slice() - replaced := make([]{1}, len(arr)) - - v.Each{4}(func(index int, val {1}) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data:replaced} - -} - -// Collect{4} uses the specified collector function to collect a value -// for each of the {1}s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value { - - arr := v.Must{4}Slice() - collected := make([]interface{}, len(arr)) - - v.Each{4}(func(index int, val {1}) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data:collected} -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func Test{4}(t *testing.T) { - - val := {1}( {2} ) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").{4}()) - assert.Equal(t, val, New(m).Get("value").Must{4}()) - assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}()) - assert.Equal(t, val, New(m).Get("nothing").{4}({2})) - - assert.Panics(t, func() { - New(m).Get("age").Must{4}() - }) - -} - -func Test{4}Slice(t *testing.T) { - - val := {1}( {2} ) - m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").{4}Slice()[0]) - assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0]) - assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice()) - assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").Must{4}Slice() - }) - -} - -func TestIs{4}(t *testing.T) { - - var v *Value - - v = &Value{data: {1}({2})} - assert.True(t, v.Is{4}()) - - v = &Value{data: []{1}{ {1}({2}) }} - assert.True(t, v.Is{4}Slice()) - -} - -func TestEach{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - count := 0 - replacedVals := make([]{1}, 0) - assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0]) - assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1]) - 
assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2]) - -} - -func TestWhere{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - selected := v.Where{4}(func(i int, val {1}) bool { - return i%2==0 - }).Must{4}Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroup{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - grouped := v.Group{4}(func(i int, val {1}) string { - return fmt.Sprintf("%v", i%2==0) - }).data.(map[string][]{1}) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplace{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - rawArr := v.Must{4}Slice() - - replaced := v.Replace{4}(func(index int, val {1}) {1} { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.Must{4}Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollect{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - collected := v.Collect{4}(func(index int, val {1}) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt 
b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt deleted file mode 100644 index 069d43d8e..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt +++ /dev/null @@ -1,20 +0,0 @@ -Interface,interface{},"something",nil,Inter -Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI -ObjxMap,(Map),New(1),New(nil),ObjxMap -Bool,bool,true,false,Bool -String,string,"hello","",Str -Int,int,1,0,Int -Int8,int8,1,0,Int8 -Int16,int16,1,0,Int16 -Int32,int32,1,0,Int32 -Int64,int64,1,0,Int64 -Uint,uint,1,0,Uint -Uint8,uint8,1,0,Uint8 -Uint16,uint16,1,0,Uint16 -Uint32,uint32,1,0,Uint32 -Uint64,uint64,1,0,Uint64 -Uintptr,uintptr,1,0,Uintptr -Float32,float32,1,0,Float32 -Float64,float64,1,0,Float64 -Complex64,complex64,1,0,Complex64 -Complex128,complex128,1,0,Complex128 diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/constants.go b/Godeps/_workspace/src/github.com/stretchr/objx/constants.go deleted file mode 100644 index f9eb42a25..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/constants.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -const ( - // PathSeparator is the character used to separate the elements - // of the keypath. - // - // For example, `location.address.city` - PathSeparator string = "." - - // SignatureSeparator is the character that is used to - // separate the Base64 string from the security signature. 
- SignatureSeparator = "_" -) diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go b/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go deleted file mode 100644 index 9cdfa9f9f..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go +++ /dev/null @@ -1,117 +0,0 @@ -package objx - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/url" -) - -// JSON converts the contained object to a JSON string -// representation -func (m Map) JSON() (string, error) { - - result, err := json.Marshal(m) - - if err != nil { - err = errors.New("objx: JSON encode failed with: " + err.Error()) - } - - return string(result), err - -} - -// MustJSON converts the contained object to a JSON string -// representation and panics if there is an error -func (m Map) MustJSON() string { - result, err := m.JSON() - if err != nil { - panic(err.Error()) - } - return result -} - -// Base64 converts the contained object to a Base64 string -// representation of the JSON string representation -func (m Map) Base64() (string, error) { - - var buf bytes.Buffer - - jsonData, err := m.JSON() - if err != nil { - return "", err - } - - encoder := base64.NewEncoder(base64.StdEncoding, &buf) - encoder.Write([]byte(jsonData)) - encoder.Close() - - return buf.String(), nil - -} - -// MustBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and panics -// if there is an error -func (m Map) MustBase64() string { - result, err := m.Base64() - if err != nil { - panic(err.Error()) - } - return result -} - -// SignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key. 
-func (m Map) SignedBase64(key string) (string, error) { - - base64, err := m.Base64() - if err != nil { - return "", err - } - - sig := HashWithKey(base64, key) - - return base64 + SignatureSeparator + sig, nil - -} - -// MustSignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key and panics if there is an error -func (m Map) MustSignedBase64(key string) string { - result, err := m.SignedBase64(key) - if err != nil { - panic(err.Error()) - } - return result -} - -/* - URL Query - ------------------------------------------------ -*/ - -// URLValues creates a url.Values object from an Obj. This -// function requires that the wrapped object be a map[string]interface{} -func (m Map) URLValues() url.Values { - - vals := make(url.Values) - - for k, v := range m { - //TODO: can this be done without sprintf? - vals.Set(k, fmt.Sprintf("%v", v)) - } - - return vals -} - -// URLQuery gets an encoded URL query representing the given -// Obj. 
This function requires that the wrapped object be a -// map[string]interface{} -func (m Map) URLQuery() (string, error) { - return m.URLValues().Encode(), nil -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go deleted file mode 100644 index e9ccd2987..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestConversionJSON(t *testing.T) { - - jsonString := `{"name":"Mat"}` - o := MustFromJSON(jsonString) - - result, err := o.JSON() - - if assert.NoError(t, err) { - assert.Equal(t, jsonString, result) - } - - assert.Equal(t, jsonString, o.MustJSON()) - -} - -func TestConversionJSONWithError(t *testing.T) { - - o := MSI() - o["test"] = func() {} - - assert.Panics(t, func() { - o.MustJSON() - }) - - _, err := o.JSON() - - assert.Error(t, err) - -} - -func TestConversionBase64(t *testing.T) { - - o := New(map[string]interface{}{"name": "Mat"}) - - result, err := o.Base64() - - if assert.NoError(t, err) { - assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result) - } - - assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", o.MustBase64()) - -} - -func TestConversionBase64WithError(t *testing.T) { - - o := MSI() - o["test"] = func() {} - - assert.Panics(t, func() { - o.MustBase64() - }) - - _, err := o.Base64() - - assert.Error(t, err) - -} - -func TestConversionSignedBase64(t *testing.T) { - - o := New(map[string]interface{}{"name": "Mat"}) - - result, err := o.SignedBase64("key") - - if assert.NoError(t, err) { - assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result) - } - - assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", o.MustSignedBase64("key")) - -} - -func TestConversionSignedBase64WithError(t *testing.T) { - - o := MSI() - o["test"] = func() {} - - assert.Panics(t, func() { 
- o.MustSignedBase64("key") - }) - - _, err := o.SignedBase64("key") - - assert.Error(t, err) - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/doc.go b/Godeps/_workspace/src/github.com/stretchr/objx/doc.go deleted file mode 100644 index 47bf85e46..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/doc.go +++ /dev/null @@ -1,72 +0,0 @@ -// objx - Go package for dealing with maps, slices, JSON and other data. -// -// Overview -// -// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes -// a powerful `Get` method (among others) that allows you to easily and quickly get -// access to data within the map, without having to worry too much about type assertions, -// missing data, default values etc. -// -// Pattern -// -// Objx uses a preditable pattern to make access data from within `map[string]interface{}'s -// easy. -// -// Call one of the `objx.` functions to create your `objx.Map` to get going: -// -// m, err := objx.FromJSON(json) -// -// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, -// the rest will be optimistic and try to figure things out without panicking. -// -// Use `Get` to access the value you're interested in. You can use dot and array -// notation too: -// -// m.Get("places[0].latlng") -// -// Once you have saught the `Value` you're interested in, you can use the `Is*` methods -// to determine its type. -// -// if m.Get("code").IsStr() { /* ... */ } -// -// Or you can just assume the type, and use one of the strong type methods to -// extract the real value: -// -// m.Get("code").Int() -// -// If there's no value there (or if it's the wrong type) then a default value -// will be returned, or you can be explicit about the default value. -// -// Get("code").Int(-1) -// -// If you're dealing with a slice of data as a value, Objx provides many useful -// methods for iterating, manipulating and selecting that data. 
You can find out more -// by exploring the index below. -// -// Reading data -// -// A simple example of how to use Objx: -// -// // use MustFromJSON to make an objx.Map from some JSON -// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) -// -// // get the details -// name := m.Get("name").Str() -// age := m.Get("age").Int() -// -// // get their nickname (or use their name if they -// // don't have one) -// nickname := m.Get("nickname").Str(name) -// -// Ranging -// -// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For -// example, to `range` the data, do what you would expect: -// -// m := objx.MustFromJSON(json) -// for key, value := range m { -// -// /* ... do your magic ... */ -// -// } -package objx diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go deleted file mode 100644 index 27f7d9049..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -var fixtures = []struct { - // name is the name of the fixture (used for reporting - // failures) - name string - // data is the JSON data to be worked on - data string - // get is the argument(s) to pass to Get - get interface{} - // output is the expected output - output interface{} -}{ - { - name: "Simple get", - data: `{"name": "Mat"}`, - get: "name", - output: "Mat", - }, - { - name: "Get with dot notation", - data: `{"address": {"city": "Boulder"}}`, - get: "address.city", - output: "Boulder", - }, - { - name: "Deep get with dot notation", - data: `{"one": {"two": {"three": {"four": "hello"}}}}`, - get: "one.two.three.four", - output: "hello", - }, - { - name: "Get missing with dot notation", - data: `{"one": {"two": {"three": {"four": "hello"}}}}`, - get: "one.ten", - output: nil, - }, - { - name: "Get with array notation", - data: `{"tags": ["one", "two", 
"three"]}`, - get: "tags[1]", - output: "two", - }, - { - name: "Get with array and dot notation", - data: `{"types": { "tags": ["one", "two", "three"]}}`, - get: "types.tags[1]", - output: "two", - }, - { - name: "Get with array and dot notation - field after array", - data: `{"tags": [{"name":"one"}, {"name":"two"}, {"name":"three"}]}`, - get: "tags[1].name", - output: "two", - }, - { - name: "Complex get with array and dot notation", - data: `{"tags": [{"list": [{"one":"pizza"}]}]}`, - get: "tags[0].list[0].one", - output: "pizza", - }, - { - name: "Get field from within string should be nil", - data: `{"name":"Tyler"}`, - get: "name.something", - output: nil, - }, - { - name: "Get field from within string (using array accessor) should be nil", - data: `{"numbers":["one", "two", "three"]}`, - get: "numbers[0].nope", - output: nil, - }, -} - -func TestFixtures(t *testing.T) { - - for _, fixture := range fixtures { - - m := MustFromJSON(fixture.data) - - // get the value - t.Logf("Running get fixture: \"%s\" (%v)", fixture.name, fixture) - value := m.Get(fixture.get.(string)) - - // make sure it matches - assert.Equal(t, fixture.output, value.data, - "Get fixture \"%s\" failed: %v", fixture.name, fixture, - ) - - } - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map.go b/Godeps/_workspace/src/github.com/stretchr/objx/map.go deleted file mode 100644 index eb6ed8e28..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/map.go +++ /dev/null @@ -1,222 +0,0 @@ -package objx - -import ( - "encoding/base64" - "encoding/json" - "errors" - "io/ioutil" - "net/url" - "strings" -) - -// MSIConvertable is an interface that defines methods for converting your -// custom types to a map[string]interface{} representation. -type MSIConvertable interface { - // MSI gets a map[string]interface{} (msi) representing the - // object. 
- MSI() map[string]interface{} -} - -// Map provides extended functionality for working with -// untyped data, in particular map[string]interface (msi). -type Map map[string]interface{} - -// Value returns the internal value instance -func (m Map) Value() *Value { - return &Value{data: m} -} - -// Nil represents a nil Map. -var Nil Map = New(nil) - -// New creates a new Map containing the map[string]interface{} in the data argument. -// If the data argument is not a map[string]interface, New attempts to call the -// MSI() method on the MSIConvertable interface to create one. -func New(data interface{}) Map { - if _, ok := data.(map[string]interface{}); !ok { - if converter, ok := data.(MSIConvertable); ok { - data = converter.MSI() - } else { - return nil - } - } - return Map(data.(map[string]interface{})) -} - -// MSI creates a map[string]interface{} and puts it inside a new Map. -// -// The arguments follow a key, value pattern. -// -// Panics -// -// Panics if any key arugment is non-string or if there are an odd number of arguments. -// -// Example -// -// To easily create Maps: -// -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) -// -// // creates an Map equivalent to -// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) -func MSI(keyAndValuePairs ...interface{}) Map { - - newMap := make(map[string]interface{}) - keyAndValuePairsLen := len(keyAndValuePairs) - - if keyAndValuePairsLen%2 != 0 { - panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") - } - - for i := 0; i < keyAndValuePairsLen; i = i + 2 { - - key := keyAndValuePairs[i] - value := keyAndValuePairs[i+1] - - // make sure the key is a string - keyString, keyStringOK := key.(string) - if !keyStringOK { - panic("objx: MSI must follow 'string, interface{}' pattern. 
" + keyString + " is not a valid key.") - } - - newMap[keyString] = value - - } - - return New(newMap) -} - -// ****** Conversion Constructors - -// MustFromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Panics if the JSON is invalid. -func MustFromJSON(jsonString string) Map { - o, err := FromJSON(jsonString) - - if err != nil { - panic("objx: MustFromJSON failed with error: " + err.Error()) - } - - return o -} - -// FromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Returns an error if the JSON is invalid. -func FromJSON(jsonString string) (Map, error) { - - var data interface{} - err := json.Unmarshal([]byte(jsonString), &data) - - if err != nil { - return Nil, err - } - - return New(data), nil - -} - -// FromBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by Base64 -func FromBase64(base64String string) (Map, error) { - - decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) - - decoded, err := ioutil.ReadAll(decoder) - if err != nil { - return nil, err - } - - return FromJSON(string(decoded)) -} - -// MustFromBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. -// -// The string is an encoded JSON string returned by Base64 -func MustFromBase64(base64String string) Map { - - result, err := FromBase64(base64String) - - if err != nil { - panic("objx: MustFromBase64 failed with error: " + err.Error()) - } - - return result -} - -// FromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string. 
-// -// The string is an encoded JSON string returned by SignedBase64 -func FromSignedBase64(base64String, key string) (Map, error) { - parts := strings.Split(base64String, SignatureSeparator) - if len(parts) != 2 { - return nil, errors.New("objx: Signed base64 string is malformed.") - } - - sig := HashWithKey(parts[0], key) - if parts[1] != sig { - return nil, errors.New("objx: Signature for base64 data does not match.") - } - - return FromBase64(parts[0]) -} - -// MustFromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. -// -// The string is an encoded JSON string returned by Base64 -func MustFromSignedBase64(base64String, key string) Map { - - result, err := FromSignedBase64(base64String, key) - - if err != nil { - panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) - } - - return result -} - -// FromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. -func FromURLQuery(query string) (Map, error) { - - vals, err := url.ParseQuery(query) - - if err != nil { - return nil, err - } - - m := make(map[string]interface{}) - for k, vals := range vals { - m[k] = vals[0] - } - - return New(m), nil -} - -// MustFromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. 
-// -// Panics if it encounters an error -func MustFromURLQuery(query string) Map { - - o, err := FromURLQuery(query) - - if err != nil { - panic("objx: MustFromURLQuery failed with error: " + err.Error()) - } - - return o - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go deleted file mode 100644 index 6beb50675..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go +++ /dev/null @@ -1,10 +0,0 @@ -package objx - -var TestMap map[string]interface{} = map[string]interface{}{ - "name": "Tyler", - "address": map[string]interface{}{ - "city": "Salt Lake City", - "state": "UT", - }, - "numbers": []interface{}{"one", "two", "three", "four", "five"}, -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go deleted file mode 100644 index 1f8b45c61..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -type Convertable struct { - name string -} - -func (c *Convertable) MSI() map[string]interface{} { - return map[string]interface{}{"name": c.name} -} - -type Unconvertable struct { - name string -} - -func TestMapCreation(t *testing.T) { - - o := New(nil) - assert.Nil(t, o) - - o = New("Tyler") - assert.Nil(t, o) - - unconvertable := &Unconvertable{name: "Tyler"} - o = New(unconvertable) - assert.Nil(t, o) - - convertable := &Convertable{name: "Tyler"} - o = New(convertable) - if assert.NotNil(t, convertable) { - assert.Equal(t, "Tyler", o["name"], "Tyler") - } - - o = MSI() - if assert.NotNil(t, o) { - assert.NotNil(t, o) - } - - o = MSI("name", "Tyler") - if assert.NotNil(t, o) { - if assert.NotNil(t, o) { - assert.Equal(t, o["name"], "Tyler") - } - } - -} - -func TestMapMustFromJSONWithError(t *testing.T) { - - _, err := 
FromJSON(`"name":"Mat"}`) - assert.Error(t, err) - -} - -func TestMapFromJSON(t *testing.T) { - - o := MustFromJSON(`{"name":"Mat"}`) - - if assert.NotNil(t, o) { - if assert.NotNil(t, o) { - assert.Equal(t, "Mat", o["name"]) - } - } - -} - -func TestMapFromJSONWithError(t *testing.T) { - - var m Map - - assert.Panics(t, func() { - m = MustFromJSON(`"name":"Mat"}`) - }) - - assert.Nil(t, m) - -} - -func TestMapFromBase64String(t *testing.T) { - - base64String := "eyJuYW1lIjoiTWF0In0=" - - o, err := FromBase64(base64String) - - if assert.NoError(t, err) { - assert.Equal(t, o.Get("name").Str(), "Mat") - } - - assert.Equal(t, MustFromBase64(base64String).Get("name").Str(), "Mat") - -} - -func TestMapFromBase64StringWithError(t *testing.T) { - - base64String := "eyJuYW1lIjoiTWFasd0In0=" - - _, err := FromBase64(base64String) - - assert.Error(t, err) - - assert.Panics(t, func() { - MustFromBase64(base64String) - }) - -} - -func TestMapFromSignedBase64String(t *testing.T) { - - base64String := "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" - - o, err := FromSignedBase64(base64String, "key") - - if assert.NoError(t, err) { - assert.Equal(t, o.Get("name").Str(), "Mat") - } - - assert.Equal(t, MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat") - -} - -func TestMapFromSignedBase64StringWithError(t *testing.T) { - - base64String := "eyJuYW1lasdIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" - - _, err := FromSignedBase64(base64String, "key") - - assert.Error(t, err) - - assert.Panics(t, func() { - MustFromSignedBase64(base64String, "key") - }) - -} - -func TestMapFromURLQuery(t *testing.T) { - - m, err := FromURLQuery("name=tyler&state=UT") - if assert.NoError(t, err) && assert.NotNil(t, m) { - assert.Equal(t, "tyler", m.Get("name").Str()) - assert.Equal(t, "UT", m.Get("state").Str()) - } - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go b/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go deleted 
file mode 100644 index b35c86392..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go +++ /dev/null @@ -1,81 +0,0 @@ -package objx - -// Exclude returns a new Map with the keys in the specified []string -// excluded. -func (d Map) Exclude(exclude []string) Map { - - excluded := make(Map) - for k, v := range d { - var shouldInclude bool = true - for _, toExclude := range exclude { - if k == toExclude { - shouldInclude = false - break - } - } - if shouldInclude { - excluded[k] = v - } - } - - return excluded -} - -// Copy creates a shallow copy of the Obj. -func (m Map) Copy() Map { - copied := make(map[string]interface{}) - for k, v := range m { - copied[k] = v - } - return New(copied) -} - -// Merge blends the specified map with a copy of this map and returns the result. -// -// Keys that appear in both will be selected from the specified map. -// This method requires that the wrapped object be a map[string]interface{} -func (m Map) Merge(merge Map) Map { - return m.Copy().MergeHere(merge) -} - -// Merge blends the specified map with this map and returns the current map. -// -// Keys that appear in both will be selected from the specified map. The original map -// will be modified. This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) MergeHere(merge Map) Map { - - for k, v := range merge { - m[k] = v - } - - return m - -} - -// Transform builds a new Obj giving the transformer a chance -// to change the keys and values as it goes. This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { - newMap := make(map[string]interface{}) - for k, v := range m { - modifiedKey, modifiedVal := transformer(k, v) - newMap[modifiedKey] = modifiedVal - } - return New(newMap) -} - -// TransformKeys builds a new map using the specified key mapping. -// -// Unspecified keys will be unaltered. 
-// This method requires that the wrapped object be a map[string]interface{} -func (m Map) TransformKeys(mapping map[string]string) Map { - return m.Transform(func(key string, value interface{}) (string, interface{}) { - - if newKey, ok := mapping[key]; ok { - return newKey, value - } - - return key, value - }) -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go deleted file mode 100644 index e20ee23bc..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestExclude(t *testing.T) { - - d := make(Map) - d["name"] = "Mat" - d["age"] = 29 - d["secret"] = "ABC" - - excluded := d.Exclude([]string{"secret"}) - - assert.Equal(t, d["name"], excluded["name"]) - assert.Equal(t, d["age"], excluded["age"]) - assert.False(t, excluded.Has("secret"), "secret should be excluded") - -} - -func TestCopy(t *testing.T) { - - d1 := make(map[string]interface{}) - d1["name"] = "Tyler" - d1["location"] = "UT" - - d1Obj := New(d1) - d2Obj := d1Obj.Copy() - - d2Obj["name"] = "Mat" - - assert.Equal(t, d1Obj.Get("name").Str(), "Tyler") - assert.Equal(t, d2Obj.Get("name").Str(), "Mat") - -} - -func TestMerge(t *testing.T) { - - d := make(map[string]interface{}) - d["name"] = "Mat" - - d1 := make(map[string]interface{}) - d1["name"] = "Tyler" - d1["location"] = "UT" - - dObj := New(d) - d1Obj := New(d1) - - merged := dObj.Merge(d1Obj) - - assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) - assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) - assert.Empty(t, dObj.Get("location").Str()) - -} - -func TestMergeHere(t *testing.T) { - - d := make(map[string]interface{}) - d["name"] = "Mat" - - d1 := make(map[string]interface{}) - d1["name"] = "Tyler" - d1["location"] = "UT" - - dObj := New(d) - d1Obj := New(d1) - - 
merged := dObj.MergeHere(d1Obj) - - assert.Equal(t, dObj, merged, "With MergeHere, it should return the first modified map") - assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) - assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) - assert.Equal(t, merged.Get("location").Str(), dObj.Get("location").Str()) -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/security.go b/Godeps/_workspace/src/github.com/stretchr/objx/security.go deleted file mode 100644 index fdd6be9cf..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/security.go +++ /dev/null @@ -1,14 +0,0 @@ -package objx - -import ( - "crypto/sha1" - "encoding/hex" -) - -// HashWithKey hashes the specified string using the security -// key. -func HashWithKey(data, key string) string { - hash := sha1.New() - hash.Write([]byte(data + ":" + key)) - return hex.EncodeToString(hash.Sum(nil)) -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go deleted file mode 100644 index 8f0898f62..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestHashWithKey(t *testing.T) { - - assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", HashWithKey("abc", "def")) - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go deleted file mode 100644 index 5408c7fd3..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestSimpleExample(t *testing.T) { - - // build a map from a JSON object - o := MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], 
"location":{"county":"hobbiton","city":"the shire"}}`) - - // Map can be used as a straight map[string]interface{} - assert.Equal(t, o["name"], "Mat") - - // Get an Value object - v := o.Get("name") - assert.Equal(t, v, &Value{data: "Mat"}) - - // Test the contained value - assert.False(t, v.IsInt()) - assert.False(t, v.IsBool()) - assert.True(t, v.IsStr()) - - // Get the contained value - assert.Equal(t, v.Str(), "Mat") - - // Get a default value if the contained value is not of the expected type or does not exist - assert.Equal(t, 1, v.Int(1)) - - // Get a value by using array notation - assert.Equal(t, "indian", o.Get("foods[0]").Data()) - - // Set a value by using array notation - o.Set("foods[0]", "italian") - assert.Equal(t, "italian", o.Get("foods[0]").Str()) - - // Get a value by using dot notation - assert.Equal(t, "hobbiton", o.Get("location.county").Str()) - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/tests.go b/Godeps/_workspace/src/github.com/stretchr/objx/tests.go deleted file mode 100644 index d9e0b479a..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/tests.go +++ /dev/null @@ -1,17 +0,0 @@ -package objx - -// Has gets whether there is something at the specified selector -// or not. -// -// If m is nil, Has will always return false. -func (m Map) Has(selector string) bool { - if m == nil { - return false - } - return !m.Get(selector).IsNil() -} - -// IsNil gets whether the data is nil or not. 
-func (v *Value) IsNil() bool { - return v == nil || v.data == nil -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go deleted file mode 100644 index bcc1eb03d..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestHas(t *testing.T) { - - m := New(TestMap) - - assert.True(t, m.Has("name")) - assert.True(t, m.Has("address.state")) - assert.True(t, m.Has("numbers[4]")) - - assert.False(t, m.Has("address.state.nope")) - assert.False(t, m.Has("address.nope")) - assert.False(t, m.Has("nope")) - assert.False(t, m.Has("numbers[5]")) - - m = nil - assert.False(t, m.Has("nothing")) - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go deleted file mode 100644 index f3ecb29b9..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go +++ /dev/null @@ -1,2881 +0,0 @@ -package objx - -/* - Inter (interface{} and []interface{}) - -------------------------------------------------- -*/ - -// Inter gets the value as a interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Inter(optionalDefault ...interface{}) interface{} { - if s, ok := v.data.(interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInter gets the value as a interface{}. -// -// Panics if the object is not a interface{}. -func (v *Value) MustInter() interface{} { - return v.data.(interface{}) -} - -// InterSlice gets the value as a []interface{}, returns the optionalDefault -// value or nil if the value is not a []interface{}. 
-func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { - if s, ok := v.data.([]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInterSlice gets the value as a []interface{}. -// -// Panics if the object is not a []interface{}. -func (v *Value) MustInterSlice() []interface{} { - return v.data.([]interface{}) -} - -// IsInter gets whether the object contained is a interface{} or not. -func (v *Value) IsInter() bool { - _, ok := v.data.(interface{}) - return ok -} - -// IsInterSlice gets whether the object contained is a []interface{} or not. -func (v *Value) IsInterSlice() bool { - _, ok := v.data.([]interface{}) - return ok -} - -// EachInter calls the specified callback for each object -// in the []interface{}. -// -// Panics if the object is the wrong type. -func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { - - for index, val := range v.MustInterSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInter uses the specified decider function to select items -// from the []interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { - - var selected []interface{} - - v.EachInter(func(index int, val interface{}) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInter uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]interface{}. 
-func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { - - groups := make(map[string][]interface{}) - - v.EachInter(func(index int, val interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInter uses the specified function to replace each interface{}s -// by iterating each item. The data in the returned result will be a -// []interface{} containing the replaced items. -func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { - - arr := v.MustInterSlice() - replaced := make([]interface{}, len(arr)) - - v.EachInter(func(index int, val interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInter uses the specified collector function to collect a value -// for each of the interface{}s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { - - arr := v.MustInterSlice() - collected := make([]interface{}, len(arr)) - - v.EachInter(func(index int, val interface{}) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - MSI (map[string]interface{} and []map[string]interface{}) - -------------------------------------------------- -*/ - -// MSI gets the value as a map[string]interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { - if s, ok := v.data.(map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSI gets the value as a map[string]interface{}. 
-// -// Panics if the object is not a map[string]interface{}. -func (v *Value) MustMSI() map[string]interface{} { - return v.data.(map[string]interface{}) -} - -// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault -// value or nil if the value is not a []map[string]interface{}. -func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { - if s, ok := v.data.([]map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSISlice gets the value as a []map[string]interface{}. -// -// Panics if the object is not a []map[string]interface{}. -func (v *Value) MustMSISlice() []map[string]interface{} { - return v.data.([]map[string]interface{}) -} - -// IsMSI gets whether the object contained is a map[string]interface{} or not. -func (v *Value) IsMSI() bool { - _, ok := v.data.(map[string]interface{}) - return ok -} - -// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. -func (v *Value) IsMSISlice() bool { - _, ok := v.data.([]map[string]interface{}) - return ok -} - -// EachMSI calls the specified callback for each object -// in the []map[string]interface{}. -// -// Panics if the object is the wrong type. -func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { - - for index, val := range v.MustMSISlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereMSI uses the specified decider function to select items -// from the []map[string]interface{}. The object contained in the result will contain -// only the selected items. 
-func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { - - var selected []map[string]interface{} - - v.EachMSI(func(index int, val map[string]interface{}) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupMSI uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]map[string]interface{}. -func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { - - groups := make(map[string][]map[string]interface{}) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]map[string]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceMSI uses the specified function to replace each map[string]interface{}s -// by iterating each item. The data in the returned result will be a -// []map[string]interface{} containing the replaced items. -func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { - - arr := v.MustMSISlice() - replaced := make([]map[string]interface{}, len(arr)) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectMSI uses the specified collector function to collect a value -// for each of the map[string]interface{}s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { - - arr := v.MustMSISlice() - collected := make([]interface{}, len(arr)) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - ObjxMap ((Map) and [](Map)) - -------------------------------------------------- -*/ - -// ObjxMap gets the value as a (Map), returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { - if s, ok := v.data.((Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return New(nil) -} - -// MustObjxMap gets the value as a (Map). -// -// Panics if the object is not a (Map). -func (v *Value) MustObjxMap() Map { - return v.data.((Map)) -} - -// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault -// value or nil if the value is not a [](Map). -func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { - if s, ok := v.data.([](Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustObjxMapSlice gets the value as a [](Map). -// -// Panics if the object is not a [](Map). -func (v *Value) MustObjxMapSlice() [](Map) { - return v.data.([](Map)) -} - -// IsObjxMap gets whether the object contained is a (Map) or not. -func (v *Value) IsObjxMap() bool { - _, ok := v.data.((Map)) - return ok -} - -// IsObjxMapSlice gets whether the object contained is a [](Map) or not. -func (v *Value) IsObjxMapSlice() bool { - _, ok := v.data.([](Map)) - return ok -} - -// EachObjxMap calls the specified callback for each object -// in the [](Map). -// -// Panics if the object is the wrong type. 
-func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { - - for index, val := range v.MustObjxMapSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereObjxMap uses the specified decider function to select items -// from the [](Map). The object contained in the result will contain -// only the selected items. -func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { - - var selected [](Map) - - v.EachObjxMap(func(index int, val Map) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupObjxMap uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][](Map). -func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { - - groups := make(map[string][](Map)) - - v.EachObjxMap(func(index int, val Map) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([](Map), 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceObjxMap uses the specified function to replace each (Map)s -// by iterating each item. The data in the returned result will be a -// [](Map) containing the replaced items. -func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { - - arr := v.MustObjxMapSlice() - replaced := make([](Map), len(arr)) - - v.EachObjxMap(func(index int, val Map) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectObjxMap uses the specified collector function to collect a value -// for each of the (Map)s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { - - arr := v.MustObjxMapSlice() - collected := make([]interface{}, len(arr)) - - v.EachObjxMap(func(index int, val Map) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Bool (bool and []bool) - -------------------------------------------------- -*/ - -// Bool gets the value as a bool, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Bool(optionalDefault ...bool) bool { - if s, ok := v.data.(bool); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return false -} - -// MustBool gets the value as a bool. -// -// Panics if the object is not a bool. -func (v *Value) MustBool() bool { - return v.data.(bool) -} - -// BoolSlice gets the value as a []bool, returns the optionalDefault -// value or nil if the value is not a []bool. -func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { - if s, ok := v.data.([]bool); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustBoolSlice gets the value as a []bool. -// -// Panics if the object is not a []bool. -func (v *Value) MustBoolSlice() []bool { - return v.data.([]bool) -} - -// IsBool gets whether the object contained is a bool or not. -func (v *Value) IsBool() bool { - _, ok := v.data.(bool) - return ok -} - -// IsBoolSlice gets whether the object contained is a []bool or not. -func (v *Value) IsBoolSlice() bool { - _, ok := v.data.([]bool) - return ok -} - -// EachBool calls the specified callback for each object -// in the []bool. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachBool(callback func(int, bool) bool) *Value { - - for index, val := range v.MustBoolSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereBool uses the specified decider function to select items -// from the []bool. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereBool(decider func(int, bool) bool) *Value { - - var selected []bool - - v.EachBool(func(index int, val bool) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupBool uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]bool. -func (v *Value) GroupBool(grouper func(int, bool) string) *Value { - - groups := make(map[string][]bool) - - v.EachBool(func(index int, val bool) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]bool, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceBool uses the specified function to replace each bools -// by iterating each item. The data in the returned result will be a -// []bool containing the replaced items. -func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { - - arr := v.MustBoolSlice() - replaced := make([]bool, len(arr)) - - v.EachBool(func(index int, val bool) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectBool uses the specified collector function to collect a value -// for each of the bools in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { - - arr := v.MustBoolSlice() - collected := make([]interface{}, len(arr)) - - v.EachBool(func(index int, val bool) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Str (string and []string) - -------------------------------------------------- -*/ - -// Str gets the value as a string, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Str(optionalDefault ...string) string { - if s, ok := v.data.(string); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return "" -} - -// MustStr gets the value as a string. -// -// Panics if the object is not a string. -func (v *Value) MustStr() string { - return v.data.(string) -} - -// StrSlice gets the value as a []string, returns the optionalDefault -// value or nil if the value is not a []string. -func (v *Value) StrSlice(optionalDefault ...[]string) []string { - if s, ok := v.data.([]string); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustStrSlice gets the value as a []string. -// -// Panics if the object is not a []string. -func (v *Value) MustStrSlice() []string { - return v.data.([]string) -} - -// IsStr gets whether the object contained is a string or not. -func (v *Value) IsStr() bool { - _, ok := v.data.(string) - return ok -} - -// IsStrSlice gets whether the object contained is a []string or not. -func (v *Value) IsStrSlice() bool { - _, ok := v.data.([]string) - return ok -} - -// EachStr calls the specified callback for each object -// in the []string. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachStr(callback func(int, string) bool) *Value { - - for index, val := range v.MustStrSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereStr uses the specified decider function to select items -// from the []string. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereStr(decider func(int, string) bool) *Value { - - var selected []string - - v.EachStr(func(index int, val string) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupStr uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]string. -func (v *Value) GroupStr(grouper func(int, string) string) *Value { - - groups := make(map[string][]string) - - v.EachStr(func(index int, val string) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]string, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceStr uses the specified function to replace each strings -// by iterating each item. The data in the returned result will be a -// []string containing the replaced items. -func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { - - arr := v.MustStrSlice() - replaced := make([]string, len(arr)) - - v.EachStr(func(index int, val string) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectStr uses the specified collector function to collect a value -// for each of the strings in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { - - arr := v.MustStrSlice() - collected := make([]interface{}, len(arr)) - - v.EachStr(func(index int, val string) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int (int and []int) - -------------------------------------------------- -*/ - -// Int gets the value as a int, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int(optionalDefault ...int) int { - if s, ok := v.data.(int); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt gets the value as a int. -// -// Panics if the object is not a int. -func (v *Value) MustInt() int { - return v.data.(int) -} - -// IntSlice gets the value as a []int, returns the optionalDefault -// value or nil if the value is not a []int. -func (v *Value) IntSlice(optionalDefault ...[]int) []int { - if s, ok := v.data.([]int); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustIntSlice gets the value as a []int. -// -// Panics if the object is not a []int. -func (v *Value) MustIntSlice() []int { - return v.data.([]int) -} - -// IsInt gets whether the object contained is a int or not. -func (v *Value) IsInt() bool { - _, ok := v.data.(int) - return ok -} - -// IsIntSlice gets whether the object contained is a []int or not. -func (v *Value) IsIntSlice() bool { - _, ok := v.data.([]int) - return ok -} - -// EachInt calls the specified callback for each object -// in the []int. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt(callback func(int, int) bool) *Value { - - for index, val := range v.MustIntSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt uses the specified decider function to select items -// from the []int. 
The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt(decider func(int, int) bool) *Value { - - var selected []int - - v.EachInt(func(index int, val int) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int. -func (v *Value) GroupInt(grouper func(int, int) string) *Value { - - groups := make(map[string][]int) - - v.EachInt(func(index int, val int) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt uses the specified function to replace each ints -// by iterating each item. The data in the returned result will be a -// []int containing the replaced items. -func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { - - arr := v.MustIntSlice() - replaced := make([]int, len(arr)) - - v.EachInt(func(index int, val int) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt uses the specified collector function to collect a value -// for each of the ints in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { - - arr := v.MustIntSlice() - collected := make([]interface{}, len(arr)) - - v.EachInt(func(index int, val int) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int8 (int8 and []int8) - -------------------------------------------------- -*/ - -// Int8 gets the value as a int8, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int8(optionalDefault ...int8) int8 { - if s, ok := v.data.(int8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt8 gets the value as a int8. -// -// Panics if the object is not a int8. -func (v *Value) MustInt8() int8 { - return v.data.(int8) -} - -// Int8Slice gets the value as a []int8, returns the optionalDefault -// value or nil if the value is not a []int8. -func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { - if s, ok := v.data.([]int8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt8Slice gets the value as a []int8. -// -// Panics if the object is not a []int8. -func (v *Value) MustInt8Slice() []int8 { - return v.data.([]int8) -} - -// IsInt8 gets whether the object contained is a int8 or not. -func (v *Value) IsInt8() bool { - _, ok := v.data.(int8) - return ok -} - -// IsInt8Slice gets whether the object contained is a []int8 or not. -func (v *Value) IsInt8Slice() bool { - _, ok := v.data.([]int8) - return ok -} - -// EachInt8 calls the specified callback for each object -// in the []int8. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachInt8(callback func(int, int8) bool) *Value { - - for index, val := range v.MustInt8Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt8 uses the specified decider function to select items -// from the []int8. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { - - var selected []int8 - - v.EachInt8(func(index int, val int8) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt8 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int8. -func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { - - groups := make(map[string][]int8) - - v.EachInt8(func(index int, val int8) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int8, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt8 uses the specified function to replace each int8s -// by iterating each item. The data in the returned result will be a -// []int8 containing the replaced items. -func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { - - arr := v.MustInt8Slice() - replaced := make([]int8, len(arr)) - - v.EachInt8(func(index int, val int8) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt8 uses the specified collector function to collect a value -// for each of the int8s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { - - arr := v.MustInt8Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt8(func(index int, val int8) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int16 (int16 and []int16) - -------------------------------------------------- -*/ - -// Int16 gets the value as a int16, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int16(optionalDefault ...int16) int16 { - if s, ok := v.data.(int16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt16 gets the value as a int16. -// -// Panics if the object is not a int16. -func (v *Value) MustInt16() int16 { - return v.data.(int16) -} - -// Int16Slice gets the value as a []int16, returns the optionalDefault -// value or nil if the value is not a []int16. -func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { - if s, ok := v.data.([]int16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt16Slice gets the value as a []int16. -// -// Panics if the object is not a []int16. -func (v *Value) MustInt16Slice() []int16 { - return v.data.([]int16) -} - -// IsInt16 gets whether the object contained is a int16 or not. -func (v *Value) IsInt16() bool { - _, ok := v.data.(int16) - return ok -} - -// IsInt16Slice gets whether the object contained is a []int16 or not. -func (v *Value) IsInt16Slice() bool { - _, ok := v.data.([]int16) - return ok -} - -// EachInt16 calls the specified callback for each object -// in the []int16. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachInt16(callback func(int, int16) bool) *Value { - - for index, val := range v.MustInt16Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt16 uses the specified decider function to select items -// from the []int16. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { - - var selected []int16 - - v.EachInt16(func(index int, val int16) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt16 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int16. -func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { - - groups := make(map[string][]int16) - - v.EachInt16(func(index int, val int16) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int16, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt16 uses the specified function to replace each int16s -// by iterating each item. The data in the returned result will be a -// []int16 containing the replaced items. -func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { - - arr := v.MustInt16Slice() - replaced := make([]int16, len(arr)) - - v.EachInt16(func(index int, val int16) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt16 uses the specified collector function to collect a value -// for each of the int16s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { - - arr := v.MustInt16Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt16(func(index int, val int16) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int32 (int32 and []int32) - -------------------------------------------------- -*/ - -// Int32 gets the value as a int32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int32(optionalDefault ...int32) int32 { - if s, ok := v.data.(int32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt32 gets the value as a int32. -// -// Panics if the object is not a int32. -func (v *Value) MustInt32() int32 { - return v.data.(int32) -} - -// Int32Slice gets the value as a []int32, returns the optionalDefault -// value or nil if the value is not a []int32. -func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { - if s, ok := v.data.([]int32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt32Slice gets the value as a []int32. -// -// Panics if the object is not a []int32. -func (v *Value) MustInt32Slice() []int32 { - return v.data.([]int32) -} - -// IsInt32 gets whether the object contained is a int32 or not. -func (v *Value) IsInt32() bool { - _, ok := v.data.(int32) - return ok -} - -// IsInt32Slice gets whether the object contained is a []int32 or not. -func (v *Value) IsInt32Slice() bool { - _, ok := v.data.([]int32) - return ok -} - -// EachInt32 calls the specified callback for each object -// in the []int32. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachInt32(callback func(int, int32) bool) *Value { - - for index, val := range v.MustInt32Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt32 uses the specified decider function to select items -// from the []int32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { - - var selected []int32 - - v.EachInt32(func(index int, val int32) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int32. -func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { - - groups := make(map[string][]int32) - - v.EachInt32(func(index int, val int32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt32 uses the specified function to replace each int32s -// by iterating each item. The data in the returned result will be a -// []int32 containing the replaced items. -func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { - - arr := v.MustInt32Slice() - replaced := make([]int32, len(arr)) - - v.EachInt32(func(index int, val int32) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt32 uses the specified collector function to collect a value -// for each of the int32s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { - - arr := v.MustInt32Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt32(func(index int, val int32) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int64 (int64 and []int64) - -------------------------------------------------- -*/ - -// Int64 gets the value as a int64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int64(optionalDefault ...int64) int64 { - if s, ok := v.data.(int64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt64 gets the value as a int64. -// -// Panics if the object is not a int64. -func (v *Value) MustInt64() int64 { - return v.data.(int64) -} - -// Int64Slice gets the value as a []int64, returns the optionalDefault -// value or nil if the value is not a []int64. -func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { - if s, ok := v.data.([]int64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt64Slice gets the value as a []int64. -// -// Panics if the object is not a []int64. -func (v *Value) MustInt64Slice() []int64 { - return v.data.([]int64) -} - -// IsInt64 gets whether the object contained is a int64 or not. -func (v *Value) IsInt64() bool { - _, ok := v.data.(int64) - return ok -} - -// IsInt64Slice gets whether the object contained is a []int64 or not. -func (v *Value) IsInt64Slice() bool { - _, ok := v.data.([]int64) - return ok -} - -// EachInt64 calls the specified callback for each object -// in the []int64. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachInt64(callback func(int, int64) bool) *Value { - - for index, val := range v.MustInt64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt64 uses the specified decider function to select items -// from the []int64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { - - var selected []int64 - - v.EachInt64(func(index int, val int64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int64. -func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { - - groups := make(map[string][]int64) - - v.EachInt64(func(index int, val int64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt64 uses the specified function to replace each int64s -// by iterating each item. The data in the returned result will be a -// []int64 containing the replaced items. -func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { - - arr := v.MustInt64Slice() - replaced := make([]int64, len(arr)) - - v.EachInt64(func(index int, val int64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt64 uses the specified collector function to collect a value -// for each of the int64s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { - - arr := v.MustInt64Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt64(func(index int, val int64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint (uint and []uint) - -------------------------------------------------- -*/ - -// Uint gets the value as a uint, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint(optionalDefault ...uint) uint { - if s, ok := v.data.(uint); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint gets the value as a uint. -// -// Panics if the object is not a uint. -func (v *Value) MustUint() uint { - return v.data.(uint) -} - -// UintSlice gets the value as a []uint, returns the optionalDefault -// value or nil if the value is not a []uint. -func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { - if s, ok := v.data.([]uint); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUintSlice gets the value as a []uint. -// -// Panics if the object is not a []uint. -func (v *Value) MustUintSlice() []uint { - return v.data.([]uint) -} - -// IsUint gets whether the object contained is a uint or not. -func (v *Value) IsUint() bool { - _, ok := v.data.(uint) - return ok -} - -// IsUintSlice gets whether the object contained is a []uint or not. -func (v *Value) IsUintSlice() bool { - _, ok := v.data.([]uint) - return ok -} - -// EachUint calls the specified callback for each object -// in the []uint. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachUint(callback func(int, uint) bool) *Value { - - for index, val := range v.MustUintSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint uses the specified decider function to select items -// from the []uint. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint(decider func(int, uint) bool) *Value { - - var selected []uint - - v.EachUint(func(index int, val uint) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint. -func (v *Value) GroupUint(grouper func(int, uint) string) *Value { - - groups := make(map[string][]uint) - - v.EachUint(func(index int, val uint) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint uses the specified function to replace each uints -// by iterating each item. The data in the returned result will be a -// []uint containing the replaced items. -func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { - - arr := v.MustUintSlice() - replaced := make([]uint, len(arr)) - - v.EachUint(func(index int, val uint) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint uses the specified collector function to collect a value -// for each of the uints in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { - - arr := v.MustUintSlice() - collected := make([]interface{}, len(arr)) - - v.EachUint(func(index int, val uint) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint8 (uint8 and []uint8) - -------------------------------------------------- -*/ - -// Uint8 gets the value as a uint8, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint8(optionalDefault ...uint8) uint8 { - if s, ok := v.data.(uint8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint8 gets the value as a uint8. -// -// Panics if the object is not a uint8. -func (v *Value) MustUint8() uint8 { - return v.data.(uint8) -} - -// Uint8Slice gets the value as a []uint8, returns the optionalDefault -// value or nil if the value is not a []uint8. -func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { - if s, ok := v.data.([]uint8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint8Slice gets the value as a []uint8. -// -// Panics if the object is not a []uint8. -func (v *Value) MustUint8Slice() []uint8 { - return v.data.([]uint8) -} - -// IsUint8 gets whether the object contained is a uint8 or not. -func (v *Value) IsUint8() bool { - _, ok := v.data.(uint8) - return ok -} - -// IsUint8Slice gets whether the object contained is a []uint8 or not. -func (v *Value) IsUint8Slice() bool { - _, ok := v.data.([]uint8) - return ok -} - -// EachUint8 calls the specified callback for each object -// in the []uint8. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { - - for index, val := range v.MustUint8Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint8 uses the specified decider function to select items -// from the []uint8. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { - - var selected []uint8 - - v.EachUint8(func(index int, val uint8) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint8 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint8. -func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { - - groups := make(map[string][]uint8) - - v.EachUint8(func(index int, val uint8) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint8, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint8 uses the specified function to replace each uint8s -// by iterating each item. The data in the returned result will be a -// []uint8 containing the replaced items. -func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { - - arr := v.MustUint8Slice() - replaced := make([]uint8, len(arr)) - - v.EachUint8(func(index int, val uint8) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint8 uses the specified collector function to collect a value -// for each of the uint8s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { - - arr := v.MustUint8Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint8(func(index int, val uint8) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint16 (uint16 and []uint16) - -------------------------------------------------- -*/ - -// Uint16 gets the value as a uint16, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint16(optionalDefault ...uint16) uint16 { - if s, ok := v.data.(uint16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint16 gets the value as a uint16. -// -// Panics if the object is not a uint16. -func (v *Value) MustUint16() uint16 { - return v.data.(uint16) -} - -// Uint16Slice gets the value as a []uint16, returns the optionalDefault -// value or nil if the value is not a []uint16. -func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { - if s, ok := v.data.([]uint16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint16Slice gets the value as a []uint16. -// -// Panics if the object is not a []uint16. -func (v *Value) MustUint16Slice() []uint16 { - return v.data.([]uint16) -} - -// IsUint16 gets whether the object contained is a uint16 or not. -func (v *Value) IsUint16() bool { - _, ok := v.data.(uint16) - return ok -} - -// IsUint16Slice gets whether the object contained is a []uint16 or not. -func (v *Value) IsUint16Slice() bool { - _, ok := v.data.([]uint16) - return ok -} - -// EachUint16 calls the specified callback for each object -// in the []uint16. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { - - for index, val := range v.MustUint16Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint16 uses the specified decider function to select items -// from the []uint16. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { - - var selected []uint16 - - v.EachUint16(func(index int, val uint16) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint16 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint16. -func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { - - groups := make(map[string][]uint16) - - v.EachUint16(func(index int, val uint16) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint16, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint16 uses the specified function to replace each uint16s -// by iterating each item. The data in the returned result will be a -// []uint16 containing the replaced items. -func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { - - arr := v.MustUint16Slice() - replaced := make([]uint16, len(arr)) - - v.EachUint16(func(index int, val uint16) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint16 uses the specified collector function to collect a value -// for each of the uint16s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { - - arr := v.MustUint16Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint16(func(index int, val uint16) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint32 (uint32 and []uint32) - -------------------------------------------------- -*/ - -// Uint32 gets the value as a uint32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint32(optionalDefault ...uint32) uint32 { - if s, ok := v.data.(uint32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint32 gets the value as a uint32. -// -// Panics if the object is not a uint32. -func (v *Value) MustUint32() uint32 { - return v.data.(uint32) -} - -// Uint32Slice gets the value as a []uint32, returns the optionalDefault -// value or nil if the value is not a []uint32. -func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { - if s, ok := v.data.([]uint32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint32Slice gets the value as a []uint32. -// -// Panics if the object is not a []uint32. -func (v *Value) MustUint32Slice() []uint32 { - return v.data.([]uint32) -} - -// IsUint32 gets whether the object contained is a uint32 or not. -func (v *Value) IsUint32() bool { - _, ok := v.data.(uint32) - return ok -} - -// IsUint32Slice gets whether the object contained is a []uint32 or not. -func (v *Value) IsUint32Slice() bool { - _, ok := v.data.([]uint32) - return ok -} - -// EachUint32 calls the specified callback for each object -// in the []uint32. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { - - for index, val := range v.MustUint32Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint32 uses the specified decider function to select items -// from the []uint32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { - - var selected []uint32 - - v.EachUint32(func(index int, val uint32) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint32. -func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { - - groups := make(map[string][]uint32) - - v.EachUint32(func(index int, val uint32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint32 uses the specified function to replace each uint32s -// by iterating each item. The data in the returned result will be a -// []uint32 containing the replaced items. -func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { - - arr := v.MustUint32Slice() - replaced := make([]uint32, len(arr)) - - v.EachUint32(func(index int, val uint32) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint32 uses the specified collector function to collect a value -// for each of the uint32s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { - - arr := v.MustUint32Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint32(func(index int, val uint32) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint64 (uint64 and []uint64) - -------------------------------------------------- -*/ - -// Uint64 gets the value as a uint64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint64(optionalDefault ...uint64) uint64 { - if s, ok := v.data.(uint64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint64 gets the value as a uint64. -// -// Panics if the object is not a uint64. -func (v *Value) MustUint64() uint64 { - return v.data.(uint64) -} - -// Uint64Slice gets the value as a []uint64, returns the optionalDefault -// value or nil if the value is not a []uint64. -func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { - if s, ok := v.data.([]uint64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint64Slice gets the value as a []uint64. -// -// Panics if the object is not a []uint64. -func (v *Value) MustUint64Slice() []uint64 { - return v.data.([]uint64) -} - -// IsUint64 gets whether the object contained is a uint64 or not. -func (v *Value) IsUint64() bool { - _, ok := v.data.(uint64) - return ok -} - -// IsUint64Slice gets whether the object contained is a []uint64 or not. -func (v *Value) IsUint64Slice() bool { - _, ok := v.data.([]uint64) - return ok -} - -// EachUint64 calls the specified callback for each object -// in the []uint64. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { - - for index, val := range v.MustUint64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint64 uses the specified decider function to select items -// from the []uint64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { - - var selected []uint64 - - v.EachUint64(func(index int, val uint64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint64. -func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { - - groups := make(map[string][]uint64) - - v.EachUint64(func(index int, val uint64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint64 uses the specified function to replace each uint64s -// by iterating each item. The data in the returned result will be a -// []uint64 containing the replaced items. -func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { - - arr := v.MustUint64Slice() - replaced := make([]uint64, len(arr)) - - v.EachUint64(func(index int, val uint64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint64 uses the specified collector function to collect a value -// for each of the uint64s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { - - arr := v.MustUint64Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint64(func(index int, val uint64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uintptr (uintptr and []uintptr) - -------------------------------------------------- -*/ - -// Uintptr gets the value as a uintptr, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { - if s, ok := v.data.(uintptr); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUintptr gets the value as a uintptr. -// -// Panics if the object is not a uintptr. -func (v *Value) MustUintptr() uintptr { - return v.data.(uintptr) -} - -// UintptrSlice gets the value as a []uintptr, returns the optionalDefault -// value or nil if the value is not a []uintptr. -func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { - if s, ok := v.data.([]uintptr); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUintptrSlice gets the value as a []uintptr. -// -// Panics if the object is not a []uintptr. -func (v *Value) MustUintptrSlice() []uintptr { - return v.data.([]uintptr) -} - -// IsUintptr gets whether the object contained is a uintptr or not. -func (v *Value) IsUintptr() bool { - _, ok := v.data.(uintptr) - return ok -} - -// IsUintptrSlice gets whether the object contained is a []uintptr or not. -func (v *Value) IsUintptrSlice() bool { - _, ok := v.data.([]uintptr) - return ok -} - -// EachUintptr calls the specified callback for each object -// in the []uintptr. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { - - for index, val := range v.MustUintptrSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUintptr uses the specified decider function to select items -// from the []uintptr. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { - - var selected []uintptr - - v.EachUintptr(func(index int, val uintptr) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUintptr uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uintptr. -func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { - - groups := make(map[string][]uintptr) - - v.EachUintptr(func(index int, val uintptr) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uintptr, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUintptr uses the specified function to replace each uintptrs -// by iterating each item. The data in the returned result will be a -// []uintptr containing the replaced items. -func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { - - arr := v.MustUintptrSlice() - replaced := make([]uintptr, len(arr)) - - v.EachUintptr(func(index int, val uintptr) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUintptr uses the specified collector function to collect a value -// for each of the uintptrs in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { - - arr := v.MustUintptrSlice() - collected := make([]interface{}, len(arr)) - - v.EachUintptr(func(index int, val uintptr) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Float32 (float32 and []float32) - -------------------------------------------------- -*/ - -// Float32 gets the value as a float32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Float32(optionalDefault ...float32) float32 { - if s, ok := v.data.(float32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustFloat32 gets the value as a float32. -// -// Panics if the object is not a float32. -func (v *Value) MustFloat32() float32 { - return v.data.(float32) -} - -// Float32Slice gets the value as a []float32, returns the optionalDefault -// value or nil if the value is not a []float32. -func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { - if s, ok := v.data.([]float32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustFloat32Slice gets the value as a []float32. -// -// Panics if the object is not a []float32. -func (v *Value) MustFloat32Slice() []float32 { - return v.data.([]float32) -} - -// IsFloat32 gets whether the object contained is a float32 or not. -func (v *Value) IsFloat32() bool { - _, ok := v.data.(float32) - return ok -} - -// IsFloat32Slice gets whether the object contained is a []float32 or not. -func (v *Value) IsFloat32Slice() bool { - _, ok := v.data.([]float32) - return ok -} - -// EachFloat32 calls the specified callback for each object -// in the []float32. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { - - for index, val := range v.MustFloat32Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereFloat32 uses the specified decider function to select items -// from the []float32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { - - var selected []float32 - - v.EachFloat32(func(index int, val float32) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupFloat32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]float32. -func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { - - groups := make(map[string][]float32) - - v.EachFloat32(func(index int, val float32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]float32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceFloat32 uses the specified function to replace each float32s -// by iterating each item. The data in the returned result will be a -// []float32 containing the replaced items. -func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { - - arr := v.MustFloat32Slice() - replaced := make([]float32, len(arr)) - - v.EachFloat32(func(index int, val float32) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectFloat32 uses the specified collector function to collect a value -// for each of the float32s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { - - arr := v.MustFloat32Slice() - collected := make([]interface{}, len(arr)) - - v.EachFloat32(func(index int, val float32) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Float64 (float64 and []float64) - -------------------------------------------------- -*/ - -// Float64 gets the value as a float64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Float64(optionalDefault ...float64) float64 { - if s, ok := v.data.(float64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustFloat64 gets the value as a float64. -// -// Panics if the object is not a float64. -func (v *Value) MustFloat64() float64 { - return v.data.(float64) -} - -// Float64Slice gets the value as a []float64, returns the optionalDefault -// value or nil if the value is not a []float64. -func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { - if s, ok := v.data.([]float64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustFloat64Slice gets the value as a []float64. -// -// Panics if the object is not a []float64. -func (v *Value) MustFloat64Slice() []float64 { - return v.data.([]float64) -} - -// IsFloat64 gets whether the object contained is a float64 or not. -func (v *Value) IsFloat64() bool { - _, ok := v.data.(float64) - return ok -} - -// IsFloat64Slice gets whether the object contained is a []float64 or not. -func (v *Value) IsFloat64Slice() bool { - _, ok := v.data.([]float64) - return ok -} - -// EachFloat64 calls the specified callback for each object -// in the []float64. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { - - for index, val := range v.MustFloat64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereFloat64 uses the specified decider function to select items -// from the []float64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { - - var selected []float64 - - v.EachFloat64(func(index int, val float64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupFloat64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]float64. -func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { - - groups := make(map[string][]float64) - - v.EachFloat64(func(index int, val float64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]float64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceFloat64 uses the specified function to replace each float64s -// by iterating each item. The data in the returned result will be a -// []float64 containing the replaced items. -func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { - - arr := v.MustFloat64Slice() - replaced := make([]float64, len(arr)) - - v.EachFloat64(func(index int, val float64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectFloat64 uses the specified collector function to collect a value -// for each of the float64s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { - - arr := v.MustFloat64Slice() - collected := make([]interface{}, len(arr)) - - v.EachFloat64(func(index int, val float64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Complex64 (complex64 and []complex64) - -------------------------------------------------- -*/ - -// Complex64 gets the value as a complex64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Complex64(optionalDefault ...complex64) complex64 { - if s, ok := v.data.(complex64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustComplex64 gets the value as a complex64. -// -// Panics if the object is not a complex64. -func (v *Value) MustComplex64() complex64 { - return v.data.(complex64) -} - -// Complex64Slice gets the value as a []complex64, returns the optionalDefault -// value or nil if the value is not a []complex64. -func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { - if s, ok := v.data.([]complex64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustComplex64Slice gets the value as a []complex64. -// -// Panics if the object is not a []complex64. -func (v *Value) MustComplex64Slice() []complex64 { - return v.data.([]complex64) -} - -// IsComplex64 gets whether the object contained is a complex64 or not. -func (v *Value) IsComplex64() bool { - _, ok := v.data.(complex64) - return ok -} - -// IsComplex64Slice gets whether the object contained is a []complex64 or not. -func (v *Value) IsComplex64Slice() bool { - _, ok := v.data.([]complex64) - return ok -} - -// EachComplex64 calls the specified callback for each object -// in the []complex64. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { - - for index, val := range v.MustComplex64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereComplex64 uses the specified decider function to select items -// from the []complex64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { - - var selected []complex64 - - v.EachComplex64(func(index int, val complex64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupComplex64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]complex64. -func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { - - groups := make(map[string][]complex64) - - v.EachComplex64(func(index int, val complex64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]complex64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceComplex64 uses the specified function to replace each complex64s -// by iterating each item. The data in the returned result will be a -// []complex64 containing the replaced items. -func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { - - arr := v.MustComplex64Slice() - replaced := make([]complex64, len(arr)) - - v.EachComplex64(func(index int, val complex64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectComplex64 uses the specified collector function to collect a value -// for each of the complex64s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { - - arr := v.MustComplex64Slice() - collected := make([]interface{}, len(arr)) - - v.EachComplex64(func(index int, val complex64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Complex128 (complex128 and []complex128) - -------------------------------------------------- -*/ - -// Complex128 gets the value as a complex128, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Complex128(optionalDefault ...complex128) complex128 { - if s, ok := v.data.(complex128); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustComplex128 gets the value as a complex128. -// -// Panics if the object is not a complex128. -func (v *Value) MustComplex128() complex128 { - return v.data.(complex128) -} - -// Complex128Slice gets the value as a []complex128, returns the optionalDefault -// value or nil if the value is not a []complex128. -func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { - if s, ok := v.data.([]complex128); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustComplex128Slice gets the value as a []complex128. -// -// Panics if the object is not a []complex128. -func (v *Value) MustComplex128Slice() []complex128 { - return v.data.([]complex128) -} - -// IsComplex128 gets whether the object contained is a complex128 or not. -func (v *Value) IsComplex128() bool { - _, ok := v.data.(complex128) - return ok -} - -// IsComplex128Slice gets whether the object contained is a []complex128 or not. -func (v *Value) IsComplex128Slice() bool { - _, ok := v.data.([]complex128) - return ok -} - -// EachComplex128 calls the specified callback for each object -// in the []complex128. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { - - for index, val := range v.MustComplex128Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereComplex128 uses the specified decider function to select items -// from the []complex128. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { - - var selected []complex128 - - v.EachComplex128(func(index int, val complex128) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupComplex128 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]complex128. -func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { - - groups := make(map[string][]complex128) - - v.EachComplex128(func(index int, val complex128) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]complex128, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceComplex128 uses the specified function to replace each complex128s -// by iterating each item. The data in the returned result will be a -// []complex128 containing the replaced items. -func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { - - arr := v.MustComplex128Slice() - replaced := make([]complex128, len(arr)) - - v.EachComplex128(func(index int, val complex128) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectComplex128 uses the specified collector function to collect a value -// for each of the complex128s in the slice. 
The data returned will be a -// []interface{}. -func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { - - arr := v.MustComplex128Slice() - collected := make([]interface{}, len(arr)) - - v.EachComplex128(func(index int, val complex128) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go deleted file mode 100644 index f7a4fceea..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go +++ /dev/null @@ -1,2867 +0,0 @@ -package objx - -import ( - "fmt" - "github.com/stretchr/testify/assert" - "testing" -) - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInter(t *testing.T) { - - val := interface{}("something") - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Inter()) - assert.Equal(t, val, New(m).Get("value").MustInter()) - assert.Equal(t, interface{}(nil), New(m).Get("nothing").Inter()) - assert.Equal(t, val, New(m).Get("nothing").Inter("something")) - - assert.Panics(t, func() { - New(m).Get("age").MustInter() - }) - -} - -func TestInterSlice(t *testing.T) { - - val := interface{}("something") - m := map[string]interface{}{"value": []interface{}{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").InterSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInterSlice()[0]) - assert.Equal(t, []interface{}(nil), New(m).Get("nothing").InterSlice()) - assert.Equal(t, val, New(m).Get("nothing").InterSlice([]interface{}{interface{}("something")})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustInterSlice() - }) - -} - -func TestIsInter(t *testing.T) { - - var v *Value - - v = &Value{data: 
interface{}("something")} - assert.True(t, v.IsInter()) - - v = &Value{data: []interface{}{interface{}("something")}} - assert.True(t, v.IsInterSlice()) - -} - -func TestEachInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - count := 0 - replacedVals := make([]interface{}, 0) - assert.Equal(t, v, v.EachInter(func(i int, val interface{}) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInterSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustInterSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustInterSlice()[2]) - -} - -func TestWhereInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - selected := v.WhereInter(func(i int, val interface{}) bool { - return i%2 == 0 - }).MustInterSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - grouped := v.GroupInter(func(i int, val interface{}) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]interface{}) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - rawArr := v.MustInterSlice() - - replaced := 
v.ReplaceInter(func(index int, val interface{}) interface{} { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustInterSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - collected := v.CollectInter(func(index int, val interface{}) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestMSI(t *testing.T) { - - val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").MSI()) - assert.Equal(t, val, New(m).Get("value").MustMSI()) - assert.Equal(t, map[string]interface{}(nil), New(m).Get("nothing").MSI()) - assert.Equal(t, val, New(m).Get("nothing").MSI(map[string]interface{}{"name": "Tyler"})) - - assert.Panics(t, func() { - New(m).Get("age").MustMSI() - }) - -} - -func TestMSISlice(t *testing.T) { - - val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) - m := map[string]interface{}{"value": 
[]map[string]interface{}{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").MSISlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustMSISlice()[0]) - assert.Equal(t, []map[string]interface{}(nil), New(m).Get("nothing").MSISlice()) - assert.Equal(t, val, New(m).Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustMSISlice() - }) - -} - -func TestIsMSI(t *testing.T) { - - var v *Value - - v = &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})} - assert.True(t, v.IsMSI()) - - v = &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - assert.True(t, v.IsMSISlice()) - -} - -func TestEachMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - count := 0 - replacedVals := make([]map[string]interface{}, 0) - assert.Equal(t, v, v.EachMSI(func(i int, val map[string]interface{}) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustMSISlice()[0]) - assert.Equal(t, replacedVals[1], v.MustMSISlice()[1]) - assert.Equal(t, replacedVals[2], v.MustMSISlice()[2]) - -} - -func TestWhereMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), 
map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - selected := v.WhereMSI(func(i int, val map[string]interface{}) bool { - return i%2 == 0 - }).MustMSISlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - grouped := v.GroupMSI(func(i int, val map[string]interface{}) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]map[string]interface{}) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - rawArr := v.MustMSISlice() - - replaced := v.ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustMSISlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, 
replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - collected := v.CollectMSI(func(index int, val map[string]interface{}) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestObjxMap(t *testing.T) { - - val := (Map)(New(1)) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").ObjxMap()) - assert.Equal(t, val, New(m).Get("value").MustObjxMap()) - assert.Equal(t, (Map)(New(nil)), New(m).Get("nothing").ObjxMap()) - assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1))) - - assert.Panics(t, func() { - New(m).Get("age").MustObjxMap() - }) - -} - -func TestObjxMapSlice(t *testing.T) { - - val := (Map)(New(1)) - m := map[string]interface{}{"value": [](Map){val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").ObjxMapSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustObjxMapSlice()[0]) - assert.Equal(t, [](Map)(nil), New(m).Get("nothing").ObjxMapSlice()) - 
assert.Equal(t, val, New(m).Get("nothing").ObjxMapSlice([](Map){(Map)(New(1))})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustObjxMapSlice() - }) - -} - -func TestIsObjxMap(t *testing.T) { - - var v *Value - - v = &Value{data: (Map)(New(1))} - assert.True(t, v.IsObjxMap()) - - v = &Value{data: [](Map){(Map)(New(1))}} - assert.True(t, v.IsObjxMapSlice()) - -} - -func TestEachObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - count := 0 - replacedVals := make([](Map), 0) - assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2]) - -} - -func TestWhereObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - selected := v.WhereObjxMap(func(i int, val Map) bool { - return i%2 == 0 - }).MustObjxMapSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - grouped := v.GroupObjxMap(func(i int, val Map) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][](Map)) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - rawArr := v.MustObjxMapSlice() - - replaced := v.ReplaceObjxMap(func(index int, val Map) Map { - if index < len(rawArr)-1 { - 
return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustObjxMapSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - collected := v.CollectObjxMap(func(index int, val Map) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestBool(t *testing.T) { - - val := bool(true) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Bool()) - assert.Equal(t, val, New(m).Get("value").MustBool()) - assert.Equal(t, bool(false), New(m).Get("nothing").Bool()) - assert.Equal(t, val, New(m).Get("nothing").Bool(true)) - - assert.Panics(t, func() { - New(m).Get("age").MustBool() - }) - -} - -func TestBoolSlice(t *testing.T) { - - val := bool(true) - m := map[string]interface{}{"value": []bool{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").BoolSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustBoolSlice()[0]) - assert.Equal(t, []bool(nil), New(m).Get("nothing").BoolSlice()) - assert.Equal(t, val, New(m).Get("nothing").BoolSlice([]bool{bool(true)})[0]) - - assert.Panics(t, func() { - 
New(m).Get("nothing").MustBoolSlice() - }) - -} - -func TestIsBool(t *testing.T) { - - var v *Value - - v = &Value{data: bool(true)} - assert.True(t, v.IsBool()) - - v = &Value{data: []bool{bool(true)}} - assert.True(t, v.IsBoolSlice()) - -} - -func TestEachBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true)}} - count := 0 - replacedVals := make([]bool, 0) - assert.Equal(t, v, v.EachBool(func(i int, val bool) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustBoolSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustBoolSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustBoolSlice()[2]) - -} - -func TestWhereBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - selected := v.WhereBool(func(i int, val bool) bool { - return i%2 == 0 - }).MustBoolSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - grouped := v.GroupBool(func(i int, val bool) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]bool) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - rawArr := v.MustBoolSlice() - - replaced := v.ReplaceBool(func(index int, val bool) bool { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustBoolSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - 
assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - collected := v.CollectBool(func(index int, val bool) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestStr(t *testing.T) { - - val := string("hello") - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Str()) - assert.Equal(t, val, New(m).Get("value").MustStr()) - assert.Equal(t, string(""), New(m).Get("nothing").Str()) - assert.Equal(t, val, New(m).Get("nothing").Str("hello")) - - assert.Panics(t, func() { - New(m).Get("age").MustStr() - }) - -} - -func TestStrSlice(t *testing.T) { - - val := string("hello") - m := map[string]interface{}{"value": []string{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").StrSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0]) - assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice()) - assert.Equal(t, val, New(m).Get("nothing").StrSlice([]string{string("hello")})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustStrSlice() - }) - -} - -func TestIsStr(t *testing.T) { - - var v *Value - - v = &Value{data: string("hello")} - assert.True(t, v.IsStr()) - - v = &Value{data: []string{string("hello")}} - assert.True(t, v.IsStrSlice()) - -} - -func 
TestEachStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - count := 0 - replacedVals := make([]string, 0) - assert.Equal(t, v, v.EachStr(func(i int, val string) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustStrSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustStrSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustStrSlice()[2]) - -} - -func TestWhereStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - selected := v.WhereStr(func(i int, val string) bool { - return i%2 == 0 - }).MustStrSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - grouped := v.GroupStr(func(i int, val string) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]string) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - rawArr := v.MustStrSlice() - - replaced := v.ReplaceStr(func(index int, val string) string { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustStrSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - 
assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - collected := v.CollectStr(func(index int, val string) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInt(t *testing.T) { - - val := int(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int()) - assert.Equal(t, val, New(m).Get("value").MustInt()) - assert.Equal(t, int(0), New(m).Get("nothing").Int()) - assert.Equal(t, val, New(m).Get("nothing").Int(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustInt() - }) - -} - -func TestIntSlice(t *testing.T) { - - val := int(1) - m := map[string]interface{}{"value": []int{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").IntSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustIntSlice()[0]) - assert.Equal(t, []int(nil), New(m).Get("nothing").IntSlice()) - assert.Equal(t, val, New(m).Get("nothing").IntSlice([]int{int(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustIntSlice() - }) - -} - -func TestIsInt(t *testing.T) { - - var v *Value - - v = &Value{data: int(1)} - assert.True(t, v.IsInt()) - - v = &Value{data: []int{int(1)}} - assert.True(t, v.IsIntSlice()) - -} - -func TestEachInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1)}} - count := 0 - replacedVals := make([]int, 0) - assert.Equal(t, v, 
v.EachInt(func(i int, val int) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustIntSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustIntSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustIntSlice()[2]) - -} - -func TestWhereInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - selected := v.WhereInt(func(i int, val int) bool { - return i%2 == 0 - }).MustIntSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - grouped := v.GroupInt(func(i int, val int) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - rawArr := v.MustIntSlice() - - replaced := v.ReplaceInt(func(index int, val int) int { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustIntSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - collected := v.CollectInt(func(index int, val int) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, 
collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInt8(t *testing.T) { - - val := int8(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int8()) - assert.Equal(t, val, New(m).Get("value").MustInt8()) - assert.Equal(t, int8(0), New(m).Get("nothing").Int8()) - assert.Equal(t, val, New(m).Get("nothing").Int8(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustInt8() - }) - -} - -func TestInt8Slice(t *testing.T) { - - val := int8(1) - m := map[string]interface{}{"value": []int8{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int8Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt8Slice()[0]) - assert.Equal(t, []int8(nil), New(m).Get("nothing").Int8Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int8Slice([]int8{int8(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustInt8Slice() - }) - -} - -func TestIsInt8(t *testing.T) { - - var v *Value - - v = &Value{data: int8(1)} - assert.True(t, v.IsInt8()) - - v = &Value{data: []int8{int8(1)}} - assert.True(t, v.IsInt8Slice()) - -} - -func TestEachInt8(t *testing.T) { - - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}} - count := 0 - replacedVals := make([]int8, 0) - assert.Equal(t, v, v.EachInt8(func(i int, val int8) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt8Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt8Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt8Slice()[2]) - -} - -func TestWhereInt8(t *testing.T) { - - v := &Value{data: 
[]int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - selected := v.WhereInt8(func(i int, val int8) bool { - return i%2 == 0 - }).MustInt8Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInt8(t *testing.T) { - - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - grouped := v.GroupInt8(func(i int, val int8) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int8) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInt8(t *testing.T) { - - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - rawArr := v.MustInt8Slice() - - replaced := v.ReplaceInt8(func(index int, val int8) int8 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustInt8Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInt8(t *testing.T) { - - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - collected := v.CollectInt8(func(index int, val int8) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInt16(t *testing.T) { - - val := int16(1) - m := 
map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int16()) - assert.Equal(t, val, New(m).Get("value").MustInt16()) - assert.Equal(t, int16(0), New(m).Get("nothing").Int16()) - assert.Equal(t, val, New(m).Get("nothing").Int16(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustInt16() - }) - -} - -func TestInt16Slice(t *testing.T) { - - val := int16(1) - m := map[string]interface{}{"value": []int16{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int16Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt16Slice()[0]) - assert.Equal(t, []int16(nil), New(m).Get("nothing").Int16Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int16Slice([]int16{int16(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustInt16Slice() - }) - -} - -func TestIsInt16(t *testing.T) { - - var v *Value - - v = &Value{data: int16(1)} - assert.True(t, v.IsInt16()) - - v = &Value{data: []int16{int16(1)}} - assert.True(t, v.IsInt16Slice()) - -} - -func TestEachInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}} - count := 0 - replacedVals := make([]int16, 0) - assert.Equal(t, v, v.EachInt16(func(i int, val int16) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt16Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt16Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt16Slice()[2]) - -} - -func TestWhereInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - selected := v.WhereInt16(func(i int, val int16) bool { - return i%2 == 0 - }).MustInt16Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - 
grouped := v.GroupInt16(func(i int, val int16) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int16) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - rawArr := v.MustInt16Slice() - - replaced := v.ReplaceInt16(func(index int, val int16) int16 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustInt16Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - collected := v.CollectInt16(func(index int, val int16) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInt32(t *testing.T) { - - val := int32(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int32()) - assert.Equal(t, val, New(m).Get("value").MustInt32()) - assert.Equal(t, int32(0), New(m).Get("nothing").Int32()) - assert.Equal(t, val, New(m).Get("nothing").Int32(1)) - - assert.Panics(t, func() { - 
New(m).Get("age").MustInt32() - }) - -} - -func TestInt32Slice(t *testing.T) { - - val := int32(1) - m := map[string]interface{}{"value": []int32{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int32Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt32Slice()[0]) - assert.Equal(t, []int32(nil), New(m).Get("nothing").Int32Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int32Slice([]int32{int32(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustInt32Slice() - }) - -} - -func TestIsInt32(t *testing.T) { - - var v *Value - - v = &Value{data: int32(1)} - assert.True(t, v.IsInt32()) - - v = &Value{data: []int32{int32(1)}} - assert.True(t, v.IsInt32Slice()) - -} - -func TestEachInt32(t *testing.T) { - - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}} - count := 0 - replacedVals := make([]int32, 0) - assert.Equal(t, v, v.EachInt32(func(i int, val int32) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt32Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt32Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt32Slice()[2]) - -} - -func TestWhereInt32(t *testing.T) { - - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - selected := v.WhereInt32(func(i int, val int32) bool { - return i%2 == 0 - }).MustInt32Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInt32(t *testing.T) { - - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - grouped := v.GroupInt32(func(i int, val int32) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int32) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInt32(t *testing.T) { - - v := 
&Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - rawArr := v.MustInt32Slice() - - replaced := v.ReplaceInt32(func(index int, val int32) int32 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustInt32Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInt32(t *testing.T) { - - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - collected := v.CollectInt32(func(index int, val int32) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInt64(t *testing.T) { - - val := int64(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int64()) - assert.Equal(t, val, New(m).Get("value").MustInt64()) - assert.Equal(t, int64(0), New(m).Get("nothing").Int64()) - assert.Equal(t, val, New(m).Get("nothing").Int64(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustInt64() - }) - -} - -func TestInt64Slice(t *testing.T) { - - val := int64(1) - m := map[string]interface{}{"value": []int64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt64Slice()[0]) - 
assert.Equal(t, []int64(nil), New(m).Get("nothing").Int64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int64Slice([]int64{int64(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustInt64Slice() - }) - -} - -func TestIsInt64(t *testing.T) { - - var v *Value - - v = &Value{data: int64(1)} - assert.True(t, v.IsInt64()) - - v = &Value{data: []int64{int64(1)}} - assert.True(t, v.IsInt64Slice()) - -} - -func TestEachInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}} - count := 0 - replacedVals := make([]int64, 0) - assert.Equal(t, v, v.EachInt64(func(i int, val int64) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt64Slice()[2]) - -} - -func TestWhereInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - selected := v.WhereInt64(func(i int, val int64) bool { - return i%2 == 0 - }).MustInt64Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - grouped := v.GroupInt64(func(i int, val int64) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int64) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - rawArr := v.MustInt64Slice() - - replaced := v.ReplaceInt64(func(index int, val int64) int64 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := 
replaced.MustInt64Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - collected := v.CollectInt64(func(index int, val int64) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestUint(t *testing.T) { - - val := uint(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint()) - assert.Equal(t, val, New(m).Get("value").MustUint()) - assert.Equal(t, uint(0), New(m).Get("nothing").Uint()) - assert.Equal(t, val, New(m).Get("nothing").Uint(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUint() - }) - -} - -func TestUintSlice(t *testing.T) { - - val := uint(1) - m := map[string]interface{}{"value": []uint{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").UintSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUintSlice()[0]) - assert.Equal(t, []uint(nil), New(m).Get("nothing").UintSlice()) - assert.Equal(t, val, New(m).Get("nothing").UintSlice([]uint{uint(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUintSlice() - }) - -} - -func TestIsUint(t *testing.T) { - - var v *Value - - v = &Value{data: uint(1)} - 
assert.True(t, v.IsUint()) - - v = &Value{data: []uint{uint(1)}} - assert.True(t, v.IsUintSlice()) - -} - -func TestEachUint(t *testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}} - count := 0 - replacedVals := make([]uint, 0) - assert.Equal(t, v, v.EachUint(func(i int, val uint) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUintSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustUintSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustUintSlice()[2]) - -} - -func TestWhereUint(t *testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - selected := v.WhereUint(func(i int, val uint) bool { - return i%2 == 0 - }).MustUintSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUint(t *testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - grouped := v.GroupUint(func(i int, val uint) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUint(t *testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - rawArr := v.MustUintSlice() - - replaced := v.ReplaceUint(func(index int, val uint) uint { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUintSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUint(t 
*testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - collected := v.CollectUint(func(index int, val uint) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestUint8(t *testing.T) { - - val := uint8(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint8()) - assert.Equal(t, val, New(m).Get("value").MustUint8()) - assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8()) - assert.Equal(t, val, New(m).Get("nothing").Uint8(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUint8() - }) - -} - -func TestUint8Slice(t *testing.T) { - - val := uint8(1) - m := map[string]interface{}{"value": []uint8{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint8Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint8Slice()[0]) - assert.Equal(t, []uint8(nil), New(m).Get("nothing").Uint8Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint8Slice([]uint8{uint8(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUint8Slice() - }) - -} - -func TestIsUint8(t *testing.T) { - - var v *Value - - v = &Value{data: uint8(1)} - assert.True(t, v.IsUint8()) - - v = &Value{data: []uint8{uint8(1)}} - assert.True(t, v.IsUint8Slice()) - -} - -func TestEachUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - count := 0 - replacedVals := make([]uint8, 0) - assert.Equal(t, v, v.EachUint8(func(i int, val uint8) bool { - - count++ - replacedVals 
= append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint8Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint8Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint8Slice()[2]) - -} - -func TestWhereUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - selected := v.WhereUint8(func(i int, val uint8) bool { - return i%2 == 0 - }).MustUint8Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - grouped := v.GroupUint8(func(i int, val uint8) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint8) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - rawArr := v.MustUint8Slice() - - replaced := v.ReplaceUint8(func(index int, val uint8) uint8 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUint8Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - collected := v.CollectUint8(func(index int, val uint8) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, 
collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestUint16(t *testing.T) { - - val := uint16(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint16()) - assert.Equal(t, val, New(m).Get("value").MustUint16()) - assert.Equal(t, uint16(0), New(m).Get("nothing").Uint16()) - assert.Equal(t, val, New(m).Get("nothing").Uint16(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUint16() - }) - -} - -func TestUint16Slice(t *testing.T) { - - val := uint16(1) - m := map[string]interface{}{"value": []uint16{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint16Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint16Slice()[0]) - assert.Equal(t, []uint16(nil), New(m).Get("nothing").Uint16Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint16Slice([]uint16{uint16(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUint16Slice() - }) - -} - -func TestIsUint16(t *testing.T) { - - var v *Value - - v = &Value{data: uint16(1)} - assert.True(t, v.IsUint16()) - - v = &Value{data: []uint16{uint16(1)}} - assert.True(t, v.IsUint16Slice()) - -} - -func TestEachUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - count := 0 - replacedVals := make([]uint16, 0) - assert.Equal(t, v, v.EachUint16(func(i int, val uint16) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint16Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint16Slice()[1]) - 
assert.Equal(t, replacedVals[2], v.MustUint16Slice()[2]) - -} - -func TestWhereUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - selected := v.WhereUint16(func(i int, val uint16) bool { - return i%2 == 0 - }).MustUint16Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - grouped := v.GroupUint16(func(i int, val uint16) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint16) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - rawArr := v.MustUint16Slice() - - replaced := v.ReplaceUint16(func(index int, val uint16) uint16 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUint16Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - collected := v.CollectUint16(func(index int, val uint16) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - 
-} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestUint32(t *testing.T) { - - val := uint32(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint32()) - assert.Equal(t, val, New(m).Get("value").MustUint32()) - assert.Equal(t, uint32(0), New(m).Get("nothing").Uint32()) - assert.Equal(t, val, New(m).Get("nothing").Uint32(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUint32() - }) - -} - -func TestUint32Slice(t *testing.T) { - - val := uint32(1) - m := map[string]interface{}{"value": []uint32{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint32Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint32Slice()[0]) - assert.Equal(t, []uint32(nil), New(m).Get("nothing").Uint32Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint32Slice([]uint32{uint32(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUint32Slice() - }) - -} - -func TestIsUint32(t *testing.T) { - - var v *Value - - v = &Value{data: uint32(1)} - assert.True(t, v.IsUint32()) - - v = &Value{data: []uint32{uint32(1)}} - assert.True(t, v.IsUint32Slice()) - -} - -func TestEachUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - count := 0 - replacedVals := make([]uint32, 0) - assert.Equal(t, v, v.EachUint32(func(i int, val uint32) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint32Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint32Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint32Slice()[2]) - -} - -func TestWhereUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - selected := 
v.WhereUint32(func(i int, val uint32) bool { - return i%2 == 0 - }).MustUint32Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - grouped := v.GroupUint32(func(i int, val uint32) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint32) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - rawArr := v.MustUint32Slice() - - replaced := v.ReplaceUint32(func(index int, val uint32) uint32 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUint32Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - collected := v.CollectUint32(func(index int, val uint32) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestUint64(t *testing.T) { - - val := uint64(1) - m := 
map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint64()) - assert.Equal(t, val, New(m).Get("value").MustUint64()) - assert.Equal(t, uint64(0), New(m).Get("nothing").Uint64()) - assert.Equal(t, val, New(m).Get("nothing").Uint64(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUint64() - }) - -} - -func TestUint64Slice(t *testing.T) { - - val := uint64(1) - m := map[string]interface{}{"value": []uint64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint64Slice()[0]) - assert.Equal(t, []uint64(nil), New(m).Get("nothing").Uint64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint64Slice([]uint64{uint64(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUint64Slice() - }) - -} - -func TestIsUint64(t *testing.T) { - - var v *Value - - v = &Value{data: uint64(1)} - assert.True(t, v.IsUint64()) - - v = &Value{data: []uint64{uint64(1)}} - assert.True(t, v.IsUint64Slice()) - -} - -func TestEachUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - count := 0 - replacedVals := make([]uint64, 0) - assert.Equal(t, v, v.EachUint64(func(i int, val uint64) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint64Slice()[2]) - -} - -func TestWhereUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - selected := v.WhereUint64(func(i int, val uint64) bool { - return i%2 == 0 - }).MustUint64Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), 
uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - grouped := v.GroupUint64(func(i int, val uint64) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint64) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - rawArr := v.MustUint64Slice() - - replaced := v.ReplaceUint64(func(index int, val uint64) uint64 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUint64Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - collected := v.CollectUint64(func(index int, val uint64) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestUintptr(t *testing.T) { - - val := uintptr(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uintptr()) - assert.Equal(t, val, New(m).Get("value").MustUintptr()) - assert.Equal(t, uintptr(0), 
New(m).Get("nothing").Uintptr()) - assert.Equal(t, val, New(m).Get("nothing").Uintptr(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUintptr() - }) - -} - -func TestUintptrSlice(t *testing.T) { - - val := uintptr(1) - m := map[string]interface{}{"value": []uintptr{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").UintptrSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUintptrSlice()[0]) - assert.Equal(t, []uintptr(nil), New(m).Get("nothing").UintptrSlice()) - assert.Equal(t, val, New(m).Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUintptrSlice() - }) - -} - -func TestIsUintptr(t *testing.T) { - - var v *Value - - v = &Value{data: uintptr(1)} - assert.True(t, v.IsUintptr()) - - v = &Value{data: []uintptr{uintptr(1)}} - assert.True(t, v.IsUintptrSlice()) - -} - -func TestEachUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - count := 0 - replacedVals := make([]uintptr, 0) - assert.Equal(t, v, v.EachUintptr(func(i int, val uintptr) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUintptrSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustUintptrSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustUintptrSlice()[2]) - -} - -func TestWhereUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - selected := v.WhereUintptr(func(i int, val uintptr) bool { - return i%2 == 0 - }).MustUintptrSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - grouped := v.GroupUintptr(func(i int, val uintptr) string { - return fmt.Sprintf("%v", 
i%2 == 0) - }).data.(map[string][]uintptr) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - rawArr := v.MustUintptrSlice() - - replaced := v.ReplaceUintptr(func(index int, val uintptr) uintptr { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUintptrSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - collected := v.CollectUintptr(func(index int, val uintptr) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestFloat32(t *testing.T) { - - val := float32(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float32()) - assert.Equal(t, val, New(m).Get("value").MustFloat32()) - assert.Equal(t, float32(0), New(m).Get("nothing").Float32()) - assert.Equal(t, val, New(m).Get("nothing").Float32(1)) - - assert.Panics(t, func() { - 
New(m).Get("age").MustFloat32() - }) - -} - -func TestFloat32Slice(t *testing.T) { - - val := float32(1) - m := map[string]interface{}{"value": []float32{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float32Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustFloat32Slice()[0]) - assert.Equal(t, []float32(nil), New(m).Get("nothing").Float32Slice()) - assert.Equal(t, val, New(m).Get("nothing").Float32Slice([]float32{float32(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustFloat32Slice() - }) - -} - -func TestIsFloat32(t *testing.T) { - - var v *Value - - v = &Value{data: float32(1)} - assert.True(t, v.IsFloat32()) - - v = &Value{data: []float32{float32(1)}} - assert.True(t, v.IsFloat32Slice()) - -} - -func TestEachFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}} - count := 0 - replacedVals := make([]float32, 0) - assert.Equal(t, v, v.EachFloat32(func(i int, val float32) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustFloat32Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustFloat32Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustFloat32Slice()[2]) - -} - -func TestWhereFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - selected := v.WhereFloat32(func(i int, val float32) bool { - return i%2 == 0 - }).MustFloat32Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - grouped := v.GroupFloat32(func(i int, val float32) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]float32) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) 
- assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - rawArr := v.MustFloat32Slice() - - replaced := v.ReplaceFloat32(func(index int, val float32) float32 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustFloat32Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - collected := v.CollectFloat32(func(index int, val float32) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestFloat64(t *testing.T) { - - val := float64(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float64()) - assert.Equal(t, val, New(m).Get("value").MustFloat64()) - assert.Equal(t, float64(0), New(m).Get("nothing").Float64()) - assert.Equal(t, val, New(m).Get("nothing").Float64(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustFloat64() - }) - -} - -func TestFloat64Slice(t *testing.T) { - - val := float64(1) - m := map[string]interface{}{"value": 
[]float64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustFloat64Slice()[0]) - assert.Equal(t, []float64(nil), New(m).Get("nothing").Float64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Float64Slice([]float64{float64(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustFloat64Slice() - }) - -} - -func TestIsFloat64(t *testing.T) { - - var v *Value - - v = &Value{data: float64(1)} - assert.True(t, v.IsFloat64()) - - v = &Value{data: []float64{float64(1)}} - assert.True(t, v.IsFloat64Slice()) - -} - -func TestEachFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}} - count := 0 - replacedVals := make([]float64, 0) - assert.Equal(t, v, v.EachFloat64(func(i int, val float64) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustFloat64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustFloat64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustFloat64Slice()[2]) - -} - -func TestWhereFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - selected := v.WhereFloat64(func(i int, val float64) bool { - return i%2 == 0 - }).MustFloat64Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - grouped := v.GroupFloat64(func(i int, val float64) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]float64) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), 
float64(1), float64(1), float64(1), float64(1), float64(1)}} - - rawArr := v.MustFloat64Slice() - - replaced := v.ReplaceFloat64(func(index int, val float64) float64 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustFloat64Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - collected := v.CollectFloat64(func(index int, val float64) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestComplex64(t *testing.T) { - - val := complex64(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex64()) - assert.Equal(t, val, New(m).Get("value").MustComplex64()) - assert.Equal(t, complex64(0), New(m).Get("nothing").Complex64()) - assert.Equal(t, val, New(m).Get("nothing").Complex64(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustComplex64() - }) - -} - -func TestComplex64Slice(t *testing.T) { - - val := complex64(1) - m := map[string]interface{}{"value": []complex64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex64Slice()[0]) - assert.Equal(t, 
val, New(m).Get("value").MustComplex64Slice()[0]) - assert.Equal(t, []complex64(nil), New(m).Get("nothing").Complex64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Complex64Slice([]complex64{complex64(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustComplex64Slice() - }) - -} - -func TestIsComplex64(t *testing.T) { - - var v *Value - - v = &Value{data: complex64(1)} - assert.True(t, v.IsComplex64()) - - v = &Value{data: []complex64{complex64(1)}} - assert.True(t, v.IsComplex64Slice()) - -} - -func TestEachComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - count := 0 - replacedVals := make([]complex64, 0) - assert.Equal(t, v, v.EachComplex64(func(i int, val complex64) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustComplex64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustComplex64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustComplex64Slice()[2]) - -} - -func TestWhereComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - selected := v.WhereComplex64(func(i int, val complex64) bool { - return i%2 == 0 - }).MustComplex64Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - grouped := v.GroupComplex64(func(i int, val complex64) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]complex64) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), 
complex64(1), complex64(1), complex64(1), complex64(1)}} - - rawArr := v.MustComplex64Slice() - - replaced := v.ReplaceComplex64(func(index int, val complex64) complex64 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustComplex64Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - collected := v.CollectComplex64(func(index int, val complex64) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestComplex128(t *testing.T) { - - val := complex128(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex128()) - assert.Equal(t, val, New(m).Get("value").MustComplex128()) - assert.Equal(t, complex128(0), New(m).Get("nothing").Complex128()) - assert.Equal(t, val, New(m).Get("nothing").Complex128(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustComplex128() - }) - -} - -func TestComplex128Slice(t *testing.T) { - - val := complex128(1) - m := map[string]interface{}{"value": []complex128{val}, "nothing": nil} - assert.Equal(t, val, 
New(m).Get("value").Complex128Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustComplex128Slice()[0]) - assert.Equal(t, []complex128(nil), New(m).Get("nothing").Complex128Slice()) - assert.Equal(t, val, New(m).Get("nothing").Complex128Slice([]complex128{complex128(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustComplex128Slice() - }) - -} - -func TestIsComplex128(t *testing.T) { - - var v *Value - - v = &Value{data: complex128(1)} - assert.True(t, v.IsComplex128()) - - v = &Value{data: []complex128{complex128(1)}} - assert.True(t, v.IsComplex128Slice()) - -} - -func TestEachComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - count := 0 - replacedVals := make([]complex128, 0) - assert.Equal(t, v, v.EachComplex128(func(i int, val complex128) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustComplex128Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustComplex128Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustComplex128Slice()[2]) - -} - -func TestWhereComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - selected := v.WhereComplex128(func(i int, val complex128) bool { - return i%2 == 0 - }).MustComplex128Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - grouped := v.GroupComplex128(func(i int, val complex128) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]complex128) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - 
-func TestReplaceComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - rawArr := v.MustComplex128Slice() - - replaced := v.ReplaceComplex128(func(index int, val complex128) complex128 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustComplex128Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - collected := v.CollectComplex128(func(index int, val complex128) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/value.go b/Godeps/_workspace/src/github.com/stretchr/objx/value.go deleted file mode 100644 index 7aaef06b1..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/value.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -// Value provides methods for extracting interface{} data in various -// types. 
-type Value struct { - // data contains the raw data being managed by this Value - data interface{} -} - -// Data returns the raw data contained by this Value -func (v *Value) Data() interface{} { - return v.data -} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go deleted file mode 100644 index 0bc65d92c..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go +++ /dev/null @@ -1 +0,0 @@ -package objx diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index 58e178165..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,805 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "fmt" - "reflect" - "regexp" - "runtime" - "strings" - "time" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// Comparison a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. -// -// This function does no assertion of any kind. 
-func ObjectsAreEqual(expected, actual interface{}) bool { - - if expected == nil || actual == nil { - return expected == actual - } - - if reflect.DeepEqual(expected, actual) { - return true - } - - // Last ditch effort - if fmt.Sprintf("%#v", expected) == fmt.Sprintf("%#v", actual) { - return true - } - - return false - -} - -func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - expectedValue := reflect.ValueOf(expected) - if expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - if reflect.DeepEqual(actual, expectedValue.Convert(actualType).Interface()) { - return true - } - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occured in calling code.*/ - -// CallerInfo returns a string containing the file and line number of the assert call -// that failed. -func CallerInfo() string { - - file := "" - line := 0 - ok := false - - for i := 0; ; i++ { - _, file, line, ok = runtime.Caller(i) - if !ok { - return "" - } - parts := strings.Split(file, "/") - dir := parts[len(parts)-2] - file = parts[len(parts)-1] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - break - } - } - - return fmt.Sprintf("%s:%d", file, line) -} - -// getWhitespaceString returns a string that is long enough to overwrite the default -// output from the go testing framework. 
-func getWhitespaceString() string { - - _, file, line, ok := runtime.Caller(1) - if !ok { - return "" - } - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - - return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) - -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - return msgAndArgs[0].(string) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's -// test printing (see inner comment for specifics) -func indentMessageLines(message string, tabs int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - if i != 0 { - outBuf.WriteRune('\n') - } - for ii := 0; ii < tabs; ii++ { - outBuf.WriteRune('\t') - // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter - // by 1 prematurely. - if ii == 0 && i > 0 { - ii++ - } - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - - if len(message) > 0 { - t.Errorf("\r%s\r\tLocation:\t%s\n"+ - "\r\tError:%s\n"+ - "\r\tMessages:\t%s\n\r", - getWhitespaceString(), - CallerInfo(), - indentMessageLines(failureMessage, 2), - message) - } else { - t.Errorf("\r%s\r\tLocation:\t%s\n"+ - "\r\tError:%s\n\r", - getWhitespaceString(), - CallerInfo(), - indentMessageLines(failureMessage, 2)) - } - - return false -} - -// Implements asserts that an object is implemented by the specified interface. 
-// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("Object must implement %v", interfaceType), msgAndArgs...) - } - - return true - -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)", expected, actual), msgAndArgs...) - } - - return true - -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqualValues(expected, actual) { - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)", expected, actual), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal is value and type. 
-// -// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, "Types expected to match exactly", "%v != %v", aType, bType) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - success := true - - if object == nil { - success = false - } else { - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - success = false - } - } - - if !success { - Fail(t, "Expected not to be nil.", msgAndArgs...) - } - - return success -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if isNil(object) { - return true - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) 
-} - -var zeros = []interface{}{ - int(0), - int8(0), - int16(0), - int32(0), - int64(0), - uint(0), - uint8(0), - uint16(0), - uint32(0), - uint64(0), - float32(0), - float64(0), -} - -// isEmpty gets whether the specified object is considered empty or not. -func isEmpty(object interface{}) bool { - - if object == nil { - return true - } else if object == "" { - return true - } else if object == false { - return true - } - - for _, v := range zeros { - if object == v { - return true - } - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - case reflect.Map: - fallthrough - case reflect.Slice, reflect.Chan: - { - return (objValue.Len() == 0) - } - case reflect.Ptr: - { - switch object.(type) { - case *time.Time: - return object.(*time.Time).IsZero() - default: - return false - } - } - } - return false -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -// -// Returns whether the assertion was successful (true) or not (false). -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - pass := isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - pass := !isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. 
-func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. -// -// assert.True(t, myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - - if value != true { - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is true. -// -// assert.False(t, myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - - if value != false { - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if ObjectsAreEqual(expected, actual) { - return Fail(t, "Should not be equal", msgAndArgs...) 
- } - - return true - -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if reflect.TypeOf(list).Kind() == reflect.String { - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string or list(array, slice...) contains the -// specified substring or element. -// -// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") -// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// -// Returns whether the assertion was successful (true) or not (false). -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string or list(array, slice...) does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). 
-func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) - } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}) { - - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - f() - - }() - - return didPanic, message - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). 
-func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - - if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) 
- } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// min(|expected|, |actual|) * epsilon -func calcEpsilonDelta(expected, actual interface{}, epsilon float64) float64 { - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - // invalid input - return 0 - } - - if af < 0 { - af = -af - } - if bf < 0 { - bf = -bf - } - var delta float64 - if af < bf { - delta = af * epsilon - } else { - delta = bf * epsilon - } - return delta -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - delta := calcEpsilonDelta(expected, actual, epsilon) - - return InDelta(t, expected, actual, delta, msgAndArgs...) -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if isNil(err) { - return true - } - - return Fail(t, fmt.Sprintf("No error is expected but got %v", err), msgAndArgs...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - return NotNil(t, err, "An error is expected but got nil. 
%s", message) - -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - if !NotNil(t, theError, "An error is expected but got nil. %s", message) { - return false - } - s := "An error with value \"%s\" is expected but got \"%s\". %s" - return Equal(t, theError.Error(), errString, - s, errString, theError.Error(), message) -} - -// matchRegexp return true if a specified regexp matches a string. -func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). 
-func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go deleted file mode 100644 index 2cb58dbd7..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go +++ /dev/null @@ -1,768 +0,0 @@ -package assert - -import ( - "errors" - "regexp" - "testing" - "time" -) - -// AssertionTesterInterface defines an interface to be used for testing assertion methods -type AssertionTesterInterface interface { - TestMethod() -} - -// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface -type AssertionTesterConformingObject struct { -} - -func (a *AssertionTesterConformingObject) TestMethod() { -} - -// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface -type AssertionTesterNonConformingObject struct { -} - -func TestObjectsAreEqual(t *testing.T) { - - if !ObjectsAreEqual("Hello World", "Hello World") { - t.Error("objectsAreEqual should return true") - } - if !ObjectsAreEqual(123, 123) { - t.Error("objectsAreEqual should return true") - } - if !ObjectsAreEqual(123.5, 123.5) { - t.Error("objectsAreEqual should return true") - } - if !ObjectsAreEqual([]byte("Hello World"), []byte("Hello World")) { - t.Error("objectsAreEqual should return true") - } - if !ObjectsAreEqual(nil, nil) { - t.Error("objectsAreEqual should return true") - } - if ObjectsAreEqual(map[int]int{5: 10}, map[int]int{10: 20}) { - t.Error("objectsAreEqual should return false") - } - if ObjectsAreEqual('x', "x") { - t.Error("objectsAreEqual should return false") - } - if ObjectsAreEqual("x", 'x') { - 
t.Error("objectsAreEqual should return false") - } - if ObjectsAreEqual(0, 0.1) { - t.Error("objectsAreEqual should return false") - } - if ObjectsAreEqual(0.1, 0) { - t.Error("objectsAreEqual should return false") - } - if ObjectsAreEqual(uint32(10), int32(10)) { - t.Error("objectsAreEqual should return false") - } - if !ObjectsAreEqualValues(uint32(10), int32(10)) { - t.Error("ObjectsAreEqualValues should return true") - } - -} - -func TestImplements(t *testing.T) { - - mockT := new(testing.T) - - if !Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { - t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") - } - if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { - t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") - } - -} - -func TestIsType(t *testing.T) { - - mockT := new(testing.T) - - if !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { - t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") - } - if IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { - t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") - } - -} - -func TestEqual(t *testing.T) { - - mockT := new(testing.T) - - if !Equal(mockT, "Hello World", "Hello World") { - t.Error("Equal should return true") - } - if !Equal(mockT, 123, 123) { - t.Error("Equal should return true") - } - if !Equal(mockT, 123.5, 123.5) { - t.Error("Equal should return true") - } - if !Equal(mockT, []byte("Hello World"), []byte("Hello World")) { - t.Error("Equal should return true") - } - if !Equal(mockT, nil, nil) { - t.Error("Equal should return true") - } - if 
!Equal(mockT, int32(123), int32(123)) { - t.Error("Equal should return true") - } - if !Equal(mockT, uint64(123), uint64(123)) { - t.Error("Equal should return true") - } - funcA := func() int { return 42 } - if !Equal(mockT, funcA, funcA) { - t.Error("Equal should return true") - } - -} - -func TestNotNil(t *testing.T) { - - mockT := new(testing.T) - - if !NotNil(mockT, new(AssertionTesterConformingObject)) { - t.Error("NotNil should return true: object is not nil") - } - if NotNil(mockT, nil) { - t.Error("NotNil should return false: object is nil") - } - -} - -func TestNil(t *testing.T) { - - mockT := new(testing.T) - - if !Nil(mockT, nil) { - t.Error("Nil should return true: object is nil") - } - if Nil(mockT, new(AssertionTesterConformingObject)) { - t.Error("Nil should return false: object is not nil") - } - -} - -func TestTrue(t *testing.T) { - - mockT := new(testing.T) - - if !True(mockT, true) { - t.Error("True should return true") - } - if True(mockT, false) { - t.Error("True should return false") - } - -} - -func TestFalse(t *testing.T) { - - mockT := new(testing.T) - - if !False(mockT, false) { - t.Error("False should return true") - } - if False(mockT, true) { - t.Error("False should return false") - } - -} - -func TestExactly(t *testing.T) { - - mockT := new(testing.T) - - a := float32(1) - b := float64(1) - c := float32(1) - d := float32(2) - - if Exactly(mockT, a, b) { - t.Error("Exactly should return false") - } - if Exactly(mockT, a, d) { - t.Error("Exactly should return false") - } - if !Exactly(mockT, a, c) { - t.Error("Exactly should return true") - } - - if Exactly(mockT, nil, a) { - t.Error("Exactly should return false") - } - if Exactly(mockT, a, nil) { - t.Error("Exactly should return false") - } - -} - -func TestNotEqual(t *testing.T) { - - mockT := new(testing.T) - - if !NotEqual(mockT, "Hello World", "Hello World!") { - t.Error("NotEqual should return true") - } - if !NotEqual(mockT, 123, 1234) { - t.Error("NotEqual should return true") - 
} - if !NotEqual(mockT, 123.5, 123.55) { - t.Error("NotEqual should return true") - } - if !NotEqual(mockT, []byte("Hello World"), []byte("Hello World!")) { - t.Error("NotEqual should return true") - } - if !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) { - t.Error("NotEqual should return true") - } - funcA := func() int { return 23 } - funcB := func() int { return 42 } - if !NotEqual(mockT, funcA, funcB) { - t.Error("NotEqual should return true") - } - - if NotEqual(mockT, "Hello World", "Hello World") { - t.Error("NotEqual should return false") - } - if NotEqual(mockT, 123, 123) { - t.Error("NotEqual should return false") - } - if NotEqual(mockT, 123.5, 123.5) { - t.Error("NotEqual should return false") - } - if NotEqual(mockT, []byte("Hello World"), []byte("Hello World")) { - t.Error("NotEqual should return false") - } - if NotEqual(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { - t.Error("NotEqual should return false") - } -} - -type A struct { - Name, Value string -} - -func TestContains(t *testing.T) { - - mockT := new(testing.T) - list := []string{"Foo", "Bar"} - complexList := []*A{ - {"b", "c"}, - {"d", "e"}, - {"g", "h"}, - {"j", "k"}, - } - - if !Contains(mockT, "Hello World", "Hello") { - t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") - } - if Contains(mockT, "Hello World", "Salut") { - t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") - } - - if !Contains(mockT, list, "Bar") { - t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Bar\"") - } - if Contains(mockT, list, "Salut") { - t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") - } - if !Contains(mockT, complexList, &A{"g", "h"}) { - t.Error("Contains should return true: complexList contains {\"g\", \"h\"}") - } - if Contains(mockT, complexList, &A{"g", "e"}) { - t.Error("Contains should return false: complexList contains {\"g\", 
\"e\"}") - } -} - -func TestNotContains(t *testing.T) { - - mockT := new(testing.T) - list := []string{"Foo", "Bar"} - - if !NotContains(mockT, "Hello World", "Hello!") { - t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") - } - if NotContains(mockT, "Hello World", "Hello") { - t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") - } - - if !NotContains(mockT, list, "Foo!") { - t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") - } - if NotContains(mockT, list, "Foo") { - t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") - } - -} - -func Test_includeElement(t *testing.T) { - - list1 := []string{"Foo", "Bar"} - list2 := []int{1, 2} - - ok, found := includeElement("Hello World", "World") - True(t, ok) - True(t, found) - - ok, found = includeElement(list1, "Foo") - True(t, ok) - True(t, found) - - ok, found = includeElement(list1, "Bar") - True(t, ok) - True(t, found) - - ok, found = includeElement(list2, 1) - True(t, ok) - True(t, found) - - ok, found = includeElement(list2, 2) - True(t, ok) - True(t, found) - - ok, found = includeElement(list1, "Foo!") - True(t, ok) - False(t, found) - - ok, found = includeElement(list2, 3) - True(t, ok) - False(t, found) - - ok, found = includeElement(list2, "1") - True(t, ok) - False(t, found) - - ok, found = includeElement(1433, "1") - False(t, ok) - False(t, found) - -} - -func TestCondition(t *testing.T) { - mockT := new(testing.T) - - if !Condition(mockT, func() bool { return true }, "Truth") { - t.Error("Condition should return true") - } - - if Condition(mockT, func() bool { return false }, "Lie") { - t.Error("Condition should return false") - } - -} - -func TestDidPanic(t *testing.T) { - - if funcDidPanic, _ := didPanic(func() { - panic("Panic!") - }); !funcDidPanic { - t.Error("didPanic should return true") - } - - if funcDidPanic, _ := didPanic(func() { - }); funcDidPanic { - 
t.Error("didPanic should return false") - } - -} - -func TestPanics(t *testing.T) { - - mockT := new(testing.T) - - if !Panics(mockT, func() { - panic("Panic!") - }) { - t.Error("Panics should return true") - } - - if Panics(mockT, func() { - }) { - t.Error("Panics should return false") - } - -} - -func TestNotPanics(t *testing.T) { - - mockT := new(testing.T) - - if !NotPanics(mockT, func() { - }) { - t.Error("NotPanics should return true") - } - - if NotPanics(mockT, func() { - panic("Panic!") - }) { - t.Error("NotPanics should return false") - } - -} - -func TestEqual_Funcs(t *testing.T) { - - type f func() int - f1 := func() int { return 1 } - f2 := func() int { return 2 } - - f1Copy := f1 - - Equal(t, f1Copy, f1, "Funcs are the same and should be considered equal") - NotEqual(t, f1, f2, "f1 and f2 are different") - -} - -func TestNoError(t *testing.T) { - - mockT := new(testing.T) - - // start with a nil error - var err error - - True(t, NoError(mockT, err), "NoError should return True for nil arg") - - // now set an error - err = errors.New("some error") - - False(t, NoError(mockT, err), "NoError with error should return False") - -} - -func TestError(t *testing.T) { - - mockT := new(testing.T) - - // start with a nil error - var err error - - False(t, Error(mockT, err), "Error should return False for nil arg") - - // now set an error - err = errors.New("some error") - - True(t, Error(mockT, err), "Error with error should return True") - -} - -func TestEqualError(t *testing.T) { - mockT := new(testing.T) - - // start with a nil error - var err error - False(t, EqualError(mockT, err, ""), - "EqualError should return false for nil arg") - - // now set an error - err = errors.New("some error") - False(t, EqualError(mockT, err, "Not some error"), - "EqualError should return false for different error string") - True(t, EqualError(mockT, err, "some error"), - "EqualError should return true") -} - -func Test_isEmpty(t *testing.T) { - - chWithValue := make(chan 
struct{}, 1) - chWithValue <- struct{}{} - - True(t, isEmpty("")) - True(t, isEmpty(nil)) - True(t, isEmpty([]string{})) - True(t, isEmpty(0)) - True(t, isEmpty(int32(0))) - True(t, isEmpty(int64(0))) - True(t, isEmpty(false)) - True(t, isEmpty(map[string]string{})) - True(t, isEmpty(new(time.Time))) - True(t, isEmpty(make(chan struct{}))) - False(t, isEmpty("something")) - False(t, isEmpty(errors.New("something"))) - False(t, isEmpty([]string{"something"})) - False(t, isEmpty(1)) - False(t, isEmpty(true)) - False(t, isEmpty(map[string]string{"Hello": "World"})) - False(t, isEmpty(chWithValue)) - -} - -func TestEmpty(t *testing.T) { - - mockT := new(testing.T) - chWithValue := make(chan struct{}, 1) - chWithValue <- struct{}{} - - True(t, Empty(mockT, ""), "Empty string is empty") - True(t, Empty(mockT, nil), "Nil is empty") - True(t, Empty(mockT, []string{}), "Empty string array is empty") - True(t, Empty(mockT, 0), "Zero int value is empty") - True(t, Empty(mockT, false), "False value is empty") - True(t, Empty(mockT, make(chan struct{})), "Channel without values is empty") - - False(t, Empty(mockT, "something"), "Non Empty string is not empty") - False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty") - False(t, Empty(mockT, []string{"something"}), "Non empty string array is not empty") - False(t, Empty(mockT, 1), "Non-zero int value is not empty") - False(t, Empty(mockT, true), "True value is not empty") - False(t, Empty(mockT, chWithValue), "Channel with values is not empty") -} - -func TestNotEmpty(t *testing.T) { - - mockT := new(testing.T) - chWithValue := make(chan struct{}, 1) - chWithValue <- struct{}{} - - False(t, NotEmpty(mockT, ""), "Empty string is empty") - False(t, NotEmpty(mockT, nil), "Nil is empty") - False(t, NotEmpty(mockT, []string{}), "Empty string array is empty") - False(t, NotEmpty(mockT, 0), "Zero int value is empty") - False(t, NotEmpty(mockT, false), "False value is empty") - False(t, NotEmpty(mockT, make(chan 
struct{})), "Channel without values is empty") - - True(t, NotEmpty(mockT, "something"), "Non Empty string is not empty") - True(t, NotEmpty(mockT, errors.New("something")), "Non nil object is not empty") - True(t, NotEmpty(mockT, []string{"something"}), "Non empty string array is not empty") - True(t, NotEmpty(mockT, 1), "Non-zero int value is not empty") - True(t, NotEmpty(mockT, true), "True value is not empty") - True(t, NotEmpty(mockT, chWithValue), "Channel with values is not empty") -} - -func Test_getLen(t *testing.T) { - falseCases := []interface{}{ - nil, - 0, - true, - false, - 'A', - struct{}{}, - } - for _, v := range falseCases { - ok, l := getLen(v) - False(t, ok, "Expected getLen fail to get length of %#v", v) - Equal(t, 0, l, "getLen should return 0 for %#v", v) - } - - ch := make(chan int, 5) - ch <- 1 - ch <- 2 - ch <- 3 - trueCases := []struct { - v interface{} - l int - }{ - {[]int{1, 2, 3}, 3}, - {[...]int{1, 2, 3}, 3}, - {"ABC", 3}, - {map[int]int{1: 2, 2: 4, 3: 6}, 3}, - {ch, 3}, - - {[]int{}, 0}, - {map[int]int{}, 0}, - {make(chan int), 0}, - - {[]int(nil), 0}, - {map[int]int(nil), 0}, - {(chan int)(nil), 0}, - } - - for _, c := range trueCases { - ok, l := getLen(c.v) - True(t, ok, "Expected getLen success to get length of %#v", c.v) - Equal(t, c.l, l) - } -} - -func TestLen(t *testing.T) { - mockT := new(testing.T) - - False(t, Len(mockT, nil, 0), "nil does not have length") - False(t, Len(mockT, 0, 0), "int does not have length") - False(t, Len(mockT, true, 0), "true does not have length") - False(t, Len(mockT, false, 0), "false does not have length") - False(t, Len(mockT, 'A', 0), "Rune does not have length") - False(t, Len(mockT, struct{}{}, 0), "Struct does not have length") - - ch := make(chan int, 5) - ch <- 1 - ch <- 2 - ch <- 3 - - cases := []struct { - v interface{} - l int - }{ - {[]int{1, 2, 3}, 3}, - {[...]int{1, 2, 3}, 3}, - {"ABC", 3}, - {map[int]int{1: 2, 2: 4, 3: 6}, 3}, - {ch, 3}, - - {[]int{}, 0}, - {map[int]int{}, 0}, - 
{make(chan int), 0}, - - {[]int(nil), 0}, - {map[int]int(nil), 0}, - {(chan int)(nil), 0}, - } - - for _, c := range cases { - True(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) - } - - cases = []struct { - v interface{} - l int - }{ - {[]int{1, 2, 3}, 4}, - {[...]int{1, 2, 3}, 2}, - {"ABC", 2}, - {map[int]int{1: 2, 2: 4, 3: 6}, 4}, - {ch, 2}, - - {[]int{}, 1}, - {map[int]int{}, 1}, - {make(chan int), 1}, - - {[]int(nil), 1}, - {map[int]int(nil), 1}, - {(chan int)(nil), 1}, - } - - for _, c := range cases { - False(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) - } -} - -func TestWithinDuration(t *testing.T) { - - mockT := new(testing.T) - a := time.Now() - b := a.Add(10 * time.Second) - - True(t, WithinDuration(mockT, a, b, 10*time.Second), "A 10s difference is within a 10s time difference") - True(t, WithinDuration(mockT, b, a, 10*time.Second), "A 10s difference is within a 10s time difference") - - False(t, WithinDuration(mockT, a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") - False(t, WithinDuration(mockT, b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") - - False(t, WithinDuration(mockT, a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") - False(t, WithinDuration(mockT, b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") - - False(t, WithinDuration(mockT, a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") - False(t, WithinDuration(mockT, b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") -} - -func TestInDelta(t *testing.T) { - mockT := new(testing.T) - - True(t, InDelta(mockT, 1.001, 1, 0.01), "|1.001 - 1| <= 0.01") - True(t, InDelta(mockT, 1, 1.001, 0.01), "|1 - 1.001| <= 0.01") - True(t, InDelta(mockT, 1, 2, 1), "|1 - 2| <= 1") - False(t, InDelta(mockT, 1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") - False(t, InDelta(mockT, 2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") - 
False(t, InDelta(mockT, "", nil, 1), "Expected non numerals to fail") - - cases := []struct { - a, b interface{} - delta float64 - }{ - {uint8(2), uint8(1), 1}, - {uint16(2), uint16(1), 1}, - {uint32(2), uint32(1), 1}, - {uint64(2), uint64(1), 1}, - - {int(2), int(1), 1}, - {int8(2), int8(1), 1}, - {int16(2), int16(1), 1}, - {int32(2), int32(1), 1}, - {int64(2), int64(1), 1}, - - {float32(2), float32(1), 1}, - {float64(2), float64(1), 1}, - } - - for _, tc := range cases { - True(t, InDelta(mockT, tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) - } -} - -func TestInEpsilon(t *testing.T) { - mockT := new(testing.T) - - cases := []struct { - a, b interface{} - epsilon float64 - }{ - {uint8(2), uint16(2), .001}, - {2.1, 2.2, 0.1}, - {2.2, 2.1, 0.1}, - {-2.1, -2.2, 0.1}, - {-2.2, -2.1, 0.1}, - {uint64(100), uint8(101), 0.01}, - {0.1, -0.1, 2}, - } - - for _, tc := range cases { - True(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) - } - - cases = []struct { - a, b interface{} - epsilon float64 - }{ - {uint8(2), int16(-2), .001}, - {uint64(100), uint8(102), 0.01}, - {2.1, 2.2, 0.001}, - {2.2, 2.1, 0.001}, - {2.1, -2.2, 1}, - {2.1, "bla-bla", 0}, - {0.1, -0.1, 1.99}, - } - - for _, tc := range cases { - False(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) - } - -} - -func TestRegexp(t *testing.T) { - mockT := new(testing.T) - - cases := []struct { - rx, str string - }{ - {"^start", "start of the line"}, - {"end$", "in the end"}, - {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, - } - - for _, tc := range cases { - True(t, Regexp(mockT, tc.rx, tc.str)) - True(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) - False(t, NotRegexp(mockT, tc.rx, tc.str)) - False(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) - } - - cases = []struct { - rx, str string - }{ - 
{"^asdfastart", "Not the start of the line"}, - {"end$", "in the end."}, - {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, - } - - for _, tc := range cases { - False(t, Regexp(mockT, tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) - False(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) - True(t, NotRegexp(mockT, tc.rx, tc.str)) - True(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) - } -} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index 1c6de283d..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,150 +0,0 @@ -// A set of comprehensive testing tools for use with the normal Go testing system. -// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. 
-// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -// -// Here is an overview of the assert functions: -// -// assert.Equal(t, expected, actual [, message [, format-args]) -// -// assert.NotEqual(t, notExpected, actual [, message [, format-args]]) -// -// assert.True(t, actualBool [, message [, format-args]]) -// -// assert.False(t, actualBool [, message [, format-args]]) -// -// assert.Nil(t, actualObject [, message [, format-args]]) -// -// assert.NotNil(t, actualObject [, message [, format-args]]) -// -// assert.Empty(t, actualObject [, message [, format-args]]) -// -// assert.NotEmpty(t, actualObject [, message [, format-args]]) -// -// assert.Len(t, actualObject, expectedLength, [, message [, format-args]]) -// -// assert.Error(t, errorObject [, message [, format-args]]) -// -// assert.NoError(t, errorObject [, message [, format-args]]) -// -// assert.EqualError(t, theError, errString [, message [, format-args]]) -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject) [,message [, format-args]]) -// -// assert.IsType(t, expectedObject, actualObject [, message [, format-args]]) -// -// assert.Contains(t, stringOrSlice, substringOrElement [, message [, format-args]]) -// -// assert.NotContains(t, stringOrSlice, substringOrElement [, message [, format-args]]) -// -// assert.Panics(t, func(){ -// -// // call code that should panic -// -// } [, message [, format-args]]) -// -// assert.NotPanics(t, func(){ -// -// // call code that should not panic -// -// } [, message [, format-args]]) -// -// assert.WithinDuration(t, timeA, timeB, deltaTime, [, message [, format-args]]) -// -// assert.InDelta(t, numA, numB, delta, [, message [, format-args]]) -// -// assert.InEpsilon(t, numA, numB, epsilon, [, message [, format-args]]) -// -// assert package contains Assertions object. it has assertion methods. 
-// -// Here is an overview of the assert functions: -// assert.Equal(expected, actual [, message [, format-args]) -// -// assert.NotEqual(notExpected, actual [, message [, format-args]]) -// -// assert.True(actualBool [, message [, format-args]]) -// -// assert.False(actualBool [, message [, format-args]]) -// -// assert.Nil(actualObject [, message [, format-args]]) -// -// assert.NotNil(actualObject [, message [, format-args]]) -// -// assert.Empty(actualObject [, message [, format-args]]) -// -// assert.NotEmpty(actualObject [, message [, format-args]]) -// -// assert.Len(actualObject, expectedLength, [, message [, format-args]]) -// -// assert.Error(errorObject [, message [, format-args]]) -// -// assert.NoError(errorObject [, message [, format-args]]) -// -// assert.EqualError(theError, errString [, message [, format-args]]) -// -// assert.Implements((*MyInterface)(nil), new(MyObject) [,message [, format-args]]) -// -// assert.IsType(expectedObject, actualObject [, message [, format-args]]) -// -// assert.Contains(stringOrSlice, substringOrElement [, message [, format-args]]) -// -// assert.NotContains(stringOrSlice, substringOrElement [, message [, format-args]]) -// -// assert.Panics(func(){ -// -// // call code that should panic -// -// } [, message [, format-args]]) -// -// assert.NotPanics(func(){ -// -// // call code that should not panic -// -// } [, message [, format-args]]) -// -// assert.WithinDuration(timeA, timeB, deltaTime, [, message [, format-args]]) -// -// assert.InDelta(numA, numB, delta, [, message [, format-args]]) -// -// assert.InEpsilon(numA, numB, epsilon, [, message [, format-args]]) -package assert diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d1d..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - 
"errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index 67a6925a1..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,262 +0,0 @@ -package assert - -import "time" - -type Assertions struct { - t TestingT -} - -func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - return Fail(a.t, failureMessage, msgAndArgs...) -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements((*MyInterface)(nil), new(MyObject), "MyObject") -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return IsType(a.t, expectedType, object, msgAndArgs...) -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return Equal(a.t, expected, actual, msgAndArgs...) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. 
-// -// assert.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualValues(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - -// Exactly asserts that two objects are equal is value and type. -// -// assert.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Exactly(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return Exactly(a.t, expected, actual, msgAndArgs...) -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - return NotNil(a.t, object, msgAndArgs...) -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - return Nil(a.t, object, msgAndArgs...) -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or a -// slice with len == 0. -// -// assert.Empty(obj) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - return Empty(a.t, object, msgAndArgs...) -} - -// Empty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or a -// slice with len == 0. -// -// if assert.NotEmpty(obj) { -// assert.Equal("two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - return NotEmpty(a.t, object, msgAndArgs...) -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - return Len(a.t, object, length, msgAndArgs...) -} - -// True asserts that the specified value is true. -// -// assert.True(myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - return True(a.t, value, msgAndArgs...) -} - -// False asserts that the specified value is true. -// -// assert.False(myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - return False(a.t, value, msgAndArgs...) -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// Contains asserts that the specified string contains the specified substring. -// -// assert.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Contains(s, contains interface{}, msgAndArgs ...interface{}) bool { - return Contains(a.t, s, contains, msgAndArgs...) 
-} - -// NotContains asserts that the specified string does NOT contain the specified substring. -// -// assert.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotContains(s, contains interface{}, msgAndArgs ...interface{}) bool { - return NotContains(a.t, s, contains, msgAndArgs...) -} - -// Uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - return Condition(a.t, comp, msgAndArgs...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return Panics(a.t, f, msgAndArgs...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return NotPanics(a.t, f, msgAndArgs...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) WithinDuration(expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDelta asserts that the two numerals are within delta of each other. 
-// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InDelta(expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InEpsilon(expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(err) { -// assert.Equal(actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NoError(theError error, msgAndArgs ...interface{}) bool { - return NoError(a.t, theError, msgAndArgs...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(err, "An error was expected") { -// assert.Equal(err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Error(theError error, msgAndArgs ...interface{}) bool { - return Error(a.t, theError, msgAndArgs...) -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(err, "An error was expected") { -// assert.Equal(err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - return EqualError(a.t, theError, errString, msgAndArgs...) 
-} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return NotRegexp(a.t, rx, str, msgAndArgs...) -} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go deleted file mode 100644 index 0dac6c7ae..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go +++ /dev/null @@ -1,526 +0,0 @@ -package assert - -import ( - "errors" - "regexp" - "testing" - "time" -) - -func TestImplementsWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { - t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") - } - if assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { - t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") - } -} - -func TestIsTypeWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { - 
t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") - } - if assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { - t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") - } - -} - -func TestEqualWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.Equal("Hello World", "Hello World") { - t.Error("Equal should return true") - } - if !assert.Equal(123, 123) { - t.Error("Equal should return true") - } - if !assert.Equal(123.5, 123.5) { - t.Error("Equal should return true") - } - if !assert.Equal([]byte("Hello World"), []byte("Hello World")) { - t.Error("Equal should return true") - } - if !assert.Equal(nil, nil) { - t.Error("Equal should return true") - } -} - -func TestEqualValuesWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.EqualValues(uint32(10), int32(10)) { - t.Error("EqualValues should return true") - } -} - -func TestNotNilWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.NotNil(new(AssertionTesterConformingObject)) { - t.Error("NotNil should return true: object is not nil") - } - if assert.NotNil(nil) { - t.Error("NotNil should return false: object is nil") - } - -} - -func TestNilWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.Nil(nil) { - t.Error("Nil should return true: object is nil") - } - if assert.Nil(new(AssertionTesterConformingObject)) { - t.Error("Nil should return false: object is not nil") - } - -} - -func TestTrueWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.True(true) { - t.Error("True should return true") - } - if assert.True(false) { - t.Error("True should return false") - } - -} - -func TestFalseWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.False(false) { - t.Error("False should return true") - } - if assert.False(true) { - 
t.Error("False should return false") - } - -} - -func TestExactlyWrapper(t *testing.T) { - assert := New(new(testing.T)) - - a := float32(1) - b := float64(1) - c := float32(1) - d := float32(2) - - if assert.Exactly(a, b) { - t.Error("Exactly should return false") - } - if assert.Exactly(a, d) { - t.Error("Exactly should return false") - } - if !assert.Exactly(a, c) { - t.Error("Exactly should return true") - } - - if assert.Exactly(nil, a) { - t.Error("Exactly should return false") - } - if assert.Exactly(a, nil) { - t.Error("Exactly should return false") - } - -} - -func TestNotEqualWrapper(t *testing.T) { - - assert := New(new(testing.T)) - - if !assert.NotEqual("Hello World", "Hello World!") { - t.Error("NotEqual should return true") - } - if !assert.NotEqual(123, 1234) { - t.Error("NotEqual should return true") - } - if !assert.NotEqual(123.5, 123.55) { - t.Error("NotEqual should return true") - } - if !assert.NotEqual([]byte("Hello World"), []byte("Hello World!")) { - t.Error("NotEqual should return true") - } - if !assert.NotEqual(nil, new(AssertionTesterConformingObject)) { - t.Error("NotEqual should return true") - } -} - -func TestContainsWrapper(t *testing.T) { - - assert := New(new(testing.T)) - list := []string{"Foo", "Bar"} - - if !assert.Contains("Hello World", "Hello") { - t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") - } - if assert.Contains("Hello World", "Salut") { - t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") - } - - if !assert.Contains(list, "Foo") { - t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") - } - if assert.Contains(list, "Salut") { - t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") - } - -} - -func TestNotContainsWrapper(t *testing.T) { - - assert := New(new(testing.T)) - list := []string{"Foo", "Bar"} - - if !assert.NotContains("Hello World", "Hello!") { - t.Error("NotContains should return 
true: \"Hello World\" does not contain \"Hello!\"") - } - if assert.NotContains("Hello World", "Hello") { - t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") - } - - if !assert.NotContains(list, "Foo!") { - t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") - } - if assert.NotContains(list, "Foo") { - t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") - } - -} - -func TestConditionWrapper(t *testing.T) { - - assert := New(new(testing.T)) - - if !assert.Condition(func() bool { return true }, "Truth") { - t.Error("Condition should return true") - } - - if assert.Condition(func() bool { return false }, "Lie") { - t.Error("Condition should return false") - } - -} - -func TestDidPanicWrapper(t *testing.T) { - - if funcDidPanic, _ := didPanic(func() { - panic("Panic!") - }); !funcDidPanic { - t.Error("didPanic should return true") - } - - if funcDidPanic, _ := didPanic(func() { - }); funcDidPanic { - t.Error("didPanic should return false") - } - -} - -func TestPanicsWrapper(t *testing.T) { - - assert := New(new(testing.T)) - - if !assert.Panics(func() { - panic("Panic!") - }) { - t.Error("Panics should return true") - } - - if assert.Panics(func() { - }) { - t.Error("Panics should return false") - } - -} - -func TestNotPanicsWrapper(t *testing.T) { - - assert := New(new(testing.T)) - - if !assert.NotPanics(func() { - }) { - t.Error("NotPanics should return true") - } - - if assert.NotPanics(func() { - panic("Panic!") - }) { - t.Error("NotPanics should return false") - } - -} - -func TestEqualWrapper_Funcs(t *testing.T) { - - assert := New(t) - - type f func() int - var f1 f = func() int { return 1 } - var f2 f = func() int { return 2 } - - var f1_copy f = f1 - - assert.Equal(f1_copy, f1, "Funcs are the same and should be considered equal") - assert.NotEqual(f1, f2, "f1 and f2 are different") - -} - -func TestNoErrorWrapper(t *testing.T) { - assert := New(t) - mockAssert 
:= New(new(testing.T)) - - // start with a nil error - var err error = nil - - assert.True(mockAssert.NoError(err), "NoError should return True for nil arg") - - // now set an error - err = errors.New("Some error") - - assert.False(mockAssert.NoError(err), "NoError with error should return False") - -} - -func TestErrorWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - // start with a nil error - var err error = nil - - assert.False(mockAssert.Error(err), "Error should return False for nil arg") - - // now set an error - err = errors.New("Some error") - - assert.True(mockAssert.Error(err), "Error with error should return True") - -} - -func TestEqualErrorWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - // start with a nil error - var err error - assert.False(mockAssert.EqualError(err, ""), - "EqualError should return false for nil arg") - - // now set an error - err = errors.New("some error") - assert.False(mockAssert.EqualError(err, "Not some error"), - "EqualError should return false for different error string") - assert.True(mockAssert.EqualError(err, "some error"), - "EqualError should return true") -} - -func TestEmptyWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - assert.True(mockAssert.Empty(""), "Empty string is empty") - assert.True(mockAssert.Empty(nil), "Nil is empty") - assert.True(mockAssert.Empty([]string{}), "Empty string array is empty") - assert.True(mockAssert.Empty(0), "Zero int value is empty") - assert.True(mockAssert.Empty(false), "False value is empty") - - assert.False(mockAssert.Empty("something"), "Non Empty string is not empty") - assert.False(mockAssert.Empty(errors.New("something")), "Non nil object is not empty") - assert.False(mockAssert.Empty([]string{"something"}), "Non empty string array is not empty") - assert.False(mockAssert.Empty(1), "Non-zero int value is not empty") - assert.False(mockAssert.Empty(true), "True value is not 
empty") - -} - -func TestNotEmptyWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - assert.False(mockAssert.NotEmpty(""), "Empty string is empty") - assert.False(mockAssert.NotEmpty(nil), "Nil is empty") - assert.False(mockAssert.NotEmpty([]string{}), "Empty string array is empty") - assert.False(mockAssert.NotEmpty(0), "Zero int value is empty") - assert.False(mockAssert.NotEmpty(false), "False value is empty") - - assert.True(mockAssert.NotEmpty("something"), "Non Empty string is not empty") - assert.True(mockAssert.NotEmpty(errors.New("something")), "Non nil object is not empty") - assert.True(mockAssert.NotEmpty([]string{"something"}), "Non empty string array is not empty") - assert.True(mockAssert.NotEmpty(1), "Non-zero int value is not empty") - assert.True(mockAssert.NotEmpty(true), "True value is not empty") - -} - -func TestLenWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - assert.False(mockAssert.Len(nil, 0), "nil does not have length") - assert.False(mockAssert.Len(0, 0), "int does not have length") - assert.False(mockAssert.Len(true, 0), "true does not have length") - assert.False(mockAssert.Len(false, 0), "false does not have length") - assert.False(mockAssert.Len('A', 0), "Rune does not have length") - assert.False(mockAssert.Len(struct{}{}, 0), "Struct does not have length") - - ch := make(chan int, 5) - ch <- 1 - ch <- 2 - ch <- 3 - - cases := []struct { - v interface{} - l int - }{ - {[]int{1, 2, 3}, 3}, - {[...]int{1, 2, 3}, 3}, - {"ABC", 3}, - {map[int]int{1: 2, 2: 4, 3: 6}, 3}, - {ch, 3}, - - {[]int{}, 0}, - {map[int]int{}, 0}, - {make(chan int), 0}, - - {[]int(nil), 0}, - {map[int]int(nil), 0}, - {(chan int)(nil), 0}, - } - - for _, c := range cases { - assert.True(mockAssert.Len(c.v, c.l), "%#v have %d items", c.v, c.l) - } -} - -func TestWithinDurationWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - a := time.Now() - b := a.Add(10 * time.Second) 
- - assert.True(mockAssert.WithinDuration(a, b, 10*time.Second), "A 10s difference is within a 10s time difference") - assert.True(mockAssert.WithinDuration(b, a, 10*time.Second), "A 10s difference is within a 10s time difference") - - assert.False(mockAssert.WithinDuration(a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") - assert.False(mockAssert.WithinDuration(b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") - - assert.False(mockAssert.WithinDuration(a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") - assert.False(mockAssert.WithinDuration(b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") - - assert.False(mockAssert.WithinDuration(a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") - assert.False(mockAssert.WithinDuration(b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") -} - -func TestInDeltaWrapper(t *testing.T) { - assert := New(new(testing.T)) - - True(t, assert.InDelta(1.001, 1, 0.01), "|1.001 - 1| <= 0.01") - True(t, assert.InDelta(1, 1.001, 0.01), "|1 - 1.001| <= 0.01") - True(t, assert.InDelta(1, 2, 1), "|1 - 2| <= 1") - False(t, assert.InDelta(1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") - False(t, assert.InDelta(2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") - False(t, assert.InDelta("", nil, 1), "Expected non numerals to fail") - - cases := []struct { - a, b interface{} - delta float64 - }{ - {uint8(2), uint8(1), 1}, - {uint16(2), uint16(1), 1}, - {uint32(2), uint32(1), 1}, - {uint64(2), uint64(1), 1}, - - {int(2), int(1), 1}, - {int8(2), int8(1), 1}, - {int16(2), int16(1), 1}, - {int32(2), int32(1), 1}, - {int64(2), int64(1), 1}, - - {float32(2), float32(1), 1}, - {float64(2), float64(1), 1}, - } - - for _, tc := range cases { - True(t, assert.InDelta(tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) - } -} - -func TestInEpsilonWrapper(t *testing.T) { - assert 
:= New(new(testing.T)) - - cases := []struct { - a, b interface{} - epsilon float64 - }{ - {uint8(2), uint16(2), .001}, - {2.1, 2.2, 0.1}, - {2.2, 2.1, 0.1}, - {-2.1, -2.2, 0.1}, - {-2.2, -2.1, 0.1}, - {uint64(100), uint8(101), 0.01}, - {0.1, -0.1, 2}, - } - - for _, tc := range cases { - True(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) - } - - cases = []struct { - a, b interface{} - epsilon float64 - }{ - {uint8(2), int16(-2), .001}, - {uint64(100), uint8(102), 0.01}, - {2.1, 2.2, 0.001}, - {2.2, 2.1, 0.001}, - {2.1, -2.2, 1}, - {2.1, "bla-bla", 0}, - {0.1, -0.1, 1.99}, - } - - for _, tc := range cases { - False(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) - } -} - -func TestRegexpWrapper(t *testing.T) { - - assert := New(new(testing.T)) - - cases := []struct { - rx, str string - }{ - {"^start", "start of the line"}, - {"end$", "in the end"}, - {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, - } - - for _, tc := range cases { - True(t, assert.Regexp(tc.rx, tc.str)) - True(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) - False(t, assert.NotRegexp(tc.rx, tc.str)) - False(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) - } - - cases = []struct { - rx, str string - }{ - {"^asdfastart", "Not the start of the line"}, - {"end$", "in the end."}, - {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, - } - - for _, tc := range cases { - False(t, assert.Regexp(tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) - False(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) - True(t, assert.NotRegexp(tc.rx, tc.str)) - True(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) - } -} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go 
b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index 0419c1b60..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,157 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. It returns -1 -// if building a new request fails. -func httpCode(handler http.HandlerFunc, mode, url string, values url.Values) int { - w := httptest.NewRecorder() - req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) - if err != nil { - return -1 - } - handler(w, req) - return w.Code -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { - code := httpCode(handler, mode, url, values) - if code == -1 { - return false - } - return code >= http.StatusOK && code <= http.StatusPartialContent -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { - code := httpCode(handler, mode, url, values) - if code == -1 { - return false - } - return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). 
-func HTTPError(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { - code := httpCode(handler, mode, url, values) - if code == -1 { - return false - } - return code >= http.StatusBadRequest -} - -// HttpBody is a helper that returns HTTP body of the response. It returns -// empty string if building a new request fails. -func HttpBody(handler http.HandlerFunc, mode, url string, values url.Values) string { - w := httptest.NewRecorder() - req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { - body := HttpBody(handler, mode, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). 
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { - body := HttpBody(handler, mode, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body) - } - - return !contains -} - -// -// Assertions Wrappers -// - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, mode, url string, values url.Values) bool { - return HTTPSuccess(a.t, handler, mode, url, values) -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, mode, url string, values url.Values) bool { - return HTTPRedirect(a.t, handler, mode, url, values) -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, mode, url string, values url.Values) bool { - return HTTPError(a.t, handler, mode, url, values) -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { - return HTTPBodyContains(a.t, handler, mode, url, values, str) -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { - return HTTPBodyNotContains(a.t, handler, mode, url, values, str) -} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go deleted file mode 100644 index 684c2d5d1..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/url" - "testing" -) - -func httpOK(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) -} - -func httpRedirect(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusTemporaryRedirect) -} - -func httpError(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) -} - -func TestHTTPStatuses(t *testing.T) { - assert := New(t) - mockT := new(testing.T) - - assert.Equal(HTTPSuccess(mockT, httpOK, "GET", "/", nil), true) - assert.Equal(HTTPSuccess(mockT, httpRedirect, "GET", "/", nil), false) - assert.Equal(HTTPSuccess(mockT, httpError, "GET", "/", nil), false) - - assert.Equal(HTTPRedirect(mockT, httpOK, "GET", "/", nil), false) - assert.Equal(HTTPRedirect(mockT, httpRedirect, "GET", "/", nil), true) - assert.Equal(HTTPRedirect(mockT, httpError, "GET", "/", nil), false) - - assert.Equal(HTTPError(mockT, httpOK, "GET", "/", nil), false) - 
assert.Equal(HTTPError(mockT, httpRedirect, "GET", "/", nil), false) - assert.Equal(HTTPError(mockT, httpError, "GET", "/", nil), true) -} - -func TestHTTPStatusesWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - assert.Equal(mockAssert.HTTPSuccess(httpOK, "GET", "/", nil), true) - assert.Equal(mockAssert.HTTPSuccess(httpRedirect, "GET", "/", nil), false) - assert.Equal(mockAssert.HTTPSuccess(httpError, "GET", "/", nil), false) - - assert.Equal(mockAssert.HTTPRedirect(httpOK, "GET", "/", nil), false) - assert.Equal(mockAssert.HTTPRedirect(httpRedirect, "GET", "/", nil), true) - assert.Equal(mockAssert.HTTPRedirect(httpError, "GET", "/", nil), false) - - assert.Equal(mockAssert.HTTPError(httpOK, "GET", "/", nil), false) - assert.Equal(mockAssert.HTTPError(httpRedirect, "GET", "/", nil), false) - assert.Equal(mockAssert.HTTPError(httpError, "GET", "/", nil), true) -} - -func httpHelloName(w http.ResponseWriter, r *http.Request) { - name := r.FormValue("name") - w.Write([]byte(fmt.Sprintf("Hello, %s!", name))) -} - -func TestHttpBody(t *testing.T) { - assert := New(t) - mockT := new(testing.T) - - assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) - assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) - assert.False(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) - - assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) - assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) - assert.True(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) -} - -func TestHttpBodyWrappers(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - 
assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) - assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) - assert.False(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) - - assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) - assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) - assert.True(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) - -} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go deleted file mode 100644 index dd385074b..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Provides a system by which it is possible to mock your objects and verify calls are happening as expected. -// -// Example Usage -// -// The mock package provides an object, Mock, that tracks activity on another object. It is usually -// embedded into a test object as shown below: -// -// type MyTestObject struct { -// // add a Mock object instance -// mock.Mock -// -// // other fields go here as normal -// } -// -// When implementing the methods of an interface, you wire your functions up -// to call the Mock.Called(args...) method, and return the appropriate values. 
-// -// For example, to mock a method that saves the name and age of a person and returns -// the year of their birth or an error, you might write this: -// -// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { -// args := o.Called(firstname, lastname, age) -// return args.Int(0), args.Error(1) -// } -// -// The Int, Error and Bool methods are examples of strongly typed getters that take the argument -// index position. Given this argument list: -// -// (12, true, "Something") -// -// You could read them out strongly typed like this: -// -// args.Int(0) -// args.Bool(1) -// args.String(2) -// -// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: -// -// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) -// -// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those -// cases you should check for nil first. -package mock diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go deleted file mode 100644 index f73fa2516..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go +++ /dev/null @@ -1,510 +0,0 @@ -package mock - -import ( - "fmt" - "github.com/stretchr/objx" - "github.com/stretchr/testify/assert" - "reflect" - "runtime" - "strings" - "sync" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Logf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) -} - -/* - Call -*/ - -// Call represents a method call and is used for setting expectations, -// as well as recording activity. -type Call struct { - - // The name of the method that was or will be called. - Method string - - // Holds the arguments of the method. - Arguments Arguments - - // Holds the arguments that should be returned when - // this method is called. 
- ReturnArguments Arguments - - // The number of times to return the return arguments when setting - // expectations. 0 means to always return the value. - Repeatability int -} - -// Mock is the workhorse used to track activity on another object. -// For an example of its usage, refer to the "Example Usage" section at the top of this document. -type Mock struct { - - // The method name that is currently - // being referred to by the On method. - onMethodName string - - // An array of the arguments that are - // currently being referred to by the On method. - onMethodArguments Arguments - - // Represents the calls that are expected of - // an object. - ExpectedCalls []Call - - // Holds the calls that were made to this mocked object. - Calls []Call - - // TestData holds any data that might be useful for testing. Testify ignores - // this data completely allowing you to do whatever you like with it. - testData objx.Map - - mutex sync.Mutex -} - -// TestData holds any data that might be useful for testing. Testify ignores -// this data completely allowing you to do whatever you like with it. -func (m *Mock) TestData() objx.Map { - - if m.testData == nil { - m.testData = make(objx.Map) - } - - return m.testData -} - -/* - Setting expectations -*/ - -// On starts a description of an expectation of the specified method -// being called. -// -// Mock.On("MyMethod", arg1, arg2) -func (m *Mock) On(methodName string, arguments ...interface{}) *Mock { - m.onMethodName = methodName - m.onMethodArguments = arguments - return m -} - -// Return finishes a description of an expectation of the method (and arguments) -// specified in the most recent On method call. 
-// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2) -func (m *Mock) Return(returnArguments ...interface{}) *Mock { - m.ExpectedCalls = append(m.ExpectedCalls, Call{m.onMethodName, m.onMethodArguments, returnArguments, 0}) - return m -} - -// Once indicates that that the mock should only return the value once. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() -func (m *Mock) Once() { - m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 1 -} - -// Twice indicates that that the mock should only return the value twice. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() -func (m *Mock) Twice() { - m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 2 -} - -// Times indicates that that the mock should only return the indicated number -// of times. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) -func (m *Mock) Times(i int) { - m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = i -} - -/* - Recording and responding to activity -*/ - -func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { - for i, call := range m.ExpectedCalls { - if call.Method == method && call.Repeatability > -1 { - - _, diffCount := call.Arguments.Diff(arguments) - if diffCount == 0 { - return i, &call - } - - } - } - return -1, nil -} - -func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) { - - diffCount := 0 - var closestCall *Call = nil - - for _, call := range m.ExpectedCalls { - if call.Method == method { - - _, tempDiffCount := call.Arguments.Diff(arguments) - if tempDiffCount < diffCount || diffCount == 0 { - diffCount = tempDiffCount - closestCall = &call - } - - } - } - - if closestCall == nil { - return false, nil - } - - return true, closestCall -} - -func callString(method string, arguments Arguments, includeArgumentValues bool) string { - - var argValsString string = "" - if includeArgumentValues { - var 
argVals []string - for argIndex, arg := range arguments { - argVals = append(argVals, fmt.Sprintf("%d: %v", argIndex, arg)) - } - argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) - } - - return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) -} - -// Called tells the mock object that a method has been called, and gets an array -// of arguments to return. Panics if the call is unexpected (i.e. not preceeded by -// appropriate .On .Return() calls) -func (m *Mock) Called(arguments ...interface{}) Arguments { - defer m.mutex.Unlock() - m.mutex.Lock() - - // get the calling function's name - pc, _, _, ok := runtime.Caller(1) - if !ok { - panic("Couldn't get the caller information") - } - functionPath := runtime.FuncForPC(pc).Name() - parts := strings.Split(functionPath, ".") - functionName := parts[len(parts)-1] - - found, call := m.findExpectedCall(functionName, arguments...) - - switch { - case found < 0: - // we have to fail here - because we don't know what to do - // as the return arguments. This is because: - // - // a) this is a totally unexpected call to this method, - // b) the arguments are not what was expected, or - // c) the developer has forgotten to add an accompanying On...Return pair. - - closestFound, closestCall := m.findClosestCall(functionName, arguments...) - - if closestFound { - panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true))) - } else { - panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo())) - } - case call.Repeatability == 1: - call.Repeatability = -1 - m.ExpectedCalls[found] = *call - case call.Repeatability > 1: - call.Repeatability -= 1 - m.ExpectedCalls[found] = *call - } - - // add the call - m.Calls = append(m.Calls, Call{functionName, arguments, make([]interface{}, 0), 0}) - - return call.ReturnArguments - -} - -/* - Assertions -*/ - -// AssertExpectationsForObjects asserts that everything specified with On and Return -// of the specified objects was in fact called as expected. -// -// Calls may have occurred in any order. -func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { - var success bool = true - for _, obj := range testObjects { - mockObj := obj.(Mock) - success = success && mockObj.AssertExpectations(t) - } - return success -} - -// AssertExpectations asserts that everything specified with On and Return was -// in fact called as expected. Calls may have occurred in any order. 
-func (m *Mock) AssertExpectations(t TestingT) bool { - - var somethingMissing bool = false - var failedExpectations int = 0 - - // iterate through each expectation - for _, expectedCall := range m.ExpectedCalls { - switch { - case !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments): - somethingMissing = true - failedExpectations++ - t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - case expectedCall.Repeatability > 0: - somethingMissing = true - failedExpectations++ - default: - t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - } - } - - if somethingMissing { - t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(m.ExpectedCalls)-failedExpectations, len(m.ExpectedCalls), failedExpectations, assert.CallerInfo()) - } - - return !somethingMissing -} - -// AssertNumberOfCalls asserts that the method was called expectedCalls times. -func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { - var actualCalls int = 0 - for _, call := range m.Calls { - if call.Method == methodName { - actualCalls++ - } - } - return assert.Equal(t, actualCalls, expectedCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) -} - -// AssertCalled asserts that the method was called. -func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) { - t.Logf("%s", m.ExpectedCalls) - return false - } - return true -} - -// AssertNotCalled asserts that the method was not called. 
-func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) { - t.Logf("%s", m.ExpectedCalls) - return false - } - return true -} - -func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { - for _, call := range m.Calls { - if call.Method == methodName { - - _, differences := Arguments(expected).Diff(call.Arguments) - - if differences == 0 { - // found the expected call - return true - } - - } - } - // we didn't find the expected call - return false -} - -/* - Arguments -*/ - -// Arguments holds an array of method arguments or return values. -type Arguments []interface{} - -const ( - // The "any" argument. Used in Diff and Assert when - // the argument being tested shouldn't be taken into consideration. - Anything string = "mock.Anything" -) - -// AnythingOfTypeArgument is a string that contains the type of an argument -// for use when type checking. Used in Diff and Assert. -type AnythingOfTypeArgument string - -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. -// -// For example: -// Assert(t, AnythingOfType("string"), AnythingOfType("int")) -func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) -} - -// Get Returns the argument at the specified index. -func (args Arguments) Get(index int) interface{} { - if index+1 > len(args) { - panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) - } - return args[index] -} - -// Is gets whether the objects match the arguments specified. 
-func (args Arguments) Is(objects ...interface{}) bool { - for i, obj := range args { - if obj != objects[i] { - return false - } - } - return true -} - -// Diff gets a string describing the differences between the arguments -// and the specified objects. -// -// Returns the diff string and number of differences found. -func (args Arguments) Diff(objects []interface{}) (string, int) { - - var output string = "\n" - var differences int - - var maxArgCount int = len(args) - if len(objects) > maxArgCount { - maxArgCount = len(objects) - } - - for i := 0; i < maxArgCount; i++ { - var actual, expected interface{} - - if len(objects) <= i { - actual = "(Missing)" - } else { - actual = objects[i] - } - - if len(args) <= i { - expected = "(Missing)" - } else { - expected = args[i] - } - - if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: \u274C type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual) - } - - } else { - - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: \u2705 %s == %s\n", output, i, actual, expected) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: \u274C %s != %s\n", output, i, actual, expected) - } - } - - } - - if differences == 0 { - return "No differences.", differences - } - - return output, differences - -} - -// Assert compares the arguments with the specified objects and fails if -// they do not exactly match. 
-func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { - - // get the differences - diff, diffCount := args.Diff(objects) - - if diffCount == 0 { - return true - } - - // there are differences... report them... - t.Logf(diff) - t.Errorf("%sArguments do not match.", assert.CallerInfo()) - - return false - -} - -// String gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -// -// If no index is provided, String() returns a complete string representation -// of the arguments. -func (args Arguments) String(indexOrNil ...int) string { - - if len(indexOrNil) == 0 { - // normal String() method - return a string representation of the args - var argsStr []string - for _, arg := range args { - argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) - } - return strings.Join(argsStr, ",") - } else if len(indexOrNil) == 1 { - // Index has been specified - get the argument at that index - var index int = indexOrNil[0] - var s string - var ok bool - if s, ok = args.Get(index).(string); !ok { - panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) - } - return s - } - - panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) - -} - -// Int gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Int(index int) int { - var s int - var ok bool - if s, ok = args.Get(index).(int); !ok { - panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Error gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. 
-func (args Arguments) Error(index int) error { - obj := args.Get(index) - var s error - var ok bool - if obj == nil { - return nil - } - if s, ok = obj.(error); !ok { - panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Bool gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Bool(index int) bool { - var s bool - var ok bool - if s, ok = args.Get(index).(bool); !ok { - panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go deleted file mode 100644 index a4457e071..000000000 --- a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go +++ /dev/null @@ -1,669 +0,0 @@ -package mock - -import ( - "errors" - "github.com/stretchr/testify/assert" - "testing" -) - -/* - Test objects -*/ - -// ExampleInterface represents an example interface. 
-type ExampleInterface interface { - TheExampleMethod(a, b, c int) (int, error) -} - -// TestExampleImplementation is a test implementation of ExampleInterface -type TestExampleImplementation struct { - Mock -} - -func (i *TestExampleImplementation) TheExampleMethod(a, b, c int) (int, error) { - args := i.Called(a, b, c) - return args.Int(0), errors.New("Whoops") -} - -func (i *TestExampleImplementation) TheExampleMethod2(yesorno bool) { - i.Called(yesorno) -} - -type ExampleType struct{} - -func (i *TestExampleImplementation) TheExampleMethod3(et *ExampleType) error { - args := i.Called(et) - return args.Error(0) -} - -/* - Mock -*/ - -func Test_Mock_TestData(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - if assert.NotNil(t, mockedService.TestData()) { - - mockedService.TestData().Set("something", 123) - assert.Equal(t, 123, mockedService.TestData().Get("something").Data()) - - } - -} - -func Test_Mock_On(t *testing.T) { - - // make a test impl object - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - assert.Equal(t, mockedService.On("TheExampleMethod"), &mockedService.Mock) - assert.Equal(t, "TheExampleMethod", mockedService.onMethodName) - -} - -func Test_Mock_On_WithArgs(t *testing.T) { - - // make a test impl object - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - assert.Equal(t, mockedService.On("TheExampleMethod", 1, 2, 3), &mockedService.Mock) - assert.Equal(t, "TheExampleMethod", mockedService.onMethodName) - assert.Equal(t, 1, mockedService.onMethodArguments[0]) - assert.Equal(t, 2, mockedService.onMethodArguments[1]) - assert.Equal(t, 3, mockedService.onMethodArguments[2]) - -} - -func Test_Mock_Return(t *testing.T) { - - // make a test impl object - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - assert.Equal(t, mockedService.On("TheExampleMethod", "A", "B", true).Return(1, "two", true), 
&mockedService.Mock) - - // ensure the call was created - if assert.Equal(t, 1, len(mockedService.ExpectedCalls)) { - call := mockedService.ExpectedCalls[0] - - assert.Equal(t, "TheExampleMethod", call.Method) - assert.Equal(t, "A", call.Arguments[0]) - assert.Equal(t, "B", call.Arguments[1]) - assert.Equal(t, true, call.Arguments[2]) - assert.Equal(t, 1, call.ReturnArguments[0]) - assert.Equal(t, "two", call.ReturnArguments[1]) - assert.Equal(t, true, call.ReturnArguments[2]) - assert.Equal(t, 0, call.Repeatability) - - } - -} - -func Test_Mock_Return_Once(t *testing.T) { - - // make a test impl object - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Once() - - // ensure the call was created - if assert.Equal(t, 1, len(mockedService.ExpectedCalls)) { - call := mockedService.ExpectedCalls[0] - - assert.Equal(t, "TheExampleMethod", call.Method) - assert.Equal(t, "A", call.Arguments[0]) - assert.Equal(t, "B", call.Arguments[1]) - assert.Equal(t, true, call.Arguments[2]) - assert.Equal(t, 1, call.ReturnArguments[0]) - assert.Equal(t, "two", call.ReturnArguments[1]) - assert.Equal(t, true, call.ReturnArguments[2]) - assert.Equal(t, 1, call.Repeatability) - - } - -} - -func Test_Mock_Return_Twice(t *testing.T) { - - // make a test impl object - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Twice() - - // ensure the call was created - if assert.Equal(t, 1, len(mockedService.ExpectedCalls)) { - call := mockedService.ExpectedCalls[0] - - assert.Equal(t, "TheExampleMethod", call.Method) - assert.Equal(t, "A", call.Arguments[0]) - assert.Equal(t, "B", call.Arguments[1]) - assert.Equal(t, true, call.Arguments[2]) - assert.Equal(t, 1, call.ReturnArguments[0]) - assert.Equal(t, "two", call.ReturnArguments[1]) - assert.Equal(t, true, call.ReturnArguments[2]) - 
assert.Equal(t, 2, call.Repeatability) - - } - -} - -func Test_Mock_Return_Times(t *testing.T) { - - // make a test impl object - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Times(5) - - // ensure the call was created - if assert.Equal(t, 1, len(mockedService.ExpectedCalls)) { - call := mockedService.ExpectedCalls[0] - - assert.Equal(t, "TheExampleMethod", call.Method) - assert.Equal(t, "A", call.Arguments[0]) - assert.Equal(t, "B", call.Arguments[1]) - assert.Equal(t, true, call.Arguments[2]) - assert.Equal(t, 1, call.ReturnArguments[0]) - assert.Equal(t, "two", call.ReturnArguments[1]) - assert.Equal(t, true, call.ReturnArguments[2]) - assert.Equal(t, 5, call.Repeatability) - - } - -} - -func Test_Mock_Return_Nothing(t *testing.T) { - - // make a test impl object - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - assert.Equal(t, mockedService.On("TheExampleMethod", "A", "B", true).Return(), &mockedService.Mock) - - // ensure the call was created - if assert.Equal(t, 1, len(mockedService.ExpectedCalls)) { - call := mockedService.ExpectedCalls[0] - - assert.Equal(t, "TheExampleMethod", call.Method) - assert.Equal(t, "A", call.Arguments[0]) - assert.Equal(t, "B", call.Arguments[1]) - assert.Equal(t, true, call.Arguments[2]) - assert.Equal(t, 0, len(call.ReturnArguments)) - - } - -} - -func Test_Mock_findExpectedCall(t *testing.T) { - - m := new(Mock) - m.On("One", 1).Return("one") - m.On("Two", 2).Return("two") - m.On("Two", 3).Return("three") - - f, c := m.findExpectedCall("Two", 3) - - if assert.Equal(t, 2, f) { - if assert.NotNil(t, c) { - assert.Equal(t, "Two", c.Method) - assert.Equal(t, 3, c.Arguments[0]) - assert.Equal(t, "three", c.ReturnArguments[0]) - } - } - -} - -func Test_Mock_findExpectedCall_For_Unknown_Method(t *testing.T) { - - m := new(Mock) - m.On("One", 1).Return("one") - m.On("Two", 2).Return("two") - 
m.On("Two", 3).Return("three") - - f, _ := m.findExpectedCall("Two") - - assert.Equal(t, -1, f) - -} - -func Test_Mock_findExpectedCall_Respects_Repeatability(t *testing.T) { - - m := new(Mock) - m.On("One", 1).Return("one") - m.On("Two", 2).Return("two").Once() - m.On("Two", 3).Return("three").Twice() - m.On("Two", 3).Return("three").Times(8) - - f, c := m.findExpectedCall("Two", 3) - - if assert.Equal(t, 2, f) { - if assert.NotNil(t, c) { - assert.Equal(t, "Two", c.Method) - assert.Equal(t, 3, c.Arguments[0]) - assert.Equal(t, "three", c.ReturnArguments[0]) - } - } - -} - -func Test_callString(t *testing.T) { - - assert.Equal(t, `Method(int,bool,string)`, callString("Method", []interface{}{1, true, "something"}, false)) - -} - -func Test_Mock_Called(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("Test_Mock_Called", 1, 2, 3).Return(5, "6", true) - - returnArguments := mockedService.Called(1, 2, 3) - - if assert.Equal(t, 1, len(mockedService.Calls)) { - assert.Equal(t, "Test_Mock_Called", mockedService.Calls[0].Method) - assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) - assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) - assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) - } - - if assert.Equal(t, 3, len(returnArguments)) { - assert.Equal(t, 5, returnArguments[0]) - assert.Equal(t, "6", returnArguments[1]) - assert.Equal(t, true, returnArguments[2]) - } - -} - -func Test_Mock_Called_For_Bounded_Repeatability(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3).Return(5, "6", true).Once() - mockedService.On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3).Return(-1, "hi", false) - - returnArguments1 := mockedService.Called(1, 2, 3) - returnArguments2 := mockedService.Called(1, 2, 3) - - if assert.Equal(t, 2, len(mockedService.Calls)) { - 
assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[0].Method) - assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) - assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) - assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) - - assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[1].Method) - assert.Equal(t, 1, mockedService.Calls[1].Arguments[0]) - assert.Equal(t, 2, mockedService.Calls[1].Arguments[1]) - assert.Equal(t, 3, mockedService.Calls[1].Arguments[2]) - } - - if assert.Equal(t, 3, len(returnArguments1)) { - assert.Equal(t, 5, returnArguments1[0]) - assert.Equal(t, "6", returnArguments1[1]) - assert.Equal(t, true, returnArguments1[2]) - } - - if assert.Equal(t, 3, len(returnArguments2)) { - assert.Equal(t, -1, returnArguments2[0]) - assert.Equal(t, "hi", returnArguments2[1]) - assert.Equal(t, false, returnArguments2[2]) - } - -} - -func Test_Mock_Called_For_SetTime_Expectation(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("TheExampleMethod", 1, 2, 3).Return(5, "6", true).Times(4) - - mockedService.TheExampleMethod(1, 2, 3) - mockedService.TheExampleMethod(1, 2, 3) - mockedService.TheExampleMethod(1, 2, 3) - mockedService.TheExampleMethod(1, 2, 3) - assert.Panics(t, func() { - mockedService.TheExampleMethod(1, 2, 3) - }) - -} - -func Test_Mock_Called_Unexpected(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - // make sure it panics if no expectation was made - assert.Panics(t, func() { - mockedService.Called(1, 2, 3) - }, "Calling unexpected method should panic") - -} - -func Test_AssertExpectationsForObjects_Helper(t *testing.T) { - - var mockedService1 *TestExampleImplementation = new(TestExampleImplementation) - var mockedService2 *TestExampleImplementation = new(TestExampleImplementation) - var mockedService3 *TestExampleImplementation = 
new(TestExampleImplementation) - - mockedService1.On("Test_AssertExpectationsForObjects_Helper", 1).Return() - mockedService2.On("Test_AssertExpectationsForObjects_Helper", 2).Return() - mockedService3.On("Test_AssertExpectationsForObjects_Helper", 3).Return() - - mockedService1.Called(1) - mockedService2.Called(2) - mockedService3.Called(3) - - assert.True(t, AssertExpectationsForObjects(t, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock)) - -} - -func Test_AssertExpectationsForObjects_Helper_Failed(t *testing.T) { - - var mockedService1 *TestExampleImplementation = new(TestExampleImplementation) - var mockedService2 *TestExampleImplementation = new(TestExampleImplementation) - var mockedService3 *TestExampleImplementation = new(TestExampleImplementation) - - mockedService1.On("Test_AssertExpectationsForObjects_Helper_Failed", 1).Return() - mockedService2.On("Test_AssertExpectationsForObjects_Helper_Failed", 2).Return() - mockedService3.On("Test_AssertExpectationsForObjects_Helper_Failed", 3).Return() - - mockedService1.Called(1) - mockedService3.Called(3) - - tt := new(testing.T) - assert.False(t, AssertExpectationsForObjects(tt, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock)) - -} - -func Test_Mock_AssertExpectations(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("Test_Mock_AssertExpectations", 1, 2, 3).Return(5, 6, 7) - - tt := new(testing.T) - assert.False(t, mockedService.AssertExpectations(tt)) - - // make the call now - mockedService.Called(1, 2, 3) - - // now assert expectations - assert.True(t, mockedService.AssertExpectations(tt)) - -} - -func Test_Mock_AssertExpectationsCustomType(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).Return(nil).Once() - - tt := new(testing.T) - assert.False(t, mockedService.AssertExpectations(tt)) 
- - // make the call now - mockedService.TheExampleMethod3(&ExampleType{}) - - // now assert expectations - assert.True(t, mockedService.AssertExpectations(tt)) - -} - -func Test_Mock_AssertExpectations_With_Repeatability(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("Test_Mock_AssertExpectations_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Twice() - - tt := new(testing.T) - assert.False(t, mockedService.AssertExpectations(tt)) - - // make the call now - mockedService.Called(1, 2, 3) - - assert.False(t, mockedService.AssertExpectations(tt)) - - mockedService.Called(1, 2, 3) - - // now assert expectations - assert.True(t, mockedService.AssertExpectations(tt)) - -} - -func Test_Mock_TwoCallsWithDifferentArguments(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 1, 2, 3).Return(5, 6, 7) - mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 4, 5, 6).Return(5, 6, 7) - - args1 := mockedService.Called(1, 2, 3) - assert.Equal(t, 5, args1.Int(0)) - assert.Equal(t, 6, args1.Int(1)) - assert.Equal(t, 7, args1.Int(2)) - - args2 := mockedService.Called(4, 5, 6) - assert.Equal(t, 5, args2.Int(0)) - assert.Equal(t, 6, args2.Int(1)) - assert.Equal(t, 7, args2.Int(2)) - -} - -func Test_Mock_AssertNumberOfCalls(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("Test_Mock_AssertNumberOfCalls", 1, 2, 3).Return(5, 6, 7) - - mockedService.Called(1, 2, 3) - assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 1)) - - mockedService.Called(1, 2, 3) - assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 2)) - -} - -func Test_Mock_AssertCalled(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - 
mockedService.On("Test_Mock_AssertCalled", 1, 2, 3).Return(5, 6, 7) - - mockedService.Called(1, 2, 3) - - assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled", 1, 2, 3)) - -} - -func Test_Mock_AssertCalled_WithAnythingOfTypeArgument(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("Test_Mock_AssertCalled_WithAnythingOfTypeArgument", Anything, Anything, Anything).Return() - - mockedService.Called(1, "two", []uint8("three")) - - assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled_WithAnythingOfTypeArgument", AnythingOfType("int"), AnythingOfType("string"), AnythingOfType("[]uint8"))) - -} - -func Test_Mock_AssertCalled_WithArguments(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("Test_Mock_AssertCalled_WithArguments", 1, 2, 3).Return(5, 6, 7) - - mockedService.Called(1, 2, 3) - - tt := new(testing.T) - assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 1, 2, 3)) - assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 2, 3, 4)) - -} - -func Test_Mock_AssertCalled_WithArguments_With_Repeatability(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Once() - mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4).Return(5, 6, 7).Once() - - mockedService.Called(1, 2, 3) - mockedService.Called(2, 3, 4) - - tt := new(testing.T) - assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3)) - assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4)) - assert.False(t, mockedService.AssertCalled(tt, 
"Test_Mock_AssertCalled_WithArguments_With_Repeatability", 3, 4, 5)) - -} - -func Test_Mock_AssertNotCalled(t *testing.T) { - - var mockedService *TestExampleImplementation = new(TestExampleImplementation) - - mockedService.On("Test_Mock_AssertNotCalled", 1, 2, 3).Return(5, 6, 7) - - mockedService.Called(1, 2, 3) - - assert.True(t, mockedService.AssertNotCalled(t, "Test_Mock_NotCalled")) - -} - -/* - Arguments helper methods -*/ -func Test_Arguments_Get(t *testing.T) { - - var args Arguments = []interface{}{"string", 123, true} - - assert.Equal(t, "string", args.Get(0).(string)) - assert.Equal(t, 123, args.Get(1).(int)) - assert.Equal(t, true, args.Get(2).(bool)) - -} - -func Test_Arguments_Is(t *testing.T) { - - var args Arguments = []interface{}{"string", 123, true} - - assert.True(t, args.Is("string", 123, true)) - assert.False(t, args.Is("wrong", 456, false)) - -} - -func Test_Arguments_Diff(t *testing.T) { - - var args Arguments = []interface{}{"Hello World", 123, true} - var diff string - var count int - diff, count = args.Diff([]interface{}{"Hello World", 456, "false"}) - - assert.Equal(t, 2, count) - assert.Contains(t, diff, `%!s(int=456) != %!s(int=123)`) - assert.Contains(t, diff, `false != %!s(bool=true)`) - -} - -func Test_Arguments_Diff_DifferentNumberOfArgs(t *testing.T) { - - var args Arguments = []interface{}{"string", 123, true} - var diff string - var count int - diff, count = args.Diff([]interface{}{"string", 456, "false", "extra"}) - - assert.Equal(t, 3, count) - assert.Contains(t, diff, `extra != (Missing)`) - -} - -func Test_Arguments_Diff_WithAnythingArgument(t *testing.T) { - - var args Arguments = []interface{}{"string", 123, true} - var count int - _, count = args.Diff([]interface{}{"string", Anything, true}) - - assert.Equal(t, 0, count) - -} - -func Test_Arguments_Diff_WithAnythingArgument_InActualToo(t *testing.T) { - - var args Arguments = []interface{}{"string", Anything, true} - var count int - _, count = 
args.Diff([]interface{}{"string", 123, true}) - - assert.Equal(t, 0, count) - -} - -func Test_Arguments_Diff_WithAnythingOfTypeArgument(t *testing.T) { - - var args Arguments = []interface{}{"string", AnythingOfType("int"), true} - var count int - _, count = args.Diff([]interface{}{"string", 123, true}) - - assert.Equal(t, 0, count) - -} - -func Test_Arguments_Diff_WithAnythingOfTypeArgument_Failing(t *testing.T) { - - var args Arguments = []interface{}{"string", AnythingOfType("string"), true} - var count int - var diff string - diff, count = args.Diff([]interface{}{"string", 123, true}) - - assert.Equal(t, 1, count) - assert.Contains(t, diff, `string != type int - %!s(int=123)`) - -} - -func Test_Arguments_Assert(t *testing.T) { - - var args Arguments = []interface{}{"string", 123, true} - - assert.True(t, args.Assert(t, "string", 123, true)) - -} - -func Test_Arguments_String_Representation(t *testing.T) { - - var args Arguments = []interface{}{"string", 123, true} - assert.Equal(t, `string,int,bool`, args.String()) - -} - -func Test_Arguments_String(t *testing.T) { - - var args Arguments = []interface{}{"string", 123, true} - assert.Equal(t, "string", args.String(0)) - -} - -func Test_Arguments_Error(t *testing.T) { - - var err error = errors.New("An Error") - var args Arguments = []interface{}{"string", 123, true, err} - assert.Equal(t, err, args.Error(3)) - -} - -func Test_Arguments_Error_Nil(t *testing.T) { - - var args Arguments = []interface{}{"string", 123, true, nil} - assert.Equal(t, nil, args.Error(3)) - -} - -func Test_Arguments_Int(t *testing.T) { - - var args Arguments = []interface{}{"string", 123, true} - assert.Equal(t, 123, args.Int(1)) - -} - -func Test_Arguments_Bool(t *testing.T) { - - var args Arguments = []interface{}{"string", 123, true} - assert.Equal(t, true, args.Bool(2)) - -} From 701c3e5242bd33b3b5035a9edc8a87c525f66f6d Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 1 Jul 2015 00:37:43 -0700 Subject: [PATCH 07/19] Add new 
RPC helpers wrapping over regular rpc packages, add middleware chaining ability --- pkg/server/router.go | 62 +++++++++++++++++++++++++++++++++------- pkg/server/rpc/server.go | 31 ++++++++++++++++---- pkg/server/server.go | 29 ++++--------------- 3 files changed, 83 insertions(+), 39 deletions(-) diff --git a/pkg/server/router.go b/pkg/server/router.go index 416f45da0..7c786f550 100644 --- a/pkg/server/router.go +++ b/pkg/server/router.go @@ -20,11 +20,11 @@ import ( "net/http" router "github.com/gorilla/mux" - jsonRPC "github.com/gorilla/rpc/v2" "github.com/minio/minio/pkg/server/api" "github.com/minio/minio/pkg/server/rpc" ) +// registerAPI - register all the object API handlers to their respective paths func registerAPI(mux *router.Router) http.Handler { api := api.MinioAPI{} @@ -50,28 +50,68 @@ func registerAPI(mux *router.Router) http.Handler { return mux } -func registerOthers(mux http.Handler, conf api.Config) http.Handler { - mux = api.ValidContentTypeHandler(mux) - mux = api.TimeValidityHandler(mux) - mux = api.IgnoreResourcesHandler(mux) - mux = api.ValidateAuthHeaderHandler(mux) +// add a handlerFunc typedef +type handlerFunc func(http.Handler) http.Handler + +// chain struct to hold handlers +type chain struct { + handlers []handlerFunc +} + +// loop through handlers and return a final one +func (c chain) final(mux http.Handler) http.Handler { + var f http.Handler + if mux != nil { + f = mux + } else { + f = http.DefaultServeMux + } + for _, handler := range c.handlers { + f = handler(f) + } + return f +} + +// registerChain - register an array of handlers in a chain of style -> handler(handler(handler(handler...))) +func registerChain(handlers ...handlerFunc) chain { + ch := chain{} + ch.handlers = append(ch.handlers, handlers...) 
+ return ch +} + +// registerOtherMiddleware register all available middleware +func registerOtherMiddleware(mux http.Handler, conf api.Config) http.Handler { + ch := registerChain( + api.ValidContentTypeHandler, + api.TimeValidityHandler, + api.IgnoreResourcesHandler, + api.ValidateAuthHeaderHandler, + api.LoggingHandler, + // Add new middleware here + ) + + mux = ch.final(mux) mux = api.RateLimitHandler(mux, conf.RateLimit) - mux = api.LoggingHandler(mux) return mux } -func registerRPC(mux *router.Router, r *jsonRPC.Server) http.Handler { - mux.Handle("/rpc", r) +// registerRPC - register rpc handlers +func registerRPC(mux *router.Router, s *rpc.Server) http.Handler { + mux.Handle("/rpc", s) return mux } // APIHandler api handler func APIHandler(conf api.Config) http.Handler { mux := router.NewRouter() - return registerOthers(registerAPI(mux), conf) + return registerOtherMiddleware(registerAPI(mux), conf) } // RPCHandler rpc handler func RPCHandler() http.Handler { - return registerRPC(router.NewRouter(), rpc.HelloServiceHandler()) + s := rpc.NewServer() + s.RegisterJSONCodec() + s.RegisterService(new(rpc.HelloService), "") + // add more services here + return registerRPC(router.NewRouter(), s) } diff --git a/pkg/server/rpc/server.go b/pkg/server/rpc/server.go index 2b98b5213..935b91d96 100644 --- a/pkg/server/rpc/server.go +++ b/pkg/server/rpc/server.go @@ -17,14 +17,35 @@ package rpc import ( + "net/http" + "github.com/gorilla/rpc/v2" "github.com/gorilla/rpc/v2/json" ) -// HelloServiceHandler - -func HelloServiceHandler() *rpc.Server { - s := rpc.NewServer() - s.RegisterCodec(json.NewCodec(), "application/json") - s.RegisterService(new(HelloService), "") +// Server rpc server container +type Server struct { + RPCServer *rpc.Server +} + +// RegisterJSONCodec - register standard json codec +func (s Server) RegisterJSONCodec() { + s.RPCServer.RegisterCodec(json.NewCodec(), "application/json") +} + +// RegisterService - register new services +func (s Server) 
RegisterService(recv interface{}, name string) { + s.RPCServer.RegisterService(recv, name) +} + +// NewServer - provide a new instance of RPC server +func NewServer() *Server { + s := &Server{} + s.RPCServer = rpc.NewServer() return s } + +// ServeHTTP wrapper method for http.Handler interface +func (s Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.RPCServer.ServeHTTP(w, r) +} diff --git a/pkg/server/server.go b/pkg/server/server.go index bf76caa06..83426d888 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -28,7 +28,6 @@ import ( func startAPI(errCh chan error, conf api.Config) { defer close(errCh) - var err error // Minio server config httpServer := &http.Server{ Addr: conf.Address, @@ -66,37 +65,26 @@ func startAPI(errCh chan error, conf api.Config) { for _, host := range hosts { fmt.Printf("Starting minio server on: http://%s:%s\n", host, port) } - err = httpServer.ListenAndServe() + errCh <- httpServer.ListenAndServe() case conf.TLS == true: for _, host := range hosts { fmt.Printf("Starting minio server on: https://%s:%s\n", host, port) } - err = httpServer.ListenAndServeTLS(conf.CertFile, conf.KeyFile) + errCh <- httpServer.ListenAndServeTLS(conf.CertFile, conf.KeyFile) } - if err != nil { - errCh <- err - } - errCh <- nil - return } func startRPC(errCh chan error) { defer close(errCh) rpcHandler := RPCHandler() - var err error // Minio server config httpServer := &http.Server{ - Addr: "127.0.0.1:9001", + Addr: "127.0.0.1:9001", // TODO make this configurable Handler: rpcHandler, MaxHeaderBytes: 1 << 20, } - err = httpServer.ListenAndServe() - if err != nil { - errCh <- err - } - errCh <- nil - return + errCh <- httpServer.ListenAndServe() } // StartServices starts basic services for a server @@ -109,13 +97,8 @@ func StartServices(conf api.Config) error { select { case err := <-apiErrCh: - if err != nil { - return err - } + return err case err := <-rpcErrCh: - if err != nil { - return err - } + return err } - return nil } From 
eb5aa19dfa11b155283325b588e1a707872fc6c7 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 1 Jul 2015 11:07:46 -0700 Subject: [PATCH 08/19] Remove custom Config, will use quick Config instead for user access keys --- pkg/server/api/api-generic-handlers.go | 41 ++++++- pkg/server/config/config.go | 152 ------------------------- pkg/server/config/config_test.go | 75 ------------ 3 files changed, 37 insertions(+), 231 deletions(-) delete mode 100644 pkg/server/config/config.go delete mode 100644 pkg/server/config/config_test.go diff --git a/pkg/server/api/api-generic-handlers.go b/pkg/server/api/api-generic-handlers.go index 1cff82502..5d7f3fb00 100644 --- a/pkg/server/api/api-generic-handlers.go +++ b/pkg/server/api/api-generic-handlers.go @@ -19,10 +19,13 @@ package api import ( "errors" "net/http" + "os" + "os/user" + "path/filepath" "strings" "time" - "github.com/minio/minio/pkg/server/config" + "github.com/minio/minio/pkg/quick" "github.com/minio/minio/pkg/utils/crypto/keys" ) @@ -179,18 +182,48 @@ func ValidateAuthHeaderHandler(h http.Handler) http.Handler { return validateAuthHandler{h} } +// User context +type User struct { + Version string + Name string + AccessKey string + SecretKey string +} + +func getConfigFile() string { + u, err := user.Current() + if err != nil { + return "" + } + confPath := filepath.Join(u.HomeDir, ".minio") + if err := os.MkdirAll(confPath, 0700); err != nil { + return "" + } + return filepath.Join(confPath, "config.json") +} + // validate auth header handler ServeHTTP() wrapper func (h validateAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { acceptsContentType := getContentType(r) _, err := stripAuth(r) switch err.(type) { case nil: - var conf = config.Config{} - if err := conf.SetupConfig(); err != nil { + users := make(map[string]User) + configFile := getConfigFile() + if configFile == "" { writeErrorResponse(w, r, InternalError, acceptsContentType, r.URL.Path) return } - if err := conf.ReadConfig(); err != 
nil { + qconf, err := quick.New(&users) + if err != nil { + writeErrorResponse(w, r, InternalError, acceptsContentType, r.URL.Path) + return + } + if err := qconf.Save(configFile); err != nil { + writeErrorResponse(w, r, InternalError, acceptsContentType, r.URL.Path) + return + } + if err := qconf.Load(configFile); err != nil { writeErrorResponse(w, r, InternalError, acceptsContentType, r.URL.Path) return } diff --git a/pkg/server/config/config.go b/pkg/server/config/config.go deleted file mode 100644 index 96f9e9efe..000000000 --- a/pkg/server/config/config.go +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package config - -import ( - "encoding/json" - "io" - "os" - "os/user" - "path/filepath" - "sync" - - "github.com/minio/minio/pkg/iodine" -) - -// Config context -type Config struct { - ConfigPath string - ConfigFile string - ConfigLock *sync.RWMutex - Users map[string]User -} - -// User context -type User struct { - Name string - AccessKey string - SecretKey string -} - -// SetupConfig initialize config directory and template config -func (c *Config) SetupConfig() error { - u, err := user.Current() - if err != nil { - return iodine.New(err, nil) - } - - confPath := filepath.Join(u.HomeDir, ".minio") - if err := os.MkdirAll(confPath, 0700); err != nil { - return iodine.New(err, nil) - } - - c.ConfigPath = confPath - c.ConfigFile = filepath.Join(c.ConfigPath, "config.json") - if _, err := os.Stat(c.ConfigFile); os.IsNotExist(err) { - _, err = os.Create(c.ConfigFile) - if err != nil { - return iodine.New(err, nil) - } - } - - c.ConfigLock = new(sync.RWMutex) - return nil -} - -// GetConfigPath config file location -func (c *Config) GetConfigPath() string { - return c.ConfigPath -} - -// IsUserExists verify if user exists -func (c *Config) IsUserExists(username string) bool { - for _, user := range c.Users { - if user.Name == username { - return true - } - } - return false -} - -// GetUser - get user from username -func (c *Config) GetUser(username string) User { - for _, user := range c.Users { - if user.Name == username { - return user - } - } - return User{} -} - -// AddUser - add a user into existing User list -func (c *Config) AddUser(user User) { - var currentUsers map[string]User - if len(c.Users) == 0 { - currentUsers = make(map[string]User) - } else { - currentUsers = c.Users - } - currentUsers[user.AccessKey] = user - c.Users = currentUsers -} - -// WriteConfig - write encoded json in config file -func (c *Config) WriteConfig() error { - c.ConfigLock.Lock() - defer c.ConfigLock.Unlock() - - var file *os.File - var err error - - file, err = 
os.OpenFile(c.ConfigFile, os.O_WRONLY, 0666) - defer file.Close() - if err != nil { - return iodine.New(err, nil) - } - - encoder := json.NewEncoder(file) - encoder.Encode(c.Users) - return nil -} - -// ReadConfig - read json config file and decode -func (c *Config) ReadConfig() error { - c.ConfigLock.RLock() - defer c.ConfigLock.RUnlock() - - var file *os.File - var err error - - file, err = os.OpenFile(c.ConfigFile, os.O_RDONLY, 0666) - defer file.Close() - if err != nil { - return iodine.New(err, nil) - } - - users := make(map[string]User) - decoder := json.NewDecoder(file) - err = decoder.Decode(&users) - switch err { - case io.EOF: - return nil - case nil: - c.Users = users - return nil - default: - return iodine.New(err, nil) - } -} diff --git a/pkg/server/config/config_test.go b/pkg/server/config/config_test.go deleted file mode 100644 index f2ca5bd2e..000000000 --- a/pkg/server/config/config_test.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package config - -import ( - "io/ioutil" - "os" - "path/filepath" - "sync" - "testing" - - . 
"github.com/minio/check" - "github.com/minio/minio/pkg/utils/crypto/keys" -) - -type MySuite struct{} - -var _ = Suite(&MySuite{}) - -func Test(t *testing.T) { TestingT(t) } - -func (s *MySuite) TestConfig(c *C) { - conf := Config{} - conf.ConfigLock = new(sync.RWMutex) - conf.ConfigPath, _ = ioutil.TempDir("/tmp", "minio-test-") - defer os.RemoveAll(conf.ConfigPath) - conf.ConfigFile = filepath.Join(conf.ConfigPath, "config.json") - if _, err := os.Stat(conf.ConfigFile); os.IsNotExist(err) { - _, err = os.Create(conf.ConfigFile) - if err != nil { - c.Fatal(err) - } - } - - accesskey, _ := keys.GenerateRandomAlphaNumeric(keys.MinioAccessID) - secretkey, _ := keys.GenerateRandomBase64(keys.MinioSecretID) - - user := User{ - Name: "gnubot", - AccessKey: string(accesskey), - SecretKey: string(secretkey), - } - - conf.AddUser(user) - err := conf.WriteConfig() - c.Assert(err, IsNil) - - err = conf.ReadConfig() - c.Assert(err, IsNil) - - accesskey, _ = keys.GenerateRandomAlphaNumeric(keys.MinioAccessID) - secretkey, _ = keys.GenerateRandomBase64(keys.MinioSecretID) - user = User{ - Name: "minio", - AccessKey: string(accesskey), - SecretKey: string(secretkey), - } - conf.AddUser(user) - err = conf.WriteConfig() - c.Assert(err, IsNil) -} From 14ec42d6466b817bc290827451ccd60321ca5541 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 1 Jul 2015 12:28:34 -0700 Subject: [PATCH 09/19] Add initial implementation of priority queue, uses container/heap --- pkg/storage/pq/pq.go | 70 +++++++++++++++++++++++++++++++++ pkg/storage/pq/pq_test.go | 81 +++++++++++++++++++++++++++++++++++++++ pkg/storage/pq/task.go | 38 ++++++++++++++++++ 3 files changed, 189 insertions(+) create mode 100644 pkg/storage/pq/pq.go create mode 100644 pkg/storage/pq/pq_test.go create mode 100644 pkg/storage/pq/task.go diff --git a/pkg/storage/pq/pq.go b/pkg/storage/pq/pq.go new file mode 100644 index 000000000..c491f4bb6 --- /dev/null +++ b/pkg/storage/pq/pq.go @@ -0,0 +1,70 @@ +/* + * Minimalist 
Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package pq + +import "container/heap" + +// Item container for tasks in priority queue +type Item struct { + task Task // task + + // The index is needed by Fix and is maintained by the heap.Interface methods. + index int // The index of the item in the heap. +} + +// A PriorityQueue implements heap.Interface and holds Items. +type PriorityQueue []*Item + +// Len length of current priority queue +func (pq PriorityQueue) Len() int { return len(pq) } + +// Less used internally by heap.Interface to arrange items in order +func (pq PriorityQueue) Less(i, j int) bool { + // We want Pop to give us the highest, not lowest, priority so we use greater than here. 
+ return pq[i].task.GetPriority() > pq[j].task.GetPriority() +} + +// Swap used internally by heap.Interface to arrange incoming items +func (pq PriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +// Push push items onto priority queue +func (pq *PriorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*Item) + item.index = n + *pq = append(*pq, item) +} + +// Pop pop items with highest priority +func (pq *PriorityQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} + +// Fix modifies an item in-place on the queue +func (pq *PriorityQueue) Fix(item *Item, task Task) { + item.task = task + heap.Fix(pq, item.index) +} diff --git a/pkg/storage/pq/pq_test.go b/pkg/storage/pq/pq_test.go new file mode 100644 index 000000000..0723d273f --- /dev/null +++ b/pkg/storage/pq/pq_test.go @@ -0,0 +1,81 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package pq + +import ( + "container/heap" + "fmt" + "testing" + + . 
"github.com/minio/check" +) + +func Test(t *testing.T) { TestingT(t) } + +type MySuite struct{} + +var _ = Suite(&MySuite{}) + +func helloTask1() error { + fmt.Println("Hello task1") + return nil +} + +func helloTask2() error { + fmt.Println("Hello task2") + return nil +} + +func newJob1() error { + fmt.Println("New Job1") + return nil +} + +func newJob2() error { + fmt.Println("New Job2") + return nil +} + +func (s *MySuite) TestPQ(c *C) { + // Create a priority queue, put the items in it, and + // establish the priority queue (heap) invariants. + pq := make(PriorityQueue, 2) + pq[0] = &Item{ + task: Task{job: helloTask1, priority: 2}, + index: 0, + } + pq[1] = &Item{ + task: Task{job: helloTask2, priority: 1}, + index: 1, + } + heap.Init(&pq) + + // Insert a new item and then modify its priority. + item := &Item{ + task: Task{job: newJob1, priority: 5}, + } + heap.Push(&pq, item) + newTask := Task{job: newJob2, priority: 6} + pq.Fix(item, newTask) + + // Take the items out; they arrive in decreasing priority order. + for pq.Len() > 0 { + item := heap.Pop(&pq).(*Item) + fmt.Printf("%.2d", item.task.GetPriority()) + item.task.Execute() + } +} diff --git a/pkg/storage/pq/task.go b/pkg/storage/pq/task.go new file mode 100644 index 000000000..1c092fbd5 --- /dev/null +++ b/pkg/storage/pq/task.go @@ -0,0 +1,38 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package pq + +// Task container for any generic tasks +type Task struct { + job func() error + priority int +} + +// GetPriority get current task priority +func (t Task) GetPriority() int { + return t.priority +} + +// UpdatePriority update current task priority +func (t Task) UpdatePriority(p int) { + t.priority = p +} + +// Execute execute current task +func (t Task) Execute() error { + return t.job() +} From 84810162f5e4f12e553349ff931592047dfab7bb Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 1 Jul 2015 15:27:03 -0700 Subject: [PATCH 10/19] Add simple Version and GetSysInfo services --- pkg/server/router.go | 2 + pkg/server/rpc/methods.go | 78 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 77 insertions(+), 3 deletions(-) diff --git a/pkg/server/router.go b/pkg/server/router.go index 7c786f550..e9fcaaed1 100644 --- a/pkg/server/router.go +++ b/pkg/server/router.go @@ -112,6 +112,8 @@ func RPCHandler() http.Handler { s := rpc.NewServer() s.RegisterJSONCodec() s.RegisterService(new(rpc.HelloService), "") + s.RegisterService(new(rpc.VersionService), "") + s.RegisterService(new(rpc.GetSysInfoService), "") // add more services here return registerRPC(router.NewRouter(), s) } diff --git a/pkg/server/rpc/methods.go b/pkg/server/rpc/methods.go index 4c654264b..ab7e9a590 100644 --- a/pkg/server/rpc/methods.go +++ b/pkg/server/rpc/methods.go @@ -16,7 +16,12 @@ package rpc -import "net/http" +import ( + "net/http" + "os" + "runtime" + "time" +) // HelloArgs - hello args type HelloArgs struct { @@ -28,11 +33,78 @@ type HelloReply struct { Message string } -// HelloService - +// HelloService - hello service type HelloService struct{} -// Say - +// Say method func (h *HelloService) Say(r *http.Request, args *HelloArgs, reply *HelloReply) error { reply.Message = "Hello, " + args.Who + "!" 
return nil } + +// Args basic json RPC params +type Args struct { + Request string +} + +// VersionReply version reply +type VersionReply struct { + Version string `json:"version"` + BuildDate string `json:"build-date"` +} + +// VersionService - +type VersionService struct{} + +func getVersion() string { + return "0.0.1" +} +func getBuildDate() string { + return time.Now().UTC().Format(http.TimeFormat) +} + +func setVersionReply(reply *VersionReply) { + reply.Version = getVersion() + reply.BuildDate = getBuildDate() + return +} + +// Get method +func (v *VersionService) Get(r *http.Request, args *Args, reply *VersionReply) error { + setVersionReply(reply) + return nil +} + +// GetSysInfoService - +type GetSysInfoService struct{} + +// GetSysInfoReply - +type GetSysInfoReply struct { + Hostname string `json:"hostname"` + SysARCH string `json:"sys.arch"` + SysOS string `json:"sys.os"` + SysCPUS int `json:"sys.ncpus"` + Routines int `json:"goroutines"` + GOVersion string `json:"goversion"` + MemStats runtime.MemStats `json:"memstats"` +} + +func setSysInfoReply(sis *GetSysInfoReply) error { + sis.SysARCH = runtime.GOARCH + sis.SysOS = runtime.GOOS + sis.SysCPUS = runtime.NumCPU() + sis.Routines = runtime.NumGoroutine() + sis.GOVersion = runtime.Version() + sis.Hostname, _ = os.Hostname() + + var memStats runtime.MemStats + runtime.ReadMemStats(&memStats) + sis.MemStats = memStats + + return nil +} + +// Get method +func (s *GetSysInfoService) Get(r *http.Request, args *Args, reply *GetSysInfoReply) error { + return setSysInfoReply(reply) +} From 38a6ce36e508c3a57d485384f5f9a253c6c8227f Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 1 Jul 2015 15:49:15 -0700 Subject: [PATCH 11/19] Remove slow AppendUniq code, rolling through over a slice is in-efficient Remove it and use map instead --- pkg/storage/donut/bucket.go | 9 +++-- pkg/storage/donut/cache.go | 60 +++++++++++++-------------------- pkg/storage/donut/common.go | 27 +++++++++++---- 
pkg/storage/donut/donut_test.go | 2 +- 4 files changed, 51 insertions(+), 47 deletions(-) diff --git a/pkg/storage/donut/bucket.go b/pkg/storage/donut/bucket.go index 1c798ffa9..6e4012902 100644 --- a/pkg/storage/donut/bucket.go +++ b/pkg/storage/donut/bucket.go @@ -145,7 +145,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List for objectName := range bucketMetadata.Buckets[b.getBucketName()].BucketObjects { if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) { if objectName > marker { - objects = AppendU(objects, objectName) + objects = append(objects, objectName) } } } @@ -166,16 +166,19 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List var commonPrefixes []string for _, commonPrefix := range prefixes { - commonPrefixes = AppendU(commonPrefixes, prefix+commonPrefix) + commonPrefixes = append(commonPrefixes, prefix+commonPrefix) } + filteredObjects = RemoveDuplicates(filteredObjects) sort.Strings(filteredObjects) for _, objectName := range filteredObjects { if len(results) >= maxkeys { isTruncated = true break } - results = AppendU(results, prefix+objectName) + results = append(results, prefix+objectName) } + results = RemoveDuplicates(results) + commonPrefixes = RemoveDuplicates(commonPrefixes) sort.Strings(commonPrefixes) listObjects := ListObjects{} diff --git a/pkg/storage/donut/cache.go b/pkg/storage/donut/cache.go index ad8ab4929..3c089cd5b 100644 --- a/pkg/storage/donut/cache.go +++ b/pkg/storage/donut/cache.go @@ -17,7 +17,6 @@ package donut import ( - "bufio" "bytes" "crypto/md5" "encoding/base64" @@ -419,63 +418,47 @@ func (cache Cache) CreateBucket(bucketName, acl string) error { return nil } -func delimiter(object, delimiter string) string { - readBuffer := bytes.NewBufferString(object) - reader := bufio.NewReader(readBuffer) - stringReader := strings.NewReader(delimiter) - delimited, _ := stringReader.ReadByte() - delimitedStr, _ := reader.ReadString(delimited) - return 
delimitedStr -} - -func appendUniq(slice []string, i string) []string { - for _, ele := range slice { - if ele == i { - return slice - } - } - return append(slice, i) -} - -func (cache Cache) filterDelimiterPrefix(keys []string, key, delim string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) { +func (cache Cache) filterDelimiterPrefix(keys []string, key, prefix, delim string) ([]string, []string) { + var commonPrefixes []string switch true { - case key == r.Prefix: - keys = appendUniq(keys, key) + case key == prefix: + keys = append(keys, key) // delim - requires r.Prefix as it was trimmed off earlier - case key == r.Prefix+delim: - keys = appendUniq(keys, key) + case key == prefix+delim: + keys = append(keys, key) case delim != "": - r.CommonPrefixes = appendUniq(r.CommonPrefixes, r.Prefix+delim) + commonPrefixes = append(commonPrefixes, prefix+delim) } - return keys, r + return RemoveDuplicates(keys), RemoveDuplicates(commonPrefixes) } -func (cache Cache) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) { +func (cache Cache) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, []string) { + var commonPrefixes []string switch true { // Prefix absent, delimit object key based on delimiter case r.IsDelimiterSet(): - delim := delimiter(key, r.Delimiter) + delim := Delimiter(key, r.Delimiter) switch true { case delim == "" || delim == key: - keys = appendUniq(keys, key) + keys = append(keys, key) case delim != "": - r.CommonPrefixes = appendUniq(r.CommonPrefixes, delim) + commonPrefixes = append(commonPrefixes, delim) } // Prefix present, delimit object key with prefix key based on delimiter case r.IsDelimiterPrefixSet(): if strings.HasPrefix(key, r.Prefix) { trimmedName := strings.TrimPrefix(key, r.Prefix) - delim := delimiter(trimmedName, r.Delimiter) - keys, r = cache.filterDelimiterPrefix(keys, key, delim, r) + delim := Delimiter(trimmedName, r.Delimiter) + keys, 
commonPrefixes = cache.filterDelimiterPrefix(keys, key, r.Prefix, delim) } // Prefix present, nothing to delimit case r.IsPrefixSet(): - keys = appendUniq(keys, key) + keys = append(keys, key) // Prefix and delimiter absent case r.IsDefault(): - keys = appendUniq(keys, key) + keys = append(keys, key) } - return keys, r + return RemoveDuplicates(keys), RemoveDuplicates(commonPrefixes) } // ListObjects - list objects from cache @@ -493,11 +476,12 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) } var results []ObjectMetadata var keys []string + var commonPrefixes []string storedBucket := cache.storedBuckets[bucket] for key := range storedBucket.objectMetadata { if strings.HasPrefix(key, bucket+"/") { key = key[len(bucket)+1:] - keys, resources = cache.listObjects(keys, key, resources) + keys, commonPrefixes = cache.listObjects(keys, key, resources) } } var newKeys []string @@ -505,12 +489,13 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) case resources.Marker != "": for _, key := range keys { if key > resources.Marker { - newKeys = appendUniq(newKeys, key) + newKeys = append(newKeys, key) } } default: newKeys = keys } + newKeys = RemoveDuplicates(newKeys) sort.Strings(newKeys) for _, key := range newKeys { if len(results) == resources.Maxkeys { @@ -523,6 +508,7 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) object := storedBucket.objectMetadata[bucket+"/"+key] results = append(results, object) } + resources.CommonPrefixes = commonPrefixes return results, resources, nil } diff --git a/pkg/storage/donut/common.go b/pkg/storage/donut/common.go index 9fbb18eb6..e55fc3739 100644 --- a/pkg/storage/donut/common.go +++ b/pkg/storage/donut/common.go @@ -17,18 +17,33 @@ package donut import ( + "bufio" + "bytes" "sort" "strings" ) -// AppendU append to an input slice if the element is unique and provides a new slice -func AppendU(slice []string, i string) []string { - 
for _, ele := range slice { - if ele == i { - return slice +// Delimiter delims the string at delimiter +func Delimiter(object, delimiter string) string { + readBuffer := bytes.NewBufferString(object) + reader := bufio.NewReader(readBuffer) + stringReader := strings.NewReader(delimiter) + delimited, _ := stringReader.ReadByte() + delimitedStr, _ := reader.ReadString(delimited) + return delimitedStr +} + +// RemoveDuplicates removes duplicate elements from a slice +func RemoveDuplicates(slice []string) []string { + newSlice := []string{} + seen := make(map[string]struct{}) + for _, val := range slice { + if _, ok := seen[val]; !ok { + newSlice = append(newSlice, val) + seen[val] = struct{}{} // avoiding byte allocation } } - return append(slice, i) + return newSlice } // TrimPrefix trims off a prefix string from all the elements in a given slice diff --git a/pkg/storage/donut/donut_test.go b/pkg/storage/donut/donut_test.go index d1dad9c45..fad0a26fa 100644 --- a/pkg/storage/donut/donut_test.go +++ b/pkg/storage/donut/donut_test.go @@ -94,7 +94,7 @@ func (s *MySuite) TestEmptyBucket(c *C) { listObjects, err := donut.ListObjects("foo", "", "", "", 1) c.Assert(err, IsNil) c.Assert(len(listObjects.Objects), Equals, 0) - c.Assert(listObjects.CommonPrefixes, IsNil) + c.Assert(listObjects.CommonPrefixes, DeepEquals, []string{}) c.Assert(listObjects.IsTruncated, Equals, false) } From 0533abf6a8716cae33e66cf87e04ff4511945f99 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 1 Jul 2015 16:30:57 -0700 Subject: [PATCH 12/19] Make priority queue lambda function return error over a channel --- ...-bucket-handlers.go => bucket-handlers.go} | 0 .../{api-definitions.go => definitions.go} | 0 ...eneric-handlers.go => generic-handlers.go} | 0 ...ogging-handlers.go => logging-handlers.go} | 0 ...-object-handlers.go => object-handlers.go} | 0 ...imit-handlers.go => ratelimit-handlers.go} | 0 .../api/{api-response.go => response.go} | 0 pkg/storage/pq/pq_test.go | 44 
++++++++++++++----- pkg/storage/pq/task.go | 4 +- 9 files changed, 34 insertions(+), 14 deletions(-) rename pkg/server/api/{api-bucket-handlers.go => bucket-handlers.go} (100%) rename pkg/server/api/{api-definitions.go => definitions.go} (100%) rename pkg/server/api/{api-generic-handlers.go => generic-handlers.go} (100%) rename pkg/server/api/{api-logging-handlers.go => logging-handlers.go} (100%) rename pkg/server/api/{api-object-handlers.go => object-handlers.go} (100%) rename pkg/server/api/{api-ratelimit-handlers.go => ratelimit-handlers.go} (100%) rename pkg/server/api/{api-response.go => response.go} (100%) diff --git a/pkg/server/api/api-bucket-handlers.go b/pkg/server/api/bucket-handlers.go similarity index 100% rename from pkg/server/api/api-bucket-handlers.go rename to pkg/server/api/bucket-handlers.go diff --git a/pkg/server/api/api-definitions.go b/pkg/server/api/definitions.go similarity index 100% rename from pkg/server/api/api-definitions.go rename to pkg/server/api/definitions.go diff --git a/pkg/server/api/api-generic-handlers.go b/pkg/server/api/generic-handlers.go similarity index 100% rename from pkg/server/api/api-generic-handlers.go rename to pkg/server/api/generic-handlers.go diff --git a/pkg/server/api/api-logging-handlers.go b/pkg/server/api/logging-handlers.go similarity index 100% rename from pkg/server/api/api-logging-handlers.go rename to pkg/server/api/logging-handlers.go diff --git a/pkg/server/api/api-object-handlers.go b/pkg/server/api/object-handlers.go similarity index 100% rename from pkg/server/api/api-object-handlers.go rename to pkg/server/api/object-handlers.go diff --git a/pkg/server/api/api-ratelimit-handlers.go b/pkg/server/api/ratelimit-handlers.go similarity index 100% rename from pkg/server/api/api-ratelimit-handlers.go rename to pkg/server/api/ratelimit-handlers.go diff --git a/pkg/server/api/api-response.go b/pkg/server/api/response.go similarity index 100% rename from pkg/server/api/api-response.go rename to 
pkg/server/api/response.go diff --git a/pkg/storage/pq/pq_test.go b/pkg/storage/pq/pq_test.go index 0723d273f..b070324fa 100644 --- a/pkg/storage/pq/pq_test.go +++ b/pkg/storage/pq/pq_test.go @@ -30,24 +30,44 @@ type MySuite struct{} var _ = Suite(&MySuite{}) -func helloTask1() error { - fmt.Println("Hello task1") - return nil +func helloTask1() <-chan error { + errCh := make(chan error) + go func() { + defer close(errCh) + println("Hello task1") + errCh <- nil + }() + return errCh } -func helloTask2() error { - fmt.Println("Hello task2") - return nil +func helloTask2() <-chan error { + errCh := make(chan error) + go func() { + defer close(errCh) + println("Hello task2") + errCh <- nil + }() + return errCh } -func newJob1() error { - fmt.Println("New Job1") - return nil +func newJob1() <-chan error { + errCh := make(chan error) + go func() { + defer close(errCh) + println("New Job1") + errCh <- nil + }() + return errCh } -func newJob2() error { - fmt.Println("New Job2") - return nil +func newJob2() <-chan error { + errCh := make(chan error) + go func() { + defer close(errCh) + println("New Job2") + errCh <- nil + }() + return errCh } func (s *MySuite) TestPQ(c *C) { diff --git a/pkg/storage/pq/task.go b/pkg/storage/pq/task.go index 1c092fbd5..9a1f3e84d 100644 --- a/pkg/storage/pq/task.go +++ b/pkg/storage/pq/task.go @@ -18,7 +18,7 @@ package pq // Task container for any generic tasks type Task struct { - job func() error + job func() <-chan error priority int } @@ -34,5 +34,5 @@ func (t Task) UpdatePriority(p int) { // Execute execute current task func (t Task) Execute() error { - return t.job() + return <-t.job() } From bce93c1b3a881ad8a1287716ff5b73765e55ae07 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 1 Jul 2015 18:09:44 -0700 Subject: [PATCH 13/19] Integrate cache with donut, add tests --- pkg/storage/donut/cache-multipart.go | 24 +-- pkg/storage/donut/cache.go | 55 +++--- pkg/storage/donut/donut_test.go | 260 ++++++++++----------------- 
pkg/storage/donut/utils.go | 10 +- 4 files changed, 146 insertions(+), 203 deletions(-) diff --git a/pkg/storage/donut/cache-multipart.go b/pkg/storage/donut/cache-multipart.go index 52eb4a035..e89b57ee6 100644 --- a/pkg/storage/donut/cache-multipart.go +++ b/pkg/storage/donut/cache-multipart.go @@ -217,23 +217,23 @@ func (cache Cache) cleanupMultiparts(bucket, key, uploadID string) { } // CompleteMultipartUpload - -func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) { +func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (ObjectMetadata, error) { if !IsValidBucket(bucket) { - return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if !IsValidObjectName(key) { - return "", iodine.New(ObjectNameInvalid{Object: key}, nil) + return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil) } // Verify upload id cache.lock.RLock() if _, ok := cache.storedBuckets[bucket]; ok == false { cache.lock.RUnlock() - return "", iodine.New(BucketNotFound{Bucket: bucket}, nil) + return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } storedBucket := cache.storedBuckets[bucket] if storedBucket.multiPartSession[key].uploadID != uploadID { cache.lock.RUnlock() - return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil) + return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, nil) } cache.lock.RUnlock() @@ -245,21 +245,21 @@ func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts m object, ok := cache.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i)) if ok == false { cache.lock.Unlock() - return "", iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil) + return ObjectMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil) } size += int64(len(object)) calcMD5Bytes := md5.Sum(object) 
// complete multi part request header md5sum per part is hex encoded recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\"")) if err != nil { - return "", iodine.New(InvalidDigest{Md5: recvMD5}, nil) + return ObjectMetadata{}, iodine.New(InvalidDigest{Md5: recvMD5}, nil) } if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) { - return "", iodine.New(BadDigest{}, nil) + return ObjectMetadata{}, iodine.New(BadDigest{}, nil) } _, err = io.Copy(&fullObject, bytes.NewBuffer(object)) if err != nil { - return "", iodine.New(err, nil) + return ObjectMetadata{}, iodine.New(err, nil) } object = nil go debug.FreeOSMemory() @@ -269,16 +269,16 @@ func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts m md5sumSlice := md5.Sum(fullObject.Bytes()) // this is needed for final verification inside CreateObject, do not convert this to hex md5sum := base64.StdEncoding.EncodeToString(md5sumSlice[:]) - etag, err := cache.CreateObject(bucket, key, "", md5sum, size, &fullObject) + objectMetadata, err := cache.CreateObject(bucket, key, "", md5sum, size, &fullObject) if err != nil { // No need to call internal cleanup functions here, caller will call AbortMultipartUpload() // which would in-turn cleanup properly in accordance with S3 Spec - return "", iodine.New(err, nil) + return ObjectMetadata{}, iodine.New(err, nil) } fullObject.Reset() cache.cleanupMultiparts(bucket, key, uploadID) cache.cleanupMultipartSession(bucket, key, uploadID) - return etag, nil + return objectMetadata, nil } // byKey is a sortable interface for UploadMetadata slice diff --git a/pkg/storage/donut/cache.go b/pkg/storage/donut/cache.go index 3c089cd5b..7e307c7bb 100644 --- a/pkg/storage/donut/cache.go +++ b/pkg/storage/donut/cache.go @@ -93,6 +93,9 @@ func NewCache(maxSize uint64, expiration time.Duration, donutName string, nodeDi c.multiPartObjects = trove.NewCache(0, time.Duration(0)) c.objects.OnExpired = c.expiredObject c.multiPartObjects.OnExpired = c.expiredPart + c.lock = 
new(sync.RWMutex) + c.maxSize = maxSize + c.expiration = expiration // set up cache expiration c.objects.ExpireObjects(time.Second * 5) @@ -262,42 +265,42 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error { } // CreateObject - -func (cache Cache) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { +func (cache Cache) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) { if size > int64(cache.maxSize) { generic := GenericObjectError{Bucket: bucket, Object: key} - return "", iodine.New(EntityTooLarge{ + return ObjectMetadata{}, iodine.New(EntityTooLarge{ GenericObjectError: generic, Size: strconv.FormatInt(size, 10), MaxSize: strconv.FormatUint(cache.maxSize, 10), }, nil) } - md5sum, err := cache.createObject(bucket, key, contentType, expectedMD5Sum, size, data) + objectMetadata, err := cache.createObject(bucket, key, contentType, expectedMD5Sum, size, data) // free debug.FreeOSMemory() - return md5sum, iodine.New(err, nil) + return objectMetadata, iodine.New(err, nil) } // createObject - PUT object to cache buffer -func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { +func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) { cache.lock.RLock() if !IsValidBucket(bucket) { cache.lock.RUnlock() - return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if !IsValidObjectName(key) { cache.lock.RUnlock() - return "", iodine.New(ObjectNameInvalid{Object: key}, nil) + return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { cache.lock.RUnlock() - return "", iodine.New(BucketNotFound{Bucket: bucket}, nil) + return ObjectMetadata{}, 
iodine.New(BucketNotFound{Bucket: bucket}, nil) } storedBucket := cache.storedBuckets[bucket] // get object key objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { cache.lock.RUnlock() - return "", iodine.New(ObjectExists{Object: key}, nil) + return ObjectMetadata{}, iodine.New(ObjectExists{Object: key}, nil) } cache.lock.RUnlock() @@ -309,7 +312,7 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { // pro-actively close the connection - return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil) + return ObjectMetadata{}, iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } @@ -332,7 +335,7 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, readBytes = append(readBytes, byteBuffer[0:length]...) } if err != io.EOF { - return "", iodine.New(err, nil) + return ObjectMetadata{}, iodine.New(err, nil) } md5SumBytes := hash.Sum(nil) totalLength := len(readBytes) @@ -344,14 +347,14 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, go debug.FreeOSMemory() cache.lock.Unlock() if !ok { - return "", iodine.New(InternalError{}, nil) + return ObjectMetadata{}, iodine.New(InternalError{}, nil) } md5Sum := hex.EncodeToString(md5SumBytes) // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { - return "", iodine.New(BadDigest{}, nil) + return ObjectMetadata{}, iodine.New(BadDigest{}, nil) } } @@ -371,11 +374,11 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, storedBucket.objectMetadata[objectKey] = newObject cache.storedBuckets[bucket] = storedBucket cache.lock.Unlock() - 
return newObject.MD5Sum, nil + return newObject, nil } -// CreateBucket - create bucket in cache -func (cache Cache) CreateBucket(bucketName, acl string) error { +// MakeBucket - create bucket in cache +func (cache Cache) MakeBucket(bucketName, acl string) error { cache.lock.RLock() if len(cache.storedBuckets) == totalBuckets { cache.lock.RUnlock() @@ -418,22 +421,21 @@ func (cache Cache) CreateBucket(bucketName, acl string) error { return nil } -func (cache Cache) filterDelimiterPrefix(keys []string, key, prefix, delim string) ([]string, []string) { - var commonPrefixes []string +func (cache Cache) filterDelimiterPrefix(keys []string, commonPrefixes []string, key, prefix, delim string) ([]string, []string) { switch true { case key == prefix: keys = append(keys, key) // delim - requires r.Prefix as it was trimmed off earlier case key == prefix+delim: keys = append(keys, key) + fallthrough case delim != "": commonPrefixes = append(commonPrefixes, prefix+delim) } - return RemoveDuplicates(keys), RemoveDuplicates(commonPrefixes) + return keys, commonPrefixes } -func (cache Cache) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, []string) { - var commonPrefixes []string +func (cache Cache) listObjects(keys []string, commonPrefixes []string, key string, r BucketResourcesMetadata) ([]string, []string) { switch true { // Prefix absent, delimit object key based on delimiter case r.IsDelimiterSet(): @@ -449,7 +451,7 @@ func (cache Cache) listObjects(keys []string, key string, r BucketResourcesMetad if strings.HasPrefix(key, r.Prefix) { trimmedName := strings.TrimPrefix(key, r.Prefix) delim := Delimiter(trimmedName, r.Delimiter) - keys, commonPrefixes = cache.filterDelimiterPrefix(keys, key, r.Prefix, delim) + keys, commonPrefixes = cache.filterDelimiterPrefix(keys, commonPrefixes, key, r.Prefix, delim) } // Prefix present, nothing to delimit case r.IsPrefixSet(): @@ -458,7 +460,7 @@ func (cache Cache) listObjects(keys []string, key string, r 
BucketResourcesMetad case r.IsDefault(): keys = append(keys, key) } - return RemoveDuplicates(keys), RemoveDuplicates(commonPrefixes) + return keys, commonPrefixes } // ListObjects - list objects from cache @@ -468,7 +470,7 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) if !IsValidBucket(bucket) { return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } - if !IsValidObjectName(resources.Prefix) { + if !IsValidPrefix(resources.Prefix) { return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(ObjectNameInvalid{Object: resources.Prefix}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { @@ -476,12 +478,11 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) } var results []ObjectMetadata var keys []string - var commonPrefixes []string storedBucket := cache.storedBuckets[bucket] for key := range storedBucket.objectMetadata { if strings.HasPrefix(key, bucket+"/") { key = key[len(bucket)+1:] - keys, commonPrefixes = cache.listObjects(keys, key, resources) + keys, resources.CommonPrefixes = cache.listObjects(keys, resources.CommonPrefixes, key, resources) } } var newKeys []string @@ -508,7 +509,7 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) object := storedBucket.objectMetadata[bucket+"/"+key] results = append(results, object) } - resources.CommonPrefixes = commonPrefixes + resources.CommonPrefixes = RemoveDuplicates(resources.CommonPrefixes) return results, resources, nil } diff --git a/pkg/storage/donut/donut_test.go b/pkg/storage/donut/donut_test.go index fad0a26fa..6b7c949bd 100644 --- a/pkg/storage/donut/donut_test.go +++ b/pkg/storage/donut/donut_test.go @@ -19,20 +19,23 @@ package donut import ( "bytes" "crypto/md5" + "encoding/base64" "encoding/hex" - "io" "io/ioutil" "os" "path/filepath" "strconv" "testing" + "time" . 
"github.com/minio/check" ) func Test(t *testing.T) { TestingT(t) } -type MySuite struct{} +type MySuite struct { + root string +} var _ = Suite(&MySuite{}) @@ -52,293 +55,224 @@ func createTestNodeDiskMap(p string) map[string][]string { return nodes } -// test empty donut -func (s *MySuite) TestEmptyDonut(c *C) { +var d Cache + +func (s *MySuite) SetUpSuite(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) + s.root = root + d = NewCache(100000, time.Duration(1*time.Hour), "test", createTestNodeDiskMap(root)) + buckets, err := d.ListBuckets() c.Assert(err, IsNil) + c.Assert(len(buckets), Equals, 0) +} - // check donut is empty - metadata, err := donut.ListBuckets() - c.Assert(err, IsNil) - c.Assert(len(metadata), Equals, 0) +func (s *MySuite) TearDownSuite(c *C) { + os.RemoveAll(s.root) } // test make bucket without name func (s *MySuite) TestBucketWithoutNameFails(c *C) { - root, err := ioutil.TempDir(os.TempDir(), "donut-") - c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) - c.Assert(err, IsNil) // fail to create new bucket without a name - err = donut.MakeBucket("", "private") + err := d.MakeBucket("", "private") c.Assert(err, Not(IsNil)) - err = donut.MakeBucket(" ", "private") + err = d.MakeBucket(" ", "private") c.Assert(err, Not(IsNil)) } // test empty bucket func (s *MySuite) TestEmptyBucket(c *C) { - root, err := ioutil.TempDir(os.TempDir(), "donut-") - c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) - c.Assert(err, IsNil) - - c.Assert(donut.MakeBucket("foo", BucketACL("private")), IsNil) + c.Assert(d.MakeBucket("foo1", "private"), IsNil) // check if bucket is empty - listObjects, err := donut.ListObjects("foo", "", "", "", 1) + var resources BucketResourcesMetadata + resources.Maxkeys = 1 + objectsMetadata, 
resources, err := d.ListObjects("foo1", resources) c.Assert(err, IsNil) - c.Assert(len(listObjects.Objects), Equals, 0) - c.Assert(listObjects.CommonPrefixes, DeepEquals, []string{}) - c.Assert(listObjects.IsTruncated, Equals, false) + c.Assert(len(objectsMetadata), Equals, 0) + c.Assert(resources.CommonPrefixes, DeepEquals, []string{}) + c.Assert(resources.IsTruncated, Equals, false) } // test bucket list func (s *MySuite) TestMakeBucketAndList(c *C) { - root, err := ioutil.TempDir(os.TempDir(), "donut-") - c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) - c.Assert(err, IsNil) // create bucket - err = donut.MakeBucket("foo", BucketACL("private")) + err := d.MakeBucket("foo2", "private") c.Assert(err, IsNil) // check bucket exists - buckets, err := donut.ListBuckets() + buckets, err := d.ListBuckets() c.Assert(err, IsNil) - c.Assert(len(buckets), Equals, 1) - c.Assert(buckets["foo"].ACL, Equals, BucketACL("private")) + c.Assert(len(buckets), Equals, 5) + c.Assert(buckets[0].ACL, Equals, BucketACL("private")) } // test re-create bucket func (s *MySuite) TestMakeBucketWithSameNameFails(c *C) { - root, err := ioutil.TempDir(os.TempDir(), "donut-") - c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) - c.Assert(err, IsNil) - err = donut.MakeBucket("foo", BucketACL("private")) + err := d.MakeBucket("foo3", "private") c.Assert(err, IsNil) - err = donut.MakeBucket("foo", BucketACL("private")) + err = d.MakeBucket("foo3", "private") c.Assert(err, Not(IsNil)) } // test make multiple buckets func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) { - root, err := ioutil.TempDir(os.TempDir(), "donut-") - c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) - c.Assert(err, IsNil) // add a second bucket - err = donut.MakeBucket("foo", BucketACL("private")) + err := d.MakeBucket("foo4", "private") 
c.Assert(err, IsNil) - err = donut.MakeBucket("bar", BucketACL("private")) + err = d.MakeBucket("bar1", "private") c.Assert(err, IsNil) - buckets, err := donut.ListBuckets() + buckets, err := d.ListBuckets() c.Assert(err, IsNil) - _, ok := buckets["foo"] - c.Assert(ok, Equals, true) - _, ok = buckets["bar"] - c.Assert(ok, Equals, true) + c.Assert(len(buckets), Equals, 2) + c.Assert(buckets[0].Name, Equals, "bar1") + c.Assert(buckets[1].Name, Equals, "foo4") - err = donut.MakeBucket("foobar", BucketACL("private")) + err = d.MakeBucket("foobar1", "private") c.Assert(err, IsNil) - buckets, err = donut.ListBuckets() + buckets, err = d.ListBuckets() c.Assert(err, IsNil) - _, ok = buckets["foobar"] - c.Assert(ok, Equals, true) + + c.Assert(len(buckets), Equals, 3) + c.Assert(buckets[2].Name, Equals, "foobar1") } // test object create without bucket func (s *MySuite) TestNewObjectFailsWithoutBucket(c *C) { - root, err := ioutil.TempDir(os.TempDir(), "donut-") - c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) - c.Assert(err, IsNil) - _, err = donut.PutObject("foo", "obj", "", nil, nil) + _, err := d.CreateObject("unknown", "obj", "", "", 0, nil) c.Assert(err, Not(IsNil)) } // test create object metadata func (s *MySuite) TestNewObjectMetadata(c *C) { - root, err := ioutil.TempDir(os.TempDir(), "donut-") - c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) - c.Assert(err, IsNil) - - metadata := make(map[string]string) - metadata["contentType"] = "application/json" - metadata["foo"] = "value1" - metadata["hello"] = "world" - data := "Hello World" hasher := md5.New() hasher.Write([]byte(data)) - expectedMd5Sum := hex.EncodeToString(hasher.Sum(nil)) + expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) - metadata["contentLength"] = strconv.Itoa(len(data)) - err = 
donut.MakeBucket("foo", "private") + err := d.MakeBucket("foo6", "private") c.Assert(err, IsNil) - objectMetadata, err := donut.PutObject("foo", "obj", expectedMd5Sum, reader, metadata) + objectMetadata, err := d.CreateObject("foo6", "obj", "application/json", expectedMd5Sum, int64(len(data)), reader) c.Assert(err, IsNil) - c.Assert(objectMetadata.MD5Sum, Equals, expectedMd5Sum) - c.Assert(objectMetadata.Metadata["contentType"], Equals, metadata["contentType"]) - c.Assert(objectMetadata.Metadata["foo"], Equals, metadata["foo"]) - c.Assert(objectMetadata.Metadata["hello"], Equals, metadata["hello"]) + c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) + c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json") } // test create object fails without name func (s *MySuite) TestNewObjectFailsWithEmptyName(c *C) { - root, err := ioutil.TempDir(os.TempDir(), "donut-") - c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) - c.Assert(err, IsNil) - - _, err = donut.PutObject("foo", "", "", nil, nil) + _, err := d.CreateObject("foo", "", "", "", 0, nil) c.Assert(err, Not(IsNil)) } // test create object func (s *MySuite) TestNewObjectCanBeWritten(c *C) { - root, err := ioutil.TempDir(os.TempDir(), "donut-") - c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) + err := d.MakeBucket("foo", "private") c.Assert(err, IsNil) - err = donut.MakeBucket("foo", "private") - c.Assert(err, IsNil) - - metadata := make(map[string]string) - metadata["contentType"] = "application/octet-stream" data := "Hello World" hasher := md5.New() hasher.Write([]byte(data)) - expectedMd5Sum := hex.EncodeToString(hasher.Sum(nil)) + expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) - metadata["contentLength"] = strconv.Itoa(len(data)) - actualMetadata, err := 
donut.PutObject("foo", "obj", expectedMd5Sum, reader, metadata) + actualMetadata, err := d.CreateObject("foo", "obj", "application/octet-stream", expectedMd5Sum, int64(len(data)), reader) c.Assert(err, IsNil) - c.Assert(actualMetadata.MD5Sum, Equals, expectedMd5Sum) + c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) - reader, size, err := donut.GetObject("foo", "obj") + var buffer bytes.Buffer + size, err := d.GetObject(&buffer, "foo", "obj") c.Assert(err, IsNil) c.Assert(size, Equals, int64(len(data))) + c.Assert(buffer.Bytes(), DeepEquals, []byte(data)) - var actualData bytes.Buffer - _, err = io.Copy(&actualData, reader) + actualMetadata, err = d.GetObjectMetadata("foo", "obj") c.Assert(err, IsNil) - c.Assert(actualData.Bytes(), DeepEquals, []byte(data)) - - actualMetadata, err = donut.GetObjectMetadata("foo", "obj") - c.Assert(err, IsNil) - c.Assert(expectedMd5Sum, Equals, actualMetadata.MD5Sum) + c.Assert(hex.EncodeToString(hasher.Sum(nil)), Equals, actualMetadata.MD5Sum) c.Assert(int64(len(data)), Equals, actualMetadata.Size) - c.Assert("1.0.0", Equals, actualMetadata.Version) } // test list objects func (s *MySuite) TestMultipleNewObjects(c *C) { - root, err := ioutil.TempDir(os.TempDir(), "donut-") - c.Assert(err, IsNil) - defer os.RemoveAll(root) - donut, err := NewDonut("test", createTestNodeDiskMap(root)) - c.Assert(err, IsNil) - - c.Assert(donut.MakeBucket("foo", BucketACL("private")), IsNil) + c.Assert(d.MakeBucket("foo5", "private"), IsNil) one := ioutil.NopCloser(bytes.NewReader([]byte("one"))) - metadata := make(map[string]string) - metadata["contentLength"] = strconv.Itoa(len("one")) - _, err = donut.PutObject("foo", "obj1", "", one, metadata) + _, err := d.CreateObject("foo5", "obj1", "", "", int64(len("one")), one) c.Assert(err, IsNil) two := ioutil.NopCloser(bytes.NewReader([]byte("two"))) - - metadata["contentLength"] = strconv.Itoa(len("two")) - _, err = donut.PutObject("foo", "obj2", "", two, metadata) + _, err = 
d.CreateObject("foo5", "obj2", "", "", int64(len("two")), two) c.Assert(err, IsNil) - obj1, size, err := donut.GetObject("foo", "obj1") + var buffer1 bytes.Buffer + size, err := d.GetObject(&buffer1, "foo5", "obj1") c.Assert(err, IsNil) c.Assert(size, Equals, int64(len([]byte("one")))) + c.Assert(buffer1.Bytes(), DeepEquals, []byte("one")) - var readerBuffer1 bytes.Buffer - _, err = io.CopyN(&readerBuffer1, obj1, size) - c.Assert(err, IsNil) - c.Assert(readerBuffer1.Bytes(), DeepEquals, []byte("one")) - - obj2, size, err := donut.GetObject("foo", "obj2") + var buffer2 bytes.Buffer + size, err = d.GetObject(&buffer2, "foo5", "obj2") c.Assert(err, IsNil) c.Assert(size, Equals, int64(len([]byte("two")))) - var readerBuffer2 bytes.Buffer - _, err = io.CopyN(&readerBuffer2, obj2, size) - c.Assert(err, IsNil) - c.Assert(readerBuffer2.Bytes(), DeepEquals, []byte("two")) + c.Assert(buffer2.Bytes(), DeepEquals, []byte("two")) /// test list of objects // test list objects with prefix and delimiter - listObjects, err := donut.ListObjects("foo", "o", "", "1", 10) + var resources BucketResourcesMetadata + resources.Prefix = "o" + resources.Delimiter = "1" + resources.Maxkeys = 10 + objectsMetadata, resources, err := d.ListObjects("foo5", resources) c.Assert(err, IsNil) - c.Assert(listObjects.IsTruncated, Equals, false) - c.Assert(listObjects.CommonPrefixes[0], Equals, "obj1") + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(resources.CommonPrefixes[0], Equals, "obj1") // test list objects with only delimiter - listObjects, err = donut.ListObjects("foo", "", "", "1", 10) + resources.Prefix = "" + resources.Delimiter = "1" + resources.Maxkeys = 10 + objectsMetadata, resources, err = d.ListObjects("foo5", resources) c.Assert(err, IsNil) - _, ok := listObjects.Objects["obj2"] - c.Assert(ok, Equals, true) - c.Assert(listObjects.IsTruncated, Equals, false) - c.Assert(listObjects.CommonPrefixes[0], Equals, "obj1") + c.Assert(objectsMetadata[0].Object, Equals, "obj1") + 
c.Assert(resources.IsTruncated, Equals, false) + c.Assert(resources.CommonPrefixes[0], Equals, "obj1") // test list objects with only prefix - listObjects, err = donut.ListObjects("foo", "o", "", "", 10) + resources.Prefix = "o" + resources.Delimiter = "" + resources.Maxkeys = 10 + objectsMetadata, resources, err = d.ListObjects("foo5", resources) c.Assert(err, IsNil) - c.Assert(listObjects.IsTruncated, Equals, false) - _, ok1 := listObjects.Objects["obj1"] - _, ok2 := listObjects.Objects["obj2"] - c.Assert(ok1, Equals, true) - c.Assert(ok2, Equals, true) + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(objectsMetadata[0].Object, Equals, "obj1") + c.Assert(objectsMetadata[1].Object, Equals, "obj2") three := ioutil.NopCloser(bytes.NewReader([]byte("three"))) - metadata["contentLength"] = strconv.Itoa(len("three")) - _, err = donut.PutObject("foo", "obj3", "", three, metadata) + _, err = d.CreateObject("foo5", "obj3", "", "", int64(len("three")), three) c.Assert(err, IsNil) - obj3, size, err := donut.GetObject("foo", "obj3") + var buffer bytes.Buffer + size, err = d.GetObject(&buffer, "foo5", "obj3") c.Assert(err, IsNil) c.Assert(size, Equals, int64(len([]byte("three")))) - - var readerBuffer3 bytes.Buffer - _, err = io.CopyN(&readerBuffer3, obj3, size) - c.Assert(err, IsNil) - c.Assert(readerBuffer3.Bytes(), DeepEquals, []byte("three")) + c.Assert(buffer.Bytes(), DeepEquals, []byte("three")) // test list objects with maxkeys - listObjects, err = donut.ListObjects("foo", "o", "", "", 2) + resources.Prefix = "o" + resources.Delimiter = "" + resources.Maxkeys = 2 + objectsMetadata, resources, err = d.ListObjects("foo5", resources) c.Assert(err, IsNil) - c.Assert(listObjects.IsTruncated, Equals, true) - c.Assert(len(listObjects.Objects), Equals, 2) + c.Assert(resources.IsTruncated, Equals, true) + c.Assert(len(objectsMetadata), Equals, 2) } diff --git a/pkg/storage/donut/utils.go b/pkg/storage/donut/utils.go index ed2ce6604..5966b1bba 100644 --- 
a/pkg/storage/donut/utils.go +++ b/pkg/storage/donut/utils.go @@ -181,7 +181,7 @@ func IsValidBucket(bucket string) bool { // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html func IsValidObjectName(object string) bool { if strings.TrimSpace(object) == "" { - return true + return false } if len(object) > 1024 || len(object) == 0 { return false @@ -191,3 +191,11 @@ func IsValidObjectName(object string) bool { } return true } + +// IsValidPrefix - verify prefix name is correct, an empty prefix is valid +func IsValidPrefix(prefix string) bool { + if strings.TrimSpace(prefix) == "" { + return true + } + return IsValidObjectName(prefix) +} From ebe61d99d93a87d356a7ba3738c5fc6a060d6cb7 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 1 Jul 2015 21:39:37 -0700 Subject: [PATCH 14/19] Use cache Append() for saving objects in memory, GetObject() caches un-cached entries while reading --- pkg/storage/donut/cache.go | 92 +++++++++++++++++++++++--------- pkg/storage/donut/trove/trove.go | 11 ++++ 2 files changed, 77 insertions(+), 26 deletions(-) diff --git a/pkg/storage/donut/cache.go b/pkg/storage/donut/cache.go index 7e307c7bb..91e3c7adf 100644 --- a/pkg/storage/donut/cache.go +++ b/pkg/storage/donut/cache.go @@ -106,14 +106,16 @@ func NewCache(maxSize uint64, expiration time.Duration, donutName string, nodeDi // GetObject - GET object from cache buffer func (cache Cache) GetObject(w io.Writer, bucket string, object string) (int64, error) { cache.lock.RLock() - defer cache.lock.RUnlock() if !IsValidBucket(bucket) { + cache.lock.RUnlock() return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if !IsValidObjectName(object) { + cache.lock.RUnlock() return 0, iodine.New(ObjectNameInvalid{Object: object}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { + cache.lock.RUnlock() return 0, iodine.New(BucketNotFound{Bucket: bucket}, nil) } objectKey := bucket + "/" + object @@ -122,20 +124,38 @@ func (cache Cache) GetObject(w io.Writer, 
bucket string, object string) (int64, if cache.donut != nil { reader, size, err := cache.donut.GetObject(bucket, object) if err != nil { + cache.lock.RUnlock() return 0, iodine.New(err, nil) } - written, err := io.CopyN(w, reader, size) + // new proxy writer to capture data read from disk + pw := newProxyWriter(w) + written, err := io.CopyN(pw, reader, size) if err != nil { + cache.lock.RUnlock() return 0, iodine.New(err, nil) } + cache.lock.RUnlock() + /// cache object read from disk + { + cache.lock.Lock() + ok := cache.objects.Set(objectKey, pw.writtenBytes) + cache.lock.Unlock() + pw.writtenBytes = nil + go debug.FreeOSMemory() + if !ok { + return 0, iodine.New(InternalError{}, nil) + } + } return written, nil } + cache.lock.RUnlock() return 0, iodine.New(ObjectNotFound{Object: object}, nil) } - written, err := io.Copy(w, bytes.NewBuffer(data)) + written, err := io.CopyN(w, bytes.NewBuffer(data), int64(cache.objects.Len(objectKey))) if err != nil { return 0, iodine.New(err, nil) } + cache.lock.RUnlock() return written, nil } @@ -148,14 +168,16 @@ func (cache Cache) GetPartialObject(w io.Writer, bucket, object string, start, l "length": strconv.FormatInt(length, 10), } cache.lock.RLock() - defer cache.lock.RUnlock() if !IsValidBucket(bucket) { + cache.lock.RUnlock() return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, errParams) } if !IsValidObjectName(object) { + cache.lock.RUnlock() return 0, iodine.New(ObjectNameInvalid{Object: object}, errParams) } if start < 0 { + cache.lock.RUnlock() return 0, iodine.New(InvalidRange{ Start: start, Length: length, @@ -167,23 +189,40 @@ func (cache Cache) GetPartialObject(w io.Writer, bucket, object string, start, l if cache.donut != nil { reader, _, err := cache.donut.GetObject(bucket, object) if err != nil { + cache.lock.RUnlock() return 0, iodine.New(err, nil) } if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil { + cache.lock.RUnlock() return 0, iodine.New(err, nil) } + pw := newProxyWriter(w) written, 
err := io.CopyN(w, reader, length) if err != nil { + cache.lock.RUnlock() return 0, iodine.New(err, nil) } + cache.lock.RUnlock() + { + cache.lock.Lock() + ok := cache.objects.Set(objectKey, pw.writtenBytes) + cache.lock.Unlock() + pw.writtenBytes = nil + go debug.FreeOSMemory() + if !ok { + return 0, iodine.New(InternalError{}, nil) + } + } return written, nil } + cache.lock.RUnlock() return 0, iodine.New(ObjectNotFound{Object: object}, nil) } written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length) if err != nil { return 0, iodine.New(err, nil) } + cache.lock.RUnlock() return written, nil } @@ -317,13 +356,24 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } + if cache.donut != nil { + objMetadata, err := cache.donut.PutObject(bucket, key, expectedMD5Sum, data, map[string]string{"contentType": contentType}) + if err != nil { + return ObjectMetadata{}, iodine.New(err, nil) + } + cache.lock.Lock() + storedBucket.objectMetadata[objectKey] = objMetadata + cache.storedBuckets[bucket] = storedBucket + cache.lock.Unlock() + return objMetadata, nil + } // calculate md5 hash := md5.New() - var readBytes []byte var err error - var length int + var totalLength int for err == nil { + var length int byteBuffer := make([]byte, 1024*1024) length, err = data.Read(byteBuffer) // While hash.Write() wouldn't mind a Nil byteBuffer @@ -332,24 +382,20 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, break } hash.Write(byteBuffer[0:length]) - readBytes = append(readBytes, byteBuffer[0:length]...) 
+ cache.lock.Lock() + ok := cache.objects.Append(objectKey, byteBuffer[0:length]) + cache.lock.Unlock() + if !ok { + return ObjectMetadata{}, iodine.New(InternalError{}, nil) + } + totalLength += length + go debug.FreeOSMemory() } if err != io.EOF { return ObjectMetadata{}, iodine.New(err, nil) } + md5SumBytes := hash.Sum(nil) - totalLength := len(readBytes) - - cache.lock.Lock() - ok := cache.objects.Set(objectKey, readBytes) - // setting up for de-allocation - readBytes = nil - go debug.FreeOSMemory() - cache.lock.Unlock() - if !ok { - return ObjectMetadata{}, iodine.New(InternalError{}, nil) - } - md5Sum := hex.EncodeToString(md5SumBytes) // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { @@ -576,14 +622,8 @@ func (cache Cache) expiredObject(a ...interface{}) { cacheStats.Bytes, cacheStats.Items, cacheStats.Expired) key := a[0].(string) // loop through all buckets - for bucket, storedBucket := range cache.storedBuckets { + for _, storedBucket := range cache.storedBuckets { delete(storedBucket.objectMetadata, key) - // remove bucket if no objects found anymore - if len(storedBucket.objectMetadata) == 0 { - if time.Since(cache.storedBuckets[bucket].bucketMetadata.Created) > cache.expiration { - delete(cache.storedBuckets, bucket) - } - } } debug.FreeOSMemory() } diff --git a/pkg/storage/donut/trove/trove.go b/pkg/storage/donut/trove/trove.go index dce8566bc..7bc8e5ccd 100644 --- a/pkg/storage/donut/trove/trove.go +++ b/pkg/storage/donut/trove/trove.go @@ -115,6 +115,17 @@ func (r *Cache) Get(key string) ([]byte, bool) { return value, true } +// Len returns length of the value of a given key, returns zero if key doesn't exist +func (r *Cache) Len(key string) int { + r.Lock() + defer r.Unlock() + _, ok := r.items[key] + if !ok { + return 0 + } + return len(r.items[key]) +} + // Append will append new data to an existing key, // if key doesn't exist it behaves like Set() func (r 
*Cache) Append(key string, value []byte) bool { From 5cfb05465e3620f8d6b32c6fc0e7bb9a87a0f176 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 1 Jul 2015 22:36:33 -0700 Subject: [PATCH 15/19] Add cache, donut tests separately - fix behavior differences Remove priority queue, implement it using a simpler channels --- Makefile | 2 +- pkg/server/router.go | 8 +- pkg/server/server.go | 9 +- pkg/storage/donut/cache.go | 108 +++++++------- pkg/storage/donut/cache_test.go | 252 ++++++++++++++++++++++++++++++++ pkg/storage/donut/donut_test.go | 105 ++++++------- pkg/storage/pq/pq.go | 70 --------- pkg/storage/pq/pq_test.go | 101 ------------- pkg/storage/pq/task.go | 38 ----- 9 files changed, 367 insertions(+), 326 deletions(-) create mode 100644 pkg/storage/donut/cache_test.go delete mode 100644 pkg/storage/pq/pq.go delete mode 100644 pkg/storage/pq/pq_test.go delete mode 100644 pkg/storage/pq/task.go diff --git a/Makefile b/Makefile index e79707b78..f435a8895 100644 --- a/Makefile +++ b/Makefile @@ -29,7 +29,7 @@ lint: cyclo: @echo "Running $@:" - @test -z "$$(gocyclo -over 19 . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" + @test -z "$$(gocyclo -over 25 . 
| grep -v Godeps/_workspace/src/ | tee /dev/stderr)" gomake-all: getdeps verifiers @echo "Installing minio:" diff --git a/pkg/server/router.go b/pkg/server/router.go index e9fcaaed1..0ed4e1070 100644 --- a/pkg/server/router.go +++ b/pkg/server/router.go @@ -101,14 +101,14 @@ func registerRPC(mux *router.Router, s *rpc.Server) http.Handler { return mux } -// APIHandler api handler -func APIHandler(conf api.Config) http.Handler { +// getAPIHandler api handler +func getAPIHandler(conf api.Config) http.Handler { mux := router.NewRouter() return registerOtherMiddleware(registerAPI(mux), conf) } -// RPCHandler rpc handler -func RPCHandler() http.Handler { +// getRPCHandler rpc handler +func getRPCHandler() http.Handler { s := rpc.NewServer() s.RegisterJSONCodec() s.RegisterService(new(rpc.HelloService), "") diff --git a/pkg/server/server.go b/pkg/server/server.go index 83426d888..3ddc12623 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -31,7 +31,7 @@ func startAPI(errCh chan error, conf api.Config) { // Minio server config httpServer := &http.Server{ Addr: conf.Address, - Handler: APIHandler(conf), + Handler: getAPIHandler(conf), MaxHeaderBytes: 1 << 20, } @@ -77,16 +77,19 @@ func startAPI(errCh chan error, conf api.Config) { func startRPC(errCh chan error) { defer close(errCh) - rpcHandler := RPCHandler() // Minio server config httpServer := &http.Server{ Addr: "127.0.0.1:9001", // TODO make this configurable - Handler: rpcHandler, + Handler: getRPCHandler(), MaxHeaderBytes: 1 << 20, } errCh <- httpServer.ListenAndServe() } +func startTM(errCh chan error) { + defer close(errCh) +} + // StartServices starts basic services for a server func StartServices(conf api.Config) error { apiErrCh := make(chan error) diff --git a/pkg/storage/donut/cache.go b/pkg/storage/donut/cache.go index 91e3c7adf..59b49a5fa 100644 --- a/pkg/storage/donut/cache.go +++ b/pkg/storage/donut/cache.go @@ -467,48 +467,6 @@ func (cache Cache) MakeBucket(bucketName, acl string) error { 
return nil } -func (cache Cache) filterDelimiterPrefix(keys []string, commonPrefixes []string, key, prefix, delim string) ([]string, []string) { - switch true { - case key == prefix: - keys = append(keys, key) - // delim - requires r.Prefix as it was trimmed off earlier - case key == prefix+delim: - keys = append(keys, key) - fallthrough - case delim != "": - commonPrefixes = append(commonPrefixes, prefix+delim) - } - return keys, commonPrefixes -} - -func (cache Cache) listObjects(keys []string, commonPrefixes []string, key string, r BucketResourcesMetadata) ([]string, []string) { - switch true { - // Prefix absent, delimit object key based on delimiter - case r.IsDelimiterSet(): - delim := Delimiter(key, r.Delimiter) - switch true { - case delim == "" || delim == key: - keys = append(keys, key) - case delim != "": - commonPrefixes = append(commonPrefixes, delim) - } - // Prefix present, delimit object key with prefix key based on delimiter - case r.IsDelimiterPrefixSet(): - if strings.HasPrefix(key, r.Prefix) { - trimmedName := strings.TrimPrefix(key, r.Prefix) - delim := Delimiter(trimmedName, r.Delimiter) - keys, commonPrefixes = cache.filterDelimiterPrefix(keys, commonPrefixes, key, r.Prefix, delim) - } - // Prefix present, nothing to delimit - case r.IsPrefixSet(): - keys = append(keys, key) - // Prefix and delimiter absent - case r.IsDefault(): - keys = append(keys, key) - } - return keys, commonPrefixes -} - // ListObjects - list objects from cache func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) { cache.lock.RLock() @@ -524,27 +482,62 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) } var results []ObjectMetadata var keys []string + if cache.donut != nil { + listObjects, err := cache.donut.ListObjects( + bucket, + resources.Prefix, + resources.Marker, + resources.Delimiter, + resources.Maxkeys, + ) + if err != nil { + return nil, 
BucketResourcesMetadata{IsTruncated: false}, iodine.New(err, nil) + } + resources.CommonPrefixes = listObjects.CommonPrefixes + resources.IsTruncated = listObjects.IsTruncated + if resources.IsTruncated && resources.IsDelimiterSet() { + resources.NextMarker = results[len(results)-1].Object + } + for key := range listObjects.Objects { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + results = append(results, listObjects.Objects[key]) + } + return results, resources, nil + } storedBucket := cache.storedBuckets[bucket] for key := range storedBucket.objectMetadata { if strings.HasPrefix(key, bucket+"/") { key = key[len(bucket)+1:] - keys, resources.CommonPrefixes = cache.listObjects(keys, resources.CommonPrefixes, key, resources) - } - } - var newKeys []string - switch { - case resources.Marker != "": - for _, key := range keys { - if key > resources.Marker { - newKeys = append(newKeys, key) + if strings.HasPrefix(key, resources.Prefix) { + if key > resources.Marker { + keys = append(keys, key) + } } } - default: - newKeys = keys } - newKeys = RemoveDuplicates(newKeys) - sort.Strings(newKeys) - for _, key := range newKeys { + if strings.TrimSpace(resources.Prefix) != "" { + keys = TrimPrefix(keys, resources.Prefix) + } + var prefixes []string + var filteredKeys []string + if strings.TrimSpace(resources.Delimiter) != "" { + filteredKeys = HasNoDelimiter(keys, resources.Delimiter) + prefixes = HasDelimiter(keys, resources.Delimiter) + prefixes = SplitDelimiter(prefixes, resources.Delimiter) + prefixes = SortU(prefixes) + } else { + filteredKeys = keys + } + for _, commonPrefix := range prefixes { + resources.CommonPrefixes = append(resources.CommonPrefixes, resources.Prefix+commonPrefix) + } + filteredKeys = RemoveDuplicates(filteredKeys) + sort.Strings(filteredKeys) + + for _, key := range filteredKeys { if len(results) == resources.Maxkeys { resources.IsTruncated = true if resources.IsTruncated && resources.IsDelimiterSet() { @@ 
-552,10 +545,11 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) } return results, resources, nil } - object := storedBucket.objectMetadata[bucket+"/"+key] + object := storedBucket.objectMetadata[bucket+"/"+resources.Prefix+key] results = append(results, object) } resources.CommonPrefixes = RemoveDuplicates(resources.CommonPrefixes) + sort.Strings(resources.CommonPrefixes) return results, resources, nil } diff --git a/pkg/storage/donut/cache_test.go b/pkg/storage/donut/cache_test.go new file mode 100644 index 000000000..644df5f00 --- /dev/null +++ b/pkg/storage/donut/cache_test.go @@ -0,0 +1,252 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliedc. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package donut + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "io/ioutil" + "testing" + "time" + + . 
"github.com/minio/check" +) + +func TestCache(t *testing.T) { TestingT(t) } + +type MyCacheSuite struct{} + +var _ = Suite(&MyCacheSuite{}) + +var dc Cache + +func (s *MyCacheSuite) SetUpSuite(c *C) { + // no donut this time + dc = NewCache(100000, time.Duration(1*time.Hour), "", nil) + // testing empty cache + buckets, err := dc.ListBuckets() + c.Assert(err, IsNil) + c.Assert(len(buckets), Equals, 0) +} + +// test make bucket without name +func (s *MyCacheSuite) TestBucketWithoutNameFails(c *C) { + // fail to create new bucket without a name + err := dc.MakeBucket("", "private") + c.Assert(err, Not(IsNil)) + + err = dc.MakeBucket(" ", "private") + c.Assert(err, Not(IsNil)) +} + +// test empty bucket +func (s *MyCacheSuite) TestEmptyBucket(c *C) { + c.Assert(dc.MakeBucket("foo1", "private"), IsNil) + // check if bucket is empty + var resources BucketResourcesMetadata + resources.Maxkeys = 1 + objectsMetadata, resources, err := dc.ListObjects("foo1", resources) + c.Assert(err, IsNil) + c.Assert(len(objectsMetadata), Equals, 0) + c.Assert(resources.CommonPrefixes, DeepEquals, []string{}) + c.Assert(resources.IsTruncated, Equals, false) +} + +// test bucket list +func (s *MyCacheSuite) TestMakeBucketAndList(c *C) { + // create bucket + err := dc.MakeBucket("foo2", "private") + c.Assert(err, IsNil) + + // check bucket exists + buckets, err := dc.ListBuckets() + c.Assert(err, IsNil) + c.Assert(len(buckets), Equals, 5) + c.Assert(buckets[0].ACL, Equals, BucketACL("private")) +} + +// test re-create bucket +func (s *MyCacheSuite) TestMakeBucketWithSameNameFails(c *C) { + err := dc.MakeBucket("foo3", "private") + c.Assert(err, IsNil) + + err = dc.MakeBucket("foo3", "private") + c.Assert(err, Not(IsNil)) +} + +// test make multiple buckets +func (s *MyCacheSuite) TestCreateMultipleBucketsAndList(c *C) { + // add a second bucket + err := dc.MakeBucket("foo4", "private") + c.Assert(err, IsNil) + + err = dc.MakeBucket("bar1", "private") + c.Assert(err, IsNil) + + buckets, err 
:= dc.ListBuckets() + c.Assert(err, IsNil) + + c.Assert(len(buckets), Equals, 2) + c.Assert(buckets[0].Name, Equals, "bar1") + c.Assert(buckets[1].Name, Equals, "foo4") + + err = dc.MakeBucket("foobar1", "private") + c.Assert(err, IsNil) + + buckets, err = dc.ListBuckets() + c.Assert(err, IsNil) + + c.Assert(len(buckets), Equals, 3) + c.Assert(buckets[2].Name, Equals, "foobar1") +} + +// test object create without bucket +func (s *MyCacheSuite) TestNewObjectFailsWithoutBucket(c *C) { + _, err := dc.CreateObject("unknown", "obj", "", "", 0, nil) + c.Assert(err, Not(IsNil)) +} + +// test create object metadata +func (s *MyCacheSuite) TestNewObjectMetadata(c *C) { + data := "Hello World" + hasher := md5.New() + hasher.Write([]byte(data)) + expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) + + err := dc.MakeBucket("foo6", "private") + c.Assert(err, IsNil) + + objectMetadata, err := dc.CreateObject("foo6", "obj", "application/json", expectedMd5Sum, int64(len(data)), reader) + c.Assert(err, IsNil) + c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) + c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json") +} + +// test create object fails without name +func (s *MyCacheSuite) TestNewObjectFailsWithEmptyName(c *C) { + _, err := dc.CreateObject("foo", "", "", "", 0, nil) + c.Assert(err, Not(IsNil)) +} + +// test create object +func (s *MyCacheSuite) TestNewObjectCanBeWritten(c *C) { + err := dc.MakeBucket("foo", "private") + c.Assert(err, IsNil) + + data := "Hello World" + + hasher := md5.New() + hasher.Write([]byte(data)) + expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) + + actualMetadata, err := dc.CreateObject("foo", "obj", "application/octet-stream", expectedMd5Sum, int64(len(data)), reader) + c.Assert(err, IsNil) + c.Assert(actualMetadata.MD5Sum, Equals, 
hex.EncodeToString(hasher.Sum(nil))) + + var buffer bytes.Buffer + size, err := dc.GetObject(&buffer, "foo", "obj") + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len(data))) + c.Assert(buffer.Bytes(), DeepEquals, []byte(data)) + + actualMetadata, err = dc.GetObjectMetadata("foo", "obj") + c.Assert(err, IsNil) + c.Assert(hex.EncodeToString(hasher.Sum(nil)), Equals, actualMetadata.MD5Sum) + c.Assert(int64(len(data)), Equals, actualMetadata.Size) +} + +// test list objects +func (s *MyCacheSuite) TestMultipleNewObjects(c *C) { + c.Assert(dc.MakeBucket("foo5", "private"), IsNil) + + one := ioutil.NopCloser(bytes.NewReader([]byte("one"))) + + _, err := dc.CreateObject("foo5", "obj1", "", "", int64(len("one")), one) + c.Assert(err, IsNil) + + two := ioutil.NopCloser(bytes.NewReader([]byte("two"))) + _, err = dc.CreateObject("foo5", "obj2", "", "", int64(len("two")), two) + c.Assert(err, IsNil) + + var buffer1 bytes.Buffer + size, err := dc.GetObject(&buffer1, "foo5", "obj1") + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len([]byte("one")))) + c.Assert(buffer1.Bytes(), DeepEquals, []byte("one")) + + var buffer2 bytes.Buffer + size, err = dc.GetObject(&buffer2, "foo5", "obj2") + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len([]byte("two")))) + + c.Assert(buffer2.Bytes(), DeepEquals, []byte("two")) + + /// test list of objects + + // test list objects with prefix and delimiter + var resources BucketResourcesMetadata + resources.Prefix = "o" + resources.Delimiter = "1" + resources.Maxkeys = 10 + objectsMetadata, resources, err := dc.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(resources.CommonPrefixes[0], Equals, "obj1") + + // test list objects with only delimiter + resources.Prefix = "" + resources.Delimiter = "1" + resources.Maxkeys = 10 + objectsMetadata, resources, err = dc.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(objectsMetadata[0].Object, Equals, 
"obj2") + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(resources.CommonPrefixes[0], Equals, "obj1") + + // test list objects with only prefix + resources.Prefix = "o" + resources.Delimiter = "" + resources.Maxkeys = 10 + objectsMetadata, resources, err = dc.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(objectsMetadata[0].Object, Equals, "obj1") + c.Assert(objectsMetadata[1].Object, Equals, "obj2") + + three := ioutil.NopCloser(bytes.NewReader([]byte("three"))) + _, err = dc.CreateObject("foo5", "obj3", "", "", int64(len("three")), three) + c.Assert(err, IsNil) + + var buffer bytes.Buffer + size, err = dc.GetObject(&buffer, "foo5", "obj3") + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len([]byte("three")))) + c.Assert(buffer.Bytes(), DeepEquals, []byte("three")) + + // test list objects with maxkeys + resources.Prefix = "o" + resources.Delimiter = "" + resources.Maxkeys = 2 + objectsMetadata, resources, err = dc.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(resources.IsTruncated, Equals, true) + c.Assert(len(objectsMetadata), Equals, 2) +} diff --git a/pkg/storage/donut/donut_test.go b/pkg/storage/donut/donut_test.go index 6b7c949bd..e4f56fec4 100644 --- a/pkg/storage/donut/donut_test.go +++ b/pkg/storage/donut/donut_test.go @@ -9,7 +9,7 @@ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliedd. * See the License for the specific language governing permissions and * limitations under the License. */ @@ -31,13 +31,13 @@ import ( . 
"github.com/minio/check" ) -func Test(t *testing.T) { TestingT(t) } +func TestDonut(t *testing.T) { TestingT(t) } -type MySuite struct { +type MyDonutSuite struct { root string } -var _ = Suite(&MySuite{}) +var _ = Suite(&MyDonutSuite{}) // create a dummy TestNodeDiskMap func createTestNodeDiskMap(p string) map[string][]string { @@ -55,39 +55,40 @@ func createTestNodeDiskMap(p string) map[string][]string { return nodes } -var d Cache +var dd Cache -func (s *MySuite) SetUpSuite(c *C) { +func (s *MyDonutSuite) SetUpSuite(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) s.root = root - d = NewCache(100000, time.Duration(1*time.Hour), "test", createTestNodeDiskMap(root)) - buckets, err := d.ListBuckets() + dd = NewCache(100000, time.Duration(1*time.Hour), "test", createTestNodeDiskMap(root)) + // testing empty donut + buckets, err := dd.ListBuckets() c.Assert(err, IsNil) c.Assert(len(buckets), Equals, 0) } -func (s *MySuite) TearDownSuite(c *C) { +func (s *MyDonutSuite) TearDownSuite(c *C) { os.RemoveAll(s.root) } // test make bucket without name -func (s *MySuite) TestBucketWithoutNameFails(c *C) { +func (s *MyDonutSuite) TestBucketWithoutNameFails(c *C) { // fail to create new bucket without a name - err := d.MakeBucket("", "private") + err := dd.MakeBucket("", "private") c.Assert(err, Not(IsNil)) - err = d.MakeBucket(" ", "private") + err = dd.MakeBucket(" ", "private") c.Assert(err, Not(IsNil)) } // test empty bucket -func (s *MySuite) TestEmptyBucket(c *C) { - c.Assert(d.MakeBucket("foo1", "private"), IsNil) +func (s *MyDonutSuite) TestEmptyBucket(c *C) { + c.Assert(dd.MakeBucket("foo1", "private"), IsNil) // check if bucket is empty var resources BucketResourcesMetadata resources.Maxkeys = 1 - objectsMetadata, resources, err := d.ListObjects("foo1", resources) + objectsMetadata, resources, err := dd.ListObjects("foo1", resources) c.Assert(err, IsNil) c.Assert(len(objectsMetadata), Equals, 0) c.Assert(resources.CommonPrefixes, 
DeepEquals, []string{}) @@ -95,47 +96,47 @@ func (s *MySuite) TestEmptyBucket(c *C) { } // test bucket list -func (s *MySuite) TestMakeBucketAndList(c *C) { +func (s *MyDonutSuite) TestMakeBucketAndList(c *C) { // create bucket - err := d.MakeBucket("foo2", "private") + err := dd.MakeBucket("foo2", "private") c.Assert(err, IsNil) // check bucket exists - buckets, err := d.ListBuckets() + buckets, err := dd.ListBuckets() c.Assert(err, IsNil) c.Assert(len(buckets), Equals, 5) c.Assert(buckets[0].ACL, Equals, BucketACL("private")) } // test re-create bucket -func (s *MySuite) TestMakeBucketWithSameNameFails(c *C) { - err := d.MakeBucket("foo3", "private") +func (s *MyDonutSuite) TestMakeBucketWithSameNameFails(c *C) { + err := dd.MakeBucket("foo3", "private") c.Assert(err, IsNil) - err = d.MakeBucket("foo3", "private") + err = dd.MakeBucket("foo3", "private") c.Assert(err, Not(IsNil)) } // test make multiple buckets -func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) { +func (s *MyDonutSuite) TestCreateMultipleBucketsAndList(c *C) { // add a second bucket - err := d.MakeBucket("foo4", "private") + err := dd.MakeBucket("foo4", "private") c.Assert(err, IsNil) - err = d.MakeBucket("bar1", "private") + err = dd.MakeBucket("bar1", "private") c.Assert(err, IsNil) - buckets, err := d.ListBuckets() + buckets, err := dd.ListBuckets() c.Assert(err, IsNil) c.Assert(len(buckets), Equals, 2) c.Assert(buckets[0].Name, Equals, "bar1") c.Assert(buckets[1].Name, Equals, "foo4") - err = d.MakeBucket("foobar1", "private") + err = dd.MakeBucket("foobar1", "private") c.Assert(err, IsNil) - buckets, err = d.ListBuckets() + buckets, err = dd.ListBuckets() c.Assert(err, IsNil) c.Assert(len(buckets), Equals, 3) @@ -143,37 +144,37 @@ func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) { } // test object create without bucket -func (s *MySuite) TestNewObjectFailsWithoutBucket(c *C) { - _, err := d.CreateObject("unknown", "obj", "", "", 0, nil) +func (s *MyDonutSuite) 
TestNewObjectFailsWithoutBucket(c *C) { + _, err := dd.CreateObject("unknown", "obj", "", "", 0, nil) c.Assert(err, Not(IsNil)) } // test create object metadata -func (s *MySuite) TestNewObjectMetadata(c *C) { +func (s *MyDonutSuite) TestNewObjectMetadata(c *C) { data := "Hello World" hasher := md5.New() hasher.Write([]byte(data)) expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) - err := d.MakeBucket("foo6", "private") + err := dd.MakeBucket("foo6", "private") c.Assert(err, IsNil) - objectMetadata, err := d.CreateObject("foo6", "obj", "application/json", expectedMd5Sum, int64(len(data)), reader) + objectMetadata, err := dd.CreateObject("foo6", "obj", "application/json", expectedMd5Sum, int64(len(data)), reader) c.Assert(err, IsNil) c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json") } // test create object fails without name -func (s *MySuite) TestNewObjectFailsWithEmptyName(c *C) { - _, err := d.CreateObject("foo", "", "", "", 0, nil) +func (s *MyDonutSuite) TestNewObjectFailsWithEmptyName(c *C) { + _, err := dd.CreateObject("foo", "", "", "", 0, nil) c.Assert(err, Not(IsNil)) } // test create object -func (s *MySuite) TestNewObjectCanBeWritten(c *C) { - err := d.MakeBucket("foo", "private") +func (s *MyDonutSuite) TestNewObjectCanBeWritten(c *C) { + err := dd.MakeBucket("foo", "private") c.Assert(err, IsNil) data := "Hello World" @@ -183,43 +184,43 @@ func (s *MySuite) TestNewObjectCanBeWritten(c *C) { expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) - actualMetadata, err := d.CreateObject("foo", "obj", "application/octet-stream", expectedMd5Sum, int64(len(data)), reader) + actualMetadata, err := dd.CreateObject("foo", "obj", "application/octet-stream", expectedMd5Sum, int64(len(data)), reader) c.Assert(err, 
IsNil) c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) var buffer bytes.Buffer - size, err := d.GetObject(&buffer, "foo", "obj") + size, err := dd.GetObject(&buffer, "foo", "obj") c.Assert(err, IsNil) c.Assert(size, Equals, int64(len(data))) c.Assert(buffer.Bytes(), DeepEquals, []byte(data)) - actualMetadata, err = d.GetObjectMetadata("foo", "obj") + actualMetadata, err = dd.GetObjectMetadata("foo", "obj") c.Assert(err, IsNil) c.Assert(hex.EncodeToString(hasher.Sum(nil)), Equals, actualMetadata.MD5Sum) c.Assert(int64(len(data)), Equals, actualMetadata.Size) } // test list objects -func (s *MySuite) TestMultipleNewObjects(c *C) { - c.Assert(d.MakeBucket("foo5", "private"), IsNil) +func (s *MyDonutSuite) TestMultipleNewObjects(c *C) { + c.Assert(dd.MakeBucket("foo5", "private"), IsNil) one := ioutil.NopCloser(bytes.NewReader([]byte("one"))) - _, err := d.CreateObject("foo5", "obj1", "", "", int64(len("one")), one) + _, err := dd.CreateObject("foo5", "obj1", "", "", int64(len("one")), one) c.Assert(err, IsNil) two := ioutil.NopCloser(bytes.NewReader([]byte("two"))) - _, err = d.CreateObject("foo5", "obj2", "", "", int64(len("two")), two) + _, err = dd.CreateObject("foo5", "obj2", "", "", int64(len("two")), two) c.Assert(err, IsNil) var buffer1 bytes.Buffer - size, err := d.GetObject(&buffer1, "foo5", "obj1") + size, err := dd.GetObject(&buffer1, "foo5", "obj1") c.Assert(err, IsNil) c.Assert(size, Equals, int64(len([]byte("one")))) c.Assert(buffer1.Bytes(), DeepEquals, []byte("one")) var buffer2 bytes.Buffer - size, err = d.GetObject(&buffer2, "foo5", "obj2") + size, err = dd.GetObject(&buffer2, "foo5", "obj2") c.Assert(err, IsNil) c.Assert(size, Equals, int64(len([]byte("two")))) @@ -232,7 +233,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) { resources.Prefix = "o" resources.Delimiter = "1" resources.Maxkeys = 10 - objectsMetadata, resources, err := d.ListObjects("foo5", resources) + objectsMetadata, resources, err := 
dd.ListObjects("foo5", resources) c.Assert(err, IsNil) c.Assert(resources.IsTruncated, Equals, false) c.Assert(resources.CommonPrefixes[0], Equals, "obj1") @@ -241,9 +242,9 @@ func (s *MySuite) TestMultipleNewObjects(c *C) { resources.Prefix = "" resources.Delimiter = "1" resources.Maxkeys = 10 - objectsMetadata, resources, err = d.ListObjects("foo5", resources) + objectsMetadata, resources, err = dd.ListObjects("foo5", resources) c.Assert(err, IsNil) - c.Assert(objectsMetadata[0].Object, Equals, "obj1") + c.Assert(objectsMetadata[0].Object, Equals, "obj2") c.Assert(resources.IsTruncated, Equals, false) c.Assert(resources.CommonPrefixes[0], Equals, "obj1") @@ -251,18 +252,18 @@ func (s *MySuite) TestMultipleNewObjects(c *C) { resources.Prefix = "o" resources.Delimiter = "" resources.Maxkeys = 10 - objectsMetadata, resources, err = d.ListObjects("foo5", resources) + objectsMetadata, resources, err = dd.ListObjects("foo5", resources) c.Assert(err, IsNil) c.Assert(resources.IsTruncated, Equals, false) c.Assert(objectsMetadata[0].Object, Equals, "obj1") c.Assert(objectsMetadata[1].Object, Equals, "obj2") three := ioutil.NopCloser(bytes.NewReader([]byte("three"))) - _, err = d.CreateObject("foo5", "obj3", "", "", int64(len("three")), three) + _, err = dd.CreateObject("foo5", "obj3", "", "", int64(len("three")), three) c.Assert(err, IsNil) var buffer bytes.Buffer - size, err = d.GetObject(&buffer, "foo5", "obj3") + size, err = dd.GetObject(&buffer, "foo5", "obj3") c.Assert(err, IsNil) c.Assert(size, Equals, int64(len([]byte("three")))) c.Assert(buffer.Bytes(), DeepEquals, []byte("three")) @@ -271,7 +272,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) { resources.Prefix = "o" resources.Delimiter = "" resources.Maxkeys = 2 - objectsMetadata, resources, err = d.ListObjects("foo5", resources) + objectsMetadata, resources, err = dd.ListObjects("foo5", resources) c.Assert(err, IsNil) c.Assert(resources.IsTruncated, Equals, true) c.Assert(len(objectsMetadata), Equals, 2) 
diff --git a/pkg/storage/pq/pq.go b/pkg/storage/pq/pq.go deleted file mode 100644 index c491f4bb6..000000000 --- a/pkg/storage/pq/pq.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package pq - -import "container/heap" - -// Item container for tasks in priority queue -type Item struct { - task Task // task - - // The index is needed by Fix and is maintained by the heap.Interface methods. - index int // The index of the item in the heap. -} - -// A PriorityQueue implements heap.Interface and holds Items. -type PriorityQueue []*Item - -// Len length of current priority queue -func (pq PriorityQueue) Len() int { return len(pq) } - -// Less used internally by heap.Interface to arrange items in order -func (pq PriorityQueue) Less(i, j int) bool { - // We want Pop to give us the highest, not lowest, priority so we use greater than here. 
- return pq[i].task.GetPriority() > pq[j].task.GetPriority() -} - -// Swap used internally by heap.Interface to arrange incoming items -func (pq PriorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] - pq[i].index = i - pq[j].index = j -} - -// Push push items onto priority queue -func (pq *PriorityQueue) Push(x interface{}) { - n := len(*pq) - item := x.(*Item) - item.index = n - *pq = append(*pq, item) -} - -// Pop pop items with highest priority -func (pq *PriorityQueue) Pop() interface{} { - old := *pq - n := len(old) - item := old[n-1] - item.index = -1 // for safety - *pq = old[0 : n-1] - return item -} - -// Fix modifies an item in-place on the queue -func (pq *PriorityQueue) Fix(item *Item, task Task) { - item.task = task - heap.Fix(pq, item.index) -} diff --git a/pkg/storage/pq/pq_test.go b/pkg/storage/pq/pq_test.go deleted file mode 100644 index b070324fa..000000000 --- a/pkg/storage/pq/pq_test.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package pq - -import ( - "container/heap" - "fmt" - "testing" - - . 
"github.com/minio/check" -) - -func Test(t *testing.T) { TestingT(t) } - -type MySuite struct{} - -var _ = Suite(&MySuite{}) - -func helloTask1() <-chan error { - errCh := make(chan error) - go func() { - defer close(errCh) - println("Hello task1") - errCh <- nil - }() - return errCh -} - -func helloTask2() <-chan error { - errCh := make(chan error) - go func() { - defer close(errCh) - println("Hello task2") - errCh <- nil - }() - return errCh -} - -func newJob1() <-chan error { - errCh := make(chan error) - go func() { - defer close(errCh) - println("New Job1") - errCh <- nil - }() - return errCh -} - -func newJob2() <-chan error { - errCh := make(chan error) - go func() { - defer close(errCh) - println("New Job2") - errCh <- nil - }() - return errCh -} - -func (s *MySuite) TestPQ(c *C) { - // Create a priority queue, put the items in it, and - // establish the priority queue (heap) invariants. - pq := make(PriorityQueue, 2) - pq[0] = &Item{ - task: Task{job: helloTask1, priority: 2}, - index: 0, - } - pq[1] = &Item{ - task: Task{job: helloTask2, priority: 1}, - index: 1, - } - heap.Init(&pq) - - // Insert a new item and then modify its priority. - item := &Item{ - task: Task{job: newJob1, priority: 5}, - } - heap.Push(&pq, item) - newTask := Task{job: newJob2, priority: 6} - pq.Fix(item, newTask) - - // Take the items out; they arrive in decreasing priority order. - for pq.Len() > 0 { - item := heap.Pop(&pq).(*Item) - fmt.Printf("%.2d", item.task.GetPriority()) - item.task.Execute() - } -} diff --git a/pkg/storage/pq/task.go b/pkg/storage/pq/task.go deleted file mode 100644 index 9a1f3e84d..000000000 --- a/pkg/storage/pq/task.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package pq - -// Task container for any generic tasks -type Task struct { - job func() <-chan error - priority int -} - -// GetPriority get current task priority -func (t Task) GetPriority() int { - return t.priority -} - -// UpdatePriority update current task priority -func (t Task) UpdatePriority(p int) { - t.priority = p -} - -// Execute execute current task -func (t Task) Execute() error { - return <-t.job() -} From 12bde7df302bb5c428487cc0c300a066392797ea Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 2 Jul 2015 15:40:16 -0700 Subject: [PATCH 16/19] Add simple Ticket Master which pro-actively sends messages on proceedChannel Handlers are going to wait on proceedChannel, this the initial step towards providing priority for different set of API operations --- pkg/server/api/api.go | 32 +++++++++ pkg/server/api/bucket-handlers.go | 75 ++++++++++++++++++--- pkg/server/api/object-handlers.go | 108 +++++++++++++++++++++++++++--- pkg/server/router.go | 44 ++++++------ pkg/server/server.go | 23 ++++--- 5 files changed, 235 insertions(+), 47 deletions(-) create mode 100644 pkg/server/api/api.go diff --git a/pkg/server/api/api.go b/pkg/server/api/api.go new file mode 100644 index 000000000..b84a3563e --- /dev/null +++ b/pkg/server/api/api.go @@ -0,0 +1,32 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package api + +// Operation container for individual operations read by Ticket Master +type Operation struct { + ProceedCh chan struct{} +} + +// Minio container for API and also carries OP (operation) channel +type Minio struct { + OP chan Operation +} + +// New instantiate a new minio API +func New() Minio { + return Minio{OP: make(chan Operation)} +} diff --git a/pkg/server/api/bucket-handlers.go b/pkg/server/api/bucket-handlers.go index bae237e0b..ef6f08bda 100644 --- a/pkg/server/api/bucket-handlers.go +++ b/pkg/server/api/bucket-handlers.go @@ -23,10 +23,7 @@ import ( "github.com/gorilla/mux" ) -// MinioAPI - -type MinioAPI struct{} - -func (api MinioAPI) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool { +func (api Minio) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool { vars := mux.Vars(req) bucket := vars["bucket"] log.Println(bucket) @@ -40,8 +37,17 @@ func (api MinioAPI) isValidOp(w http.ResponseWriter, req *http.Request, acceptsC // using the Initiate Multipart Upload request, but has not yet been completed or aborted. // This operation returns at most 1,000 multipart uploads in the response. 
// -func (api MinioAPI) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } log.Println(acceptsContentType) resources := getBucketMultipartResources(req.URL.Query()) @@ -60,8 +66,19 @@ func (api MinioAPI) ListMultipartUploadsHandler(w http.ResponseWriter, req *http // of the objects in a bucket. You can use the request parameters as selection // criteria to return a subset of the objects in a bucket. // -func (api MinioAPI) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + // verify if bucket allows this operation if !api.isValidOp(w, req, acceptsContentType) { return @@ -87,7 +104,7 @@ func (api MinioAPI) ListObjectsHandler(w http.ResponseWriter, req *http.Request) // ----------- // This implementation of the GET operation returns a list of all buckets // owned by the authenticated sender of the request. 
-func (api MinioAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) ListBucketsHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // uncomment this when we have webcli // without access key credentials one cannot list buckets @@ -95,13 +112,21 @@ func (api MinioAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request) // writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) // return // } + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } log.Println(acceptsContentType) } // PutBucketHandler - PUT Bucket // ---------- // This implementation of the PUT operation creates a new bucket for authenticated request -func (api MinioAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) // uncomment this when we have webcli // without access key credentials one cannot create a bucket @@ -109,6 +134,16 @@ func (api MinioAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) { // writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) // return // } + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + if isRequestBucketACL(req.URL.Query()) { api.PutBucketACLHandler(w, req) return @@ -128,8 +163,19 @@ func (api MinioAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) { // PutBucketACLHandler - PUT Bucket ACL // ---------- // This implementation of the PUT operation modifies the bucketACL for authenticated request -func (api MinioAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) PutBucketACLHandler(w 
http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + // read from 'x-amz-acl' aclType := getACLType(req) if aclType == unsupportedACLType { @@ -148,8 +194,17 @@ func (api MinioAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.Request // The operation returns a 200 OK if the bucket exists and you // have permission to access it. Otherwise, the operation might // return responses such as 404 Not Found and 403 Forbidden. -func (api MinioAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) HeadBucketHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } log.Println(acceptsContentType) vars := mux.Vars(req) diff --git a/pkg/server/api/object-handlers.go b/pkg/server/api/object-handlers.go index 0ec281500..b1c9dc807 100644 --- a/pkg/server/api/object-handlers.go +++ b/pkg/server/api/object-handlers.go @@ -36,8 +36,19 @@ const ( // ---------- // This implementation of the GET operation retrieves object. To use GET, // you must have READ access to the object. 
-func (api MinioAPI) GetObjectHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + // verify if this operation is allowed if !api.isValidOp(w, req, acceptsContentType) { return @@ -54,8 +65,19 @@ func (api MinioAPI) GetObjectHandler(w http.ResponseWriter, req *http.Request) { // HeadObjectHandler - HEAD Object // ----------- // The HEAD operation retrieves metadata from an object without returning the object itself. -func (api MinioAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + // verify if this operation is allowed if !api.isValidOp(w, req, acceptsContentType) { return @@ -71,8 +93,19 @@ func (api MinioAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request) // PutObjectHandler - PUT Object // ---------- // This implementation of the PUT operation adds an object to a bucket. 
-func (api MinioAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + // verify if this operation is allowed if !api.isValidOp(w, req, acceptsContentType) { return @@ -121,8 +154,19 @@ func (api MinioAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) { /// Multipart API // NewMultipartUploadHandler - New multipart upload -func (api MinioAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { return @@ -141,8 +185,19 @@ func (api MinioAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.R } // PutObjectPartHandler - Upload part -func (api MinioAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { return @@ -190,8 +245,19 @@ func (api MinioAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Reques } // AbortMultipartUploadHandler - Abort multipart upload -func 
(api MinioAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { return @@ -206,8 +272,19 @@ func (api MinioAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http } // ListObjectPartsHandler - List object parts -func (api MinioAPI) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { return @@ -225,8 +302,19 @@ func (api MinioAPI) ListObjectPartsHandler(w http.ResponseWriter, req *http.Requ } // CompleteMultipartUploadHandler - Complete multipart upload -func (api MinioAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) + + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + { + // do you operation + } + log.Println(acceptsContentType) + // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { return @@ -262,13 +350,13 @@ func (api MinioAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *h /// Delete API // 
DeleteBucketHandler - Delete bucket -func (api MinioAPI) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) { error := getErrorCode(NotImplemented) w.WriteHeader(error.HTTPStatusCode) } // DeleteObjectHandler - Delete object -func (api MinioAPI) DeleteObjectHandler(w http.ResponseWriter, req *http.Request) { +func (api Minio) DeleteObjectHandler(w http.ResponseWriter, req *http.Request) { error := getErrorCode(NotImplemented) w.WriteHeader(error.HTTPStatusCode) } diff --git a/pkg/server/router.go b/pkg/server/router.go index 0ed4e1070..c3ba765e1 100644 --- a/pkg/server/router.go +++ b/pkg/server/router.go @@ -24,28 +24,31 @@ import ( "github.com/minio/minio/pkg/server/rpc" ) -// registerAPI - register all the object API handlers to their respective paths -func registerAPI(mux *router.Router) http.Handler { - api := api.MinioAPI{} +func getAPI() api.Minio { + a := api.New() + return a +} - mux.HandleFunc("/", api.ListBucketsHandler).Methods("GET") - mux.HandleFunc("/{bucket}", api.ListObjectsHandler).Methods("GET") - mux.HandleFunc("/{bucket}", api.PutBucketHandler).Methods("PUT") - mux.HandleFunc("/{bucket}", api.HeadBucketHandler).Methods("HEAD") - mux.HandleFunc("/{bucket}/{object:.*}", api.HeadObjectHandler).Methods("HEAD") - mux.HandleFunc("/{bucket}/{object:.*}", api.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").Methods("PUT") - mux.HandleFunc("/{bucket}/{object:.*}", api.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}").Methods("GET") - mux.HandleFunc("/{bucket}/{object:.*}", api.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}").Methods("POST") - mux.HandleFunc("/{bucket}/{object:.*}", api.NewMultipartUploadHandler).Methods("POST") - mux.HandleFunc("/{bucket}/{object:.*}", api.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}").Methods("DELETE") - 
mux.HandleFunc("/{bucket}/{object:.*}", api.GetObjectHandler).Methods("GET") - mux.HandleFunc("/{bucket}/{object:.*}", api.PutObjectHandler).Methods("PUT") +// registerAPI - register all the object API handlers to their respective paths +func registerAPI(mux *router.Router, a api.Minio) http.Handler { + mux.HandleFunc("/", a.ListBucketsHandler).Methods("GET") + mux.HandleFunc("/{bucket}", a.ListObjectsHandler).Methods("GET") + mux.HandleFunc("/{bucket}", a.PutBucketHandler).Methods("PUT") + mux.HandleFunc("/{bucket}", a.HeadBucketHandler).Methods("HEAD") + mux.HandleFunc("/{bucket}/{object:.*}", a.HeadObjectHandler).Methods("HEAD") + mux.HandleFunc("/{bucket}/{object:.*}", a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").Methods("PUT") + mux.HandleFunc("/{bucket}/{object:.*}", a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}").Methods("GET") + mux.HandleFunc("/{bucket}/{object:.*}", a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}").Methods("POST") + mux.HandleFunc("/{bucket}/{object:.*}", a.NewMultipartUploadHandler).Methods("POST") + mux.HandleFunc("/{bucket}/{object:.*}", a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}").Methods("DELETE") + mux.HandleFunc("/{bucket}/{object:.*}", a.GetObjectHandler).Methods("GET") + mux.HandleFunc("/{bucket}/{object:.*}", a.PutObjectHandler).Methods("PUT") // not implemented yet - mux.HandleFunc("/{bucket}", api.DeleteBucketHandler).Methods("DELETE") + mux.HandleFunc("/{bucket}", a.DeleteBucketHandler).Methods("DELETE") // unsupported API - mux.HandleFunc("/{bucket}/{object:.*}", api.DeleteObjectHandler).Methods("DELETE") + mux.HandleFunc("/{bucket}/{object:.*}", a.DeleteObjectHandler).Methods("DELETE") return mux } @@ -102,9 +105,12 @@ func registerRPC(mux *router.Router, s *rpc.Server) http.Handler { } // getAPIHandler api handler -func getAPIHandler(conf api.Config) http.Handler { +func getAPIHandler(conf api.Config) 
(http.Handler, api.Minio) { mux := router.NewRouter() - return registerOtherMiddleware(registerAPI(mux), conf) + minioAPI := getAPI() + apiHandler := registerAPI(mux, minioAPI) + apiHandler = registerOtherMiddleware(apiHandler, conf) + return apiHandler, minioAPI } // getRPCHandler rpc handler diff --git a/pkg/server/server.go b/pkg/server/server.go index 3ddc12623..a383732c2 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -25,13 +25,13 @@ import ( "github.com/minio/minio/pkg/server/api" ) -func startAPI(errCh chan error, conf api.Config) { +func startAPI(errCh chan error, conf api.Config, apiHandler http.Handler) { defer close(errCh) // Minio server config httpServer := &http.Server{ Addr: conf.Address, - Handler: getAPIHandler(conf), + Handler: apiHandler, MaxHeaderBytes: 1 << 20, } @@ -74,20 +74,24 @@ func startAPI(errCh chan error, conf api.Config) { } } -func startRPC(errCh chan error) { +func startRPC(errCh chan error, rpcHandler http.Handler) { defer close(errCh) // Minio server config httpServer := &http.Server{ Addr: "127.0.0.1:9001", // TODO make this configurable - Handler: getRPCHandler(), + Handler: rpcHandler, MaxHeaderBytes: 1 << 20, } errCh <- httpServer.ListenAndServe() } -func startTM(errCh chan error) { - defer close(errCh) +func startTM(a api.Minio) { + for { + for op := range a.OP { + close(op.ProceedCh) + } + } } // StartServices starts basic services for a server @@ -95,8 +99,11 @@ func StartServices(conf api.Config) error { apiErrCh := make(chan error) rpcErrCh := make(chan error) - go startAPI(apiErrCh, conf) - go startRPC(rpcErrCh) + apiHandler, minioAPI := getAPIHandler(conf) + go startAPI(apiErrCh, conf, apiHandler) + rpcHandler := getRPCHandler() + go startRPC(rpcErrCh, rpcHandler) + go startTM(minioAPI) select { case err := <-apiErrCh: From 7c37e9d06abe598230a4ad67cc4152b5b022027e Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 2 Jul 2015 20:31:22 -0700 Subject: [PATCH 17/19] Make donut fully integrated back 
into API handlers --- commands.go | 12 - pkg/server/api/api.go | 12 +- pkg/server/api/bucket-handlers.go | 246 +++++++++--- pkg/server/api/object-handlers.go | 310 +++++++++++---- pkg/server/router.go | 14 +- pkg/server/server.go | 8 +- pkg/storage/donut/bucket.go | 9 +- pkg/storage/donut/common.go | 21 + pkg/storage/donut/config.go | 87 ++++ pkg/storage/donut/definitions.go | 4 +- pkg/storage/donut/{donut.go => donut-v1.go} | 211 ++++------ .../donut/{donut_test.go => donut-v1_test.go} | 27 +- pkg/storage/donut/{cache.go => donut-v2.go} | 373 +++++++++--------- .../donut/{cache_test.go => donut-v2_test.go} | 29 +- pkg/storage/donut/interfaces.go | 27 +- pkg/storage/donut/management.go | 24 +- .../{cache-multipart.go => multipart.go} | 146 +++---- pkg/storage/donut/rebalance.go | 6 +- 18 files changed, 971 insertions(+), 595 deletions(-) create mode 100644 pkg/storage/donut/config.go rename pkg/storage/donut/{donut.go => donut-v1.go} (57%) rename pkg/storage/donut/{donut_test.go => donut-v1_test.go} (89%) rename pkg/storage/donut/{cache.go => donut-v2.go} (63%) rename pkg/storage/donut/{cache_test.go => donut-v2_test.go} (88%) rename pkg/storage/donut/{cache-multipart.go => multipart.go} (75%) diff --git a/commands.go b/commands.go index a613b766d..1aedd3b8d 100644 --- a/commands.go +++ b/commands.go @@ -8,18 +8,6 @@ import ( "github.com/minio/minio/pkg/server/api" ) -func removeDuplicates(slice []string) []string { - newSlice := []string{} - seen := make(map[string]struct{}) - for _, val := range slice { - if _, ok := seen[val]; !ok { - newSlice = append(newSlice, val) - seen[val] = struct{}{} - } - } - return newSlice -} - var commands = []cli.Command{ serverCmd, controlCmd, diff --git a/pkg/server/api/api.go b/pkg/server/api/api.go index b84a3563e..8306e7c0b 100644 --- a/pkg/server/api/api.go +++ b/pkg/server/api/api.go @@ -16,6 +16,8 @@ package api +import "github.com/minio/minio/pkg/storage/donut" + // Operation container for individual operations read by 
Ticket Master type Operation struct { ProceedCh chan struct{} @@ -23,10 +25,16 @@ type Operation struct { // Minio container for API and also carries OP (operation) channel type Minio struct { - OP chan Operation + OP chan Operation + Donut donut.Interface } // New instantiate a new minio API func New() Minio { - return Minio{OP: make(chan Operation)} + // ignore errors for now + d, _ := donut.LoadDonut() + return Minio{ + OP: make(chan Operation), + Donut: d, + } } diff --git a/pkg/server/api/bucket-handlers.go b/pkg/server/api/bucket-handlers.go index ef6f08bda..cbdc6b550 100644 --- a/pkg/server/api/bucket-handlers.go +++ b/pkg/server/api/bucket-handlers.go @@ -17,16 +17,51 @@ package api import ( - "log" "net/http" "github.com/gorilla/mux" + "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/storage/donut" + "github.com/minio/minio/pkg/utils/log" ) func (api Minio) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool { vars := mux.Vars(req) bucket := vars["bucket"] - log.Println(bucket) + + bucketMetadata, err := api.Donut.GetBucketMetadata(bucket) + switch iodine.ToError(err).(type) { + case donut.BucketNotFound: + { + writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) + return false + } + case donut.BucketNameInvalid: + { + writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) + return false + } + case nil: + if _, err := stripAuth(req); err != nil { + if bucketMetadata.ACL.IsPrivate() { + return true + //uncomment this when we have webcli + //writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) + //return false + } + if bucketMetadata.ACL.IsPublicRead() && req.Method == "PUT" { + return true + //uncomment this when we have webcli + //writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) + //return false + } + } + default: + { + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, 
acceptsContentType, req.URL.Path) + } + } return true } @@ -38,18 +73,16 @@ func (api Minio) isValidOp(w http.ResponseWriter, req *http.Request, acceptsCont // This operation returns at most 1,000 multipart uploads in the response. // func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // Ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + acceptsContentType := getContentType(req) resources := getBucketMultipartResources(req.URL.Query()) if resources.MaxUploads == 0 { resources.MaxUploads = maxObjectList @@ -57,7 +90,29 @@ func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Re vars := mux.Vars(req) bucket := vars["bucket"] - log.Println(bucket) + + resources, err := api.Donut.ListMultipartUploads(bucket, resources) + switch iodine.ToError(err).(type) { + case nil: // success + { + // generate response + response := generateListMultipartUploadsResult(bucket, resources) + encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) + // write headers + setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) + // write body + w.Write(encodedSuccessResponse) + } + case donut.BucketNotFound: + { + writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) + } + default: + { + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + } + } } // ListObjectsHandler - GET Bucket (List Objects) @@ -67,18 +122,16 @@ func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Re // criteria to return a subset of the objects in a 
bucket. // func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // Ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + acceptsContentType := getContentType(req) // verify if bucket allows this operation if !api.isValidOp(w, req, acceptsContentType) { return @@ -96,8 +149,25 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) bucket := vars["bucket"] - log.Println(bucket) + objects, resources, err := api.Donut.ListObjects(bucket, resources) + switch iodine.ToError(err).(type) { + case nil: + // generate response + response := generateListObjectsResponse(bucket, objects, resources) + encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) + // write headers + setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) + // write body + w.Write(encodedSuccessResponse) + case donut.ObjectNotFound: + writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) + case donut.ObjectNameInvalid: + writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + } } // ListBucketsHandler - GET Service @@ -105,6 +175,15 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { // This implementation of the GET operation returns a list of all buckets // owned by the authenticated sender of the request. 
func (api Minio) ListBucketsHandler(w http.ResponseWriter, req *http.Request) { + // Ticket master block + { + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + } + acceptsContentType := getContentType(req) // uncomment this when we have webcli // without access key credentials one cannot list buckets @@ -112,21 +191,36 @@ func (api Minio) ListBucketsHandler(w http.ResponseWriter, req *http.Request) { // writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path) // return // } - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh - { - // do you operation + + buckets, err := api.Donut.ListBuckets() + switch iodine.ToError(err).(type) { + case nil: + // generate response + response := generateListBucketsResponse(buckets) + encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) + // write headers + setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) + // write response + w.Write(encodedSuccessResponse) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } - log.Println(acceptsContentType) } // PutBucketHandler - PUT Bucket // ---------- // This implementation of the PUT operation creates a new bucket for authenticated request func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) { + // Ticket master block + { + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh + } + acceptsContentType := getContentType(req) // uncomment this when we have webcli // without access key credentials one cannot create a bucket @@ -134,15 +228,6 @@ func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) { // writeErrorResponse(w, req, AccessDenied, 
acceptsContentType, req.URL.Path) // return // } - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh - { - // do you operation - } - log.Println(acceptsContentType) if isRequestBucketACL(req.URL.Query()) { api.PutBucketACLHandler(w, req) @@ -157,24 +242,39 @@ func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) bucket := vars["bucket"] - log.Println(bucket) + + err := api.Donut.MakeBucket(bucket, getACLTypeString(aclType)) + switch iodine.ToError(err).(type) { + case nil: + // Make sure to add Location information here only for bucket + w.Header().Set("Location", "/"+bucket) + writeSuccessResponse(w, acceptsContentType) + case donut.TooManyBuckets: + writeErrorResponse(w, req, TooManyBuckets, acceptsContentType, req.URL.Path) + case donut.BucketNameInvalid: + writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) + case donut.BucketExists: + writeErrorResponse(w, req, BucketAlreadyExists, acceptsContentType, req.URL.Path) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + } } // PutBucketACLHandler - PUT Bucket ACL // ---------- // This implementation of the PUT operation modifies the bucketACL for authenticated request func (api Minio) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // Ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + + acceptsContentType := getContentType(req) // read from 'x-amz-acl' aclType := getACLType(req) @@ -185,7 +285,19 @@ func (api Minio) 
PutBucketACLHandler(w http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) bucket := vars["bucket"] - log.Println(bucket) + + err := api.Donut.SetBucketMetadata(bucket, map[string]string{"acl": getACLTypeString(aclType)}) + switch iodine.ToError(err).(type) { + case nil: + writeSuccessResponse(w, acceptsContentType) + case donut.BucketNameInvalid: + writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) + case donut.BucketNotFound: + writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + } } // HeadBucketHandler - HEAD Bucket @@ -195,19 +307,33 @@ func (api Minio) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) { // have permission to access it. Otherwise, the operation might // return responses such as 404 Not Found and 403 Forbidden. func (api Minio) HeadBucketHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // Ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + + acceptsContentType := getContentType(req) vars := mux.Vars(req) bucket := vars["bucket"] - log.Println(bucket) + + _, err := api.Donut.GetBucketMetadata(bucket) + switch iodine.ToError(err).(type) { + case nil: + writeSuccessResponse(w, acceptsContentType) + case donut.BucketNotFound: + error := getErrorCode(NoSuchBucket) + w.WriteHeader(error.HTTPStatusCode) + case donut.BucketNameInvalid: + error := getErrorCode(InvalidBucketName) + w.WriteHeader(error.HTTPStatusCode) + default: + log.Error.Println(iodine.New(err, nil)) + error := getErrorCode(InternalError) + 
w.WriteHeader(error.HTTPStatusCode) + } } diff --git a/pkg/server/api/object-handlers.go b/pkg/server/api/object-handlers.go index b1c9dc807..e09dfe64c 100644 --- a/pkg/server/api/object-handlers.go +++ b/pkg/server/api/object-handlers.go @@ -9,7 +9,7 @@ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implieapi.Donut. * See the License for the specific language governing permissions and * limitations under the License. */ @@ -25,6 +25,7 @@ import ( "github.com/gorilla/mux" "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/storage/donut" "github.com/minio/minio/pkg/utils/log" ) @@ -37,17 +38,16 @@ const ( // This implementation of the GET operation retrieves object. To use GET, // you must have READ access to the object. func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + + acceptsContentType := getContentType(req) // verify if this operation is allowed if !api.isValidOp(w, req, acceptsContentType) { @@ -58,25 +58,57 @@ func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) bucket = vars["bucket"] object = vars["object"] - log.Println(bucket, object) + metadata, err := api.Donut.GetObjectMetadata(bucket, object) + switch iodine.ToError(err).(type) { + case nil: // success + { + httpRange, err := getRequestedRange(req, metadata.Size) + if err != nil { + 
writeErrorResponse(w, req, InvalidRange, acceptsContentType, req.URL.Path) + return + } + switch httpRange.start == 0 && httpRange.length == 0 { + case true: + setObjectHeaders(w, metadata) + if _, err := api.Donut.GetObject(w, bucket, object); err != nil { + // unable to write headers, we've already printed data. Just close the connection. + log.Error.Println(iodine.New(err, nil)) + } + case false: + metadata.Size = httpRange.length + setRangeObjectHeaders(w, metadata, httpRange) + w.WriteHeader(http.StatusPartialContent) + if _, err := api.Donut.GetPartialObject(w, bucket, object, httpRange.start, httpRange.length); err != nil { + // unable to write headers, we've already printed data. Just close the connection. + log.Error.Println(iodine.New(err, nil)) + } + } + } + case donut.ObjectNotFound: + writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) + case donut.ObjectNameInvalid: + writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + } } // HeadObjectHandler - HEAD Object // ----------- // The HEAD operation retrieves metadata from an object without returning the object itself. 
func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + + acceptsContentType := getContentType(req) // verify if this operation is allowed if !api.isValidOp(w, req, acceptsContentType) { @@ -87,25 +119,42 @@ func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) bucket = vars["bucket"] object = vars["object"] - log.Println(bucket, object) + + metadata, err := api.Donut.GetObjectMetadata(bucket, object) + switch iodine.ToError(err).(type) { + case nil: + setObjectHeaders(w, metadata) + w.WriteHeader(http.StatusOK) + case donut.ObjectNotFound: + error := getErrorCode(NoSuchKey) + w.Header().Set("Server", "Minio") + w.WriteHeader(error.HTTPStatusCode) + case donut.ObjectNameInvalid: + error := getErrorCode(NoSuchKey) + w.Header().Set("Server", "Minio") + w.WriteHeader(error.HTTPStatusCode) + default: + log.Error.Println(iodine.New(err, nil)) + error := getErrorCode(InternalError) + w.Header().Set("Server", "Minio") + w.WriteHeader(error.HTTPStatusCode) + } } // PutObjectHandler - PUT Object // ---------- // This implementation of the PUT operation adds an object to a bucket. 
func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // Ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + acceptsContentType := getContentType(req) // verify if this operation is allowed if !api.isValidOp(w, req, acceptsContentType) { return @@ -122,7 +171,7 @@ func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) { writeErrorResponse(w, req, InvalidDigest, acceptsContentType, req.URL.Path) return } - /// if Content-Length missing, throw away + /// if Content-Length missing, deny the request size := req.Header.Get("Content-Length") if size == "" { writeErrorResponse(w, req, MissingContentLength, acceptsContentType, req.URL.Path) @@ -148,24 +197,40 @@ func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) { writeErrorResponse(w, req, InvalidRequest, acceptsContentType, req.URL.Path) return } - log.Println(bucket, object, sizeInt64) + + metadata, err := api.Donut.CreateObject(bucket, object, md5, sizeInt64, req.Body, nil) + switch iodine.ToError(err).(type) { + case nil: + w.Header().Set("ETag", metadata.MD5Sum) + writeSuccessResponse(w, acceptsContentType) + case donut.ObjectExists: + writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path) + case donut.BadDigest: + writeErrorResponse(w, req, BadDigest, acceptsContentType, req.URL.Path) + case donut.EntityTooLarge: + writeErrorResponse(w, req, EntityTooLarge, acceptsContentType, req.URL.Path) + case donut.InvalidDigest: + writeErrorResponse(w, req, InvalidDigest, acceptsContentType, req.URL.Path) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, 
InternalError, acceptsContentType, req.URL.Path) + } } /// Multipart API // NewMultipartUploadHandler - New multipart upload func (api Minio) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // Ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + + acceptsContentType := getContentType(req) // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { @@ -181,22 +246,38 @@ func (api Minio) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Requ vars := mux.Vars(req) bucket = vars["bucket"] object = vars["object"] - log.Println(bucket, object) + + uploadID, err := api.Donut.NewMultipartUpload(bucket, object, "") + switch iodine.ToError(err).(type) { + case nil: + { + response := generateInitiateMultipartUploadResult(bucket, object, uploadID) + encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) + // write headers + setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) + // write body + w.Write(encodedSuccessResponse) + } + case donut.ObjectExists: + writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + } } // PutObjectPartHandler - Upload part func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // Ticket master block { - // do you operation + op := 
Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + + acceptsContentType := getContentType(req) // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { @@ -232,7 +313,6 @@ func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) vars := mux.Vars(req) bucket := vars["bucket"] object := vars["object"] - log.Println(bucket, object, sizeInt64) uploadID := req.URL.Query().Get("uploadId") partIDString := req.URL.Query().Get("partNumber") @@ -241,22 +321,40 @@ func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) if err != nil { writeErrorResponse(w, req, InvalidPart, acceptsContentType, req.URL.Path) } - log.Println(uploadID, partID) + + calculatedMD5, err := api.Donut.CreateObjectPart(bucket, object, uploadID, partID, "", md5, sizeInt64, req.Body) + switch iodine.ToError(err).(type) { + case nil: + w.Header().Set("ETag", calculatedMD5) + writeSuccessResponse(w, acceptsContentType) + case donut.InvalidUploadID: + writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) + case donut.ObjectExists: + writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path) + case donut.BadDigest: + writeErrorResponse(w, req, BadDigest, acceptsContentType, req.URL.Path) + case donut.EntityTooLarge: + writeErrorResponse(w, req, EntityTooLarge, acceptsContentType, req.URL.Path) + case donut.InvalidDigest: + writeErrorResponse(w, req, InvalidDigest, acceptsContentType, req.URL.Path) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + } } // AbortMultipartUploadHandler - Abort multipart upload func (api Minio) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP 
<- op - // block until Ticket master gives us a go - <-op.ProceedCh + // Ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + + acceptsContentType := getContentType(req) // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { @@ -267,23 +365,33 @@ func (api Minio) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Re bucket := vars["bucket"] object := vars["object"] - //objectResourcesMetadata := getObjectResources(req.URL.Query()) - log.Println(bucket, object) + objectResourcesMetadata := getObjectResources(req.URL.Query()) + + err := api.Donut.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID) + switch iodine.ToError(err).(type) { + case nil: + setCommonHeaders(w, getContentTypeString(acceptsContentType), 0) + w.WriteHeader(http.StatusNoContent) + case donut.InvalidUploadID: + writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + } } // ListObjectPartsHandler - List object parts func (api Minio) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // Ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + + acceptsContentType := getContentType(req) // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { @@ -298,22 +406,38 @@ func (api Minio) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request vars := 
mux.Vars(req) bucket := vars["bucket"] object := vars["object"] - log.Println(bucket, object) + + objectResourcesMetadata, err := api.Donut.ListObjectParts(bucket, object, objectResourcesMetadata) + switch iodine.ToError(err).(type) { + case nil: + { + response := generateListPartsResult(objectResourcesMetadata) + encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) + // write headers + setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) + // write body + w.Write(encodedSuccessResponse) + } + case donut.InvalidUploadID: + writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + } } // CompleteMultipartUploadHandler - Complete multipart upload func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { - acceptsContentType := getContentType(req) - - op := Operation{} - op.ProceedCh = make(chan struct{}) - api.OP <- op - // block until Ticket master gives us a go - <-op.ProceedCh + // Ticket master block { - // do you operation + op := Operation{} + op.ProceedCh = make(chan struct{}) + api.OP <- op + // block until Ticket master gives us a go + <-op.ProceedCh } - log.Println(acceptsContentType) + + acceptsContentType := getContentType(req) // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { @@ -336,15 +460,31 @@ func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http vars := mux.Vars(req) bucket := vars["bucket"] object := vars["object"] - log.Println(bucket, object) - //objectResourcesMetadata := getObjectResources(req.URL.Query()) + objectResourcesMetadata := getObjectResources(req.URL.Query()) partMap := make(map[int]string) for _, part := range parts.Part { partMap[part.PartNumber] = part.ETag } + metadata, err := api.Donut.CompleteMultipartUpload(bucket, object, 
objectResourcesMetadata.UploadID, partMap) + switch iodine.ToError(err).(type) { + case nil: + { + response := generateCompleteMultpartUploadResult(bucket, object, "", metadata.MD5Sum) + encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) + // write headers + setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) + // write body + w.Write(encodedSuccessResponse) + } + case donut.InvalidUploadID: + writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) + default: + log.Error.Println(iodine.New(err, nil)) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + } } /// Delete API diff --git a/pkg/server/router.go b/pkg/server/router.go index c3ba765e1..a789843d5 100644 --- a/pkg/server/router.go +++ b/pkg/server/router.go @@ -24,9 +24,9 @@ import ( "github.com/minio/minio/pkg/server/rpc" ) +// Get api func getAPI() api.Minio { - a := api.New() - return a + return api.New() } // registerAPI - register all the object API handlers to their respective paths @@ -82,15 +82,15 @@ func registerChain(handlers ...handlerFunc) chain { return ch } -// registerOtherMiddleware register all available middleware -func registerOtherMiddleware(mux http.Handler, conf api.Config) http.Handler { +// registerCustomMiddleware register all available custom middleware +func registerCustomMiddleware(mux http.Handler, conf api.Config) http.Handler { ch := registerChain( api.ValidContentTypeHandler, api.TimeValidityHandler, api.IgnoreResourcesHandler, api.ValidateAuthHeaderHandler, api.LoggingHandler, - // Add new middleware here + // Add new your new middleware here ) mux = ch.final(mux) @@ -109,7 +109,7 @@ func getAPIHandler(conf api.Config) (http.Handler, api.Minio) { mux := router.NewRouter() minioAPI := getAPI() apiHandler := registerAPI(mux, minioAPI) - apiHandler = registerOtherMiddleware(apiHandler, conf) + apiHandler = registerCustomMiddleware(apiHandler, conf) return apiHandler, 
minioAPI } @@ -120,6 +120,6 @@ func getRPCHandler() http.Handler { s.RegisterService(new(rpc.HelloService), "") s.RegisterService(new(rpc.VersionService), "") s.RegisterService(new(rpc.GetSysInfoService), "") - // add more services here + // Add new services here return registerRPC(router.NewRouter(), s) } diff --git a/pkg/server/server.go b/pkg/server/server.go index a383732c2..999c36efe 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -25,6 +25,7 @@ import ( "github.com/minio/minio/pkg/server/api" ) +// Start API listener func startAPI(errCh chan error, conf api.Config, apiHandler http.Handler) { defer close(errCh) @@ -74,6 +75,7 @@ func startAPI(errCh chan error, conf api.Config, apiHandler http.Handler) { } } +// Start RPC listener func startRPC(errCh chan error, rpcHandler http.Handler) { defer close(errCh) @@ -86,10 +88,11 @@ func startRPC(errCh chan error, rpcHandler http.Handler) { errCh <- httpServer.ListenAndServe() } +// Start ticket master func startTM(a api.Minio) { for { for op := range a.OP { - close(op.ProceedCh) + op.ProceedCh <- struct{}{} } } } @@ -101,8 +104,7 @@ func StartServices(conf api.Config) error { apiHandler, minioAPI := getAPIHandler(conf) go startAPI(apiErrCh, conf, apiHandler) - rpcHandler := getRPCHandler() - go startRPC(rpcErrCh, rpcHandler) + go startRPC(rpcErrCh, getRPCHandler()) go startTM(minioAPI) select { diff --git a/pkg/storage/donut/bucket.go b/pkg/storage/donut/bucket.go index 6e4012902..ccc89ada8 100644 --- a/pkg/storage/donut/bucket.go +++ b/pkg/storage/donut/bucket.go @@ -57,6 +57,7 @@ func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bu "donutName": donutName, "aclType": aclType, } + if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" { return bucket{}, BucketMetadata{}, iodine.New(InvalidArgument{}, errParams) } @@ -130,7 +131,7 @@ func (b bucket) GetObjectMetadata(objectName string) (ObjectMetadata, error) { } // ListObjects - list all objects -func 
(b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjects, error) { +func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, error) { b.lock.RLock() defer b.lock.RUnlock() if maxkeys <= 0 { @@ -140,7 +141,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List var objects []string bucketMetadata, err := b.getBucketMetadata() if err != nil { - return ListObjects{}, iodine.New(err, nil) + return ListObjectsResults{}, iodine.New(err, nil) } for objectName := range bucketMetadata.Buckets[b.getBucketName()].BucketObjects { if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) { @@ -181,7 +182,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List commonPrefixes = RemoveDuplicates(commonPrefixes) sort.Strings(commonPrefixes) - listObjects := ListObjects{} + listObjects := ListObjectsResults{} listObjects.Objects = make(map[string]ObjectMetadata) listObjects.CommonPrefixes = commonPrefixes listObjects.IsTruncated = isTruncated @@ -189,7 +190,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List for _, objectName := range results { objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName)) if err != nil { - return ListObjects{}, iodine.New(err, nil) + return ListObjectsResults{}, iodine.New(err, nil) } listObjects.Objects[objectName] = objMetadata } diff --git a/pkg/storage/donut/common.go b/pkg/storage/donut/common.go index e55fc3739..b91fd3b1b 100644 --- a/pkg/storage/donut/common.go +++ b/pkg/storage/donut/common.go @@ -19,10 +19,31 @@ package donut import ( "bufio" "bytes" + "io" "sort" "strings" ) +// ProxyWriter implements io.Writer to trap written bytes +type ProxyWriter struct { + writer io.Writer + writtenBytes []byte +} + +func (r *ProxyWriter) Write(p []byte) (n int, err error) { + n, err = r.writer.Write(p) + if err != nil { + return + } + r.writtenBytes = append(r.writtenBytes, 
p[0:n]...) + return +} + +// NewProxyWriter - wrap around a given writer with ProxyWriter +func NewProxyWriter(w io.Writer) *ProxyWriter { + return &ProxyWriter{writer: w, writtenBytes: nil} +} + // Delimiter delims the string at delimiter func Delimiter(object, delimiter string) string { readBuffer := bytes.NewBufferString(object) diff --git a/pkg/storage/donut/config.go b/pkg/storage/donut/config.go new file mode 100644 index 000000000..4934f5714 --- /dev/null +++ b/pkg/storage/donut/config.go @@ -0,0 +1,87 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package donut + +import ( + "os/user" + "path/filepath" + "time" + + "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/quick" +) + +// getDonutConfigPath get donut config file path +func getDonutConfigPath() (string, error) { + u, err := user.Current() + if err != nil { + return "", iodine.New(err, nil) + } + donutConfigPath := filepath.Join(u.HomeDir, ".minio", "donut.json") + return donutConfigPath, nil +} + +// SaveConfig save donut config +func SaveConfig(a *Config) error { + donutConfigPath, err := getDonutConfigPath() + if err != nil { + return iodine.New(err, nil) + } + qc, err := quick.New(a) + if err != nil { + return iodine.New(err, nil) + } + if err := qc.Save(donutConfigPath); err != nil { + return iodine.New(err, nil) + } + return nil +} + +// LoadConfig load donut config +func LoadConfig() (*Config, error) { + donutConfigPath, err := getDonutConfigPath() + if err != nil { + return nil, iodine.New(err, nil) + } + a := &Config{} + a.Version = "0.0.1" + qc, err := quick.New(a) + if err != nil { + return nil, iodine.New(err, nil) + } + if err := qc.Load(donutConfigPath); err != nil { + return nil, iodine.New(err, nil) + } + return qc.Data().(*Config), nil +} + +// LoadDonut load donut from config +func LoadDonut() (Interface, error) { + conf, err := LoadConfig() + if err != nil { + conf = &Config{ + Version: "0.0.1", + MaxSize: 512000000, + Expiration: 1 * time.Hour, + } + } + donut, err := New(conf) + if err != nil { + return nil, iodine.New(err, nil) + } + return donut, nil +} diff --git a/pkg/storage/donut/definitions.go b/pkg/storage/donut/definitions.go index 333676fb1..b06a59148 100644 --- a/pkg/storage/donut/definitions.go +++ b/pkg/storage/donut/definitions.go @@ -65,8 +65,8 @@ type BucketMetadata struct { BucketObjects map[string]interface{} `json:"objects"` } -// ListObjects container for list objects response -type ListObjects struct { +// ListObjectsResults container for list objects response +type ListObjectsResults 
struct { Objects map[string]ObjectMetadata `json:"objects"` CommonPrefixes []string `json:"commonPrefixes"` IsTruncated bool `json:"isTruncated"` diff --git a/pkg/storage/donut/donut.go b/pkg/storage/donut/donut-v1.go similarity index 57% rename from pkg/storage/donut/donut.go rename to pkg/storage/donut/donut-v1.go index a34a0a5f0..54067c33a 100644 --- a/pkg/storage/donut/donut.go +++ b/pkg/storage/donut/donut-v1.go @@ -24,19 +24,10 @@ import ( "path/filepath" "strconv" "strings" - "sync" "github.com/minio/minio/pkg/iodine" ) -// donut struct internal data -type donut struct { - name string - buckets map[string]bucket - nodes map[string]node - lock *sync.RWMutex -} - // config files used inside Donut const ( // donut system config @@ -51,75 +42,41 @@ const ( bucketMetadataVersion = "1.0.0" ) -// attachDonutNode - wrapper function to instantiate a new node for associatedt donut -// based on the provided configuration -func (dt donut) attachDonutNode(hostname string, disks []string) error { - if err := dt.AttachNode(hostname, disks); err != nil { - return iodine.New(err, nil) - } - return nil -} - -// NewDonut - instantiate a new donut -func NewDonut(donutName string, nodeDiskMap map[string][]string) (Donut, error) { - if donutName == "" || len(nodeDiskMap) == 0 { - return nil, iodine.New(InvalidArgument{}, nil) - } - nodes := make(map[string]node) - buckets := make(map[string]bucket) - d := donut{ - name: donutName, - nodes: nodes, - buckets: buckets, - lock: new(sync.RWMutex), - } - for k, v := range nodeDiskMap { - if len(v) == 0 { - return nil, iodine.New(InvalidDisksArgument{}, nil) - } - err := d.attachDonutNode(k, v) - if err != nil { - return nil, iodine.New(err, nil) - } - } - return d, nil -} - -// MakeBucket - make a new bucket -func (dt donut) MakeBucket(bucket string, acl BucketACL) error { - dt.lock.Lock() - defer dt.lock.Unlock() +// makeBucket - make a new bucket +func (donut API) makeBucket(bucket string, acl BucketACL) error { + donut.lock.Lock() + 
defer donut.lock.Unlock() if bucket == "" || strings.TrimSpace(bucket) == "" { return iodine.New(InvalidArgument{}, nil) } - return dt.makeDonutBucket(bucket, acl.String()) + return donut.makeDonutBucket(bucket, acl.String()) } -// GetBucketMetadata - get bucket metadata -func (dt donut) GetBucketMetadata(bucketName string) (BucketMetadata, error) { - dt.lock.RLock() - defer dt.lock.RUnlock() - if err := dt.listDonutBuckets(); err != nil { +// getBucketMetadata - get bucket metadata +func (donut API) getBucketMetadata(bucketName string) (BucketMetadata, error) { + donut.lock.RLock() + defer donut.lock.RUnlock() + if err := donut.listDonutBuckets(); err != nil { return BucketMetadata{}, iodine.New(err, nil) } - if _, ok := dt.buckets[bucketName]; !ok { + if _, ok := donut.buckets[bucketName]; !ok { return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucketName}, nil) } - metadata, err := dt.getDonutBucketMetadata() + metadata, err := donut.getDonutBucketMetadata() if err != nil { return BucketMetadata{}, iodine.New(err, nil) } return metadata.Buckets[bucketName], nil } -// SetBucketMetadata - set bucket metadata -func (dt donut) SetBucketMetadata(bucketName string, bucketMetadata map[string]string) error { - dt.lock.Lock() - defer dt.lock.Unlock() - if err := dt.listDonutBuckets(); err != nil { +// setBucketMetadata - set bucket metadata +func (donut API) setBucketMetadata(bucketName string, bucketMetadata map[string]string) error { + donut.lock.Lock() + defer donut.lock.Unlock() + if err := donut.listDonutBuckets(); err != nil { return iodine.New(err, nil) } - metadata, err := dt.getDonutBucketMetadata() + metadata, err := donut.getDonutBucketMetadata() if err != nil { return iodine.New(err, nil) } @@ -130,17 +87,17 @@ func (dt donut) SetBucketMetadata(bucketName string, bucketMetadata map[string]s } oldBucketMetadata.ACL = BucketACL(acl) metadata.Buckets[bucketName] = oldBucketMetadata - return dt.setDonutBucketMetadata(metadata) + return 
donut.setDonutBucketMetadata(metadata) } -// ListBuckets - return list of buckets -func (dt donut) ListBuckets() (map[string]BucketMetadata, error) { - dt.lock.RLock() - defer dt.lock.RUnlock() - if err := dt.listDonutBuckets(); err != nil { +// listBuckets - return list of buckets +func (donut API) listBuckets() (map[string]BucketMetadata, error) { + donut.lock.RLock() + defer donut.lock.RUnlock() + if err := donut.listDonutBuckets(); err != nil { return nil, iodine.New(err, nil) } - metadata, err := dt.getDonutBucketMetadata() + metadata, err := donut.getDonutBucketMetadata() if err != nil { // intentionally left out the error when Donut is empty // but we need to revisit this area in future - since we need @@ -150,10 +107,10 @@ func (dt donut) ListBuckets() (map[string]BucketMetadata, error) { return metadata.Buckets, nil } -// ListObjects - return list of objects -func (dt donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjects, error) { - dt.lock.RLock() - defer dt.lock.RUnlock() +// listObjects - return list of objects +func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, error) { + donut.lock.RLock() + defer donut.lock.RUnlock() errParams := map[string]string{ "bucket": bucket, "prefix": prefix, @@ -161,23 +118,23 @@ func (dt donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys in "delimiter": delimiter, "maxkeys": strconv.Itoa(maxkeys), } - if err := dt.listDonutBuckets(); err != nil { - return ListObjects{}, iodine.New(err, errParams) + if err := donut.listDonutBuckets(); err != nil { + return ListObjectsResults{}, iodine.New(err, errParams) } - if _, ok := dt.buckets[bucket]; !ok { - return ListObjects{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams) + if _, ok := donut.buckets[bucket]; !ok { + return ListObjectsResults{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams) } - listObjects, err := dt.buckets[bucket].ListObjects(prefix, marker, 
delimiter, maxkeys) + listObjects, err := donut.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys) if err != nil { - return ListObjects{}, iodine.New(err, errParams) + return ListObjectsResults{}, iodine.New(err, errParams) } return listObjects, nil } -// PutObject - put object -func (dt donut) PutObject(bucket, object, expectedMD5Sum string, reader io.Reader, metadata map[string]string) (ObjectMetadata, error) { - dt.lock.Lock() - defer dt.lock.Unlock() +// putObject - put object +func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, metadata map[string]string) (ObjectMetadata, error) { + donut.lock.Lock() + defer donut.lock.Unlock() errParams := map[string]string{ "bucket": bucket, "object": object, @@ -188,34 +145,34 @@ func (dt donut) PutObject(bucket, object, expectedMD5Sum string, reader io.Reade if object == "" || strings.TrimSpace(object) == "" { return ObjectMetadata{}, iodine.New(InvalidArgument{}, errParams) } - if err := dt.listDonutBuckets(); err != nil { + if err := donut.listDonutBuckets(); err != nil { return ObjectMetadata{}, iodine.New(err, errParams) } - if _, ok := dt.buckets[bucket]; !ok { + if _, ok := donut.buckets[bucket]; !ok { return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } - bucketMeta, err := dt.getDonutBucketMetadata() + bucketMeta, err := donut.getDonutBucketMetadata() if err != nil { return ObjectMetadata{}, iodine.New(err, errParams) } if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok { return ObjectMetadata{}, iodine.New(ObjectExists{Object: object}, errParams) } - objMetadata, err := dt.buckets[bucket].WriteObject(object, reader, expectedMD5Sum, metadata) + objMetadata, err := donut.buckets[bucket].WriteObject(object, reader, expectedMD5Sum, metadata) if err != nil { return ObjectMetadata{}, iodine.New(err, errParams) } bucketMeta.Buckets[bucket].BucketObjects[object] = 1 - if err := dt.setDonutBucketMetadata(bucketMeta); err != nil { + if err := 
donut.setDonutBucketMetadata(bucketMeta); err != nil { return ObjectMetadata{}, iodine.New(err, errParams) } return objMetadata, nil } -// GetObject - get object -func (dt donut) GetObject(bucket, object string) (reader io.ReadCloser, size int64, err error) { - dt.lock.RLock() - defer dt.lock.RUnlock() +// getObject - get object +func (donut API) getObject(bucket, object string) (reader io.ReadCloser, size int64, err error) { + donut.lock.RLock() + defer donut.lock.RUnlock() errParams := map[string]string{ "bucket": bucket, "object": object, @@ -226,37 +183,37 @@ func (dt donut) GetObject(bucket, object string) (reader io.ReadCloser, size int if object == "" || strings.TrimSpace(object) == "" { return nil, 0, iodine.New(InvalidArgument{}, errParams) } - if err := dt.listDonutBuckets(); err != nil { + if err := donut.listDonutBuckets(); err != nil { return nil, 0, iodine.New(err, nil) } - if _, ok := dt.buckets[bucket]; !ok { + if _, ok := donut.buckets[bucket]; !ok { return nil, 0, iodine.New(BucketNotFound{Bucket: bucket}, errParams) } - return dt.buckets[bucket].ReadObject(object) + return donut.buckets[bucket].ReadObject(object) } -// GetObjectMetadata - get object metadata -func (dt donut) GetObjectMetadata(bucket, object string) (ObjectMetadata, error) { - dt.lock.RLock() - defer dt.lock.RUnlock() +// getObjectMetadata - get object metadata +func (donut API) getObjectMetadata(bucket, object string) (ObjectMetadata, error) { + donut.lock.RLock() + defer donut.lock.RUnlock() errParams := map[string]string{ "bucket": bucket, "object": object, } - if err := dt.listDonutBuckets(); err != nil { + if err := donut.listDonutBuckets(); err != nil { return ObjectMetadata{}, iodine.New(err, errParams) } - if _, ok := dt.buckets[bucket]; !ok { + if _, ok := donut.buckets[bucket]; !ok { return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams) } - bucketMeta, err := dt.getDonutBucketMetadata() + bucketMeta, err := donut.getDonutBucketMetadata() if err 
!= nil { return ObjectMetadata{}, iodine.New(err, errParams) } if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; !ok { return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: object}, errParams) } - objectMetadata, err := dt.buckets[bucket].GetObjectMetadata(object) + objectMetadata, err := donut.buckets[bucket].GetObjectMetadata(object) if err != nil { return ObjectMetadata{}, iodine.New(err, nil) } @@ -264,16 +221,16 @@ func (dt donut) GetObjectMetadata(bucket, object string) (ObjectMetadata, error) } // getDiskWriters - -func (dt donut) getBucketMetadataWriters() ([]io.WriteCloser, error) { +func (donut API) getBucketMetadataWriters() ([]io.WriteCloser, error) { var writers []io.WriteCloser - for _, node := range dt.nodes { + for _, node := range donut.nodes { disks, err := node.ListDisks() if err != nil { return nil, iodine.New(err, nil) } writers = make([]io.WriteCloser, len(disks)) for order, d := range disks { - bucketMetaDataWriter, err := d.CreateFile(filepath.Join(dt.name, bucketMetadataConfig)) + bucketMetaDataWriter, err := d.CreateFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig)) if err != nil { return nil, iodine.New(err, nil) } @@ -283,16 +240,16 @@ func (dt donut) getBucketMetadataWriters() ([]io.WriteCloser, error) { return writers, nil } -func (dt donut) getBucketMetadataReaders() ([]io.ReadCloser, error) { +func (donut API) getBucketMetadataReaders() ([]io.ReadCloser, error) { var readers []io.ReadCloser - for _, node := range dt.nodes { + for _, node := range donut.nodes { disks, err := node.ListDisks() if err != nil { return nil, iodine.New(err, nil) } readers = make([]io.ReadCloser, len(disks)) for order, d := range disks { - bucketMetaDataReader, err := d.OpenFile(filepath.Join(dt.name, bucketMetadataConfig)) + bucketMetaDataReader, err := d.OpenFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig)) if err != nil { return nil, iodine.New(err, nil) } @@ -303,8 +260,8 @@ func (dt donut) 
getBucketMetadataReaders() ([]io.ReadCloser, error) { } // -func (dt donut) setDonutBucketMetadata(metadata *AllBuckets) error { - writers, err := dt.getBucketMetadataWriters() +func (donut API) setDonutBucketMetadata(metadata *AllBuckets) error { + writers, err := donut.getBucketMetadataWriters() if err != nil { return iodine.New(err, nil) } @@ -320,9 +277,9 @@ func (dt donut) setDonutBucketMetadata(metadata *AllBuckets) error { return nil } -func (dt donut) getDonutBucketMetadata() (*AllBuckets, error) { +func (donut API) getDonutBucketMetadata() (*AllBuckets, error) { metadata := new(AllBuckets) - readers, err := dt.getBucketMetadataReaders() + readers, err := donut.getBucketMetadataReaders() if err != nil { return nil, iodine.New(err, nil) } @@ -339,40 +296,40 @@ func (dt donut) getDonutBucketMetadata() (*AllBuckets, error) { return nil, iodine.New(InvalidArgument{}, nil) } -func (dt donut) makeDonutBucket(bucketName, acl string) error { - if err := dt.listDonutBuckets(); err != nil { +func (donut API) makeDonutBucket(bucketName, acl string) error { + if err := donut.listDonutBuckets(); err != nil { return iodine.New(err, nil) } - if _, ok := dt.buckets[bucketName]; ok { + if _, ok := donut.buckets[bucketName]; ok { return iodine.New(BucketExists{Bucket: bucketName}, nil) } - bucket, bucketMetadata, err := newBucket(bucketName, acl, dt.name, dt.nodes) + bucket, bucketMetadata, err := newBucket(bucketName, acl, donut.config.DonutName, donut.nodes) if err != nil { return iodine.New(err, nil) } nodeNumber := 0 - dt.buckets[bucketName] = bucket - for _, node := range dt.nodes { + donut.buckets[bucketName] = bucket + for _, node := range donut.nodes { disks, err := node.ListDisks() if err != nil { return iodine.New(err, nil) } for order, disk := range disks { bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order) - err := disk.MakeDir(filepath.Join(dt.name, bucketSlice)) + err := disk.MakeDir(filepath.Join(donut.config.DonutName, bucketSlice)) if err 
!= nil { return iodine.New(err, nil) } } nodeNumber = nodeNumber + 1 } - metadata, err := dt.getDonutBucketMetadata() + metadata, err := donut.getDonutBucketMetadata() if err != nil { if os.IsNotExist(iodine.ToError(err)) { metadata := new(AllBuckets) metadata.Buckets = make(map[string]BucketMetadata) metadata.Buckets[bucketName] = bucketMetadata - err = dt.setDonutBucketMetadata(metadata) + err = donut.setDonutBucketMetadata(metadata) if err != nil { return iodine.New(err, nil) } @@ -381,21 +338,21 @@ func (dt donut) makeDonutBucket(bucketName, acl string) error { return iodine.New(err, nil) } metadata.Buckets[bucketName] = bucketMetadata - err = dt.setDonutBucketMetadata(metadata) + err = donut.setDonutBucketMetadata(metadata) if err != nil { return iodine.New(err, nil) } return nil } -func (dt donut) listDonutBuckets() error { - for _, node := range dt.nodes { +func (donut API) listDonutBuckets() error { + for _, node := range donut.nodes { disks, err := node.ListDisks() if err != nil { return iodine.New(err, nil) } for _, disk := range disks { - dirs, err := disk.ListDir(dt.name) + dirs, err := disk.ListDir(donut.config.DonutName) if err != nil { return iodine.New(err, nil) } @@ -406,11 +363,11 @@ func (dt donut) listDonutBuckets() error { } bucketName := splitDir[0] // we dont need this once we cache from makeDonutBucket() - bucket, _, err := newBucket(bucketName, "private", dt.name, dt.nodes) + bucket, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes) if err != nil { return iodine.New(err, nil) } - dt.buckets[bucketName] = bucket + donut.buckets[bucketName] = bucket } } } diff --git a/pkg/storage/donut/donut_test.go b/pkg/storage/donut/donut-v1_test.go similarity index 89% rename from pkg/storage/donut/donut_test.go rename to pkg/storage/donut/donut-v1_test.go index e4f56fec4..10e5f3ef8 100644 --- a/pkg/storage/donut/donut_test.go +++ b/pkg/storage/donut/donut-v1_test.go @@ -55,13 +55,22 @@ func createTestNodeDiskMap(p string) 
map[string][]string { return nodes } -var dd Cache +var dd Interface func (s *MyDonutSuite) SetUpSuite(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) s.root = root - dd = NewCache(100000, time.Duration(1*time.Hour), "test", createTestNodeDiskMap(root)) + + conf := new(Config) + conf.DonutName = "test" + conf.NodeDiskMap = createTestNodeDiskMap(root) + conf.Expiration = time.Duration(1 * time.Hour) + conf.MaxSize = 100000 + + dd, err = New(conf) + c.Assert(err, IsNil) + // testing empty donut buckets, err := dd.ListBuckets() c.Assert(err, IsNil) @@ -145,7 +154,7 @@ func (s *MyDonutSuite) TestCreateMultipleBucketsAndList(c *C) { // test object create without bucket func (s *MyDonutSuite) TestNewObjectFailsWithoutBucket(c *C) { - _, err := dd.CreateObject("unknown", "obj", "", "", 0, nil) + _, err := dd.CreateObject("unknown", "obj", "", 0, nil, nil) c.Assert(err, Not(IsNil)) } @@ -160,7 +169,7 @@ func (s *MyDonutSuite) TestNewObjectMetadata(c *C) { err := dd.MakeBucket("foo6", "private") c.Assert(err, IsNil) - objectMetadata, err := dd.CreateObject("foo6", "obj", "application/json", expectedMd5Sum, int64(len(data)), reader) + objectMetadata, err := dd.CreateObject("foo6", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/json"}) c.Assert(err, IsNil) c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json") @@ -168,7 +177,7 @@ func (s *MyDonutSuite) TestNewObjectMetadata(c *C) { // test create object fails without name func (s *MyDonutSuite) TestNewObjectFailsWithEmptyName(c *C) { - _, err := dd.CreateObject("foo", "", "", "", 0, nil) + _, err := dd.CreateObject("foo", "", "", 0, nil, nil) c.Assert(err, Not(IsNil)) } @@ -184,7 +193,7 @@ func (s *MyDonutSuite) TestNewObjectCanBeWritten(c *C) { expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) reader := 
ioutil.NopCloser(bytes.NewReader([]byte(data))) - actualMetadata, err := dd.CreateObject("foo", "obj", "application/octet-stream", expectedMd5Sum, int64(len(data)), reader) + actualMetadata, err := dd.CreateObject("foo", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/octet-stream"}) c.Assert(err, IsNil) c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) @@ -206,11 +215,11 @@ func (s *MyDonutSuite) TestMultipleNewObjects(c *C) { one := ioutil.NopCloser(bytes.NewReader([]byte("one"))) - _, err := dd.CreateObject("foo5", "obj1", "", "", int64(len("one")), one) + _, err := dd.CreateObject("foo5", "obj1", "", int64(len("one")), one, nil) c.Assert(err, IsNil) two := ioutil.NopCloser(bytes.NewReader([]byte("two"))) - _, err = dd.CreateObject("foo5", "obj2", "", "", int64(len("two")), two) + _, err = dd.CreateObject("foo5", "obj2", "", int64(len("two")), two, nil) c.Assert(err, IsNil) var buffer1 bytes.Buffer @@ -259,7 +268,7 @@ func (s *MyDonutSuite) TestMultipleNewObjects(c *C) { c.Assert(objectsMetadata[1].Object, Equals, "obj2") three := ioutil.NopCloser(bytes.NewReader([]byte("three"))) - _, err = dd.CreateObject("foo5", "obj3", "", "", int64(len("three")), three) + _, err = dd.CreateObject("foo5", "obj3", "", int64(len("three")), three, nil) c.Assert(err, IsNil) var buffer bytes.Buffer diff --git a/pkg/storage/donut/cache.go b/pkg/storage/donut/donut-v2.go similarity index 63% rename from pkg/storage/donut/cache.go rename to pkg/storage/donut/donut-v2.go index 59b49a5fa..8cb9de52b 100644 --- a/pkg/storage/donut/cache.go +++ b/pkg/storage/donut/donut-v2.go @@ -33,6 +33,7 @@ import ( "time" "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/quick" "github.com/minio/minio/pkg/storage/donut/trove" ) @@ -41,15 +42,24 @@ const ( totalBuckets = 100 ) -// Cache - local variables -type Cache struct { - storedBuckets map[string]storedBucket +// Config donut config +type Config struct { + 
Version string `json:"version"` + MaxSize uint64 `json:"max-size"` + Expiration time.Duration `json:"expiration"` + DonutName string `json:"donut-name"` + NodeDiskMap map[string][]string `json:"node-disk-map"` +} + +// API - local variables +type API struct { + config *Config lock *sync.RWMutex objects *trove.Cache multiPartObjects *trove.Cache - maxSize uint64 - expiration time.Duration - donut Donut + storedBuckets map[string]storedBucket + nodes map[string]node + buckets map[string]bucket } // storedBucket saved bucket @@ -67,79 +77,85 @@ type multiPartSession struct { initiated time.Time } -type proxyWriter struct { - writer io.Writer - writtenBytes []byte -} - -func (r *proxyWriter) Write(p []byte) (n int, err error) { - n, err = r.writer.Write(p) - if err != nil { - return +// New instantiate a new donut +func New(c *Config) (Interface, error) { + if err := quick.CheckData(c); err != nil { + return nil, iodine.New(err, nil) } - r.writtenBytes = append(r.writtenBytes, p[0:n]...) - return -} - -func newProxyWriter(w io.Writer) *proxyWriter { - return &proxyWriter{writer: w, writtenBytes: nil} -} - -// NewCache new cache -func NewCache(maxSize uint64, expiration time.Duration, donutName string, nodeDiskMap map[string][]string) Cache { - c := Cache{} - c.storedBuckets = make(map[string]storedBucket) - c.objects = trove.NewCache(maxSize, expiration) - c.multiPartObjects = trove.NewCache(0, time.Duration(0)) - c.objects.OnExpired = c.expiredObject - c.multiPartObjects.OnExpired = c.expiredPart - c.lock = new(sync.RWMutex) - c.maxSize = maxSize - c.expiration = expiration + a := API{config: c} + a.storedBuckets = make(map[string]storedBucket) + a.nodes = make(map[string]node) + a.buckets = make(map[string]bucket) + a.objects = trove.NewCache(a.config.MaxSize, a.config.Expiration) + a.multiPartObjects = trove.NewCache(0, time.Duration(0)) + a.objects.OnExpired = a.expiredObject + a.multiPartObjects.OnExpired = a.expiredPart + a.lock = new(sync.RWMutex) // set up 
cache expiration - c.objects.ExpireObjects(time.Second * 5) - c.donut, _ = NewDonut(donutName, nodeDiskMap) - return c + a.objects.ExpireObjects(time.Second * 5) + + if len(a.config.NodeDiskMap) > 0 { + for k, v := range a.config.NodeDiskMap { + if len(v) == 0 { + return nil, iodine.New(InvalidDisksArgument{}, nil) + } + err := a.AttachNode(k, v) + if err != nil { + return nil, iodine.New(err, nil) + } + } + /// Initialization, populate all buckets into memory + buckets, err := a.listBuckets() + if err != nil { + return nil, iodine.New(err, nil) + } + for k, v := range buckets { + storedBucket := a.storedBuckets[k] + storedBucket.bucketMetadata = v + a.storedBuckets[k] = storedBucket + } + } + return a, nil } // GetObject - GET object from cache buffer -func (cache Cache) GetObject(w io.Writer, bucket string, object string) (int64, error) { - cache.lock.RLock() +func (donut API) GetObject(w io.Writer, bucket string, object string) (int64, error) { + donut.lock.RLock() if !IsValidBucket(bucket) { - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if !IsValidObjectName(object) { - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(ObjectNameInvalid{Object: object}, nil) } - if _, ok := cache.storedBuckets[bucket]; ok == false { - cache.lock.RUnlock() + if _, ok := donut.storedBuckets[bucket]; ok == false { + donut.lock.RUnlock() return 0, iodine.New(BucketNotFound{Bucket: bucket}, nil) } objectKey := bucket + "/" + object - data, ok := cache.objects.Get(objectKey) + data, ok := donut.objects.Get(objectKey) if !ok { - if cache.donut != nil { - reader, size, err := cache.donut.GetObject(bucket, object) + if len(donut.config.NodeDiskMap) > 0 { + reader, size, err := donut.getObject(bucket, object) if err != nil { - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(err, nil) } // new proxy writer to capture data read from disk - pw := newProxyWriter(w) + pw := NewProxyWriter(w) 
written, err := io.CopyN(pw, reader, size) if err != nil { - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(err, nil) } - cache.lock.RUnlock() + donut.lock.RUnlock() /// cache object read from disk { - cache.lock.Lock() - ok := cache.objects.Set(objectKey, pw.writtenBytes) - cache.lock.Unlock() + donut.lock.Lock() + ok := donut.objects.Set(objectKey, pw.writtenBytes) + donut.lock.Unlock() pw.writtenBytes = nil go debug.FreeOSMemory() if !ok { @@ -148,65 +164,65 @@ func (cache Cache) GetObject(w io.Writer, bucket string, object string) (int64, } return written, nil } - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(ObjectNotFound{Object: object}, nil) } - written, err := io.CopyN(w, bytes.NewBuffer(data), int64(cache.objects.Len(objectKey))) + written, err := io.CopyN(w, bytes.NewBuffer(data), int64(donut.objects.Len(objectKey))) if err != nil { return 0, iodine.New(err, nil) } - cache.lock.RUnlock() + donut.lock.RUnlock() return written, nil } // GetPartialObject - GET object from cache buffer range -func (cache Cache) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { +func (donut API) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { errParams := map[string]string{ "bucket": bucket, "object": object, "start": strconv.FormatInt(start, 10), "length": strconv.FormatInt(length, 10), } - cache.lock.RLock() + donut.lock.RLock() if !IsValidBucket(bucket) { - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, errParams) } if !IsValidObjectName(object) { - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(ObjectNameInvalid{Object: object}, errParams) } if start < 0 { - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(InvalidRange{ Start: start, Length: length, }, errParams) } objectKey := bucket + "/" + object - data, ok := cache.objects.Get(objectKey) + data, ok := 
donut.objects.Get(objectKey) if !ok { - if cache.donut != nil { - reader, _, err := cache.donut.GetObject(bucket, object) + if len(donut.config.NodeDiskMap) > 0 { + reader, _, err := donut.getObject(bucket, object) if err != nil { - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(err, nil) } if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil { - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(err, nil) } - pw := newProxyWriter(w) + pw := NewProxyWriter(w) written, err := io.CopyN(w, reader, length) if err != nil { - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(err, nil) } - cache.lock.RUnlock() + donut.lock.RUnlock() { - cache.lock.Lock() - ok := cache.objects.Set(objectKey, pw.writtenBytes) - cache.lock.Unlock() + donut.lock.Lock() + ok := donut.objects.Set(objectKey, pw.writtenBytes) + donut.lock.Unlock() pw.writtenBytes = nil go debug.FreeOSMemory() if !ok { @@ -215,72 +231,70 @@ func (cache Cache) GetPartialObject(w io.Writer, bucket, object string, start, l } return written, nil } - cache.lock.RUnlock() + donut.lock.RUnlock() return 0, iodine.New(ObjectNotFound{Object: object}, nil) } written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length) if err != nil { return 0, iodine.New(err, nil) } - cache.lock.RUnlock() + donut.lock.RUnlock() return written, nil } // GetBucketMetadata - -func (cache Cache) GetBucketMetadata(bucket string) (BucketMetadata, error) { - cache.lock.RLock() +func (donut API) GetBucketMetadata(bucket string) (BucketMetadata, error) { + donut.lock.RLock() if !IsValidBucket(bucket) { - cache.lock.RUnlock() + donut.lock.RUnlock() return BucketMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } - if _, ok := cache.storedBuckets[bucket]; ok == false { - if cache.donut == nil { - cache.lock.RUnlock() - return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) + if _, ok := donut.storedBuckets[bucket]; ok == false { + if 
len(donut.config.NodeDiskMap) > 0 { + bucketMetadata, err := donut.getBucketMetadata(bucket) + if err != nil { + donut.lock.RUnlock() + return BucketMetadata{}, iodine.New(err, nil) + } + storedBucket := donut.storedBuckets[bucket] + donut.lock.RUnlock() + { + donut.lock.Lock() + storedBucket.bucketMetadata = bucketMetadata + donut.storedBuckets[bucket] = storedBucket + donut.lock.Unlock() + } } - bucketMetadata, err := cache.donut.GetBucketMetadata(bucket) - if err != nil { - cache.lock.RUnlock() - return BucketMetadata{}, iodine.New(err, nil) - } - storedBucket := cache.storedBuckets[bucket] - cache.lock.RUnlock() - cache.lock.Lock() - storedBucket.bucketMetadata = bucketMetadata - cache.storedBuckets[bucket] = storedBucket - cache.lock.Unlock() + return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } - cache.lock.RUnlock() - return cache.storedBuckets[bucket].bucketMetadata, nil + donut.lock.RUnlock() + return donut.storedBuckets[bucket].bucketMetadata, nil } // SetBucketMetadata - -func (cache Cache) SetBucketMetadata(bucket, acl string) error { - cache.lock.RLock() +func (donut API) SetBucketMetadata(bucket string, metadata map[string]string) error { + donut.lock.RLock() if !IsValidBucket(bucket) { - cache.lock.RUnlock() + donut.lock.RUnlock() return iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } - if _, ok := cache.storedBuckets[bucket]; ok == false { - cache.lock.RUnlock() + if _, ok := donut.storedBuckets[bucket]; ok == false { + donut.lock.RUnlock() return iodine.New(BucketNotFound{Bucket: bucket}, nil) } - if strings.TrimSpace(acl) == "" { - acl = "private" - } - cache.lock.RUnlock() - cache.lock.Lock() - m := make(map[string]string) - m["acl"] = acl - if cache.donut != nil { - if err := cache.donut.SetBucketMetadata(bucket, m); err != nil { - return iodine.New(err, nil) + donut.lock.RUnlock() + donut.lock.Lock() + { + if len(donut.config.NodeDiskMap) > 0 { + if err := donut.setBucketMetadata(bucket, metadata); err != nil { + 
return iodine.New(err, nil) + } } + storedBucket := donut.storedBuckets[bucket] + storedBucket.bucketMetadata.ACL = BucketACL(metadata["acl"]) + donut.storedBuckets[bucket] = storedBucket } - storedBucket := cache.storedBuckets[bucket] - storedBucket.bucketMetadata.ACL = BucketACL(acl) - cache.storedBuckets[bucket] = storedBucket - cache.lock.Unlock() + donut.lock.Unlock() return nil } @@ -304,44 +318,45 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error { } // CreateObject - -func (cache Cache) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) { - if size > int64(cache.maxSize) { +func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string) (ObjectMetadata, error) { + if size > int64(donut.config.MaxSize) { generic := GenericObjectError{Bucket: bucket, Object: key} return ObjectMetadata{}, iodine.New(EntityTooLarge{ GenericObjectError: generic, Size: strconv.FormatInt(size, 10), - MaxSize: strconv.FormatUint(cache.maxSize, 10), + MaxSize: strconv.FormatUint(donut.config.MaxSize, 10), }, nil) } - objectMetadata, err := cache.createObject(bucket, key, contentType, expectedMD5Sum, size, data) + contentType := metadata["contentType"] + objectMetadata, err := donut.createObject(bucket, key, contentType, expectedMD5Sum, size, data) // free debug.FreeOSMemory() return objectMetadata, iodine.New(err, nil) } // createObject - PUT object to cache buffer -func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) { - cache.lock.RLock() +func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) { + donut.lock.RLock() if !IsValidBucket(bucket) { - cache.lock.RUnlock() + donut.lock.RUnlock() return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if !IsValidObjectName(key) { 
- cache.lock.RUnlock() + donut.lock.RUnlock() return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil) } - if _, ok := cache.storedBuckets[bucket]; ok == false { - cache.lock.RUnlock() + if _, ok := donut.storedBuckets[bucket]; ok == false { + donut.lock.RUnlock() return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } - storedBucket := cache.storedBuckets[bucket] + storedBucket := donut.storedBuckets[bucket] // get object key objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { - cache.lock.RUnlock() + donut.lock.RUnlock() return ObjectMetadata{}, iodine.New(ObjectExists{Object: key}, nil) } - cache.lock.RUnlock() + donut.lock.RUnlock() if contentType == "" { contentType = "application/octet-stream" @@ -356,15 +371,15 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } - if cache.donut != nil { - objMetadata, err := cache.donut.PutObject(bucket, key, expectedMD5Sum, data, map[string]string{"contentType": contentType}) + if len(donut.config.NodeDiskMap) > 0 { + objMetadata, err := donut.putObject(bucket, key, expectedMD5Sum, data, map[string]string{"contentType": contentType}) if err != nil { return ObjectMetadata{}, iodine.New(err, nil) } - cache.lock.Lock() + donut.lock.Lock() storedBucket.objectMetadata[objectKey] = objMetadata - cache.storedBuckets[bucket] = storedBucket - cache.lock.Unlock() + donut.storedBuckets[bucket] = storedBucket + donut.lock.Unlock() return objMetadata, nil } // calculate md5 @@ -382,9 +397,9 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, break } hash.Write(byteBuffer[0:length]) - cache.lock.Lock() - ok := cache.objects.Append(objectKey, byteBuffer[0:length]) - cache.lock.Unlock() + donut.lock.Lock() + ok := donut.objects.Append(objectKey, byteBuffer[0:length]) + donut.lock.Unlock() if !ok { return ObjectMetadata{}, 
iodine.New(InternalError{}, nil) } @@ -416,40 +431,40 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, Size: int64(totalLength), } - cache.lock.Lock() + donut.lock.Lock() storedBucket.objectMetadata[objectKey] = newObject - cache.storedBuckets[bucket] = storedBucket - cache.lock.Unlock() + donut.storedBuckets[bucket] = storedBucket + donut.lock.Unlock() return newObject, nil } // MakeBucket - create bucket in cache -func (cache Cache) MakeBucket(bucketName, acl string) error { - cache.lock.RLock() - if len(cache.storedBuckets) == totalBuckets { - cache.lock.RUnlock() +func (donut API) MakeBucket(bucketName, acl string) error { + donut.lock.RLock() + if len(donut.storedBuckets) == totalBuckets { + donut.lock.RUnlock() return iodine.New(TooManyBuckets{Bucket: bucketName}, nil) } if !IsValidBucket(bucketName) { - cache.lock.RUnlock() + donut.lock.RUnlock() return iodine.New(BucketNameInvalid{Bucket: bucketName}, nil) } if !IsValidBucketACL(acl) { - cache.lock.RUnlock() + donut.lock.RUnlock() return iodine.New(InvalidACL{ACL: acl}, nil) } - if _, ok := cache.storedBuckets[bucketName]; ok == true { - cache.lock.RUnlock() + if _, ok := donut.storedBuckets[bucketName]; ok == true { + donut.lock.RUnlock() return iodine.New(BucketExists{Bucket: bucketName}, nil) } - cache.lock.RUnlock() + donut.lock.RUnlock() if strings.TrimSpace(acl) == "" { // default is private acl = "private" } - if cache.donut != nil { - if err := cache.donut.MakeBucket(bucketName, BucketACL(acl)); err != nil { + if len(donut.config.NodeDiskMap) > 0 { + if err := donut.makeBucket(bucketName, BucketACL(acl)); err != nil { return iodine.New(err, nil) } } @@ -461,29 +476,29 @@ func (cache Cache) MakeBucket(bucketName, acl string) error { newBucket.bucketMetadata.Name = bucketName newBucket.bucketMetadata.Created = time.Now().UTC() newBucket.bucketMetadata.ACL = BucketACL(acl) - cache.lock.Lock() - cache.storedBuckets[bucketName] = newBucket - cache.lock.Unlock() + 
donut.lock.Lock() + donut.storedBuckets[bucketName] = newBucket + donut.lock.Unlock() return nil } // ListObjects - list objects from cache -func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) { - cache.lock.RLock() - defer cache.lock.RUnlock() +func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) { + donut.lock.RLock() + defer donut.lock.RUnlock() if !IsValidBucket(bucket) { return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if !IsValidPrefix(resources.Prefix) { return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(ObjectNameInvalid{Object: resources.Prefix}, nil) } - if _, ok := cache.storedBuckets[bucket]; ok == false { + if _, ok := donut.storedBuckets[bucket]; ok == false { return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } var results []ObjectMetadata var keys []string - if cache.donut != nil { - listObjects, err := cache.donut.ListObjects( + if len(donut.config.NodeDiskMap) > 0 { + listObjects, err := donut.listObjects( bucket, resources.Prefix, resources.Marker, @@ -507,7 +522,7 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) } return results, resources, nil } - storedBucket := cache.storedBuckets[bucket] + storedBucket := donut.storedBuckets[bucket] for key := range storedBucket.objectMetadata { if strings.HasPrefix(key, bucket+"/") { key = key[len(bucket)+1:] @@ -561,11 +576,11 @@ func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name } // ListBuckets - List buckets from cache -func (cache Cache) ListBuckets() ([]BucketMetadata, error) { - cache.lock.RLock() - defer cache.lock.RUnlock() +func (donut API) ListBuckets() ([]BucketMetadata, error) { + 
donut.lock.RLock() + defer donut.lock.RUnlock() var results []BucketMetadata - for _, bucket := range cache.storedBuckets { + for _, bucket := range donut.storedBuckets { results = append(results, bucket.bucketMetadata) } sort.Sort(byBucketName(results)) @@ -573,50 +588,50 @@ func (cache Cache) ListBuckets() ([]BucketMetadata, error) { } // GetObjectMetadata - get object metadata from cache -func (cache Cache) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) { - cache.lock.RLock() +func (donut API) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) { + donut.lock.RLock() // check if bucket exists if !IsValidBucket(bucket) { - cache.lock.RUnlock() + donut.lock.RUnlock() return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if !IsValidObjectName(key) { - cache.lock.RUnlock() + donut.lock.RUnlock() return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil) } - if _, ok := cache.storedBuckets[bucket]; ok == false { - cache.lock.RUnlock() + if _, ok := donut.storedBuckets[bucket]; ok == false { + donut.lock.RUnlock() return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } - storedBucket := cache.storedBuckets[bucket] + storedBucket := donut.storedBuckets[bucket] objectKey := bucket + "/" + key if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok == true { - cache.lock.RUnlock() + donut.lock.RUnlock() return objMetadata, nil } - if cache.donut != nil { - objMetadata, err := cache.donut.GetObjectMetadata(bucket, key) - cache.lock.RUnlock() + if len(donut.config.NodeDiskMap) > 0 { + objMetadata, err := donut.getObjectMetadata(bucket, key) + donut.lock.RUnlock() if err != nil { return ObjectMetadata{}, iodine.New(err, nil) } // update - cache.lock.Lock() + donut.lock.Lock() storedBucket.objectMetadata[objectKey] = objMetadata - cache.lock.Unlock() + donut.lock.Unlock() return objMetadata, nil } - cache.lock.RUnlock() + donut.lock.RUnlock() return ObjectMetadata{}, 
iodine.New(ObjectNotFound{Object: key}, nil) } -func (cache Cache) expiredObject(a ...interface{}) { - cacheStats := cache.objects.Stats() +func (donut API) expiredObject(a ...interface{}) { + cacheStats := donut.objects.Stats() log.Printf("CurrentSize: %d, CurrentItems: %d, TotalExpirations: %d", cacheStats.Bytes, cacheStats.Items, cacheStats.Expired) key := a[0].(string) // loop through all buckets - for _, storedBucket := range cache.storedBuckets { + for _, storedBucket := range donut.storedBuckets { delete(storedBucket.objectMetadata, key) } debug.FreeOSMemory() diff --git a/pkg/storage/donut/cache_test.go b/pkg/storage/donut/donut-v2_test.go similarity index 88% rename from pkg/storage/donut/cache_test.go rename to pkg/storage/donut/donut-v2_test.go index 644df5f00..556c0ddc0 100644 --- a/pkg/storage/donut/cache_test.go +++ b/pkg/storage/donut/donut-v2_test.go @@ -34,11 +34,20 @@ type MyCacheSuite struct{} var _ = Suite(&MyCacheSuite{}) -var dc Cache +var dc Interface func (s *MyCacheSuite) SetUpSuite(c *C) { - // no donut this time - dc = NewCache(100000, time.Duration(1*time.Hour), "", nil) + // test only cache + conf := new(Config) + conf.DonutName = "" + conf.NodeDiskMap = nil + conf.Expiration = time.Duration(1 * time.Hour) + conf.MaxSize = 100000 + + var err error + dc, err = New(conf) + c.Assert(err, IsNil) + // testing empty cache buckets, err := dc.ListBuckets() c.Assert(err, IsNil) @@ -118,7 +127,7 @@ func (s *MyCacheSuite) TestCreateMultipleBucketsAndList(c *C) { // test object create without bucket func (s *MyCacheSuite) TestNewObjectFailsWithoutBucket(c *C) { - _, err := dc.CreateObject("unknown", "obj", "", "", 0, nil) + _, err := dc.CreateObject("unknown", "obj", "", 0, nil, nil) c.Assert(err, Not(IsNil)) } @@ -133,7 +142,7 @@ func (s *MyCacheSuite) TestNewObjectMetadata(c *C) { err := dc.MakeBucket("foo6", "private") c.Assert(err, IsNil) - objectMetadata, err := dc.CreateObject("foo6", "obj", "application/json", expectedMd5Sum, 
int64(len(data)), reader) + objectMetadata, err := dc.CreateObject("foo6", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/json"}) c.Assert(err, IsNil) c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json") @@ -141,7 +150,7 @@ func (s *MyCacheSuite) TestNewObjectMetadata(c *C) { // test create object fails without name func (s *MyCacheSuite) TestNewObjectFailsWithEmptyName(c *C) { - _, err := dc.CreateObject("foo", "", "", "", 0, nil) + _, err := dc.CreateObject("foo", "", "", 0, nil, nil) c.Assert(err, Not(IsNil)) } @@ -157,7 +166,7 @@ func (s *MyCacheSuite) TestNewObjectCanBeWritten(c *C) { expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) - actualMetadata, err := dc.CreateObject("foo", "obj", "application/octet-stream", expectedMd5Sum, int64(len(data)), reader) + actualMetadata, err := dc.CreateObject("foo", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/octet-stream"}) c.Assert(err, IsNil) c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) @@ -179,11 +188,11 @@ func (s *MyCacheSuite) TestMultipleNewObjects(c *C) { one := ioutil.NopCloser(bytes.NewReader([]byte("one"))) - _, err := dc.CreateObject("foo5", "obj1", "", "", int64(len("one")), one) + _, err := dc.CreateObject("foo5", "obj1", "", int64(len("one")), one, nil) c.Assert(err, IsNil) two := ioutil.NopCloser(bytes.NewReader([]byte("two"))) - _, err = dc.CreateObject("foo5", "obj2", "", "", int64(len("two")), two) + _, err = dc.CreateObject("foo5", "obj2", "", int64(len("two")), two, nil) c.Assert(err, IsNil) var buffer1 bytes.Buffer @@ -232,7 +241,7 @@ func (s *MyCacheSuite) TestMultipleNewObjects(c *C) { c.Assert(objectsMetadata[1].Object, Equals, "obj2") three := ioutil.NopCloser(bytes.NewReader([]byte("three"))) 
- _, err = dc.CreateObject("foo5", "obj3", "", "", int64(len("three")), three) + _, err = dc.CreateObject("foo5", "obj3", "", int64(len("three")), three, nil) c.Assert(err, IsNil) var buffer bytes.Buffer diff --git a/pkg/storage/donut/interfaces.go b/pkg/storage/donut/interfaces.go index b1ea80318..d675c5b8c 100644 --- a/pkg/storage/donut/interfaces.go +++ b/pkg/storage/donut/interfaces.go @@ -20,8 +20,8 @@ import "io" // Collection of Donut specification interfaces -// Donut is a collection of object storage and management interface -type Donut interface { +// Interface is a collection of object storage and management interface +type Interface interface { ObjectStorage Management } @@ -31,16 +31,29 @@ type ObjectStorage interface { // Storage service operations GetBucketMetadata(bucket string) (BucketMetadata, error) SetBucketMetadata(bucket string, metadata map[string]string) error - ListBuckets() (map[string]BucketMetadata, error) - MakeBucket(bucket string, acl BucketACL) error + ListBuckets() ([]BucketMetadata, error) + MakeBucket(bucket string, ACL string) error // Bucket operations - ListObjects(bucket, prefix, marker, delim string, maxKeys int) (ListObjects, error) + ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) // Object operations - GetObject(bucket, object string) (io.ReadCloser, int64, error) + GetObject(w io.Writer, bucket, object string) (int64, error) + GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) GetObjectMetadata(bucket, object string) (ObjectMetadata, error) - PutObject(bucket, object, expectedMD5Sum string, reader io.Reader, metadata map[string]string) (ObjectMetadata, error) + CreateObject(bucket, object, expectedMD5Sum string, size int64, reader io.Reader, metadata map[string]string) (ObjectMetadata, error) + + Multipart +} + +// Multipart API +type Multipart interface { + NewMultipartUpload(bucket, key, contentType string) (string, 
error) + AbortMultipartUpload(bucket, key, uploadID string) error + CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) + CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (ObjectMetadata, error) + ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) + ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) } // Management is a donut management system interface diff --git a/pkg/storage/donut/management.go b/pkg/storage/donut/management.go index 17f89e4ef..c897524b2 100644 --- a/pkg/storage/donut/management.go +++ b/pkg/storage/donut/management.go @@ -25,14 +25,14 @@ import ( ) // Heal - heal a donut and fix bad data blocks -func (dt donut) Heal() error { +func (donut API) Heal() error { return iodine.New(NotImplemented{Function: "Heal"}, nil) } // Info - return info about donut configuration -func (dt donut) Info() (nodeDiskMap map[string][]string, err error) { +func (donut API) Info() (nodeDiskMap map[string][]string, err error) { nodeDiskMap = make(map[string][]string) - for nodeName, node := range dt.nodes { + for nodeName, node := range donut.nodes { disks, err := node.ListDisks() if err != nil { return nil, iodine.New(err, nil) @@ -47,7 +47,7 @@ func (dt donut) Info() (nodeDiskMap map[string][]string, err error) { } // AttachNode - attach node -func (dt donut) AttachNode(hostname string, disks []string) error { +func (donut API) AttachNode(hostname string, disks []string) error { if hostname == "" || len(disks) == 0 { return iodine.New(InvalidArgument{}, nil) } @@ -55,13 +55,13 @@ func (dt donut) AttachNode(hostname string, disks []string) error { if err != nil { return iodine.New(err, nil) } - dt.nodes[hostname] = node + donut.nodes[hostname] = node for i, d := range disks { newDisk, err := disk.New(d) if err != nil { return 
iodine.New(err, nil) } - if err := newDisk.MakeDir(dt.name); err != nil { + if err := newDisk.MakeDir(donut.config.DonutName); err != nil { return iodine.New(err, nil) } if err := node.AttachDisk(newDisk, i); err != nil { @@ -72,21 +72,21 @@ func (dt donut) AttachNode(hostname string, disks []string) error { } // DetachNode - detach node -func (dt donut) DetachNode(hostname string) error { - delete(dt.nodes, hostname) +func (donut API) DetachNode(hostname string) error { + delete(donut.nodes, hostname) return nil } // SaveConfig - save donut configuration -func (dt donut) SaveConfig() error { +func (donut API) SaveConfig() error { nodeDiskMap := make(map[string][]string) - for hostname, node := range dt.nodes { + for hostname, node := range donut.nodes { disks, err := node.ListDisks() if err != nil { return iodine.New(err, nil) } for order, disk := range disks { - donutConfigPath := filepath.Join(dt.name, donutConfig) + donutConfigPath := filepath.Join(donut.config.DonutName, donutConfig) donutConfigWriter, err := disk.CreateFile(donutConfigPath) defer donutConfigWriter.Close() if err != nil { @@ -103,6 +103,6 @@ func (dt donut) SaveConfig() error { } // LoadConfig - load configuration -func (dt donut) LoadConfig() error { +func (donut API) LoadConfig() error { return iodine.New(NotImplemented{Function: "LoadConfig"}, nil) } diff --git a/pkg/storage/donut/cache-multipart.go b/pkg/storage/donut/multipart.go similarity index 75% rename from pkg/storage/donut/cache-multipart.go rename to pkg/storage/donut/multipart.go index e89b57ee6..1c76451c1 100644 --- a/pkg/storage/donut/cache-multipart.go +++ b/pkg/storage/donut/multipart.go @@ -35,55 +35,55 @@ import ( ) // NewMultipartUpload - -func (cache Cache) NewMultipartUpload(bucket, key, contentType string) (string, error) { - cache.lock.RLock() +func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, error) { + donut.lock.RLock() if !IsValidBucket(bucket) { - cache.lock.RUnlock() + 
donut.lock.RUnlock() return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if !IsValidObjectName(key) { - cache.lock.RUnlock() + donut.lock.RUnlock() return "", iodine.New(ObjectNameInvalid{Object: key}, nil) } - if _, ok := cache.storedBuckets[bucket]; ok == false { - cache.lock.RUnlock() + if _, ok := donut.storedBuckets[bucket]; ok == false { + donut.lock.RUnlock() return "", iodine.New(BucketNotFound{Bucket: bucket}, nil) } - storedBucket := cache.storedBuckets[bucket] + storedBucket := donut.storedBuckets[bucket] objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { - cache.lock.RUnlock() + donut.lock.RUnlock() return "", iodine.New(ObjectExists{Object: key}, nil) } - cache.lock.RUnlock() + donut.lock.RUnlock() - cache.lock.Lock() + donut.lock.Lock() id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String()) uploadIDSum := sha512.Sum512(id) uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47] - cache.storedBuckets[bucket].multiPartSession[key] = multiPartSession{ + donut.storedBuckets[bucket].multiPartSession[key] = multiPartSession{ uploadID: uploadID, initiated: time.Now(), totalParts: 0, } - cache.lock.Unlock() + donut.lock.Unlock() return uploadID, nil } // AbortMultipartUpload - -func (cache Cache) AbortMultipartUpload(bucket, key, uploadID string) error { - cache.lock.RLock() - storedBucket := cache.storedBuckets[bucket] +func (donut API) AbortMultipartUpload(bucket, key, uploadID string) error { + donut.lock.RLock() + storedBucket := donut.storedBuckets[bucket] if storedBucket.multiPartSession[key].uploadID != uploadID { - cache.lock.RUnlock() + donut.lock.RUnlock() return iodine.New(InvalidUploadID{UploadID: uploadID}, nil) } - cache.lock.RUnlock() + donut.lock.RUnlock() - cache.cleanupMultiparts(bucket, key, uploadID) - cache.cleanupMultipartSession(bucket, key, uploadID) + donut.cleanupMultiparts(bucket, key, uploadID) + 
donut.cleanupMultipartSession(bucket, key, uploadID) return nil } @@ -92,17 +92,17 @@ func getMultipartKey(key string, uploadID string, partNumber int) string { } // CreateObjectPart - -func (cache Cache) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { +func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { // Verify upload id - cache.lock.RLock() - storedBucket := cache.storedBuckets[bucket] + donut.lock.RLock() + storedBucket := donut.storedBuckets[bucket] if storedBucket.multiPartSession[key].uploadID != uploadID { - cache.lock.RUnlock() + donut.lock.RUnlock() return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil) } - cache.lock.RUnlock() + donut.lock.RUnlock() - etag, err := cache.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data) + etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data) if err != nil { return "", iodine.New(err, nil) } @@ -112,28 +112,28 @@ func (cache Cache) CreateObjectPart(bucket, key, uploadID string, partID int, co } // createObject - PUT object to cache buffer -func (cache Cache) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { - cache.lock.RLock() +func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { + donut.lock.RLock() if !IsValidBucket(bucket) { - cache.lock.RUnlock() + donut.lock.RUnlock() return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } if !IsValidObjectName(key) { - cache.lock.RUnlock() + donut.lock.RUnlock() return "", iodine.New(ObjectNameInvalid{Object: key}, nil) } - if _, ok := cache.storedBuckets[bucket]; ok == false { - cache.lock.RUnlock() + 
if _, ok := donut.storedBuckets[bucket]; ok == false { + donut.lock.RUnlock() return "", iodine.New(BucketNotFound{Bucket: bucket}, nil) } - storedBucket := cache.storedBuckets[bucket] + storedBucket := donut.storedBuckets[bucket] // get object key partKey := bucket + "/" + getMultipartKey(key, uploadID, partID) if _, ok := storedBucket.partMetadata[partKey]; ok == true { - cache.lock.RUnlock() + donut.lock.RUnlock() return storedBucket.partMetadata[partKey].ETag, nil } - cache.lock.RUnlock() + donut.lock.RUnlock() if contentType == "" { contentType = "application/octet-stream" @@ -172,9 +172,9 @@ func (cache Cache) createObjectPart(bucket, key, uploadID string, partID int, co md5SumBytes := hash.Sum(nil) totalLength := int64(len(readBytes)) - cache.lock.Lock() - cache.multiPartObjects.Set(partKey, readBytes) - cache.lock.Unlock() + donut.lock.Lock() + donut.multiPartObjects.Set(partKey, readBytes) + donut.lock.Unlock() // setting up for de-allocation readBytes = nil @@ -192,32 +192,32 @@ func (cache Cache) createObjectPart(bucket, key, uploadID string, partID int, co Size: totalLength, } - cache.lock.Lock() + donut.lock.Lock() storedBucket.partMetadata[partKey] = newPart multiPartSession := storedBucket.multiPartSession[key] multiPartSession.totalParts++ storedBucket.multiPartSession[key] = multiPartSession - cache.storedBuckets[bucket] = storedBucket - cache.lock.Unlock() + donut.storedBuckets[bucket] = storedBucket + donut.lock.Unlock() return md5Sum, nil } -func (cache Cache) cleanupMultipartSession(bucket, key, uploadID string) { - cache.lock.Lock() - defer cache.lock.Unlock() - delete(cache.storedBuckets[bucket].multiPartSession, key) +func (donut API) cleanupMultipartSession(bucket, key, uploadID string) { + donut.lock.Lock() + defer donut.lock.Unlock() + delete(donut.storedBuckets[bucket].multiPartSession, key) } -func (cache Cache) cleanupMultiparts(bucket, key, uploadID string) { - for i := 1; i <= 
cache.storedBuckets[bucket].multiPartSession[key].totalParts; i++ { +func (donut API) cleanupMultiparts(bucket, key, uploadID string) { + for i := 1; i <= donut.storedBuckets[bucket].multiPartSession[key].totalParts; i++ { objectKey := bucket + "/" + getMultipartKey(key, uploadID, i) - cache.multiPartObjects.Delete(objectKey) + donut.multiPartObjects.Delete(objectKey) } } // CompleteMultipartUpload - -func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (ObjectMetadata, error) { +func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (ObjectMetadata, error) { if !IsValidBucket(bucket) { return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) } @@ -225,26 +225,26 @@ func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts m return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil) } // Verify upload id - cache.lock.RLock() - if _, ok := cache.storedBuckets[bucket]; ok == false { - cache.lock.RUnlock() + donut.lock.RLock() + if _, ok := donut.storedBuckets[bucket]; ok == false { + donut.lock.RUnlock() return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } - storedBucket := cache.storedBuckets[bucket] + storedBucket := donut.storedBuckets[bucket] if storedBucket.multiPartSession[key].uploadID != uploadID { - cache.lock.RUnlock() + donut.lock.RUnlock() return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, nil) } - cache.lock.RUnlock() + donut.lock.RUnlock() - cache.lock.Lock() + donut.lock.Lock() var size int64 var fullObject bytes.Buffer for i := 1; i <= len(parts); i++ { recvMD5 := parts[i] - object, ok := cache.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i)) + object, ok := donut.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i)) if ok == false { - cache.lock.Unlock() + donut.lock.Unlock() return ObjectMetadata{}, iodine.New(errors.New("missing part: 
"+strconv.Itoa(i)), nil) } size += int64(len(object)) @@ -264,20 +264,20 @@ func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts m object = nil go debug.FreeOSMemory() } - cache.lock.Unlock() + donut.lock.Unlock() md5sumSlice := md5.Sum(fullObject.Bytes()) // this is needed for final verification inside CreateObject, do not convert this to hex md5sum := base64.StdEncoding.EncodeToString(md5sumSlice[:]) - objectMetadata, err := cache.CreateObject(bucket, key, "", md5sum, size, &fullObject) + objectMetadata, err := donut.CreateObject(bucket, key, md5sum, size, &fullObject, nil) if err != nil { // No need to call internal cleanup functions here, caller will call AbortMultipartUpload() // which would in-turn cleanup properly in accordance with S3 Spec return ObjectMetadata{}, iodine.New(err, nil) } fullObject.Reset() - cache.cleanupMultiparts(bucket, key, uploadID) - cache.cleanupMultipartSession(bucket, key, uploadID) + donut.cleanupMultiparts(bucket, key, uploadID) + donut.cleanupMultipartSession(bucket, key, uploadID) return objectMetadata, nil } @@ -289,14 +289,14 @@ func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key } // ListMultipartUploads - -func (cache Cache) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) { +func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) { // TODO handle delimiter - cache.lock.RLock() - defer cache.lock.RUnlock() - if _, ok := cache.storedBuckets[bucket]; ok == false { + donut.lock.RLock() + defer donut.lock.RUnlock() + if _, ok := donut.storedBuckets[bucket]; ok == false { return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } - storedBucket := cache.storedBuckets[bucket] + storedBucket := donut.storedBuckets[bucket] var uploads []*UploadMetadata 
for key, session := range storedBucket.multiPartSession { @@ -351,14 +351,14 @@ func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } // ListObjectParts - -func (cache Cache) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) { +func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) { // Verify upload id - cache.lock.RLock() - defer cache.lock.RUnlock() - if _, ok := cache.storedBuckets[bucket]; ok == false { + donut.lock.RLock() + defer donut.lock.RUnlock() + if _, ok := donut.storedBuckets[bucket]; ok == false { return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) } - storedBucket := cache.storedBuckets[bucket] + storedBucket := donut.storedBuckets[bucket] if _, ok := storedBucket.multiPartSession[key]; ok == false { return ObjectResourcesMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil) } @@ -395,10 +395,10 @@ func (cache Cache) ListObjectParts(bucket, key string, resources ObjectResources return objectResourcesMetadata, nil } -func (cache Cache) expiredPart(a ...interface{}) { +func (donut API) expiredPart(a ...interface{}) { key := a[0].(string) // loop through all buckets - for _, storedBucket := range cache.storedBuckets { + for _, storedBucket := range donut.storedBuckets { delete(storedBucket.partMetadata, key) } debug.FreeOSMemory() diff --git a/pkg/storage/donut/rebalance.go b/pkg/storage/donut/rebalance.go index 3d47f2a32..7db4cfedd 100644 --- a/pkg/storage/donut/rebalance.go +++ b/pkg/storage/donut/rebalance.go @@ -26,11 +26,11 @@ import ( ) // Rebalance - -func (d donut) Rebalance() error { +func (donut API) Rebalance() error { var totalOffSetLength int var newDisks []disk.Disk var existingDirs []os.FileInfo - for _, node := range d.nodes { + for _, node := range donut.nodes { disks, err := node.ListDisks() 
if err != nil { return iodine.New(err, nil) @@ -38,7 +38,7 @@ func (d donut) Rebalance() error { totalOffSetLength = len(disks) fmt.Println(totalOffSetLength) for _, disk := range disks { - dirs, err := disk.ListDir(d.name) + dirs, err := disk.ListDir(donut.config.DonutName) if err != nil { return iodine.New(err, nil) } From 8a4e7bcdcf21b4c8d7a3af7bb8aec7e8344848d6 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Fri, 3 Jul 2015 00:29:54 -0700 Subject: [PATCH 18/19] Add full API tests, move storage/donut to donut, add disk tests as well --- pkg/{storage => }/donut/.gitignore | 0 pkg/{storage => }/donut/LICENSE | 0 pkg/{storage => }/donut/README.md | 0 pkg/{storage => }/donut/bucket.go | 0 pkg/{storage => }/donut/common.go | 0 pkg/{storage => }/donut/config.go | 0 pkg/{storage => }/donut/date.go | 0 pkg/{storage => }/donut/definitions.go | 0 pkg/{storage => }/donut/disk/disk.go | 0 pkg/{storage => }/donut/disk/disk_darwin.go | 0 pkg/{storage => }/donut/disk/disk_linux.go | 0 pkg/donut/disk/disk_test.go | 78 ++ pkg/donut/disk/errors.go | 33 + pkg/{storage => }/donut/donut-v1.go | 0 pkg/{storage => }/donut/donut-v1_test.go | 0 pkg/{storage => }/donut/donut-v2.go | 14 +- pkg/{storage => }/donut/donut-v2_test.go | 0 pkg/{storage => }/donut/encoder.go | 0 pkg/{storage => }/donut/errors.go | 0 pkg/{storage => }/donut/interfaces.go | 0 pkg/{storage => }/donut/management.go | 2 +- pkg/{storage => }/donut/multipart.go | 20 +- pkg/{storage => }/donut/node.go | 2 +- pkg/{storage => }/donut/rebalance.go | 2 +- pkg/{storage => }/donut/trove/trove.go | 0 pkg/{storage => }/donut/trove/trove_test.go | 0 pkg/{storage => }/donut/utils.go | 0 pkg/server/api/api.go | 2 +- pkg/server/api/bucket-handlers.go | 11 +- pkg/server/api/generic-handlers.go | 2 +- pkg/server/api/headers.go | 2 +- pkg/server/api/object-handlers.go | 33 +- pkg/server/api/resources.go | 2 +- pkg/server/api/response.go | 2 +- pkg/server/api_test.go | 948 ++++++++++++++++++++ pkg/storage/donut/disk/errors.go | 17 
- 36 files changed, 1110 insertions(+), 60 deletions(-) rename pkg/{storage => }/donut/.gitignore (100%) rename pkg/{storage => }/donut/LICENSE (100%) rename pkg/{storage => }/donut/README.md (100%) rename pkg/{storage => }/donut/bucket.go (100%) rename pkg/{storage => }/donut/common.go (100%) rename pkg/{storage => }/donut/config.go (100%) rename pkg/{storage => }/donut/date.go (100%) rename pkg/{storage => }/donut/definitions.go (100%) rename pkg/{storage => }/donut/disk/disk.go (100%) rename pkg/{storage => }/donut/disk/disk_darwin.go (100%) rename pkg/{storage => }/donut/disk/disk_linux.go (100%) create mode 100644 pkg/donut/disk/disk_test.go create mode 100644 pkg/donut/disk/errors.go rename pkg/{storage => }/donut/donut-v1.go (100%) rename pkg/{storage => }/donut/donut-v1_test.go (100%) rename pkg/{storage => }/donut/donut-v2.go (99%) rename pkg/{storage => }/donut/donut-v2_test.go (100%) rename pkg/{storage => }/donut/encoder.go (100%) rename pkg/{storage => }/donut/errors.go (100%) rename pkg/{storage => }/donut/interfaces.go (100%) rename pkg/{storage => }/donut/management.go (98%) rename pkg/{storage => }/donut/multipart.go (98%) rename pkg/{storage => }/donut/node.go (97%) rename pkg/{storage => }/donut/rebalance.go (96%) rename pkg/{storage => }/donut/trove/trove.go (100%) rename pkg/{storage => }/donut/trove/trove_test.go (100%) rename pkg/{storage => }/donut/utils.go (100%) create mode 100644 pkg/server/api_test.go delete mode 100644 pkg/storage/donut/disk/errors.go diff --git a/pkg/storage/donut/.gitignore b/pkg/donut/.gitignore similarity index 100% rename from pkg/storage/donut/.gitignore rename to pkg/donut/.gitignore diff --git a/pkg/storage/donut/LICENSE b/pkg/donut/LICENSE similarity index 100% rename from pkg/storage/donut/LICENSE rename to pkg/donut/LICENSE diff --git a/pkg/storage/donut/README.md b/pkg/donut/README.md similarity index 100% rename from pkg/storage/donut/README.md rename to pkg/donut/README.md diff --git 
a/pkg/storage/donut/bucket.go b/pkg/donut/bucket.go similarity index 100% rename from pkg/storage/donut/bucket.go rename to pkg/donut/bucket.go diff --git a/pkg/storage/donut/common.go b/pkg/donut/common.go similarity index 100% rename from pkg/storage/donut/common.go rename to pkg/donut/common.go diff --git a/pkg/storage/donut/config.go b/pkg/donut/config.go similarity index 100% rename from pkg/storage/donut/config.go rename to pkg/donut/config.go diff --git a/pkg/storage/donut/date.go b/pkg/donut/date.go similarity index 100% rename from pkg/storage/donut/date.go rename to pkg/donut/date.go diff --git a/pkg/storage/donut/definitions.go b/pkg/donut/definitions.go similarity index 100% rename from pkg/storage/donut/definitions.go rename to pkg/donut/definitions.go diff --git a/pkg/storage/donut/disk/disk.go b/pkg/donut/disk/disk.go similarity index 100% rename from pkg/storage/donut/disk/disk.go rename to pkg/donut/disk/disk.go diff --git a/pkg/storage/donut/disk/disk_darwin.go b/pkg/donut/disk/disk_darwin.go similarity index 100% rename from pkg/storage/donut/disk/disk_darwin.go rename to pkg/donut/disk/disk_darwin.go diff --git a/pkg/storage/donut/disk/disk_linux.go b/pkg/donut/disk/disk_linux.go similarity index 100% rename from pkg/storage/donut/disk/disk_linux.go rename to pkg/donut/disk/disk_linux.go diff --git a/pkg/donut/disk/disk_test.go b/pkg/donut/disk/disk_test.go new file mode 100644 index 000000000..18cb5145d --- /dev/null +++ b/pkg/donut/disk/disk_test.go @@ -0,0 +1,78 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliedisk. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package disk + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + . "github.com/minio/check" +) + +func TestDisk(t *testing.T) { TestingT(t) } + +type MyDiskSuite struct { + path string + disk Disk +} + +var _ = Suite(&MyDiskSuite{}) + +func (s *MyDiskSuite) SetUpSuite(c *C) { + path, err := ioutil.TempDir(os.TempDir(), "disk-") + c.Assert(err, IsNil) + s.path = path + d, err := New(s.path) + c.Assert(err, IsNil) + s.disk = d +} + +func (s *MyDiskSuite) TearDownSuite(c *C) { + os.RemoveAll(s.path) +} + +func (s *MyDiskSuite) TestDiskInfo(c *C) { + c.Assert(s.path, Equals, s.disk.GetPath()) + fsInfo := s.disk.GetFSInfo() + c.Assert(fsInfo["MountPoint"], Equals, s.disk.GetPath()) + c.Assert(fsInfo["FSType"], Not(Equals), "UNKNOWN") +} + +func (s *MyDiskSuite) TestDiskCreateDir(c *C) { + c.Assert(s.disk.MakeDir("hello"), IsNil) +} + +func (s *MyDiskSuite) TestDiskCreateFile(c *C) { + f, err := s.disk.CreateFile("hello1") + c.Assert(err, IsNil) + c.Assert(f.Name(), Equals, filepath.Join(s.path, "hello1")) + defer f.Close() +} + +func (s *MyDiskSuite) TestDiskOpenFile(c *C) { + f, err := s.disk.CreateFile("hello2") + c.Assert(err, IsNil) + c.Assert(f.Name(), Equals, filepath.Join(s.path, "hello2")) + defer f.Close() + + f, err = s.disk.OpenFile("hello2") + c.Assert(err, IsNil) + c.Assert(f.Name(), Equals, filepath.Join(s.path, "hello2")) + defer f.Close() +} diff --git a/pkg/donut/disk/errors.go b/pkg/donut/disk/errors.go new file mode 100644 index 000000000..5129665e9 --- /dev/null +++ b/pkg/donut/disk/errors.go @@ -0,0 +1,33 
@@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package disk + +// InvalidArgument invalid argument +type InvalidArgument struct{} + +func (e InvalidArgument) Error() string { + return "Invalid argument" +} + +// UnsupportedFilesystem unsupported filesystem type +type UnsupportedFilesystem struct { + Type string +} + +func (e UnsupportedFilesystem) Error() string { + return "Unsupported filesystem: " + e.Type +} diff --git a/pkg/storage/donut/donut-v1.go b/pkg/donut/donut-v1.go similarity index 100% rename from pkg/storage/donut/donut-v1.go rename to pkg/donut/donut-v1.go diff --git a/pkg/storage/donut/donut-v1_test.go b/pkg/donut/donut-v1_test.go similarity index 100% rename from pkg/storage/donut/donut-v1_test.go rename to pkg/donut/donut-v1_test.go diff --git a/pkg/storage/donut/donut-v2.go b/pkg/donut/donut-v2.go similarity index 99% rename from pkg/storage/donut/donut-v2.go rename to pkg/donut/donut-v2.go index 8cb9de52b..446596c89 100644 --- a/pkg/storage/donut/donut-v2.go +++ b/pkg/donut/donut-v2.go @@ -32,9 +32,9 @@ import ( "sync" "time" + "github.com/minio/minio/pkg/donut/trove" "github.com/minio/minio/pkg/iodine" "github.com/minio/minio/pkg/quick" - "github.com/minio/minio/pkg/storage/donut/trove" ) // total Number of buckets allowed @@ -397,9 +397,9 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s break } hash.Write(byteBuffer[0:length]) - 
donut.lock.Lock() + //donut.lock.Lock() ok := donut.objects.Append(objectKey, byteBuffer[0:length]) - donut.lock.Unlock() + //donut.lock.Unlock() if !ok { return ObjectMetadata{}, iodine.New(InternalError{}, nil) } @@ -431,10 +431,10 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s Size: int64(totalLength), } - donut.lock.Lock() + //donut.lock.Lock() storedBucket.objectMetadata[objectKey] = newObject donut.storedBuckets[bucket] = storedBucket - donut.lock.Unlock() + //donut.lock.Unlock() return newObject, nil } @@ -476,9 +476,9 @@ func (donut API) MakeBucket(bucketName, acl string) error { newBucket.bucketMetadata.Name = bucketName newBucket.bucketMetadata.Created = time.Now().UTC() newBucket.bucketMetadata.ACL = BucketACL(acl) - donut.lock.Lock() + //donut.lock.Lock() donut.storedBuckets[bucketName] = newBucket - donut.lock.Unlock() + //donut.lock.Unlock() return nil } diff --git a/pkg/storage/donut/donut-v2_test.go b/pkg/donut/donut-v2_test.go similarity index 100% rename from pkg/storage/donut/donut-v2_test.go rename to pkg/donut/donut-v2_test.go diff --git a/pkg/storage/donut/encoder.go b/pkg/donut/encoder.go similarity index 100% rename from pkg/storage/donut/encoder.go rename to pkg/donut/encoder.go diff --git a/pkg/storage/donut/errors.go b/pkg/donut/errors.go similarity index 100% rename from pkg/storage/donut/errors.go rename to pkg/donut/errors.go diff --git a/pkg/storage/donut/interfaces.go b/pkg/donut/interfaces.go similarity index 100% rename from pkg/storage/donut/interfaces.go rename to pkg/donut/interfaces.go diff --git a/pkg/storage/donut/management.go b/pkg/donut/management.go similarity index 98% rename from pkg/storage/donut/management.go rename to pkg/donut/management.go index c897524b2..3abbe45ca 100644 --- a/pkg/storage/donut/management.go +++ b/pkg/donut/management.go @@ -20,8 +20,8 @@ import ( "encoding/json" "path/filepath" + "github.com/minio/minio/pkg/donut/disk" "github.com/minio/minio/pkg/iodine" - 
"github.com/minio/minio/pkg/storage/donut/disk" ) // Heal - heal a donut and fix bad data blocks diff --git a/pkg/storage/donut/multipart.go b/pkg/donut/multipart.go similarity index 98% rename from pkg/storage/donut/multipart.go rename to pkg/donut/multipart.go index 1c76451c1..9946bcfce 100644 --- a/pkg/storage/donut/multipart.go +++ b/pkg/donut/multipart.go @@ -57,7 +57,7 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, er } donut.lock.RUnlock() - donut.lock.Lock() + //donut.lock.Lock() id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String()) uploadIDSum := sha512.Sum512(id) uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47] @@ -67,7 +67,7 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, er initiated: time.Now(), totalParts: 0, } - donut.lock.Unlock() + //donut.lock.Unlock() return uploadID, nil } @@ -172,9 +172,9 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont md5SumBytes := hash.Sum(nil) totalLength := int64(len(readBytes)) - donut.lock.Lock() + //donut.lock.Lock() donut.multiPartObjects.Set(partKey, readBytes) - donut.lock.Unlock() + //donut.lock.Unlock() // setting up for de-allocation readBytes = nil @@ -192,20 +192,20 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont Size: totalLength, } - donut.lock.Lock() + //donut.lock.Lock() storedBucket.partMetadata[partKey] = newPart multiPartSession := storedBucket.multiPartSession[key] multiPartSession.totalParts++ storedBucket.multiPartSession[key] = multiPartSession donut.storedBuckets[bucket] = storedBucket - donut.lock.Unlock() + //donut.lock.Unlock() return md5Sum, nil } func (donut API) cleanupMultipartSession(bucket, key, uploadID string) { - donut.lock.Lock() - defer donut.lock.Unlock() + // donut.lock.Lock() + // defer donut.lock.Unlock() delete(donut.storedBuckets[bucket].multiPartSession, key) } @@ -237,7 +237,7 @@ func (donut 
API) CompleteMultipartUpload(bucket, key, uploadID string, parts map } donut.lock.RUnlock() - donut.lock.Lock() + //donut.lock.Lock() var size int64 var fullObject bytes.Buffer for i := 1; i <= len(parts); i++ { @@ -264,7 +264,7 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, parts map object = nil go debug.FreeOSMemory() } - donut.lock.Unlock() + //donut.lock.Unlock() md5sumSlice := md5.Sum(fullObject.Bytes()) // this is needed for final verification inside CreateObject, do not convert this to hex diff --git a/pkg/storage/donut/node.go b/pkg/donut/node.go similarity index 97% rename from pkg/storage/donut/node.go rename to pkg/donut/node.go index a3f6d0231..3cab84667 100644 --- a/pkg/storage/donut/node.go +++ b/pkg/donut/node.go @@ -17,8 +17,8 @@ package donut import ( + "github.com/minio/minio/pkg/donut/disk" "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/donut/disk" ) // node struct internal diff --git a/pkg/storage/donut/rebalance.go b/pkg/donut/rebalance.go similarity index 96% rename from pkg/storage/donut/rebalance.go rename to pkg/donut/rebalance.go index 7db4cfedd..d37ce1370 100644 --- a/pkg/storage/donut/rebalance.go +++ b/pkg/donut/rebalance.go @@ -21,8 +21,8 @@ import ( "os" "strings" + "github.com/minio/minio/pkg/donut/disk" "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/donut/disk" ) // Rebalance - diff --git a/pkg/storage/donut/trove/trove.go b/pkg/donut/trove/trove.go similarity index 100% rename from pkg/storage/donut/trove/trove.go rename to pkg/donut/trove/trove.go diff --git a/pkg/storage/donut/trove/trove_test.go b/pkg/donut/trove/trove_test.go similarity index 100% rename from pkg/storage/donut/trove/trove_test.go rename to pkg/donut/trove/trove_test.go diff --git a/pkg/storage/donut/utils.go b/pkg/donut/utils.go similarity index 100% rename from pkg/storage/donut/utils.go rename to pkg/donut/utils.go diff --git a/pkg/server/api/api.go b/pkg/server/api/api.go index 
8306e7c0b..3a9fdd217 100644 --- a/pkg/server/api/api.go +++ b/pkg/server/api/api.go @@ -16,7 +16,7 @@ package api -import "github.com/minio/minio/pkg/storage/donut" +import "github.com/minio/minio/pkg/donut" // Operation container for individual operations read by Ticket Master type Operation struct { diff --git a/pkg/server/api/bucket-handlers.go b/pkg/server/api/bucket-handlers.go index cbdc6b550..e47eabbc0 100644 --- a/pkg/server/api/bucket-handlers.go +++ b/pkg/server/api/bucket-handlers.go @@ -20,8 +20,8 @@ import ( "net/http" "github.com/gorilla/mux" + "github.com/minio/minio/pkg/donut" "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/donut" "github.com/minio/minio/pkg/utils/log" ) @@ -83,6 +83,10 @@ func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Re } acceptsContentType := getContentType(req) + if !api.isValidOp(w, req, acceptsContentType) { + return + } + resources := getBucketMultipartResources(req.URL.Query()) if resources.MaxUploads == 0 { resources.MaxUploads = maxObjectList @@ -132,7 +136,6 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { } acceptsContentType := getContentType(req) - // verify if bucket allows this operation if !api.isValidOp(w, req, acceptsContentType) { return } @@ -160,6 +163,10 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) // write body w.Write(encodedSuccessResponse) + case donut.BucketNameInvalid: + writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) + case donut.BucketNotFound: + writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) case donut.ObjectNotFound: writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) case donut.ObjectNameInvalid: diff --git a/pkg/server/api/generic-handlers.go b/pkg/server/api/generic-handlers.go index 
5d7f3fb00..08f36f94d 100644 --- a/pkg/server/api/generic-handlers.go +++ b/pkg/server/api/generic-handlers.go @@ -199,7 +199,7 @@ func getConfigFile() string { if err := os.MkdirAll(confPath, 0700); err != nil { return "" } - return filepath.Join(confPath, "config.json") + return filepath.Join(confPath, "users.json") } // validate auth header handler ServeHTTP() wrapper diff --git a/pkg/server/api/headers.go b/pkg/server/api/headers.go index 01f59b492..6d4cb81d3 100644 --- a/pkg/server/api/headers.go +++ b/pkg/server/api/headers.go @@ -23,7 +23,7 @@ import ( "net/http" "strconv" - "github.com/minio/minio/pkg/storage/donut" + "github.com/minio/minio/pkg/donut" ) // No encoder interface exists, so we create one. diff --git a/pkg/server/api/object-handlers.go b/pkg/server/api/object-handlers.go index e09dfe64c..210fe0fcc 100644 --- a/pkg/server/api/object-handlers.go +++ b/pkg/server/api/object-handlers.go @@ -24,8 +24,8 @@ import ( "encoding/xml" "github.com/gorilla/mux" + "github.com/minio/minio/pkg/donut" "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/storage/donut" "github.com/minio/minio/pkg/utils/log" ) @@ -48,8 +48,6 @@ func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) { } acceptsContentType := getContentType(req) - - // verify if this operation is allowed if !api.isValidOp(w, req, acceptsContentType) { return } @@ -85,6 +83,10 @@ func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) { } } } + case donut.BucketNameInvalid: + writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) + case donut.BucketNotFound: + writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) case donut.ObjectNotFound: writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) case donut.ObjectNameInvalid: @@ -109,8 +111,6 @@ func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { } acceptsContentType := getContentType(req) - - // verify if this 
operation is allowed if !api.isValidOp(w, req, acceptsContentType) { return } @@ -125,6 +125,14 @@ func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { case nil: setObjectHeaders(w, metadata) w.WriteHeader(http.StatusOK) + case donut.BucketNameInvalid: + error := getErrorCode(InvalidBucketName) + w.Header().Set("Server", "Minio") + w.WriteHeader(error.HTTPStatusCode) + case donut.BucketNotFound: + error := getErrorCode(NoSuchBucket) + w.Header().Set("Server", "Minio") + w.WriteHeader(error.HTTPStatusCode) case donut.ObjectNotFound: error := getErrorCode(NoSuchKey) w.Header().Set("Server", "Minio") @@ -155,7 +163,6 @@ func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) { } acceptsContentType := getContentType(req) - // verify if this operation is allowed if !api.isValidOp(w, req, acceptsContentType) { return } @@ -203,6 +210,10 @@ func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) { case nil: w.Header().Set("ETag", metadata.MD5Sum) writeSuccessResponse(w, acceptsContentType) + case donut.BucketNotFound: + writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) + case donut.BucketNameInvalid: + writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) case donut.ObjectExists: writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path) case donut.BadDigest: @@ -231,8 +242,6 @@ func (api Minio) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Requ } acceptsContentType := getContentType(req) - - // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { return } @@ -278,8 +287,6 @@ func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) } acceptsContentType := getContentType(req) - - // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { return } @@ -355,8 +362,6 @@ func (api Minio) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Re } 
acceptsContentType := getContentType(req) - - // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { return } @@ -392,8 +397,6 @@ func (api Minio) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request } acceptsContentType := getContentType(req) - - // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { return } @@ -438,8 +441,6 @@ func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http } acceptsContentType := getContentType(req) - - // handle ACL's here at bucket level if !api.isValidOp(w, req, acceptsContentType) { return } diff --git a/pkg/server/api/resources.go b/pkg/server/api/resources.go index dea992b4e..8b26721dc 100644 --- a/pkg/server/api/resources.go +++ b/pkg/server/api/resources.go @@ -20,7 +20,7 @@ import ( "net/url" "strconv" - "github.com/minio/minio/pkg/storage/donut" + "github.com/minio/minio/pkg/donut" ) // parse bucket url queries diff --git a/pkg/server/api/response.go b/pkg/server/api/response.go index 5f6de4435..9d2a63f6e 100644 --- a/pkg/server/api/response.go +++ b/pkg/server/api/response.go @@ -20,7 +20,7 @@ import ( "net/http" "sort" - "github.com/minio/minio/pkg/storage/donut" + "github.com/minio/minio/pkg/donut" ) // Reply date format diff --git a/pkg/server/api_test.go b/pkg/server/api_test.go new file mode 100644 index 000000000..cecd07f5d --- /dev/null +++ b/pkg/server/api_test.go @@ -0,0 +1,948 @@ +/* + * Minimalist Object Storage, (C) 2014 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package server + +import ( + "bytes" + "io" + "io/ioutil" + "strconv" + "strings" + "testing" + "time" + + "encoding/xml" + "net/http" + "net/http/httptest" + + . "github.com/minio/check" + "github.com/minio/minio/pkg/server/api" +) + +func Test(t *testing.T) { TestingT(t) } + +type MySuite struct{} + +var _ = Suite(&MySuite{}) + +var testServer *httptest.Server + +func (s *MySuite) SetUpSuite(c *C) { + httpHandler, minioAPI := getAPIHandler(api.Config{RateLimit: 16}) + go startTM(minioAPI) + testServer = httptest.NewServer(httpHandler) +} + +func (s *MySuite) TearDownSuite(c *C) { + testServer.Close() +} + +func setDummyAuthHeader(req *http.Request) { + authDummy := "AWS4-HMAC-SHA256 Credential=AC5NH40NQLTL4DUMMY/20130524/us-east-1/s3/aws4_request, SignedHeaders=date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=98ad721746da40c64f1a55b78f14c238d841ea1380cd77a1b5971af0ece108bd" + req.Header.Set("Authorization", authDummy) + req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) +} + +func (s *MySuite) TestNonExistantBucket(c *C) { + request, err := http.NewRequest("HEAD", testServer.URL+"/nonexistantbucket", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusNotFound) +} + +func (s *MySuite) TestEmptyObject(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/emptyobject", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/emptyobject/object", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = 
client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/emptyobject/object", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + var buffer bytes.Buffer + responseBody, err := ioutil.ReadAll(response.Body) + c.Assert(err, IsNil) + c.Assert(true, Equals, bytes.Equal(responseBody, buffer.Bytes())) +} + +func (s *MySuite) TestBucket(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/bucket", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("HEAD", testServer.URL+"/bucket", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) +} + +/* +func (s *MySuite) TestObject(c *C) { + buffer := bytes.NewBufferString("hello world") + request, err := http.NewRequest("PUT", testServer.URL+"/testobject", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/testobject/object", buffer) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/testobject/object", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, 
IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + responseBody, err := ioutil.ReadAll(response.Body) + c.Assert(err, IsNil) + c.Assert(responseBody, DeepEquals, []byte("hello world")) + +} +*/ + +func (s *MySuite) TestMultipleObjects(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/multipleobjects", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/multipleobjects/object", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, "NoSuchKey", "The specified key does not exist.", http.StatusNotFound) + + //// test object 1 + + // get object + buffer1 := bytes.NewBufferString("hello one") + request, err = http.NewRequest("PUT", testServer.URL+"/multipleobjects/object1", buffer1) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/multipleobjects/object1", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + // verify response data + responseBody, err := ioutil.ReadAll(response.Body) + c.Assert(err, IsNil) + c.Assert(true, Equals, bytes.Equal(responseBody, []byte("hello one"))) + + buffer2 := bytes.NewBufferString("hello two") + request, err = http.NewRequest("PUT", testServer.URL+"/multipleobjects/object2", buffer2) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + 
c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/multipleobjects/object2", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + // verify response data + responseBody, err = ioutil.ReadAll(response.Body) + c.Assert(err, IsNil) + c.Assert(true, Equals, bytes.Equal(responseBody, []byte("hello two"))) + + buffer3 := bytes.NewBufferString("hello three") + request, err = http.NewRequest("PUT", testServer.URL+"/multipleobjects/object3", buffer3) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/multipleobjects/object3", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + // verify object + responseBody, err = ioutil.ReadAll(response.Body) + c.Assert(err, IsNil) + c.Assert(true, Equals, bytes.Equal(responseBody, []byte("hello three"))) +} + +func (s *MySuite) TestNotImplemented(c *C) { + request, err := http.NewRequest("GET", testServer.URL+"/bucket/object?policy", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusNotImplemented) + +} + +func (s *MySuite) TestHeader(c *C) { + request, err := http.NewRequest("GET", testServer.URL+"/bucket/object", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + + verifyError(c, response, "NoSuchKey", "The specified key does not exist.", 
http.StatusNotFound) +} + +func (s *MySuite) TestPutBucket(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/put-bucket", nil) + c.Assert(err, IsNil) + request.Header.Add("x-amz-acl", "private") + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) +} + +func (s *MySuite) TestPutObject(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/put-object", nil) + c.Assert(err, IsNil) + request.Header.Add("x-amz-acl", "private") + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/put-object/object", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) +} + +func (s *MySuite) TestListBuckets(c *C) { + request, err := http.NewRequest("GET", testServer.URL+"/", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + _, err = readListBucket(response.Body) + c.Assert(err, IsNil) +} + +func readListBucket(reader io.Reader) (api.ListBucketsResponse, error) { + var results api.ListBucketsResponse + decoder := xml.NewDecoder(reader) + err := decoder.Decode(&results) + return results, err +} + +func (s *MySuite) TestNotBeAbleToCreateObjectInNonexistantBucket(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/innonexistantbucket/object", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, 
"NoSuchBucket", "The specified bucket does not exist.", http.StatusNotFound) +} + +func (s *MySuite) TestHeadOnObject(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/headonobject", nil) + c.Assert(err, IsNil) + request.Header.Add("x-amz-acl", "private") + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/headonobject/object1", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("HEAD", testServer.URL+"/headonobject/object1", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) +} + +func (s *MySuite) TestHeadOnBucket(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/headonbucket", nil) + c.Assert(err, IsNil) + request.Header.Add("x-amz-acl", "private") + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("HEAD", testServer.URL+"/headonbucket", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) +} + +func (s *MySuite) TestDateFormat(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/dateformat", nil) + c.Assert(err, IsNil) + request.Header.Add("x-amz-acl", "private") + setDummyAuthHeader(request) + + // set an invalid date + request.Header.Set("Date", "asfasdfadf") + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + 
verifyError(c, response, "RequestTimeTooSkewed", + "The difference between the request time and the server's time is too large.", http.StatusForbidden) + + request.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) + setDummyAuthHeader(request) + response, err = client.Do(request) + c.Assert(response.StatusCode, Equals, http.StatusOK) +} + +func verifyHeaders(c *C, header http.Header, date time.Time, size int, contentType string, etag string) { + // Verify date + c.Assert(header.Get("Last-Modified"), Equals, date.Format(http.TimeFormat)) + + // verify size + c.Assert(header.Get("Content-Length"), Equals, strconv.Itoa(size)) + + // verify content type + c.Assert(header.Get("Content-Type"), Equals, contentType) + + // verify etag + c.Assert(header.Get("Etag"), Equals, "\""+etag+"\"") +} + +func (s *MySuite) TestXMLNameNotInBucketListJson(c *C) { + request, err := http.NewRequest("GET", testServer.URL+"/", nil) + c.Assert(err, IsNil) + request.Header.Add("Accept", "application/json") + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + byteResults, err := ioutil.ReadAll(response.Body) + c.Assert(err, IsNil) + c.Assert(strings.Contains(string(byteResults), "XML"), Equals, false) +} + +func (s *MySuite) TestXMLNameNotInObjectListJson(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/xmlnamenotinobjectlistjson", nil) + c.Assert(err, IsNil) + request.Header.Add("Accept", "application/json") + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/xmlnamenotinobjectlistjson", nil) + c.Assert(err, IsNil) + request.Header.Add("Accept", "application/json") + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + 
c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + byteResults, err := ioutil.ReadAll(response.Body) + c.Assert(err, IsNil) + c.Assert(strings.Contains(string(byteResults), "XML"), Equals, false) +} + +func (s *MySuite) TestContentTypePersists(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/contenttype-persists", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/contenttype-persists/one", bytes.NewBufferString("hello world")) + delete(request.Header, "Content-Type") + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("HEAD", testServer.URL+"/contenttype-persists/one", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream") + + request, err = http.NewRequest("GET", testServer.URL+"/contenttype-persists/one", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream") + + request, err = http.NewRequest("PUT", testServer.URL+"/contenttype-persists/two", bytes.NewBufferString("hello world")) + delete(request.Header, "Content-Type") + request.Header.Add("Content-Type", "application/json") + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = 
http.NewRequest("HEAD", testServer.URL+"/contenttype-persists/two", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream") + + request, err = http.NewRequest("GET", testServer.URL+"/contenttype-persists/two", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream") +} + +func (s *MySuite) TestPartialContent(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/partial-content", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/partial-content/bar", bytes.NewBufferString("Hello World")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + // prepare request + request, err = http.NewRequest("GET", testServer.URL+"/partial-content/bar", nil) + c.Assert(err, IsNil) + request.Header.Add("Accept", "application/json") + request.Header.Add("Range", "bytes=6-7") + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusPartialContent) + partialObject, err := ioutil.ReadAll(response.Body) + c.Assert(err, IsNil) + + c.Assert(string(partialObject), Equals, "Wo") +} + +func (s *MySuite) TestListObjectsHandlerErrors(c *C) { + request, err := http.NewRequest("GET", testServer.URL+"/objecthandlererrors-.", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := 
client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, "InvalidBucketName", "The specified bucket is not valid.", http.StatusBadRequest) + + request, err = http.NewRequest("GET", testServer.URL+"/objecthandlererrors", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, "NoSuchBucket", "The specified bucket does not exist.", http.StatusNotFound) +} + +func (s *MySuite) TestPutBucketErrors(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/putbucket-.", nil) + c.Assert(err, IsNil) + request.Header.Add("x-amz-acl", "private") + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, "InvalidBucketName", "The specified bucket is not valid.", http.StatusBadRequest) + + request, err = http.NewRequest("PUT", testServer.URL+"/putbucket", nil) + c.Assert(err, IsNil) + request.Header.Add("x-amz-acl", "private") + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/putbucket", nil) + c.Assert(err, IsNil) + request.Header.Add("x-amz-acl", "private") + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, "BucketAlreadyExists", "The requested bucket name is not available.", http.StatusConflict) + + request, err = http.NewRequest("PUT", testServer.URL+"/putbucket?acl", nil) + c.Assert(err, IsNil) + request.Header.Add("x-amz-acl", "unknown") + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, "NotImplemented", "A header you provided implies functionality that is not implemented.", http.StatusNotImplemented) +} + +func (s *MySuite) 
TestGetObjectErrors(c *C) { + request, err := http.NewRequest("GET", testServer.URL+"/getobjecterrors", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, "NoSuchBucket", "The specified bucket does not exist.", http.StatusNotFound) + + request, err = http.NewRequest("PUT", testServer.URL+"/getobjecterrors", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/getobjecterrors/bar", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, "NoSuchKey", "The specified key does not exist.", http.StatusNotFound) + + request, err = http.NewRequest("GET", testServer.URL+"/getobjecterrors-./bar", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, "InvalidBucketName", "The specified bucket is not valid.", http.StatusBadRequest) + +} + +func (s *MySuite) TestGetObjectRangeErrors(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/getobjectrangeerrors", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/getobjectrangeerrors/bar", bytes.NewBufferString("Hello World")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", 
testServer.URL+"/getobjectrangeerrors/bar", nil) + request.Header.Add("Range", "bytes=7-6") + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + verifyError(c, response, "InvalidRange", "The requested range cannot be satisfied.", http.StatusRequestedRangeNotSatisfiable) +} + +func (s *MySuite) TestObjectMultipartAbort(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/objectmultipartabort", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, 200) + + request, err = http.NewRequest("POST", testServer.URL+"/objectmultipartabort/object?uploads", bytes.NewBufferString("")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + decoder := xml.NewDecoder(response.Body) + newResponse := &api.InitiateMultipartUploadResult{} + + err = decoder.Decode(newResponse) + c.Assert(err, IsNil) + c.Assert(len(newResponse.UploadID) > 0, Equals, true) + uploadID := newResponse.UploadID + + request, err = http.NewRequest("PUT", testServer.URL+"/objectmultipartabort/object?uploadId="+uploadID+"&partNumber=1", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response1, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response1.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/objectmultipartabort/object?uploadId="+uploadID+"&partNumber=2", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response2, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response2.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("DELETE", testServer.URL+"/objectmultipartabort/object?uploadId="+uploadID, nil) 
+ c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response3, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response3.StatusCode, Equals, http.StatusNoContent) +} + +func (s *MySuite) TestBucketMultipartList(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/bucketmultipartlist", bytes.NewBufferString("")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, 200) + + request, err = http.NewRequest("POST", testServer.URL+"/bucketmultipartlist/object?uploads", bytes.NewBufferString("")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + decoder := xml.NewDecoder(response.Body) + newResponse := &api.InitiateMultipartUploadResult{} + + err = decoder.Decode(newResponse) + c.Assert(err, IsNil) + c.Assert(len(newResponse.UploadID) > 0, Equals, true) + uploadID := newResponse.UploadID + + request, err = http.NewRequest("PUT", testServer.URL+"/bucketmultipartlist/object?uploadId="+uploadID+"&partNumber=1", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response1, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response1.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/bucketmultipartlist/object?uploadId="+uploadID+"&partNumber=2", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response2, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response2.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/bucketmultipartlist?uploads", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response3, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response3.StatusCode, Equals, http.StatusOK) + + decoder 
= xml.NewDecoder(response3.Body) + newResponse3 := &api.ListMultipartUploadsResponse{} + err = decoder.Decode(newResponse3) + c.Assert(err, IsNil) + c.Assert(newResponse3.Bucket, Equals, "bucketmultipartlist") +} + +func (s *MySuite) TestObjectMultipartList(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/objectmultipartlist", bytes.NewBufferString("")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, 200) + + request, err = http.NewRequest("POST", testServer.URL+"/objectmultipartlist/object?uploads", bytes.NewBufferString("")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + decoder := xml.NewDecoder(response.Body) + newResponse := &api.InitiateMultipartUploadResult{} + + err = decoder.Decode(newResponse) + c.Assert(err, IsNil) + c.Assert(len(newResponse.UploadID) > 0, Equals, true) + uploadID := newResponse.UploadID + + request, err = http.NewRequest("PUT", testServer.URL+"/objectmultipartlist/object?uploadId="+uploadID+"&partNumber=1", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response1, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response1.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/objectmultipartlist/object?uploadId="+uploadID+"&partNumber=2", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response2, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response2.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/objectmultipartlist/object?uploadId="+uploadID, nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response3, err := client.Do(request) + c.Assert(err, IsNil) + 
c.Assert(response3.StatusCode, Equals, http.StatusOK) + +} + +func (s *MySuite) TestObjectMultipart(c *C) { + request, err := http.NewRequest("PUT", testServer.URL+"/objectmultiparts", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client := http.Client{} + response, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, 200) + + request, err = http.NewRequest("POST", testServer.URL+"/objectmultiparts/object?uploads", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + decoder := xml.NewDecoder(response.Body) + newResponse := &api.InitiateMultipartUploadResult{} + + err = decoder.Decode(newResponse) + c.Assert(err, IsNil) + c.Assert(len(newResponse.UploadID) > 0, Equals, true) + uploadID := newResponse.UploadID + + request, err = http.NewRequest("PUT", testServer.URL+"/objectmultiparts/object?uploadId="+uploadID+"&partNumber=1", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response1, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response1.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("PUT", testServer.URL+"/objectmultiparts/object?uploadId="+uploadID+"&partNumber=2", bytes.NewBufferString("hello world")) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + client = http.Client{} + response2, err := client.Do(request) + c.Assert(err, IsNil) + c.Assert(response2.StatusCode, Equals, http.StatusOK) + + // complete multipart upload + completeUploads := &api.CompleteMultipartUpload{ + Part: []api.Part{ + { + PartNumber: 1, + ETag: response1.Header.Get("ETag"), + }, + { + PartNumber: 2, + ETag: response2.Header.Get("ETag"), + }, + }, + } + + var completeBuffer bytes.Buffer + encoder := xml.NewEncoder(&completeBuffer) + encoder.Encode(completeUploads) + + 
request, err = http.NewRequest("POST", testServer.URL+"/objectmultiparts/object?uploadId="+uploadID, &completeBuffer) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = http.NewRequest("GET", testServer.URL+"/objectmultiparts/object", nil) + c.Assert(err, IsNil) + setDummyAuthHeader(request) + + response, err = client.Do(request) + c.Assert(err, IsNil) + c.Assert(response.StatusCode, Equals, http.StatusOK) + object, err := ioutil.ReadAll(response.Body) + c.Assert(err, IsNil) + c.Assert(string(object), Equals, ("hello worldhello world")) +} + +func verifyError(c *C, response *http.Response, code, description string, statusCode int) { + data, err := ioutil.ReadAll(response.Body) + c.Assert(err, IsNil) + errorResponse := api.ErrorResponse{} + err = xml.Unmarshal(data, &errorResponse) + c.Assert(err, IsNil) + c.Assert(errorResponse.Code, Equals, code) + c.Assert(errorResponse.Message, Equals, description) + c.Assert(response.StatusCode, Equals, statusCode) +} diff --git a/pkg/storage/donut/disk/errors.go b/pkg/storage/donut/disk/errors.go deleted file mode 100644 index e195a8ded..000000000 --- a/pkg/storage/donut/disk/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -package disk - -// InvalidArgument invalid argument -type InvalidArgument struct{} - -func (e InvalidArgument) Error() string { - return "Invalid argument" -} - -// UnsupportedFilesystem unsupported filesystem type -type UnsupportedFilesystem struct { - Type string -} - -func (e UnsupportedFilesystem) Error() string { - return "Unsupported filesystem: " + e.Type -} From 30fc14e7036d32416d5ec1f1c6ee13b8294ee2ac Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Fri, 3 Jul 2015 15:17:44 -0700 Subject: [PATCH 19/19] Restructure codebase move crypto, checksum to top-level, move ``split`` into donut, move crypto/keys into api/auth --- pkg/{utils => }/crypto/md5/md5.go | 0 pkg/{utils 
=> }/crypto/md5/md5_test.go | 2 +- pkg/{utils => }/crypto/sha1/.gitignore | 0 pkg/{utils/checksum/crc32c => crypto/sha1}/LICENSE | 0 pkg/{utils => }/crypto/sha1/sha1_darwin.go | 0 pkg/{utils => }/crypto/sha1/sha1_linux.S | 0 pkg/{utils => }/crypto/sha1/sha1_linux.go | 0 pkg/{utils => }/crypto/sha1/sha1_sse3_amd64.asm | 0 pkg/{utils => }/crypto/sha1/sha1_test.go | 0 pkg/{utils => }/crypto/sha1/sha1_windows.go | 0 pkg/{utils => }/crypto/sha1/sha1_yasm_darwin.go | 0 pkg/{utils => }/crypto/sha1/sha1_yasm_linux.go | 0 pkg/{utils => }/crypto/sha1/sha1_yasm_windows.go | 0 pkg/{utils => }/crypto/sha1/sha1block_darwin.go | 0 pkg/{utils => }/crypto/sha1/sha1block_generic.go | 0 pkg/{utils => }/crypto/sha1/sha1block_linux.go | 0 pkg/{utils => }/crypto/sha1/sha1block_windows.go | 0 pkg/{utils/crypto/sha1 => crypto/sha256}/LICENSE | 0 pkg/{utils => }/crypto/sha256/sha256-avx-asm_linux.S | 0 pkg/{utils => }/crypto/sha256/sha256-avx2-asm_linux.S | 0 pkg/{utils => }/crypto/sha256/sha256-ssse3-asm_linux.S | 0 pkg/{utils => }/crypto/sha256/sha256_darwin.go | 0 pkg/{utils => }/crypto/sha256/sha256_linux.go | 0 pkg/{utils => }/crypto/sha256/sha256_linux_test.go | 0 pkg/{utils => }/crypto/sha256/sha256_windows.go | 0 pkg/{utils => }/crypto/sha256/sha256block_linux.go | 0 pkg/{utils/crypto/sha256 => crypto/sha512}/LICENSE | 0 pkg/{utils => }/crypto/sha512/sha512-avx-asm_linux.S | 0 pkg/{utils => }/crypto/sha512/sha512-avx2-asm_linux.S | 0 pkg/{utils => }/crypto/sha512/sha512-ssse3-asm_linux.S | 0 pkg/{utils => }/crypto/sha512/sha512_darwin.go | 0 pkg/{utils => }/crypto/sha512/sha512_linux.go | 0 pkg/{utils => }/crypto/sha512/sha512_linux_test.go | 0 pkg/{utils => }/crypto/sha512/sha512_windows.go | 0 pkg/{utils => }/crypto/sha512/sha512block_linux.go | 0 pkg/donut/bucket.go | 4 ++-- pkg/{utils => donut}/split/.gitignore | 0 pkg/{utils => donut}/split/split.go | 0 pkg/{utils => donut}/split/split_test.go | 2 +- pkg/{utils => donut}/split/testdata/TESTFILE | 0 pkg/{utils/crypto/sha512 => 
hash/crc32c}/LICENSE | 0 pkg/{utils/checksum => hash}/crc32c/crc32c_darwin.go | 0 pkg/{utils/checksum => hash}/crc32c/crc32c_intel_linux.go | 0 pkg/{utils/checksum => hash}/crc32c/crc32c_linux.S | 0 pkg/{utils/checksum => hash}/crc32c/crc32c_linux.go | 0 pkg/{utils/checksum => hash}/crc32c/crc32c_linux_test.go | 0 pkg/{utils/checksum => hash}/crc32c/crc32c_windows.go | 0 pkg/{utils/checksum => hash}/crc32c/doc.go | 0 pkg/{utils/crypto => server/api/auth}/keys/common.go | 0 pkg/{utils/crypto => server/api/auth}/keys/keys.go | 0 pkg/{utils/crypto => server/api/auth}/keys/keys_test.go | 2 +- pkg/server/api/generic-handlers.go | 2 +- 52 files changed, 6 insertions(+), 6 deletions(-) rename pkg/{utils => }/crypto/md5/md5.go (100%) rename pkg/{utils => }/crypto/md5/md5_test.go (90%) rename pkg/{utils => }/crypto/sha1/.gitignore (100%) rename pkg/{utils/checksum/crc32c => crypto/sha1}/LICENSE (100%) rename pkg/{utils => }/crypto/sha1/sha1_darwin.go (100%) rename pkg/{utils => }/crypto/sha1/sha1_linux.S (100%) rename pkg/{utils => }/crypto/sha1/sha1_linux.go (100%) rename pkg/{utils => }/crypto/sha1/sha1_sse3_amd64.asm (100%) rename pkg/{utils => }/crypto/sha1/sha1_test.go (100%) rename pkg/{utils => }/crypto/sha1/sha1_windows.go (100%) rename pkg/{utils => }/crypto/sha1/sha1_yasm_darwin.go (100%) rename pkg/{utils => }/crypto/sha1/sha1_yasm_linux.go (100%) rename pkg/{utils => }/crypto/sha1/sha1_yasm_windows.go (100%) rename pkg/{utils => }/crypto/sha1/sha1block_darwin.go (100%) rename pkg/{utils => }/crypto/sha1/sha1block_generic.go (100%) rename pkg/{utils => }/crypto/sha1/sha1block_linux.go (100%) rename pkg/{utils => }/crypto/sha1/sha1block_windows.go (100%) rename pkg/{utils/crypto/sha1 => crypto/sha256}/LICENSE (100%) rename pkg/{utils => }/crypto/sha256/sha256-avx-asm_linux.S (100%) rename pkg/{utils => }/crypto/sha256/sha256-avx2-asm_linux.S (100%) rename pkg/{utils => }/crypto/sha256/sha256-ssse3-asm_linux.S (100%) rename pkg/{utils => 
}/crypto/sha256/sha256_darwin.go (100%) rename pkg/{utils => }/crypto/sha256/sha256_linux.go (100%) rename pkg/{utils => }/crypto/sha256/sha256_linux_test.go (100%) rename pkg/{utils => }/crypto/sha256/sha256_windows.go (100%) rename pkg/{utils => }/crypto/sha256/sha256block_linux.go (100%) rename pkg/{utils/crypto/sha256 => crypto/sha512}/LICENSE (100%) rename pkg/{utils => }/crypto/sha512/sha512-avx-asm_linux.S (100%) rename pkg/{utils => }/crypto/sha512/sha512-avx2-asm_linux.S (100%) rename pkg/{utils => }/crypto/sha512/sha512-ssse3-asm_linux.S (100%) rename pkg/{utils => }/crypto/sha512/sha512_darwin.go (100%) rename pkg/{utils => }/crypto/sha512/sha512_linux.go (100%) rename pkg/{utils => }/crypto/sha512/sha512_linux_test.go (100%) rename pkg/{utils => }/crypto/sha512/sha512_windows.go (100%) rename pkg/{utils => }/crypto/sha512/sha512block_linux.go (100%) rename pkg/{utils => donut}/split/.gitignore (100%) rename pkg/{utils => donut}/split/split.go (100%) rename pkg/{utils => donut}/split/split_test.go (97%) rename pkg/{utils => donut}/split/testdata/TESTFILE (100%) rename pkg/{utils/crypto/sha512 => hash/crc32c}/LICENSE (100%) rename pkg/{utils/checksum => hash}/crc32c/crc32c_darwin.go (100%) rename pkg/{utils/checksum => hash}/crc32c/crc32c_intel_linux.go (100%) rename pkg/{utils/checksum => hash}/crc32c/crc32c_linux.S (100%) rename pkg/{utils/checksum => hash}/crc32c/crc32c_linux.go (100%) rename pkg/{utils/checksum => hash}/crc32c/crc32c_linux_test.go (100%) rename pkg/{utils/checksum => hash}/crc32c/crc32c_windows.go (100%) rename pkg/{utils/checksum => hash}/crc32c/doc.go (100%) rename pkg/{utils/crypto => server/api/auth}/keys/common.go (100%) rename pkg/{utils/crypto => server/api/auth}/keys/keys.go (100%) rename pkg/{utils/crypto => server/api/auth}/keys/keys_test.go (95%) diff --git a/pkg/utils/crypto/md5/md5.go b/pkg/crypto/md5/md5.go similarity index 100% rename from pkg/utils/crypto/md5/md5.go rename to pkg/crypto/md5/md5.go diff --git 
a/pkg/utils/crypto/md5/md5_test.go b/pkg/crypto/md5/md5_test.go similarity index 90% rename from pkg/utils/crypto/md5/md5_test.go rename to pkg/crypto/md5/md5_test.go index 1c0e14a6b..c39fdd6c6 100644 --- a/pkg/utils/crypto/md5/md5_test.go +++ b/pkg/crypto/md5/md5_test.go @@ -6,7 +6,7 @@ import ( "testing" . "github.com/minio/check" - "github.com/minio/minio/pkg/utils/crypto/md5" + "github.com/minio/minio/pkg/crypto/md5" ) func Test(t *testing.T) { TestingT(t) } diff --git a/pkg/utils/crypto/sha1/.gitignore b/pkg/crypto/sha1/.gitignore similarity index 100% rename from pkg/utils/crypto/sha1/.gitignore rename to pkg/crypto/sha1/.gitignore diff --git a/pkg/utils/checksum/crc32c/LICENSE b/pkg/crypto/sha1/LICENSE similarity index 100% rename from pkg/utils/checksum/crc32c/LICENSE rename to pkg/crypto/sha1/LICENSE diff --git a/pkg/utils/crypto/sha1/sha1_darwin.go b/pkg/crypto/sha1/sha1_darwin.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1_darwin.go rename to pkg/crypto/sha1/sha1_darwin.go diff --git a/pkg/utils/crypto/sha1/sha1_linux.S b/pkg/crypto/sha1/sha1_linux.S similarity index 100% rename from pkg/utils/crypto/sha1/sha1_linux.S rename to pkg/crypto/sha1/sha1_linux.S diff --git a/pkg/utils/crypto/sha1/sha1_linux.go b/pkg/crypto/sha1/sha1_linux.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1_linux.go rename to pkg/crypto/sha1/sha1_linux.go diff --git a/pkg/utils/crypto/sha1/sha1_sse3_amd64.asm b/pkg/crypto/sha1/sha1_sse3_amd64.asm similarity index 100% rename from pkg/utils/crypto/sha1/sha1_sse3_amd64.asm rename to pkg/crypto/sha1/sha1_sse3_amd64.asm diff --git a/pkg/utils/crypto/sha1/sha1_test.go b/pkg/crypto/sha1/sha1_test.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1_test.go rename to pkg/crypto/sha1/sha1_test.go diff --git a/pkg/utils/crypto/sha1/sha1_windows.go b/pkg/crypto/sha1/sha1_windows.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1_windows.go rename to pkg/crypto/sha1/sha1_windows.go diff 
--git a/pkg/utils/crypto/sha1/sha1_yasm_darwin.go b/pkg/crypto/sha1/sha1_yasm_darwin.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1_yasm_darwin.go rename to pkg/crypto/sha1/sha1_yasm_darwin.go diff --git a/pkg/utils/crypto/sha1/sha1_yasm_linux.go b/pkg/crypto/sha1/sha1_yasm_linux.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1_yasm_linux.go rename to pkg/crypto/sha1/sha1_yasm_linux.go diff --git a/pkg/utils/crypto/sha1/sha1_yasm_windows.go b/pkg/crypto/sha1/sha1_yasm_windows.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1_yasm_windows.go rename to pkg/crypto/sha1/sha1_yasm_windows.go diff --git a/pkg/utils/crypto/sha1/sha1block_darwin.go b/pkg/crypto/sha1/sha1block_darwin.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1block_darwin.go rename to pkg/crypto/sha1/sha1block_darwin.go diff --git a/pkg/utils/crypto/sha1/sha1block_generic.go b/pkg/crypto/sha1/sha1block_generic.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1block_generic.go rename to pkg/crypto/sha1/sha1block_generic.go diff --git a/pkg/utils/crypto/sha1/sha1block_linux.go b/pkg/crypto/sha1/sha1block_linux.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1block_linux.go rename to pkg/crypto/sha1/sha1block_linux.go diff --git a/pkg/utils/crypto/sha1/sha1block_windows.go b/pkg/crypto/sha1/sha1block_windows.go similarity index 100% rename from pkg/utils/crypto/sha1/sha1block_windows.go rename to pkg/crypto/sha1/sha1block_windows.go diff --git a/pkg/utils/crypto/sha1/LICENSE b/pkg/crypto/sha256/LICENSE similarity index 100% rename from pkg/utils/crypto/sha1/LICENSE rename to pkg/crypto/sha256/LICENSE diff --git a/pkg/utils/crypto/sha256/sha256-avx-asm_linux.S b/pkg/crypto/sha256/sha256-avx-asm_linux.S similarity index 100% rename from pkg/utils/crypto/sha256/sha256-avx-asm_linux.S rename to pkg/crypto/sha256/sha256-avx-asm_linux.S diff --git a/pkg/utils/crypto/sha256/sha256-avx2-asm_linux.S 
b/pkg/crypto/sha256/sha256-avx2-asm_linux.S similarity index 100% rename from pkg/utils/crypto/sha256/sha256-avx2-asm_linux.S rename to pkg/crypto/sha256/sha256-avx2-asm_linux.S diff --git a/pkg/utils/crypto/sha256/sha256-ssse3-asm_linux.S b/pkg/crypto/sha256/sha256-ssse3-asm_linux.S similarity index 100% rename from pkg/utils/crypto/sha256/sha256-ssse3-asm_linux.S rename to pkg/crypto/sha256/sha256-ssse3-asm_linux.S diff --git a/pkg/utils/crypto/sha256/sha256_darwin.go b/pkg/crypto/sha256/sha256_darwin.go similarity index 100% rename from pkg/utils/crypto/sha256/sha256_darwin.go rename to pkg/crypto/sha256/sha256_darwin.go diff --git a/pkg/utils/crypto/sha256/sha256_linux.go b/pkg/crypto/sha256/sha256_linux.go similarity index 100% rename from pkg/utils/crypto/sha256/sha256_linux.go rename to pkg/crypto/sha256/sha256_linux.go diff --git a/pkg/utils/crypto/sha256/sha256_linux_test.go b/pkg/crypto/sha256/sha256_linux_test.go similarity index 100% rename from pkg/utils/crypto/sha256/sha256_linux_test.go rename to pkg/crypto/sha256/sha256_linux_test.go diff --git a/pkg/utils/crypto/sha256/sha256_windows.go b/pkg/crypto/sha256/sha256_windows.go similarity index 100% rename from pkg/utils/crypto/sha256/sha256_windows.go rename to pkg/crypto/sha256/sha256_windows.go diff --git a/pkg/utils/crypto/sha256/sha256block_linux.go b/pkg/crypto/sha256/sha256block_linux.go similarity index 100% rename from pkg/utils/crypto/sha256/sha256block_linux.go rename to pkg/crypto/sha256/sha256block_linux.go diff --git a/pkg/utils/crypto/sha256/LICENSE b/pkg/crypto/sha512/LICENSE similarity index 100% rename from pkg/utils/crypto/sha256/LICENSE rename to pkg/crypto/sha512/LICENSE diff --git a/pkg/utils/crypto/sha512/sha512-avx-asm_linux.S b/pkg/crypto/sha512/sha512-avx-asm_linux.S similarity index 100% rename from pkg/utils/crypto/sha512/sha512-avx-asm_linux.S rename to pkg/crypto/sha512/sha512-avx-asm_linux.S diff --git a/pkg/utils/crypto/sha512/sha512-avx2-asm_linux.S 
b/pkg/crypto/sha512/sha512-avx2-asm_linux.S similarity index 100% rename from pkg/utils/crypto/sha512/sha512-avx2-asm_linux.S rename to pkg/crypto/sha512/sha512-avx2-asm_linux.S diff --git a/pkg/utils/crypto/sha512/sha512-ssse3-asm_linux.S b/pkg/crypto/sha512/sha512-ssse3-asm_linux.S similarity index 100% rename from pkg/utils/crypto/sha512/sha512-ssse3-asm_linux.S rename to pkg/crypto/sha512/sha512-ssse3-asm_linux.S diff --git a/pkg/utils/crypto/sha512/sha512_darwin.go b/pkg/crypto/sha512/sha512_darwin.go similarity index 100% rename from pkg/utils/crypto/sha512/sha512_darwin.go rename to pkg/crypto/sha512/sha512_darwin.go diff --git a/pkg/utils/crypto/sha512/sha512_linux.go b/pkg/crypto/sha512/sha512_linux.go similarity index 100% rename from pkg/utils/crypto/sha512/sha512_linux.go rename to pkg/crypto/sha512/sha512_linux.go diff --git a/pkg/utils/crypto/sha512/sha512_linux_test.go b/pkg/crypto/sha512/sha512_linux_test.go similarity index 100% rename from pkg/utils/crypto/sha512/sha512_linux_test.go rename to pkg/crypto/sha512/sha512_linux_test.go diff --git a/pkg/utils/crypto/sha512/sha512_windows.go b/pkg/crypto/sha512/sha512_windows.go similarity index 100% rename from pkg/utils/crypto/sha512/sha512_windows.go rename to pkg/crypto/sha512/sha512_windows.go diff --git a/pkg/utils/crypto/sha512/sha512block_linux.go b/pkg/crypto/sha512/sha512block_linux.go similarity index 100% rename from pkg/utils/crypto/sha512/sha512block_linux.go rename to pkg/crypto/sha512/sha512block_linux.go diff --git a/pkg/donut/bucket.go b/pkg/donut/bucket.go index ccc89ada8..fca6a5adb 100644 --- a/pkg/donut/bucket.go +++ b/pkg/donut/bucket.go @@ -31,9 +31,9 @@ import ( "encoding/hex" "encoding/json" + "github.com/minio/minio/pkg/crypto/sha512" + "github.com/minio/minio/pkg/donut/split" "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/utils/crypto/sha512" - "github.com/minio/minio/pkg/utils/split" ) const ( diff --git a/pkg/utils/split/.gitignore 
b/pkg/donut/split/.gitignore similarity index 100% rename from pkg/utils/split/.gitignore rename to pkg/donut/split/.gitignore diff --git a/pkg/utils/split/split.go b/pkg/donut/split/split.go similarity index 100% rename from pkg/utils/split/split.go rename to pkg/donut/split/split.go diff --git a/pkg/utils/split/split_test.go b/pkg/donut/split/split_test.go similarity index 97% rename from pkg/utils/split/split_test.go rename to pkg/donut/split/split_test.go index de84c0e32..569f6d02a 100644 --- a/pkg/utils/split/split_test.go +++ b/pkg/donut/split/split_test.go @@ -25,7 +25,7 @@ import ( "testing" . "github.com/minio/check" - "github.com/minio/minio/pkg/utils/split" + "github.com/minio/minio/pkg/donut/split" ) type MySuite struct{} diff --git a/pkg/utils/split/testdata/TESTFILE b/pkg/donut/split/testdata/TESTFILE similarity index 100% rename from pkg/utils/split/testdata/TESTFILE rename to pkg/donut/split/testdata/TESTFILE diff --git a/pkg/utils/crypto/sha512/LICENSE b/pkg/hash/crc32c/LICENSE similarity index 100% rename from pkg/utils/crypto/sha512/LICENSE rename to pkg/hash/crc32c/LICENSE diff --git a/pkg/utils/checksum/crc32c/crc32c_darwin.go b/pkg/hash/crc32c/crc32c_darwin.go similarity index 100% rename from pkg/utils/checksum/crc32c/crc32c_darwin.go rename to pkg/hash/crc32c/crc32c_darwin.go diff --git a/pkg/utils/checksum/crc32c/crc32c_intel_linux.go b/pkg/hash/crc32c/crc32c_intel_linux.go similarity index 100% rename from pkg/utils/checksum/crc32c/crc32c_intel_linux.go rename to pkg/hash/crc32c/crc32c_intel_linux.go diff --git a/pkg/utils/checksum/crc32c/crc32c_linux.S b/pkg/hash/crc32c/crc32c_linux.S similarity index 100% rename from pkg/utils/checksum/crc32c/crc32c_linux.S rename to pkg/hash/crc32c/crc32c_linux.S diff --git a/pkg/utils/checksum/crc32c/crc32c_linux.go b/pkg/hash/crc32c/crc32c_linux.go similarity index 100% rename from pkg/utils/checksum/crc32c/crc32c_linux.go rename to pkg/hash/crc32c/crc32c_linux.go diff --git 
a/pkg/utils/checksum/crc32c/crc32c_linux_test.go b/pkg/hash/crc32c/crc32c_linux_test.go similarity index 100% rename from pkg/utils/checksum/crc32c/crc32c_linux_test.go rename to pkg/hash/crc32c/crc32c_linux_test.go diff --git a/pkg/utils/checksum/crc32c/crc32c_windows.go b/pkg/hash/crc32c/crc32c_windows.go similarity index 100% rename from pkg/utils/checksum/crc32c/crc32c_windows.go rename to pkg/hash/crc32c/crc32c_windows.go diff --git a/pkg/utils/checksum/crc32c/doc.go b/pkg/hash/crc32c/doc.go similarity index 100% rename from pkg/utils/checksum/crc32c/doc.go rename to pkg/hash/crc32c/doc.go diff --git a/pkg/utils/crypto/keys/common.go b/pkg/server/api/auth/keys/common.go similarity index 100% rename from pkg/utils/crypto/keys/common.go rename to pkg/server/api/auth/keys/common.go diff --git a/pkg/utils/crypto/keys/keys.go b/pkg/server/api/auth/keys/keys.go similarity index 100% rename from pkg/utils/crypto/keys/keys.go rename to pkg/server/api/auth/keys/keys.go diff --git a/pkg/utils/crypto/keys/keys_test.go b/pkg/server/api/auth/keys/keys_test.go similarity index 95% rename from pkg/utils/crypto/keys/keys_test.go rename to pkg/server/api/auth/keys/keys_test.go index f3a6017b0..a3ecb9573 100644 --- a/pkg/utils/crypto/keys/keys_test.go +++ b/pkg/server/api/auth/keys/keys_test.go @@ -20,7 +20,7 @@ import ( "testing" . "github.com/minio/check" - "github.com/minio/minio/pkg/utils/crypto/keys" + "github.com/minio/minio/pkg/server/api/auth/keys" ) func Test(t *testing.T) { TestingT(t) } diff --git a/pkg/server/api/generic-handlers.go b/pkg/server/api/generic-handlers.go index 08f36f94d..bf21b6c43 100644 --- a/pkg/server/api/generic-handlers.go +++ b/pkg/server/api/generic-handlers.go @@ -26,7 +26,7 @@ import ( "time" "github.com/minio/minio/pkg/quick" - "github.com/minio/minio/pkg/utils/crypto/keys" + "github.com/minio/minio/pkg/server/api/auth/keys" ) type contentTypeHandler struct {