Move all server and controller packages into top-level
parent d808c3685d
commit d54488f144
Makefile (4 changed lines)
@@ -34,7 +34,8 @@ lint:

cyclo:
	@echo "Running $@:"
-	@GO15VENDOREXPERIMENT=1 gocyclo -over 25 .
+	@GO15VENDOREXPERIMENT=1 gocyclo -over 25 *.go
+	@GO15VENDOREXPERIMENT=1 gocyclo -over 25 pkg

build: getdeps verifiers
	@echo "Installing minio:"
@@ -42,6 +43,7 @@ build: getdeps verifiers

test: build
	@echo "Running all testing:"
	@GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) .
	@GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) github.com/minio/minio/pkg...

gomake-all: build
@@ -21,7 +21,7 @@ _init() {

## Minimum required versions for build dependencies
GCC_VERSION="4.0"
CLANG_VERSION="3.5"
LLVM_VERSION="7.0.0"
YASM_VERSION="1.2.0"
GIT_VERSION="1.0"
GO_VERSION="1.5.1"
@@ -173,7 +173,7 @@ is_supported_arch() {
check_deps() {
    check_version "$(env go version 2>/dev/null | sed 's/^.* go\([0-9.]*\).*$/\1/')" "${GO_VERSION}"
    if [ $? -ge 2 ]; then
-        MISSING="${MISSING} golang(1.5)"
+        MISSING="${MISSING} golang(${GO_VERSION})"
    fi

    check_version "$(env git --version 2>/dev/null | sed -e 's/^.* \([0-9.\].*\).*$/\1/' -e 's/^\([0-9.\]*\).*/\1/g')" "${GIT_VERSION}"
@@ -185,13 +185,13 @@ check_deps() {
        "Linux")
            check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${GCC_VERSION}"
            if [ $? -ge 2 ]; then
-                MISSING="${MISSING} build-essential"
+                MISSING="${MISSING} build-essential(${GCC_VERSION})"
            fi
            ;;
        "Darwin")
            check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${CLANG_VERSION}"
            check_version "$(env gcc --version 2>/dev/null | awk '{print $4}' | head -1)" "${LLVM_VERSION}"
            if [ $? -ge 2 ]; then
-                MISSING="${MISSING} xcode-cli"
+                MISSING="${MISSING} xcode-cli(${LLVM_VERSION})"
            fi
            ;;
        "*")
@@ -200,7 +200,7 @@ check_deps() {

    check_version "$(env yasm --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${YASM_VERSION}"
    if [ $? -ge 2 ]; then
-        MISSING="${MISSING} yasm(1.2.0)"
+        MISSING="${MISSING} yasm(${YASM_VERSION})"
    fi
}
@@ -16,10 +16,7 @@

package main

-import (
-	"github.com/minio/cli"
-	"github.com/minio/minio/pkg/controller"
-)
+import "github.com/minio/cli"

var controllerCmd = cli.Command{
	Name: "controller",
@@ -43,6 +40,6 @@ func controllerMain(c *cli.Context) {
		cli.ShowCommandHelpAndExit(c, "controller", 1)
	}

-	err := controller.Start()
+	err := StartController()
	errorIf(err.Trace(), "Failed to start minio controller.", nil)
}
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package controller
+package main

import (
	"net/http"
@@ -22,17 +22,16 @@ import (
	router "github.com/gorilla/mux"
	jsonrpc "github.com/gorilla/rpc/v2"
	"github.com/gorilla/rpc/v2/json"
-	"github.com/minio/minio/pkg/controller/rpc"
)

// getRPCHandler rpc handler
func getRPCHandler() http.Handler {
	s := jsonrpc.NewServer()
	s.RegisterCodec(json.NewCodec(), "application/json")
-	s.RegisterService(new(rpc.VersionService), "Version")
-	s.RegisterService(new(rpc.DonutService), "Donut")
-	s.RegisterService(new(rpc.AuthService), "Auth")
-	s.RegisterService(new(rpc.ServerService), "Server")
+	s.RegisterService(new(VersionService), "Version")
+	s.RegisterService(new(DonutService), "Donut")
+	s.RegisterService(new(AuthService), "Auth")
+	s.RegisterService(new(ServerService), "Server")
	// Add new RPC services here
	return registerRPC(router.NewRouter(), s)
}
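A note for readers unfamiliar with gorilla/rpc: a service registered this way is invoked by POSTing a JSON-RPC envelope whose method field is "<ServiceName>.<MethodName>" to the path the router mounts it on (the tests below use "/rpc"). A minimal sketch in Go, assuming gorilla's v1 JSON codec envelope; the function name is hypothetical:

	package main

	import (
		"fmt"
		"net/http"
		"net/http/httptest"
		"strings"
	)

	// Sketch: call Version.Get against the handler returned by getRPCHandler.
	// "Version.Get" resolves to VersionService.Get because the service was
	// registered under the name "Version" above.
	func exampleVersionCall() {
		ts := httptest.NewServer(getRPCHandler())
		defer ts.Close()

		body := strings.NewReader(`{"method":"Version.Get","params":[{}],"id":1}`)
		resp, err := http.Post(ts.URL+"/rpc", "application/json", body)
		if err != nil {
			fmt.Println("rpc call failed:", err)
			return
		}
		defer resp.Body.Close()
		fmt.Println("status:", resp.StatusCode) // expect 200 OK
	}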
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package rpc
+package main

import (
	"errors"
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package rpc
+package main

import (
	"net/http"
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package rpc
+package main

import (
	"net/http"
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package rpc
+package main

import (
	"net/http"
@@ -38,8 +38,7 @@ type VersionReply struct {
// Get version
func (v *VersionService) Get(r *http.Request, args *VersionArgs, reply *VersionReply) error {
	reply.Version = "0.0.1"
-	//TODO: Better approach needed here to pass global states like version. --ab.
-	// reply.BuildDate = version.Version
+	reply.BuildDate = minioVersion
	reply.Architecture = runtime.GOARCH
	reply.OperatingSystem = runtime.GOOS
	return nil
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package controller
+package main

import (
	"fmt"
@@ -54,8 +54,8 @@ func getRPCServer(rpcHandler http.Handler) (*http.Server, *probe.Error) {
	return httpServer, nil
}

-// Start starts a controller
-func Start() *probe.Error {
+// StartController starts a minio controller
+func StartController() *probe.Error {
	rpcServer, err := getRPCServer(getRPCHandler())
	if err != nil {
		return err.Trace()
@@ -14,31 +14,26 @@
 * limitations under the License.
 */

-package controller
+package main

import (
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"os"
-	"testing"

-	jsonrpc "github.com/gorilla/rpc/v2/json"
+	"github.com/gorilla/rpc/v2/json"
	"github.com/minio/minio/pkg/auth"
-	"github.com/minio/minio/pkg/controller/rpc"
	. "gopkg.in/check.v1"
)

-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { TestingT(t) }
+type ControllerRPCSuite struct{}

-type MySuite struct{}
-
-var _ = Suite(&MySuite{})
+var _ = Suite(&ControllerRPCSuite{})

var testRPCServer *httptest.Server

-func (s *MySuite) SetUpSuite(c *C) {
+func (s *ControllerRPCSuite) SetUpSuite(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "api-")
	c.Assert(err, IsNil)
	auth.SetAuthConfigPath(root)
@@ -46,136 +41,136 @@ func (s *MySuite) SetUpSuite(c *C) {
	testRPCServer = httptest.NewServer(getRPCHandler())
}

-func (s *MySuite) TearDownSuite(c *C) {
+func (s *ControllerRPCSuite) TearDownSuite(c *C) {
	testRPCServer.Close()
}

-func (s *MySuite) TestMemStats(c *C) {
-	op := rpc.Operation{
+func (s *ControllerRPCSuite) TestMemStats(c *C) {
+	op := rpcOperation{
		Method:  "Server.MemStats",
-		Request: rpc.ServerArgs{},
+		Request: ServerArgs{},
	}
-	req, err := rpc.NewRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
+	req, err := newRPCRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
	c.Assert(err, IsNil)
	c.Assert(req.Get("Content-Type"), Equals, "application/json")
	resp, err := req.Do()
	c.Assert(err, IsNil)
	c.Assert(resp.StatusCode, Equals, http.StatusOK)

-	var reply rpc.MemStatsReply
-	c.Assert(jsonrpc.DecodeClientResponse(resp.Body, &reply), IsNil)
+	var reply MemStatsReply
+	c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
	resp.Body.Close()
-	c.Assert(reply, Not(DeepEquals), rpc.MemStatsReply{})
+	c.Assert(reply, Not(DeepEquals), MemStatsReply{})
}

-func (s *MySuite) TestSysInfo(c *C) {
-	op := rpc.Operation{
+func (s *ControllerRPCSuite) TestSysInfo(c *C) {
+	op := rpcOperation{
		Method:  "Server.SysInfo",
-		Request: rpc.ServerArgs{},
+		Request: ServerArgs{},
	}
-	req, err := rpc.NewRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
+	req, err := newRPCRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
	c.Assert(err, IsNil)
	c.Assert(req.Get("Content-Type"), Equals, "application/json")
	resp, err := req.Do()
	c.Assert(err, IsNil)
	c.Assert(resp.StatusCode, Equals, http.StatusOK)

-	var reply rpc.SysInfoReply
-	c.Assert(jsonrpc.DecodeClientResponse(resp.Body, &reply), IsNil)
+	var reply SysInfoReply
+	c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
	resp.Body.Close()
-	c.Assert(reply, Not(DeepEquals), rpc.SysInfoReply{})
+	c.Assert(reply, Not(DeepEquals), SysInfoReply{})
}

-func (s *MySuite) TestServerList(c *C) {
-	op := rpc.Operation{
+func (s *ControllerRPCSuite) TestServerList(c *C) {
+	op := rpcOperation{
		Method:  "Server.List",
-		Request: rpc.ServerArgs{},
+		Request: ServerArgs{},
	}
-	req, err := rpc.NewRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
+	req, err := newRPCRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
	c.Assert(err, IsNil)
	c.Assert(req.Get("Content-Type"), Equals, "application/json")
	resp, err := req.Do()
	c.Assert(err, IsNil)
	c.Assert(resp.StatusCode, Equals, http.StatusOK)

-	var reply rpc.ServerListReply
-	c.Assert(jsonrpc.DecodeClientResponse(resp.Body, &reply), IsNil)
+	var reply ServerListReply
+	c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
	resp.Body.Close()
-	c.Assert(reply, Not(DeepEquals), rpc.ServerListReply{})
+	c.Assert(reply, Not(DeepEquals), ServerListReply{})
}

-func (s *MySuite) TestServerAdd(c *C) {
-	op := rpc.Operation{
+func (s *ControllerRPCSuite) TestServerAdd(c *C) {
+	op := rpcOperation{
		Method:  "Server.Add",
-		Request: rpc.ServerArgs{MinioServers: []rpc.MinioServer{}},
+		Request: ServerArgs{MinioServers: []MinioServer{}},
	}
-	req, err := rpc.NewRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
+	req, err := newRPCRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
	c.Assert(err, IsNil)
	c.Assert(req.Get("Content-Type"), Equals, "application/json")
	resp, err := req.Do()
	c.Assert(err, IsNil)
	c.Assert(resp.StatusCode, Equals, http.StatusOK)

-	var reply rpc.ServerAddReply
-	c.Assert(jsonrpc.DecodeClientResponse(resp.Body, &reply), IsNil)
+	var reply ServerAddReply
+	c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
	resp.Body.Close()
-	c.Assert(reply, Not(DeepEquals), rpc.ServerAddReply{ServersAdded: []rpc.MinioServer{}})
+	c.Assert(reply, Not(DeepEquals), ServerAddReply{ServersAdded: []MinioServer{}})
}

-func (s *MySuite) TestAuth(c *C) {
-	op := rpc.Operation{
+func (s *ControllerRPCSuite) TestAuth(c *C) {
+	op := rpcOperation{
		Method:  "Auth.Generate",
-		Request: rpc.AuthArgs{User: "newuser"},
+		Request: AuthArgs{User: "newuser"},
	}
-	req, err := rpc.NewRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
+	req, err := newRPCRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
	c.Assert(err, IsNil)
	c.Assert(req.Get("Content-Type"), Equals, "application/json")
	resp, err := req.Do()
	c.Assert(err, IsNil)
	c.Assert(resp.StatusCode, Equals, http.StatusOK)

-	var reply rpc.AuthReply
-	c.Assert(jsonrpc.DecodeClientResponse(resp.Body, &reply), IsNil)
+	var reply AuthReply
+	c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
	resp.Body.Close()
-	c.Assert(reply, Not(DeepEquals), rpc.AuthReply{})
+	c.Assert(reply, Not(DeepEquals), AuthReply{})
	c.Assert(len(reply.AccessKeyID), Equals, 20)
	c.Assert(len(reply.SecretAccessKey), Equals, 40)
	c.Assert(len(reply.Name), Not(Equals), 0)

-	op = rpc.Operation{
+	op = rpcOperation{
		Method:  "Auth.Fetch",
-		Request: rpc.AuthArgs{User: "newuser"},
+		Request: AuthArgs{User: "newuser"},
	}
-	req, err = rpc.NewRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
+	req, err = newRPCRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
	c.Assert(err, IsNil)
	c.Assert(req.Get("Content-Type"), Equals, "application/json")
	resp, err = req.Do()
	c.Assert(err, IsNil)
	c.Assert(resp.StatusCode, Equals, http.StatusOK)

-	var newReply rpc.AuthReply
-	c.Assert(jsonrpc.DecodeClientResponse(resp.Body, &newReply), IsNil)
+	var newReply AuthReply
+	c.Assert(json.DecodeClientResponse(resp.Body, &newReply), IsNil)
	resp.Body.Close()
-	c.Assert(newReply, Not(DeepEquals), rpc.AuthReply{})
+	c.Assert(newReply, Not(DeepEquals), AuthReply{})
	c.Assert(reply.AccessKeyID, Equals, newReply.AccessKeyID)
	c.Assert(reply.SecretAccessKey, Equals, newReply.SecretAccessKey)
	c.Assert(len(reply.Name), Not(Equals), 0)

-	op = rpc.Operation{
+	op = rpcOperation{
		Method:  "Auth.Reset",
-		Request: rpc.AuthArgs{User: "newuser"},
+		Request: AuthArgs{User: "newuser"},
	}
-	req, err = rpc.NewRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
+	req, err = newRPCRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
	c.Assert(err, IsNil)
	c.Assert(req.Get("Content-Type"), Equals, "application/json")
	resp, err = req.Do()
	c.Assert(err, IsNil)
	c.Assert(resp.StatusCode, Equals, http.StatusOK)

-	var resetReply rpc.AuthReply
-	c.Assert(jsonrpc.DecodeClientResponse(resp.Body, &resetReply), IsNil)
+	var resetReply AuthReply
+	c.Assert(json.DecodeClientResponse(resp.Body, &resetReply), IsNil)
	resp.Body.Close()
-	c.Assert(newReply, Not(DeepEquals), rpc.AuthReply{})
+	c.Assert(newReply, Not(DeepEquals), AuthReply{})
	c.Assert(reply.AccessKeyID, Not(Equals), resetReply.AccessKeyID)
	c.Assert(reply.SecretAccessKey, Not(Equals), resetReply.SecretAccessKey)
	c.Assert(len(reply.Name), Not(Equals), 0)
@@ -183,11 +178,11 @@ func (s *MySuite) TestAuth(c *C) {
	// these operations should fail

	/// generating access for existing user fails
-	op = rpc.Operation{
+	op = rpcOperation{
		Method:  "Auth.Generate",
-		Request: rpc.AuthArgs{User: "newuser"},
+		Request: AuthArgs{User: "newuser"},
	}
-	req, err = rpc.NewRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
+	req, err = newRPCRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
	c.Assert(err, IsNil)
	c.Assert(req.Get("Content-Type"), Equals, "application/json")
	resp, err = req.Do()
@@ -195,11 +190,11 @@ func (s *MySuite) TestAuth(c *C) {
	c.Assert(resp.StatusCode, Equals, http.StatusBadRequest)

	/// null user provided invalid
-	op = rpc.Operation{
+	op = rpcOperation{
		Method:  "Auth.Generate",
-		Request: rpc.AuthArgs{User: ""},
+		Request: AuthArgs{User: ""},
	}
-	req, err = rpc.NewRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
+	req, err = newRPCRequest(testRPCServer.URL+"/rpc", op, http.DefaultTransport)
	c.Assert(err, IsNil)
	c.Assert(req.Get("Content-Type"), Equals, "application/json")
	resp, err = req.Do()
@@ -27,7 +27,11 @@ import (
	. "gopkg.in/check.v1"
)

-func (s *TestSuite) TestLogger(c *C) {
+type LoggerSuite struct{}
+
+var _ = Suite(&LoggerSuite{})
+
+func (s *LoggerSuite) TestLogger(c *C) {
	var buffer bytes.Buffer
	var fields logrus.Fields
	log.Out = &buffer
@@ -1,5 +1,5 @@
/*
- * Minio Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -22,14 +22,5 @@ import (
	. "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }
-
-type TestSuite struct{}
-
-var _ = Suite(&TestSuite{})
-
-func (s *TestSuite) SetUpSuite(c *C) {
-}
-
-func (s *TestSuite) TearDownSuite(c *C) {
-}
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package rpc
+package main

import (
	"bytes"
@@ -24,20 +24,20 @@ import (
	"github.com/minio/minio/pkg/probe"
)

-// Operation RPC operation
-type Operation struct {
+// rpcOperation RPC operation
+type rpcOperation struct {
	Method  string
	Request interface{}
}

-// Request rpc client request
-type Request struct {
+// rpcRequest rpc client request
+type rpcRequest struct {
	req       *http.Request
	transport http.RoundTripper
}

-// NewRequest initiate a new client RPC request
-func NewRequest(url string, op Operation, transport http.RoundTripper) (*Request, *probe.Error) {
+// newRPCRequest initiate a new client RPC request
+func newRPCRequest(url string, op rpcOperation, transport http.RoundTripper) (*rpcRequest, *probe.Error) {
	params, err := json.EncodeClientRequest(op.Method, op.Request)
	if err != nil {
		return nil, probe.NewError(err)
@@ -46,7 +46,7 @@ func NewRequest(url string, op Operation, transport http.RoundTripper) (*Request
	if err != nil {
		return nil, probe.NewError(err)
	}
-	rpcReq := &Request{}
+	rpcReq := &rpcRequest{}
	rpcReq.req = req
	rpcReq.req.Header.Set("Content-Type", "application/json")
	if transport == nil {
@@ -57,7 +57,7 @@ func NewRequest(url string, op Operation, transport http.RoundTripper) (*Request
}

// Do - make a http connection
-func (r Request) Do() (*http.Response, *probe.Error) {
+func (r rpcRequest) Do() (*http.Response, *probe.Error) {
	resp, err := r.transport.RoundTrip(r.req)
	if err != nil {
		if err, ok := probe.UnwrapError(err); ok {
@@ -69,11 +69,11 @@ func (r Request) Do() (*http.Response, *probe.Error) {
}

// Get - get value of requested header
-func (r Request) Get(key string) string {
+func (r rpcRequest) Get(key string) string {
	return r.req.Header.Get(key)
}

// Set - set value of a header key
-func (r *Request) Set(key, value string) {
+func (r *rpcRequest) Set(key, value string) {
	r.req.Header.Set(key, value)
}
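Taken together, rpcOperation, rpcRequest, and newRPCRequest form the small JSON-RPC client that the controller tests above exercise. A minimal usage sketch under the new names (the URL is an arbitrary example; ServerArgs and MemStatsReply are types this commit moves into package main):

	package main

	import (
		"log"
		"net/http"

		"github.com/gorilla/rpc/v2/json"
	)

	// Sketch: one JSON-RPC round trip with the renamed helpers.
	func exampleMemStats() {
		op := rpcOperation{
			Method:  "Server.MemStats",
			Request: ServerArgs{},
		}
		req, perr := newRPCRequest("http://localhost:9001/rpc", op, http.DefaultTransport)
		if perr != nil {
			log.Fatalln(perr.Trace())
		}
		resp, perr := req.Do()
		if perr != nil {
			log.Fatalln(perr.Trace())
		}
		defer resp.Body.Close()

		var reply MemStatsReply
		if err := json.DecodeClientResponse(resp.Body, &reply); err != nil {
			log.Fatalln(err)
		}
		log.Printf("mem stats: %+v", reply)
	}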
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import "net/http"
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"net/http"
@@ -24,7 +24,7 @@ import (
	"github.com/minio/minio/pkg/probe"
)

-func (api Minio) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool {
+func (api MinioAPI) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool {
	vars := mux.Vars(req)
	bucket := vars["bucket"]

@@ -67,10 +67,10 @@ func (api Minio) isValidOp(w http.ResponseWriter, req *http.Request, acceptsCont
// using the Initiate Multipart Upload request, but has not yet been completed or aborted.
// This operation returns at most 1,000 multipart uploads in the response.
//
-func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until ticket master gives us a go
@@ -133,10 +133,10 @@ func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Re
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
-func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -208,10 +208,10 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
// -----------
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
-func (api Minio) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -260,10 +260,10 @@ func (api Minio) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for authenticated request
-func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -338,10 +338,10 @@ func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
// PutBucketACLHandler - PUT Bucket ACL
// ----------
// This implementation of the PUT operation modifies the bucketACL for authenticated request
-func (api Minio) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -395,10 +395,10 @@ func (api Minio) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
-func (api Minio) HeadBucketHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import "net/http"
@@ -14,12 +14,12 @@
 * limitations under the License.
 */

-package api
+package main

import "encoding/xml"

-// Config - http server config
-type Config struct {
+// APIConfig - http server config
+type APIConfig struct {
	Address  string
	TLS      bool
	CertFile string
@@ -14,22 +14,22 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"encoding/xml"
	"net/http"
)

-// Error structure
-type Error struct {
+// APIError structure
+type APIError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

-// ErrorResponse - error response format
-type ErrorResponse struct {
+// APIErrorResponse - error response format
+type APIErrorResponse struct {
	XMLName xml.Name `xml:"Error" json:"-"`
	Code    string
	Message string
@@ -77,8 +77,8 @@ const (
	NotAcceptable = iota + 30
)

-// Error code to Error structure map
-var errorCodeResponse = map[int]Error{
+// APIError code to Error structure map
+var errorCodeResponse = map[int]APIError{
	InvalidMaxUploads: {
		Code:        "InvalidArgument",
		Description: "Argument maxUploads must be an integer between 0 and 2147483647.",
@@ -232,14 +232,14 @@ var errorCodeResponse = map[int]Error{
}

// errorCodeError provides errorCode to Error. It returns empty if the code provided is unknown
-func getErrorCode(code int) Error {
+func getErrorCode(code int) APIError {
	return errorCodeResponse[code]
}

// getErrorResponse gets in standard error and resource value and
// provides a encodable populated response values
-func getErrorResponse(err Error, resource string) ErrorResponse {
-	var data = ErrorResponse{}
+func getErrorResponse(err APIError, resource string) APIErrorResponse {
+	var data = APIErrorResponse{}
	data.Code = err.Code
	data.Message = err.Description
	if resource != "" {
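For usage context: a handler that wants to fail a request looks the code up with getErrorCode and serializes the result of getErrorResponse. A hedged sketch of that flow with the renamed types (the helper name writeMethodNotAllowed is hypothetical; the real handlers may route the body through their own response helpers):

	package main

	import (
		"encoding/xml"
		"net/http"
	)

	// Sketch: map an internal error code to an S3-style XML error document.
	func writeMethodNotAllowed(w http.ResponseWriter, resource string) {
		apiErr := getErrorCode(MethodNotAllowed) // now returns APIError
		errResp := getErrorResponse(apiErr, resource)
		w.WriteHeader(apiErr.HTTPStatusCode)
		// APIErrorResponse carries XMLName `xml:"Error"`, so it encodes
		// as the conventional <Error>...</Error> body.
		_ = xml.NewEncoder(w).Encode(errResp)
	}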
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"errors"
@@ -44,10 +44,6 @@ type resourceHandler struct {
	handler http.Handler
}

-const (
-	iso8601Format = "20060102T150405Z"
-)
-
func parseDate(req *http.Request) (time.Time, error) {
	amzDate := req.Header.Get(http.CanonicalHeaderKey("x-amz-date"))
	switch {
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"bytes"
@@ -22,6 +22,7 @@ import (
	"encoding/json"
	"encoding/xml"
	"net/http"
+	"runtime"
	"strconv"

	"github.com/minio/minio/pkg/donut"
@@ -51,9 +52,7 @@ func generateRequestID() []byte {
func setCommonHeaders(w http.ResponseWriter, acceptsType string, contentLength int) {
	// set unique request ID for each reply
	w.Header().Set("X-Amz-Request-Id", string(generateRequestID()))
-
-	// TODO: Modularity comes in the way of passing global state like "version". A better approach needed here. -ab
-	// w.Header().Set("Server", ("Minio/" + version + " (" + runtime.GOOS + ";" + runtime.GOARCH + ")"))
+	w.Header().Set("Server", ("Minio/" + minioReleaseTag + " (" + runtime.GOOS + ";" + runtime.GOARCH + ")"))

	w.Header().Set("Accept-Ranges", "bytes")
	w.Header().Set("Content-Type", acceptsType)
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"errors"
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"bytes"
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"net/http"
@@ -33,10 +33,10 @@ const (
// ----------
// This implementation of the GET operation retrieves object. To use GET,
// you must have READ access to the object.
-func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
	// ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -100,10 +100,10 @@ func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
-func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
	// ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -157,10 +157,10 @@ func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
-func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -259,10 +259,10 @@ func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
/// Multipart API

// NewMultipartUploadHandler - New multipart upload
-func (api Minio) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -317,10 +317,10 @@ func (api Minio) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Requ
}

// PutObjectPartHandler - Upload part
-func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -417,10 +417,10 @@ func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request)
}

// AbortMultipartUploadHandler - Abort multipart upload
-func (api Minio) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -467,10 +467,10 @@ func (api Minio) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Re
}

// ListObjectPartsHandler - List object parts
-func (api Minio) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -532,10 +532,10 @@ func (api Minio) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request
}

// CompleteMultipartUploadHandler - Complete multipart upload
-func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
-		op := Operation{}
+		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
@@ -597,13 +597,13 @@ func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http
/// Delete API

// DeleteBucketHandler - Delete bucket
-func (api Minio) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) {
	error := getErrorCode(MethodNotAllowed)
	w.WriteHeader(error.HTTPStatusCode)
}

// DeleteObjectHandler - Delete object
-func (api Minio) DeleteObjectHandler(w http.ResponseWriter, req *http.Request) {
+func (api MinioAPI) DeleteObjectHandler(w http.ResponseWriter, req *http.Request) {
	error := getErrorCode(MethodNotAllowed)
	w.WriteHeader(error.HTTPStatusCode)
}
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"net/url"
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"net/http"
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"errors"
@@ -28,6 +28,8 @@ import (

const (
	authHeaderPrefix = "AWS4-HMAC-SHA256"
+	iso8601Format    = "20060102T150405Z"
+	yyyymmdd         = "20060102"
)

// getCredentialsFromAuth parse credentials tag from authorization value
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import "errors"

@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package api
+package main

import (
	"encoding/base64"
@@ -14,30 +14,30 @@
 * limitations under the License.
 */

-package api
+package main

import "github.com/minio/minio/pkg/donut"

-// Operation container for individual operations read by Ticket Master
-type Operation struct {
+// APIOperation container for individual operations read by Ticket Master
+type APIOperation struct {
	ProceedCh chan struct{}
}

-// Minio container for API and also carries OP (operation) channel
-type Minio struct {
-	OP    chan Operation
+// MinioAPI container for API and also carries OP (operation) channel
+type MinioAPI struct {
+	OP    chan APIOperation
	Donut donut.Interface
}

-// New instantiate a new minio API
-func New() Minio {
+// NewAPI instantiate a new minio API
+func NewAPI() MinioAPI {
	// ignore errors for now
	d, err := donut.New()
	if err != nil {
		panic(err)
	}
-	return Minio{
-		OP:    make(chan Operation),
+	return MinioAPI{
+		OP:    make(chan APIOperation),
		Donut: d,
	}
}
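These renames touch the ticket-master pattern used throughout the handlers: each request pushes an APIOperation into api.OP and blocks on its ProceedCh until the ticket master (startTM, shown further below) signals a go. A standalone sketch of the handshake under the new names (note NewAPI panics without a valid donut config, so this assumes one is present):

	package main

	import "fmt"

	// Sketch: the ticket-master handshake performed by every handler.
	func exampleTicketMaster() {
		api := NewAPI() // MinioAPI carrying OP chan APIOperation
		go startTM(api) // ticket master drains api.OP

		op := APIOperation{ProceedCh: make(chan struct{})}
		api.OP <- op   // hand our ticket to the master
		<-op.ProceedCh // block until the master gives us a go
		fmt.Println("proceeding with the request")
	}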
@@ -16,11 +16,7 @@

package main

-import (
-	"github.com/minio/cli"
-	"github.com/minio/minio/pkg/server"
-	"github.com/minio/minio/pkg/server/api"
-)
+import "github.com/minio/cli"

var serverCmd = cli.Command{
	Name: "server",
@@ -39,14 +35,14 @@ EXAMPLES:
`,
}

-func getServerConfig(c *cli.Context) api.Config {
+func getServerConfig(c *cli.Context) APIConfig {
	certFile := c.GlobalString("cert")
	keyFile := c.GlobalString("key")
	if (certFile != "" && keyFile == "") || (certFile == "" && keyFile != "") {
		Fatalln("Both certificate and key are required to enable https.")
	}
	tls := (certFile != "" && keyFile != "")
-	return api.Config{
+	return APIConfig{
		Address:  c.GlobalString("address"),
		TLS:      tls,
		CertFile: certFile,
@@ -61,6 +57,6 @@ func serverMain(c *cli.Context) {
	}

	apiServerConfig := getServerConfig(c)
-	err := server.Start(apiServerConfig)
+	err := StartServer(apiServerConfig)
	errorIf(err.Trace(), "Failed to start the minio server.", nil)
}
@@ -14,17 +14,16 @@
 * limitations under the License.
 */

-package server
+package main

import (
	"net/http"

	router "github.com/gorilla/mux"
-	"github.com/minio/minio/pkg/server/api"
)

// registerAPI - register all the object API handlers to their respective paths
-func registerAPI(mux *router.Router, a api.Minio) {
+func registerAPI(mux *router.Router, a MinioAPI) {
	mux.HandleFunc("/", a.ListBucketsHandler).Methods("GET")
	mux.HandleFunc("/{bucket}", a.ListObjectsHandler).Methods("GET")
	mux.HandleFunc("/{bucket}", a.PutBucketHandler).Methods("PUT")
@@ -45,7 +44,7 @@ func registerAPI(mux *router.Router, a api.Minio) {
	mux.HandleFunc("/{bucket}/{object:.*}", a.DeleteObjectHandler).Methods("DELETE")
}

-func registerCustomMiddleware(mux *router.Router, mwHandlers ...api.MiddlewareHandler) http.Handler {
+func registerCustomMiddleware(mux *router.Router, mwHandlers ...MiddlewareHandler) http.Handler {
	var f http.Handler
	f = mux
	for _, mw := range mwHandlers {
@@ -55,18 +54,18 @@ func registerCustomMiddleware(mux *router.Router, mwHandlers ...api.MiddlewareHa
}

// getAPIHandler api handler
-func getAPIHandler(conf api.Config) (http.Handler, api.Minio) {
-	var mwHandlers = []api.MiddlewareHandler{
-		api.ValidContentTypeHandler,
-		api.TimeValidityHandler,
-		api.IgnoreResourcesHandler,
-		api.ValidateAuthHeaderHandler,
+func getAPIHandler(conf APIConfig) (http.Handler, MinioAPI) {
+	var mwHandlers = []MiddlewareHandler{
+		ValidContentTypeHandler,
+		TimeValidityHandler,
+		IgnoreResourcesHandler,
+		ValidateAuthHeaderHandler,
		// api.LoggingHandler, // Disabled logging until we bring in external logging support
-		api.CorsHandler,
+		CorsHandler,
	}

	mux := router.NewRouter()
-	minioAPI := api.New()
+	minioAPI := NewAPI()
	registerAPI(mux, minioAPI)
	apiHandler := registerCustomMiddleware(mux, mwHandlers...)
	return apiHandler, minioAPI
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package server
+package main

import (
	"bytes"
@@ -28,20 +28,8 @@ import (
	"regexp"
	"sort"
	"strings"
-	"testing"
	"time"
	"unicode/utf8"

	. "gopkg.in/check.v1"
)

-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { TestingT(t) }
-
-const (
-	authHeader    = "AWS4-HMAC-SHA256"
-	iso8601Format = "20060102T150405Z"
-	yyyymmdd      = "20060102"
-)
-
///
@@ -254,7 +242,7 @@ func (s *MyAPISignatureV4Suite) newRequest(method, urlStr string, contentLength
		"aws4_request",
	}, "/")

-	stringToSign := authHeader + "\n" + t.Format(iso8601Format) + "\n"
+	stringToSign := authHeaderPrefix + "\n" + t.Format(iso8601Format) + "\n"
	stringToSign = stringToSign + scope + "\n"
	stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))

@@ -267,7 +255,7 @@ func (s *MyAPISignatureV4Suite) newRequest(method, urlStr string, contentLength

	// final Authorization header
	parts := []string{
-		authHeader + " Credential=" + s.accessKeyID + "/" + scope,
+		authHeaderPrefix + " Credential=" + s.accessKeyID + "/" + scope,
		"SignedHeaders=" + signedHeaders,
		"Signature=" + signature,
	}
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package server
+package main

import (
	"crypto/tls"
@@ -26,11 +26,10 @@ import (

	"github.com/minio/minio/pkg/minhttp"
	"github.com/minio/minio/pkg/probe"
-	"github.com/minio/minio/pkg/server/api"
)

// getAPI server instance
-func getAPIServer(conf api.Config, apiHandler http.Handler) (*http.Server, *probe.Error) {
+func getAPIServer(conf APIConfig, apiHandler http.Handler) (*http.Server, *probe.Error) {
	// Minio server config
	httpServer := &http.Server{
		Addr: conf.Address,
@@ -84,7 +83,7 @@ func getAPIServer(conf api.Config, apiHandler http.Handler) (*http.Server, *prob
}

// Start ticket master
-func startTM(a api.Minio) {
+func startTM(a MinioAPI) {
	for {
		for op := range a.OP {
			op.ProceedCh <- struct{}{}
@@ -92,8 +91,8 @@ func startTM(a api.Minio) {
	}
}

-// Start starts a s3 compatible cloud storage server
-func Start(conf api.Config) *probe.Error {
+// StartServer starts an s3 compatible cloud storage server
+func StartServer(conf APIConfig) *probe.Error {
	apiHandler, minioAPI := getAPIHandler(conf)
	apiServer, err := getAPIServer(conf, apiHandler)
	if err != nil {
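End to end, the boot path after this commit is serverMain → getServerConfig → StartServer, all in package main. A minimal sketch of driving it without the CLI (the address is an arbitrary example; RateLimit mirrors what the test suites below pass):

	package main

	import "log"

	// Sketch: start the S3-compatible server directly.
	func exampleStartServer() {
		conf := APIConfig{
			Address:   ":9000",
			RateLimit: 16,
		}
		if err := StartServer(conf); err != nil {
			log.Fatalln(err.Trace())
		}
	}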
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package server
+package main

import (
	"bytes"
@@ -28,7 +28,6 @@ import (
	"net/http/httptest"

	"github.com/minio/minio/pkg/donut"
-	"github.com/minio/minio/pkg/server/api"
	. "gopkg.in/check.v1"
)

@@ -52,7 +51,7 @@ func (s *MyAPIDonutCacheSuite) SetUpSuite(c *C) {
	perr := donut.SaveConfig(conf)
	c.Assert(perr, IsNil)

-	httpHandler, minioAPI := getAPIHandler(api.Config{RateLimit: 16})
+	httpHandler, minioAPI := getAPIHandler(APIConfig{RateLimit: 16})
	go startTM(minioAPI)
	testAPIDonutCacheServer = httptest.NewServer(httpHandler)
}
@@ -319,7 +318,7 @@ func (s *MyAPIDonutCacheSuite) TestListBuckets(c *C) {
	c.Assert(err, IsNil)
	c.Assert(response.StatusCode, Equals, http.StatusOK)

-	var results api.ListBucketsResponse
+	var results ListBucketsResponse
	decoder := xml.NewDecoder(response.Body)
	err = decoder.Decode(&results)
	c.Assert(err, IsNil)
@@ -676,7 +675,7 @@ func (s *MyAPIDonutCacheSuite) TestObjectMultipartAbort(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -721,7 +720,7 @@ func (s *MyAPIDonutCacheSuite) TestBucketMultipartList(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -750,7 +749,7 @@ func (s *MyAPIDonutCacheSuite) TestBucketMultipartList(c *C) {
	c.Assert(response3.StatusCode, Equals, http.StatusOK)

	decoder = xml.NewDecoder(response3.Body)
-	newResponse3 := &api.ListMultipartUploadsResponse{}
+	newResponse3 := &ListMultipartUploadsResponse{}
	err = decoder.Decode(newResponse3)
	c.Assert(err, IsNil)
	c.Assert(newResponse3.Bucket, Equals, "bucketmultipartlist")
@@ -772,7 +771,7 @@ func (s *MyAPIDonutCacheSuite) TestObjectMultipartList(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -826,7 +825,7 @@ func (s *MyAPIDonutCacheSuite) TestObjectMultipart(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -888,7 +887,7 @@ func (s *MyAPIDonutCacheSuite) TestObjectMultipart(c *C) {
func verifyError(c *C, response *http.Response, code, description string, statusCode int) {
	data, err := ioutil.ReadAll(response.Body)
	c.Assert(err, IsNil)
-	errorResponse := api.ErrorResponse{}
+	errorResponse := APIErrorResponse{}
	err = xml.Unmarshal(data, &errorResponse)
	c.Assert(err, IsNil)
	c.Assert(errorResponse.Code, Equals, code)
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package server
+package main

import (
	"bytes"
@@ -29,7 +29,6 @@ import (
	"net/http/httptest"

	"github.com/minio/minio/pkg/donut"
-	"github.com/minio/minio/pkg/server/api"
	. "gopkg.in/check.v1"
)

@@ -71,7 +70,7 @@ func (s *MyAPIDonutSuite) SetUpSuite(c *C) {
	perr := donut.SaveConfig(conf)
	c.Assert(perr, IsNil)

-	httpHandler, minioAPI := getAPIHandler(api.Config{RateLimit: 16})
+	httpHandler, minioAPI := getAPIHandler(APIConfig{RateLimit: 16})
	go startTM(minioAPI)
	testAPIDonutServer = httptest.NewServer(httpHandler)
}
@@ -338,7 +337,7 @@ func (s *MyAPIDonutSuite) TestListBuckets(c *C) {
	c.Assert(err, IsNil)
	c.Assert(response.StatusCode, Equals, http.StatusOK)

-	var results api.ListBucketsResponse
+	var results ListBucketsResponse
	decoder := xml.NewDecoder(response.Body)
	err = decoder.Decode(&results)
	c.Assert(err, IsNil)
@@ -696,7 +695,7 @@ func (s *MyAPIDonutSuite) TestObjectMultipartAbort(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -741,7 +740,7 @@ func (s *MyAPIDonutSuite) TestBucketMultipartList(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -770,7 +769,7 @@ func (s *MyAPIDonutSuite) TestBucketMultipartList(c *C) {
	c.Assert(response3.StatusCode, Equals, http.StatusOK)

	decoder = xml.NewDecoder(response3.Body)
-	newResponse3 := &api.ListMultipartUploadsResponse{}
+	newResponse3 := &ListMultipartUploadsResponse{}
	err = decoder.Decode(newResponse3)
	c.Assert(err, IsNil)
	c.Assert(newResponse3.Bucket, Equals, "bucketmultipartlist")
@@ -792,7 +791,7 @@ func (s *MyAPIDonutSuite) TestObjectMultipartList(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -846,7 +845,7 @@ func (s *MyAPIDonutSuite) TestObjectMultipart(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package server
+package main

import (
	"bytes"
@@ -30,7 +30,6 @@ import (

	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/donut"
-	"github.com/minio/minio/pkg/server/api"
	. "gopkg.in/check.v1"
)

@@ -79,7 +78,7 @@ func (s *MyAPISignatureV4Suite) SetUpSuite(c *C) {
	perr = auth.SaveConfig(authConf)
	c.Assert(perr, IsNil)

-	httpHandler, minioAPI := getAPIHandler(api.Config{RateLimit: 16})
+	httpHandler, minioAPI := getAPIHandler(APIConfig{RateLimit: 16})
	go startTM(minioAPI)
	testSignatureV4Server = httptest.NewServer(httpHandler)
}
@@ -347,7 +346,7 @@ func (s *MyAPISignatureV4Suite) TestListBuckets(c *C) {
	c.Assert(err, IsNil)
	c.Assert(response.StatusCode, Equals, http.StatusOK)

-	var results api.ListBucketsResponse
+	var results ListBucketsResponse
	decoder := xml.NewDecoder(response.Body)
	err = decoder.Decode(&results)
	c.Assert(err, IsNil)
@@ -689,7 +688,7 @@ func (s *MyAPISignatureV4Suite) TestObjectMultipartAbort(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -736,7 +735,7 @@ func (s *MyAPISignatureV4Suite) TestBucketMultipartList(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -767,7 +766,7 @@ func (s *MyAPISignatureV4Suite) TestBucketMultipartList(c *C) {
	c.Assert(response3.StatusCode, Equals, http.StatusOK)

	decoder = xml.NewDecoder(response3.Body)
-	newResponse3 := &api.ListMultipartUploadsResponse{}
+	newResponse3 := &ListMultipartUploadsResponse{}
	err = decoder.Decode(newResponse3)
	c.Assert(err, IsNil)
	c.Assert(newResponse3.Bucket, Equals, "bucketmultipartlist")
@@ -789,7 +788,7 @@ func (s *MyAPISignatureV4Suite) TestObjectMultipartList(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
@@ -845,7 +844,7 @@ func (s *MyAPISignatureV4Suite) TestObjectMultipart(c *C) {
	c.Assert(response.StatusCode, Equals, http.StatusOK)

	decoder := xml.NewDecoder(response.Body)
-	newResponse := &api.InitiateMultipartUploadResponse{}
+	newResponse := &InitiateMultipartUploadResponse{}

	err = decoder.Decode(newResponse)
	c.Assert(err, IsNil)
vendor.json (28 changed lines)
@@ -99,6 +99,34 @@
		"local": "vendor/gopkg.in/check.v1",
		"revision": "11d3bc7aa68e238947792f30573146a3231fc0f1",
		"revisionTime": "2015-07-29T10:04:31+02:00"
	},
+	{
+		"canonical": "gopkg.in/mgo.v2",
+		"comment": "",
+		"local": "vendor/gopkg.in/mgo.v2",
+		"revision": "f4923a569136442e900b8cf5c1a706c0a8b0883c",
+		"revisionTime": "2015-08-21T12:30:02-03:00"
+	},
+	{
+		"canonical": "gopkg.in/mgo.v2/bson",
+		"comment": "",
+		"local": "vendor/gopkg.in/mgo.v2/bson",
+		"revision": "f4923a569136442e900b8cf5c1a706c0a8b0883c",
+		"revisionTime": "2015-08-21T12:30:02-03:00"
+	},
+	{
+		"canonical": "gopkg.in/mgo.v2/internal/sasl",
+		"comment": "",
+		"local": "vendor/gopkg.in/mgo.v2/internal/sasl",
+		"revision": "f4923a569136442e900b8cf5c1a706c0a8b0883c",
+		"revisionTime": "2015-08-21T12:30:02-03:00"
+	},
+	{
+		"canonical": "gopkg.in/mgo.v2/internal/scram",
+		"comment": "",
+		"local": "vendor/gopkg.in/mgo.v2/internal/scram",
+		"revision": "f4923a569136442e900b8cf5c1a706c0a8b0883c",
+		"revisionTime": "2015-08-21T12:30:02-03:00"
+	}
]
}
vendor/gopkg.in/mgo.v2/LICENSE (generated, vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
mgo - MongoDB driver for Go

Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/gopkg.in/mgo.v2/Makefile (generated, vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
startdb:
	@testdb/setup.sh start

stopdb:
	@testdb/setup.sh stop
vendor/gopkg.in/mgo.v2/README.md (generated, vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
The MongoDB driver for Go
-------------------------

Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.
vendor/gopkg.in/mgo.v2/auth.go (generated, vendored, new file, 467 lines; truncated below)
@ -0,0 +1,467 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"crypto/md5"
	"crypto/sha1"
	"encoding/hex"
	"errors"
	"fmt"
	"sync"

	"gopkg.in/mgo.v2/bson"
	"gopkg.in/mgo.v2/internal/scram"
)

type authCmd struct {
	Authenticate int

	Nonce string
	User  string
	Key   string
}

type startSaslCmd struct {
	StartSASL int `bson:"startSasl"`
}

type authResult struct {
	ErrMsg string
	Ok     bool
}

type getNonceCmd struct {
	GetNonce int
}

type getNonceResult struct {
	Nonce string
	Err   string "$err"
	Code  int
}

type logoutCmd struct {
	Logout int
}

type saslCmd struct {
	Start          int    `bson:"saslStart,omitempty"`
	Continue       int    `bson:"saslContinue,omitempty"`
	ConversationId int    `bson:"conversationId,omitempty"`
	Mechanism      string `bson:"mechanism,omitempty"`
	Payload        []byte
}

type saslResult struct {
	Ok    bool `bson:"ok"`
	NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?)
	Done  bool

	ConversationId int `bson:"conversationId"`
	Payload        []byte
	ErrMsg         string
}

type saslStepper interface {
	Step(serverData []byte) (clientData []byte, done bool, err error)
	Close()
}

func (socket *mongoSocket) getNonce() (nonce string, err error) {
	socket.Lock()
	for socket.cachedNonce == "" && socket.dead == nil {
		debugf("Socket %p to %s: waiting for nonce", socket, socket.addr)
		socket.gotNonce.Wait()
	}
	if socket.cachedNonce == "mongos" {
		socket.Unlock()
		return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth")
	}
	debugf("Socket %p to %s: got nonce", socket, socket.addr)
	nonce, err = socket.cachedNonce, socket.dead
	socket.cachedNonce = ""
	socket.Unlock()
	if err != nil {
		nonce = ""
	}
	return
}

func (socket *mongoSocket) resetNonce() {
	debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr)
	op := &queryOp{}
	op.query = &getNonceCmd{GetNonce: 1}
	op.collection = "admin.$cmd"
	op.limit = -1
	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
		if err != nil {
			socket.kill(errors.New("getNonce: "+err.Error()), true)
			return
		}
		result := &getNonceResult{}
		err = bson.Unmarshal(docData, &result)
		if err != nil {
			socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true)
			return
		}
		debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result)
		if result.Code == 13390 {
			// mongos doesn't yet support auth (see http://j.mp/mongos-auth)
			result.Nonce = "mongos"
		} else if result.Nonce == "" {
			var msg string
			if result.Err != "" {
				msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code)
			} else {
				msg = "Got an empty nonce"
			}
			socket.kill(errors.New(msg), true)
			return
		}
		socket.Lock()
		if socket.cachedNonce != "" {
			socket.Unlock()
			panic("resetNonce: nonce already cached")
		}
		socket.cachedNonce = result.Nonce
		socket.gotNonce.Signal()
		socket.Unlock()
	}
	err := socket.Query(op)
	if err != nil {
		socket.kill(errors.New("resetNonce: "+err.Error()), true)
	}
}

func (socket *mongoSocket) Login(cred Credential) error {
	socket.Lock()
	if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 {
		cred.Mechanism = "SCRAM-SHA-1"
	}
	for _, sockCred := range socket.creds {
		if sockCred == cred {
			debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username)
			socket.Unlock()
			return nil
		}
	}
	if socket.dropLogout(cred) {
		debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username)
		socket.creds = append(socket.creds, cred)
		socket.Unlock()
		return nil
	}
	socket.Unlock()

	debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username)

	var err error
	switch cred.Mechanism {
	case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501.
		err = socket.loginClassic(cred)
	case "PLAIN":
		err = socket.loginPlain(cred)
	case "MONGODB-X509":
		err = socket.loginX509(cred)
	default:
		// Try SASL for everything else, if it is available.
		err = socket.loginSASL(cred)
	}

	if err != nil {
		debugf("Socket %p to %s: login error: %s", socket, socket.addr, err)
	} else {
		debugf("Socket %p to %s: login successful", socket, socket.addr)
	}
	return err
}

func (socket *mongoSocket) loginClassic(cred Credential) error {
	// Note that this only works properly because this function is
	// synchronous, which means the nonce won't get reset while we're
	// using it and any other login requests will block waiting for a
	// new nonce provided in the defer call below.
	nonce, err := socket.getNonce()
	if err != nil {
		return err
	}
	defer socket.resetNonce()

	psum := md5.New()
	psum.Write([]byte(cred.Username + ":mongo:" + cred.Password))

	ksum := md5.New()
	ksum.Write([]byte(nonce + cred.Username))
	ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))

	key := hex.EncodeToString(ksum.Sum(nil))

	cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key}
	res := authResult{}
	return socket.loginRun(cred.Source, &cmd, &res, func() error {
		if !res.Ok {
			return errors.New(res.ErrMsg)
		}
		socket.Lock()
		socket.dropAuth(cred.Source)
		socket.creds = append(socket.creds, cred)
		socket.Unlock()
		return nil
	})
}

type authX509Cmd struct {
	Authenticate int
	User         string
	Mechanism    string
}

func (socket *mongoSocket) loginX509(cred Credential) error {
	cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"}
	res := authResult{}
	return socket.loginRun(cred.Source, &cmd, &res, func() error {
		if !res.Ok {
			return errors.New(res.ErrMsg)
		}
		socket.Lock()
		socket.dropAuth(cred.Source)
		socket.creds = append(socket.creds, cred)
		socket.Unlock()
		return nil
	})
}

func (socket *mongoSocket) loginPlain(cred Credential) error {
	cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)}
	res := authResult{}
	return socket.loginRun(cred.Source, &cmd, &res, func() error {
		if !res.Ok {
			return errors.New(res.ErrMsg)
		}
		socket.Lock()
		socket.dropAuth(cred.Source)
		socket.creds = append(socket.creds, cred)
		socket.Unlock()
		return nil
	})
}

func (socket *mongoSocket) loginSASL(cred Credential) error {
	var sasl saslStepper
	var err error
	if cred.Mechanism == "SCRAM-SHA-1" {
		// SCRAM is handled without external libraries.
		sasl = saslNewScram(cred)
	} else if len(cred.ServiceHost) > 0 {
		sasl, err = saslNew(cred, cred.ServiceHost)
	} else {
		sasl, err = saslNew(cred, socket.Server().Addr)
	}
	if err != nil {
		return err
	}
	defer sasl.Close()

	// The goal of this logic is to carry a locked socket until the
	// local SASL step confirms the auth is valid; the socket needs to be
	// locked so that concurrent action doesn't leave the socket in an
	// auth state that doesn't reflect the operations that took place.
	// As a simple case, imagine inverting login=>logout to logout=>login.
	//
	// The logic below works because the lock func isn't called concurrently.
	locked := false
	lock := func(b bool) {
		if locked != b {
			locked = b
			if b {
				socket.Lock()
			} else {
				socket.Unlock()
			}
		}
	}

	lock(true)
	defer lock(false)

	start := 1
	cmd := saslCmd{}
	res := saslResult{}
	for {
		payload, done, err := sasl.Step(res.Payload)
		if err != nil {
			return err
		}
		if done && res.Done {
			socket.dropAuth(cred.Source)
			socket.creds = append(socket.creds, cred)
			break
		}
		lock(false)

		cmd = saslCmd{
			Start:          start,
			Continue:       1 - start,
			ConversationId: res.ConversationId,
			Mechanism:      cred.Mechanism,
			Payload:        payload,
		}
		start = 0
		err = socket.loginRun(cred.Source, &cmd, &res, func() error {
			// See the comment on lock for why this is necessary.
			lock(true)
			if !res.Ok || res.NotOk {
				return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg)
			}
			return nil
		})
		if err != nil {
			return err
		}
		if done && res.Done {
			socket.dropAuth(cred.Source)
			socket.creds = append(socket.creds, cred)
			break
		}
	}

	return nil
}

func saslNewScram(cred Credential) *saslScram {
	credsum := md5.New()
	credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
	client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil)))
	return &saslScram{cred: cred, client: client}
}

type saslScram struct {
	cred   Credential
	client *scram.Client
}

func (s *saslScram) Close() {}

func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) {
	more := s.client.Step(serverData)
	return s.client.Out(), !more, s.client.Err()
}

func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
	var mutex sync.Mutex
	var replyErr error
	mutex.Lock()

	op := queryOp{}
	op.query = query
	op.collection = db + ".$cmd"
	op.limit = -1
	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
		defer mutex.Unlock()

		if err != nil {
			replyErr = err
			return
		}

		err = bson.Unmarshal(docData, result)
		if err != nil {
			replyErr = err
		} else {
			// Must handle this within the read loop for the socket, so
			// that concurrent login requests are properly ordered.
			replyErr = f()
		}
	}

	err := socket.Query(&op)
	if err != nil {
		return err
	}
	mutex.Lock() // Wait.
	return replyErr
}

func (socket *mongoSocket) Logout(db string) {
	socket.Lock()
	cred, found := socket.dropAuth(db)
	if found {
		debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db)
		socket.logout = append(socket.logout, cred)
	}
	socket.Unlock()
}

func (socket *mongoSocket) LogoutAll() {
	socket.Lock()
	if l := len(socket.creds); l > 0 {
		debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l)
		socket.logout = append(socket.logout, socket.creds...)
		socket.creds = socket.creds[0:0]
	}
	socket.Unlock()
}

func (socket *mongoSocket) flushLogout() (ops []interface{}) {
	socket.Lock()
	if l := len(socket.logout); l > 0 {
		debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l)
		for i := 0; i != l; i++ {
			op := queryOp{}
			op.query = &logoutCmd{1}
			op.collection = socket.logout[i].Source + ".$cmd"
			op.limit = -1
			ops = append(ops, &op)
		}
		socket.logout = socket.logout[0:0]
	}
	socket.Unlock()
	return
}

func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) {
	for i, sockCred := range socket.creds {
		if sockCred.Source == db {
			copy(socket.creds[i:], socket.creds[i+1:])
			socket.creds = socket.creds[:len(socket.creds)-1]
			return sockCred, true
		}
	}
	return cred, false
}

func (socket *mongoSocket) dropLogout(cred Credential) (found bool) {
	for i, sockCred := range socket.logout {
		if sockCred == cred {
			copy(socket.logout[i:], socket.logout[i+1:])
			socket.logout = socket.logout[:len(socket.logout)-1]
			return true
		}
	}
	return false
}
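The classic (MONGODB-CR) handshake in loginClassic above reduces to two MD5 digests; a standalone sketch of that key derivation, with a made-up nonce for illustration:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// mongoCRKey mirrors the digests computed in loginClassic above:
// key = md5hex(nonce + username + md5hex(username + ":mongo:" + password)).
func mongoCRKey(nonce, username, password string) string {
	psum := md5.New()
	psum.Write([]byte(username + ":mongo:" + password))

	ksum := md5.New()
	ksum.Write([]byte(nonce + username))
	ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))

	return hex.EncodeToString(ksum.Sum(nil))
}

func main() {
	// The nonce is normally served by getNonce; this one is illustrative.
	fmt.Println(mongoCRKey("2375531c32080ae8", "admin", "secret"))
}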
1180
vendor/gopkg.in/mgo.v2/auth_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
25
vendor/gopkg.in/mgo.v2/bson/LICENSE
generated
vendored
Normal file
@ -0,0 +1,25 @@
BSON library for Go

Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
705
vendor/gopkg.in/mgo.v2/bson/bson.go
generated
vendored
Normal file
@ -0,0 +1,705 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Package bson is an implementation of the BSON specification for Go:
//
//     http://bsonspec.org
//
// It was created as part of the mgo MongoDB driver for Go, but is standalone
// and may be used on its own without the driver.
package bson

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// --------------------------------------------------------------------------
// The public API.

// A value implementing the bson.Getter interface will have its GetBSON
// method called when the given value has to be marshalled, and the result
// of this method will be marshaled in place of the actual object.
//
// If GetBSON returns a non-nil error, the marshalling procedure
// will stop and error out with the provided value.
type Getter interface {
	GetBSON() (interface{}, error)
}

// A value implementing the bson.Setter interface will receive the BSON
// value via the SetBSON method during unmarshaling, and the object
// itself will not be changed as usual.
//
// If setting the value works, the method should return nil or alternatively
// bson.SetZero to set the respective field to its zero value (nil for
// pointer types). If SetBSON returns a value of type bson.TypeError, the
// BSON value will be omitted from a map or slice being decoded and the
// unmarshalling will continue. If it returns any other non-nil error, the
// unmarshalling procedure will stop and error out with the provided value.
//
// This interface is generally useful in pointer receivers, since the method
// will want to change the receiver. A type field that implements the Setter
// interface doesn't have to be a pointer, though.
//
// Unlike the usual behavior, unmarshalling onto a value that implements a
// Setter interface will NOT reset the value to its zero state. This allows
// the value to decide by itself how to be unmarshalled.
//
// For example:
//
//     type MyString string
//
//     func (s *MyString) SetBSON(raw bson.Raw) error {
//         return raw.Unmarshal(s)
//     }
//
type Setter interface {
	SetBSON(raw Raw) error
}

// SetZero may be returned from a SetBSON method to have the value set to
// its respective zero value. When used in pointer values, this will set the
// field to nil rather than to the pre-allocated value.
var SetZero = errors.New("set to zero")

// M is a convenient alias for a map[string]interface{} map, useful for
// dealing with BSON in a native way. For instance:
//
//     bson.M{"a": 1, "b": true}
//
// There's no special handling for this type in addition to what's done anyway
// for an equivalent map type. Elements in the map will be dumped in an
// undefined order. See also the bson.D type for an ordered alternative.
type M map[string]interface{}

// D represents a BSON document containing ordered elements. For example:
//
//     bson.D{{"a", 1}, {"b", true}}
//
// In some situations, such as when creating indexes for MongoDB, the order in
// which the elements are defined is important. If the order is not important,
// using a map is generally more comfortable. See bson.M and bson.RawD.
type D []DocElem

// DocElem is an element of the bson.D document representation.
type DocElem struct {
	Name  string
	Value interface{}
}

// Map returns a map out of the ordered element name/value pairs in d.
func (d D) Map() (m M) {
	m = make(M, len(d))
	for _, item := range d {
		m[item.Name] = item.Value
	}
	return m
}

// The Raw type represents raw unprocessed BSON documents and elements.
// Kind is the kind of element as defined per the BSON specification, and
// Data is the raw unprocessed data for the respective element.
// Using this type it is possible to unmarshal or marshal values partially.
//
// Relevant documentation:
//
//     http://bsonspec.org/#/specification
//
type Raw struct {
	Kind byte
	Data []byte
}

// RawD represents a BSON document containing raw unprocessed elements.
// This low-level representation may be useful when lazily processing
// documents of uncertain content, or when manipulating the raw content
// documents in general.
type RawD []RawDocElem

// See the RawD type.
type RawDocElem struct {
	Name  string
	Value Raw
}

// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
// long. MongoDB objects by default have such a property set in their "_id"
// property.
//
// http://www.mongodb.org/display/DOCS/Object+IDs
type ObjectId string

// ObjectIdHex returns an ObjectId from the provided hex representation.
// Calling this function with an invalid hex representation will
// cause a runtime panic. See the IsObjectIdHex function.
func ObjectIdHex(s string) ObjectId {
	d, err := hex.DecodeString(s)
	if err != nil || len(d) != 12 {
		panic(fmt.Sprintf("Invalid input to ObjectIdHex: %q", s))
	}
	return ObjectId(d)
}

// IsObjectIdHex returns whether s is a valid hex representation of
// an ObjectId. See the ObjectIdHex function.
func IsObjectIdHex(s string) bool {
	if len(s) != 24 {
		return false
	}
	_, err := hex.DecodeString(s)
	return err == nil
}

// objectIdCounter is atomically incremented when generating a new ObjectId
// using NewObjectId() function. It's used as a counter part of an id.
var objectIdCounter uint32 = 0

// machineId stores machine id generated once and used in subsequent calls
// to NewObjectId function.
var machineId = readMachineId()

// readMachineId generates machine id and puts it into the machineId global
// variable. If this function fails to get the hostname, it will cause
// a runtime error.
func readMachineId() []byte {
	var sum [3]byte
	id := sum[:]
	hostname, err1 := os.Hostname()
	if err1 != nil {
		_, err2 := io.ReadFull(rand.Reader, id)
		if err2 != nil {
			panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
		}
		return id
	}
	hw := md5.New()
	hw.Write([]byte(hostname))
	copy(id, hw.Sum(nil))
	return id
}

// NewObjectId returns a new unique ObjectId.
func NewObjectId() ObjectId {
	var b [12]byte
	// Timestamp, 4 bytes, big endian
	binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
	// Machine, first 3 bytes of md5(hostname)
	b[4] = machineId[0]
	b[5] = machineId[1]
	b[6] = machineId[2]
	// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
	pid := os.Getpid()
	b[7] = byte(pid >> 8)
	b[8] = byte(pid)
	// Increment, 3 bytes, big endian
	i := atomic.AddUint32(&objectIdCounter, 1)
	b[9] = byte(i >> 16)
	b[10] = byte(i >> 8)
	b[11] = byte(i)
	return ObjectId(b[:])
}

// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
// with the provided number of seconds from epoch UTC, and all other parts
// filled with zeroes. It's not safe to insert a document with an id generated
// by this method, it is useful only for queries to find documents with ids
// generated before or after the specified timestamp.
func NewObjectIdWithTime(t time.Time) ObjectId {
	var b [12]byte
	binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
	return ObjectId(string(b[:]))
}

// String returns a hex string representation of the id.
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
func (id ObjectId) String() string {
	return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
}

// Hex returns a hex representation of the ObjectId.
func (id ObjectId) Hex() string {
	return hex.EncodeToString([]byte(id))
}

// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
func (id ObjectId) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
}

var nullBytes = []byte("null")

// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
func (id *ObjectId) UnmarshalJSON(data []byte) error {
	if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
		*id = ""
		return nil
	}
	if len(data) != 26 || data[0] != '"' || data[25] != '"' {
		return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s", string(data)))
	}
	var buf [12]byte
	_, err := hex.Decode(buf[:], data[1:25])
	if err != nil {
		return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s (%s)", string(data), err))
	}
	*id = ObjectId(string(buf[:]))
	return nil
}

// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
func (id ObjectId) Valid() bool {
	return len(id) == 12
}

// byteSlice returns byte slice of id from start to end.
// Calling this function with an invalid id will cause a runtime panic.
func (id ObjectId) byteSlice(start, end int) []byte {
	if len(id) != 12 {
		panic(fmt.Sprintf("Invalid ObjectId: %q", string(id)))
	}
	return []byte(string(id)[start:end])
}

// Time returns the timestamp part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Time() time.Time {
	// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
	secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
	return time.Unix(secs, 0)
}

// Machine returns the 3-byte machine id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Machine() []byte {
	return id.byteSlice(4, 7)
}

// Pid returns the process id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Pid() uint16 {
	return binary.BigEndian.Uint16(id.byteSlice(7, 9))
}

// Counter returns the incrementing value part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Counter() int32 {
	b := id.byteSlice(9, 12)
	// Counter is stored as big-endian 3-byte value
	return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
}

// The Symbol type is similar to a string and is used in languages with a
// distinct symbol type.
type Symbol string

// Now returns the current time with millisecond precision. MongoDB stores
// timestamps with the same precision, so a Time returned from this method
// will not change after a roundtrip to the database. That's the only reason
// why this function exists. Using the time.Now function also works fine
// otherwise.
func Now() time.Time {
	return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
}

// MongoTimestamp is a special internal type used by MongoDB that for some
// strange reason has its own datatype defined in BSON.
type MongoTimestamp int64

type orderKey int64

// MaxKey is a special value that compares higher than all other possible BSON
// values in a MongoDB database.
var MaxKey = orderKey(1<<63 - 1)

// MinKey is a special value that compares lower than all other possible BSON
// values in a MongoDB database.
var MinKey = orderKey(-1 << 63)

type undefined struct{}

// Undefined represents the undefined BSON value.
var Undefined undefined

// Binary is a representation for non-standard binary values. Any kind should
// work, but the following are known as of this writing:
//
//     0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
//     0x01 - Function (!?)
//     0x02 - Obsolete generic.
//     0x03 - UUID
//     0x05 - MD5
//     0x80 - User defined.
//
type Binary struct {
	Kind byte
	Data []byte
}

// RegEx represents a regular expression. The Options field may contain
// individual characters defining the way in which the pattern should be
// applied, and must be sorted. Valid options as of this writing are 'i' for
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
// unicode. The value of the Options parameter is not verified before being
// marshaled into the BSON format.
type RegEx struct {
	Pattern string
	Options string
}

// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
// will be marshaled as a mapping from identifiers to values that may be
// used when evaluating the provided Code.
type JavaScript struct {
	Code  string
	Scope interface{}
}

// DBPointer refers to a document id in a namespace.
//
// This type is deprecated in the BSON specification and should not be used
// except for backwards compatibility with ancient applications.
type DBPointer struct {
	Namespace string
	Id        ObjectId
}

const initialBufferSize = 64

func handleErr(err *error) {
	if r := recover(); r != nil {
		if _, ok := r.(runtime.Error); ok {
			panic(r)
		} else if _, ok := r.(externalPanic); ok {
			panic(r)
		} else if s, ok := r.(string); ok {
			*err = errors.New(s)
		} else if e, ok := r.(error); ok {
			*err = e
		} else {
			panic(r)
		}
	}
}

// Marshal serializes the in value, which may be a map or a struct value.
// In the case of struct values, only exported fields will be serialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
//     "[<key>][,<flag1>[,<flag2>]]"
//
//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
//     omitempty  Only include the field if it's not set to the zero
//                value for the type or to empty slices or maps.
//
//     minsize    Marshal an int64 value as an int32, if that's feasible
//                while preserving the numeric value.
//
//     inline     Inline the field, which must be a struct or a map,
//                causing all of its fields or keys to be processed as if
//                they were part of the outer struct. For maps, keys must
//                not conflict with the bson keys of other struct fields.
//
// Some examples:
//
//     type T struct {
//         A bool
//         B int    "myb"
//         C string "myc,omitempty"
//         D string `bson:",omitempty" json:"jsonkey"`
//         E int64  ",minsize"
//         F int64  "myf,omitempty,minsize"
//     }
//
func Marshal(in interface{}) (out []byte, err error) {
	defer handleErr(&err)
	e := &encoder{make([]byte, 0, initialBufferSize)}
	e.addDoc(reflect.ValueOf(in))
	return e.out, nil
}

// Unmarshal deserializes data from in into the out value. The out value
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
//     "[<key>][,<flag1>[,<flag2>]]"
//
//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported during unmarshal (see the
// Marshal method for other flags):
//
//     inline     Inline the field, which must be a struct or a map.
//                Inlined structs are handled as if its fields were part
//                of the outer struct. An inlined map causes keys that do
//                not match any other struct field to be inserted in the
//                map rather than being discarded as usual.
//
// The target field or element types of out may not necessarily match
// the BSON values of the provided data. The following conversions are
// made automatically:
//
// - Numeric types are converted if at least the integer part of the
//   value would be preserved correctly
// - Bools are converted to numeric types as 1 or 0
// - Numeric types are converted to bools as true if not 0 or false otherwise
// - Binary and string BSON data is converted to a string, array or byte slice
//
// If the value would not fit the type and cannot be converted, it's
// silently skipped.
//
// Pointer values are initialized when necessary.
func Unmarshal(in []byte, out interface{}) (err error) {
	if raw, ok := out.(*Raw); ok {
		raw.Kind = 3
		raw.Data = in
		return nil
	}
	defer handleErr(&err)
	v := reflect.ValueOf(out)
	switch v.Kind() {
	case reflect.Ptr:
		fallthrough
	case reflect.Map:
		d := newDecoder(in)
		d.readDocTo(v)
	case reflect.Struct:
		return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
	default:
		return errors.New("Unmarshal needs a map or a pointer to a struct.")
	}
	return nil
}

// Unmarshal deserializes raw into the out value. If the out value type
// is not compatible with raw, a *bson.TypeError is returned.
//
// See the Unmarshal function documentation for more details on the
// unmarshalling process.
func (raw Raw) Unmarshal(out interface{}) (err error) {
	defer handleErr(&err)
	v := reflect.ValueOf(out)
	switch v.Kind() {
	case reflect.Ptr:
		v = v.Elem()
		fallthrough
	case reflect.Map:
		d := newDecoder(raw.Data)
		good := d.readElemTo(v, raw.Kind)
		if !good {
			return &TypeError{v.Type(), raw.Kind}
		}
	case reflect.Struct:
		return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
	default:
		return errors.New("Raw Unmarshal needs a map or a valid pointer.")
	}
	return nil
}

type TypeError struct {
	Type reflect.Type
	Kind byte
}

func (e *TypeError) Error() string {
	return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
}

// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes

type structInfo struct {
	FieldsMap  map[string]fieldInfo
	FieldsList []fieldInfo
	InlineMap  int
	Zero       reflect.Value
}

type fieldInfo struct {
	Key       string
	Num       int
	OmitEmpty bool
	MinSize   bool
	Inline    []int
}

var structMap = make(map[reflect.Type]*structInfo)
var structMapMutex sync.RWMutex

type externalPanic string

func (e externalPanic) String() string {
	return string(e)
}

func getStructInfo(st reflect.Type) (*structInfo, error) {
	structMapMutex.RLock()
	sinfo, found := structMap[st]
	structMapMutex.RUnlock()
	if found {
		return sinfo, nil
	}
	n := st.NumField()
	fieldsMap := make(map[string]fieldInfo)
	fieldsList := make([]fieldInfo, 0, n)
	inlineMap := -1
	for i := 0; i != n; i++ {
		field := st.Field(i)
		if field.PkgPath != "" {
			continue // Private field
		}

		info := fieldInfo{Num: i}

		tag := field.Tag.Get("bson")
		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
			tag = string(field.Tag)
		}
		if tag == "-" {
			continue
		}

		// XXX Drop this after a few releases.
		if s := strings.Index(tag, "/"); s >= 0 {
			recommend := tag[:s]
			for _, c := range tag[s+1:] {
				switch c {
				case 'c':
					recommend += ",omitempty"
				case 's':
					recommend += ",minsize"
				default:
					msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", string([]byte{uint8(c)}), tag, st)
					panic(externalPanic(msg))
				}
			}
			msg := fmt.Sprintf("Replace tag %q in field %s of type %s by %q", tag, field.Name, st, recommend)
			panic(externalPanic(msg))
		}

		inline := false
		fields := strings.Split(tag, ",")
		if len(fields) > 1 {
			for _, flag := range fields[1:] {
				switch flag {
				case "omitempty":
					info.OmitEmpty = true
				case "minsize":
					info.MinSize = true
				case "inline":
					inline = true
				default:
					msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
					panic(externalPanic(msg))
				}
			}
			tag = fields[0]
		}

		if inline {
			switch field.Type.Kind() {
			case reflect.Map:
				if inlineMap >= 0 {
					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
				}
				if field.Type.Key() != reflect.TypeOf("") {
					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
				}
				inlineMap = info.Num
			case reflect.Struct:
				sinfo, err := getStructInfo(field.Type)
				if err != nil {
					return nil, err
				}
				for _, finfo := range sinfo.FieldsList {
					if _, found := fieldsMap[finfo.Key]; found {
						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
						return nil, errors.New(msg)
					}
					if finfo.Inline == nil {
						finfo.Inline = []int{i, finfo.Num}
					} else {
						finfo.Inline = append([]int{i}, finfo.Inline...)
					}
					fieldsMap[finfo.Key] = finfo
					fieldsList = append(fieldsList, finfo)
				}
			default:
				panic("Option ,inline needs a struct value or map field")
			}
			continue
		}

		if tag != "" {
			info.Key = tag
		} else {
			info.Key = strings.ToLower(field.Name)
		}

		if _, found = fieldsMap[info.Key]; found {
			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
			return nil, errors.New(msg)
		}

		fieldsList = append(fieldsList, info)
		fieldsMap[info.Key] = info
	}
	sinfo = &structInfo{
		fieldsMap,
		fieldsList,
		inlineMap,
		reflect.New(st).Elem(),
	}
	structMapMutex.Lock()
	structMap[st] = sinfo
	structMapMutex.Unlock()
	return sinfo, nil
}
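A small round-trip sketch of the Marshal/Unmarshal API documented above; the Person type and its fields are illustrative only:

package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

// Person exercises the tag flags documented on Marshal: a renamed key
// and an omitempty field.
type Person struct {
	Name  string `bson:"name"`
	Email string `bson:"email,omitempty"`
}

func main() {
	data, err := bson.Marshal(Person{Name: "Gustavo"})
	if err != nil {
		panic(err)
	}

	var out Person
	if err := bson.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Name:Gustavo Email:}
}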
1605
vendor/gopkg.in/mgo.v2/bson/bson_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
825
vendor/gopkg.in/mgo.v2/bson/decode.go
generated
vendored
Normal file
@ -0,0 +1,825 @@
|
||||
// BSON library for Go
|
||||
//
|
||||
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||
//
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||
// list of conditions and the following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// gobson - BSON library for Go.
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
in []byte
|
||||
i int
|
||||
docType reflect.Type
|
||||
}
|
||||
|
||||
var typeM = reflect.TypeOf(M{})
|
||||
|
||||
func newDecoder(in []byte) *decoder {
|
||||
return &decoder{in, 0, typeM}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Some helper functions.
|
||||
|
||||
func corrupted() {
|
||||
panic("Document is corrupted")
|
||||
}
|
||||
|
||||
func settableValueOf(i interface{}) reflect.Value {
|
||||
v := reflect.ValueOf(i)
|
||||
sv := reflect.New(v.Type()).Elem()
|
||||
sv.Set(v)
|
||||
return sv
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Unmarshaling of documents.
|
||||
|
||||
const (
|
||||
setterUnknown = iota
|
||||
setterNone
|
||||
setterType
|
||||
setterAddr
|
||||
)
|
||||
|
||||
var setterStyles map[reflect.Type]int
|
||||
var setterIface reflect.Type
|
||||
var setterMutex sync.RWMutex
|
||||
|
||||
func init() {
|
||||
var iface Setter
|
||||
setterIface = reflect.TypeOf(&iface).Elem()
|
||||
setterStyles = make(map[reflect.Type]int)
|
||||
}
|
||||
|
||||
func setterStyle(outt reflect.Type) int {
|
||||
setterMutex.RLock()
|
||||
style := setterStyles[outt]
|
||||
setterMutex.RUnlock()
|
||||
if style == setterUnknown {
|
||||
setterMutex.Lock()
|
||||
defer setterMutex.Unlock()
|
||||
if outt.Implements(setterIface) {
|
||||
setterStyles[outt] = setterType
|
||||
} else if reflect.PtrTo(outt).Implements(setterIface) {
|
||||
setterStyles[outt] = setterAddr
|
||||
} else {
|
||||
setterStyles[outt] = setterNone
|
||||
}
|
||||
style = setterStyles[outt]
|
||||
}
|
||||
return style
|
||||
}
|
||||
|
||||
func getSetter(outt reflect.Type, out reflect.Value) Setter {
|
||||
style := setterStyle(outt)
|
||||
if style == setterNone {
|
||||
return nil
|
||||
}
|
||||
if style == setterAddr {
|
||||
if !out.CanAddr() {
|
||||
return nil
|
||||
}
|
||||
out = out.Addr()
|
||||
} else if outt.Kind() == reflect.Ptr && out.IsNil() {
|
||||
out.Set(reflect.New(outt.Elem()))
|
||||
}
|
||||
return out.Interface().(Setter)
|
||||
}
|
||||
|
||||
func clearMap(m reflect.Value) {
|
||||
var none reflect.Value
|
||||
for _, k := range m.MapKeys() {
|
||||
m.SetMapIndex(k, none)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readDocTo(out reflect.Value) {
|
||||
var elemType reflect.Type
|
||||
outt := out.Type()
|
||||
outk := outt.Kind()
|
||||
|
||||
for {
|
||||
if outk == reflect.Ptr && out.IsNil() {
|
||||
out.Set(reflect.New(outt.Elem()))
|
||||
}
|
||||
if setter := getSetter(outt, out); setter != nil {
|
||||
var raw Raw
|
||||
d.readDocTo(reflect.ValueOf(&raw))
|
||||
err := setter.SetBSON(raw)
|
||||
if _, ok := err.(*TypeError); err != nil && !ok {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if outk == reflect.Ptr {
|
||||
out = out.Elem()
|
||||
outt = out.Type()
|
||||
outk = out.Kind()
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
var fieldsMap map[string]fieldInfo
|
||||
var inlineMap reflect.Value
|
||||
start := d.i
|
||||
|
||||
origout := out
|
||||
if outk == reflect.Interface {
|
||||
if d.docType.Kind() == reflect.Map {
|
||||
mv := reflect.MakeMap(d.docType)
|
||||
out.Set(mv)
|
||||
out = mv
|
||||
} else {
|
||||
dv := reflect.New(d.docType).Elem()
|
||||
out.Set(dv)
|
||||
out = dv
|
||||
}
|
||||
outt = out.Type()
|
||||
outk = outt.Kind()
|
||||
}
|
||||
|
||||
docType := d.docType
|
||||
keyType := typeString
|
||||
convertKey := false
|
||||
switch outk {
|
||||
case reflect.Map:
|
||||
keyType = outt.Key()
|
||||
if keyType.Kind() != reflect.String {
|
||||
panic("BSON map must have string keys. Got: " + outt.String())
|
||||
}
|
||||
if keyType != typeString {
|
||||
convertKey = true
|
||||
}
|
||||
elemType = outt.Elem()
|
||||
if elemType == typeIface {
|
||||
d.docType = outt
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(out.Type()))
|
||||
} else if out.Len() > 0 {
|
||||
clearMap(out)
|
||||
}
|
||||
case reflect.Struct:
|
||||
if outt != typeRaw {
|
||||
sinfo, err := getStructInfo(out.Type())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fieldsMap = sinfo.FieldsMap
|
||||
out.Set(sinfo.Zero)
|
||||
if sinfo.InlineMap != -1 {
|
||||
inlineMap = out.Field(sinfo.InlineMap)
|
||||
if !inlineMap.IsNil() && inlineMap.Len() > 0 {
|
||||
clearMap(inlineMap)
|
||||
}
|
||||
elemType = inlineMap.Type().Elem()
|
||||
if elemType == typeIface {
|
||||
d.docType = inlineMap.Type()
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
switch outt.Elem() {
|
||||
case typeDocElem:
|
||||
origout.Set(d.readDocElems(outt))
|
||||
return
|
||||
case typeRawDocElem:
|
||||
origout.Set(d.readRawDocElems(outt))
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
panic("Unsupported document type for unmarshalling: " + out.Type().String())
|
||||
}
|
||||
|
||||
end := int(d.readInt32())
|
||||
end += d.i - 4
|
||||
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
|
||||
corrupted()
|
||||
}
|
||||
for d.in[d.i] != '\x00' {
|
||||
kind := d.readByte()
|
||||
name := d.readCStr()
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
|
||||
switch outk {
|
||||
case reflect.Map:
|
||||
e := reflect.New(elemType).Elem()
|
||||
if d.readElemTo(e, kind) {
|
||||
k := reflect.ValueOf(name)
|
||||
if convertKey {
|
||||
k = k.Convert(keyType)
|
||||
}
|
||||
out.SetMapIndex(k, e)
|
||||
}
|
||||
case reflect.Struct:
|
||||
if outt == typeRaw {
|
||||
d.dropElem(kind)
|
||||
} else {
|
||||
if info, ok := fieldsMap[name]; ok {
|
||||
if info.Inline == nil {
|
||||
d.readElemTo(out.Field(info.Num), kind)
|
||||
} else {
|
||||
d.readElemTo(out.FieldByIndex(info.Inline), kind)
|
||||
}
|
||||
} else if inlineMap.IsValid() {
|
||||
if inlineMap.IsNil() {
|
||||
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
|
||||
}
|
||||
e := reflect.New(elemType).Elem()
|
||||
if d.readElemTo(e, kind) {
|
||||
inlineMap.SetMapIndex(reflect.ValueOf(name), e)
|
||||
}
|
||||
} else {
|
||||
d.dropElem(kind)
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
}
|
||||
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
}
|
||||
d.i++ // '\x00'
|
||||
if d.i != end {
|
||||
corrupted()
|
||||
}
|
||||
d.docType = docType
|
||||
|
||||
if outt == typeRaw {
|
||||
out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readArrayDocTo(out reflect.Value) {
|
||||
end := int(d.readInt32())
|
||||
end += d.i - 4
|
||||
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
|
||||
corrupted()
|
||||
}
|
||||
i := 0
|
||||
l := out.Len()
|
||||
for d.in[d.i] != '\x00' {
|
||||
if i >= l {
|
||||
panic("Length mismatch on array field")
|
||||
}
|
||||
kind := d.readByte()
|
||||
for d.i < end && d.in[d.i] != '\x00' {
|
||||
d.i++
|
||||
}
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
d.i++
|
||||
d.readElemTo(out.Index(i), kind)
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i != l {
|
||||
panic("Length mismatch on array field")
|
||||
}
|
||||
d.i++ // '\x00'
|
||||
if d.i != end {
|
||||
corrupted()
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
|
||||
tmp := make([]reflect.Value, 0, 8)
|
||||
elemType := t.Elem()
|
||||
|
||||
end := int(d.readInt32())
|
||||
end += d.i - 4
|
||||
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
|
||||
corrupted()
|
||||
}
|
||||
for d.in[d.i] != '\x00' {
|
||||
kind := d.readByte()
|
||||
for d.i < end && d.in[d.i] != '\x00' {
|
||||
d.i++
|
||||
}
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
d.i++
|
||||
e := reflect.New(elemType).Elem()
|
||||
if d.readElemTo(e, kind) {
|
||||
tmp = append(tmp, e)
|
||||
}
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
}
|
||||
d.i++ // '\x00'
|
||||
if d.i != end {
|
||||
corrupted()
|
||||
}
|
||||
|
||||
n := len(tmp)
|
||||
slice := reflect.MakeSlice(t, n, n)
|
||||
for i := 0; i != n; i++ {
|
||||
slice.Index(i).Set(tmp[i])
|
||||
}
|
||||
return slice.Interface()
|
||||
}
|
||||
|
||||
var typeSlice = reflect.TypeOf([]interface{}{})
|
||||
var typeIface = typeSlice.Elem()
|
||||
|
||||
func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
|
||||
docType := d.docType
|
||||
d.docType = typ
|
||||
slice := make([]DocElem, 0, 8)
|
||||
d.readDocWith(func(kind byte, name string) {
|
||||
e := DocElem{Name: name}
|
||||
v := reflect.ValueOf(&e.Value)
|
||||
if d.readElemTo(v.Elem(), kind) {
|
||||
slice = append(slice, e)
|
||||
}
|
||||
})
|
||||
slicev := reflect.New(typ).Elem()
|
||||
slicev.Set(reflect.ValueOf(slice))
|
||||
d.docType = docType
|
||||
return slicev
|
||||
}
|
||||
|
||||
func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
|
||||
docType := d.docType
|
||||
d.docType = typ
|
||||
slice := make([]RawDocElem, 0, 8)
|
||||
d.readDocWith(func(kind byte, name string) {
|
||||
e := RawDocElem{Name: name}
|
||||
v := reflect.ValueOf(&e.Value)
|
||||
if d.readElemTo(v.Elem(), kind) {
|
||||
slice = append(slice, e)
|
||||
}
|
||||
})
|
||||
slicev := reflect.New(typ).Elem()
|
||||
slicev.Set(reflect.ValueOf(slice))
|
||||
d.docType = docType
|
||||
return slicev
|
||||
}
|
||||
|
||||
func (d *decoder) readDocWith(f func(kind byte, name string)) {
|
||||
end := int(d.readInt32())
|
||||
end += d.i - 4
|
||||
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
|
||||
corrupted()
|
||||
}
|
||||
for d.in[d.i] != '\x00' {
|
||||
kind := d.readByte()
|
||||
name := d.readCStr()
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
f(kind, name)
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
}
|
||||
d.i++ // '\x00'
|
||||
if d.i != end {
|
||||
corrupted()
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
// Unmarshaling of individual elements within a document.

var blackHole = settableValueOf(struct{}{})

func (d *decoder) dropElem(kind byte) {
	d.readElemTo(blackHole, kind)
}

// Attempt to decode an element from the document and put it into out.
// If the types are not compatible, the returned ok value will be
// false and out will be unchanged.
func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {

	start := d.i

	if kind == '\x03' {
		// Delegate unmarshaling of documents.
		outt := out.Type()
		outk := out.Kind()
		switch outk {
		case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
			d.readDocTo(out)
			return true
		}
		if setterStyle(outt) != setterNone {
			d.readDocTo(out)
			return true
		}
		if outk == reflect.Slice {
			switch outt.Elem() {
			case typeDocElem:
				out.Set(d.readDocElems(outt))
			case typeRawDocElem:
				out.Set(d.readRawDocElems(outt))
			}
			return true
		}
		d.readDocTo(blackHole)
		return true
	}

	var in interface{}

	switch kind {
	case 0x01: // Float64
		in = d.readFloat64()
	case 0x02: // UTF-8 string
		in = d.readStr()
	case 0x03: // Document
		panic("Can't happen. Handled above.")
	case 0x04: // Array
		outt := out.Type()
		if setterStyle(outt) != setterNone {
			// Skip the value so its data is handed to the setter below.
			d.dropElem(kind)
			break
		}
		for outt.Kind() == reflect.Ptr {
			outt = outt.Elem()
		}
		switch outt.Kind() {
		case reflect.Array:
			d.readArrayDocTo(out)
			return true
		case reflect.Slice:
			in = d.readSliceDoc(outt)
		default:
			in = d.readSliceDoc(typeSlice)
		}
	case 0x05: // Binary
		b := d.readBinary()
		if b.Kind == 0x00 || b.Kind == 0x02 {
			in = b.Data
		} else {
			in = b
		}
	case 0x06: // Undefined (obsolete, but still seen in the wild)
		in = Undefined
	case 0x07: // ObjectId
		in = ObjectId(d.readBytes(12))
	case 0x08: // Bool
		in = d.readBool()
	case 0x09: // Timestamp
		// MongoDB handles timestamps as milliseconds.
		i := d.readInt64()
		if i == -62135596800000 {
			in = time.Time{} // In UTC for convenience.
		} else {
			in = time.Unix(i/1e3, i%1e3*1e6)
		}
	case 0x0A: // Nil
		in = nil
	case 0x0B: // RegEx
		in = d.readRegEx()
	case 0x0C:
		in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
	case 0x0D: // JavaScript without scope
		in = JavaScript{Code: d.readStr()}
	case 0x0E: // Symbol
		in = Symbol(d.readStr())
	case 0x0F: // JavaScript with scope
		d.i += 4 // Skip length
		js := JavaScript{d.readStr(), make(M)}
		d.readDocTo(reflect.ValueOf(js.Scope))
		in = js
	case 0x10: // Int32
		in = int(d.readInt32())
	case 0x11: // Mongo-specific timestamp
		in = MongoTimestamp(d.readInt64())
	case 0x12: // Int64
		in = d.readInt64()
	case 0x7F: // Max key
		in = MaxKey
	case 0xFF: // Min key
		in = MinKey
	default:
		panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
	}

	outt := out.Type()

	if outt == typeRaw {
		out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
		return true
	}

	if setter := getSetter(outt, out); setter != nil {
		err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
		if err == SetZero {
			out.Set(reflect.Zero(outt))
			return true
		}
		if err == nil {
			return true
		}
		if _, ok := err.(*TypeError); !ok {
			panic(err)
		}
		return false
	}

	if in == nil {
		out.Set(reflect.Zero(outt))
		return true
	}

	outk := outt.Kind()

	// Dereference and initialize pointer if necessary.
	first := true
	for outk == reflect.Ptr {
		if !out.IsNil() {
			out = out.Elem()
		} else {
			elem := reflect.New(outt.Elem())
			if first {
				// Only set if value is compatible.
				first = false
				defer func(out, elem reflect.Value) {
					if good {
						out.Set(elem)
					}
				}(out, elem)
			} else {
				out.Set(elem)
			}
			out = elem
		}
		outt = out.Type()
		outk = outt.Kind()
	}

	inv := reflect.ValueOf(in)
	if outt == inv.Type() {
		out.Set(inv)
		return true
	}

	switch outk {
	case reflect.Interface:
		out.Set(inv)
		return true
	case reflect.String:
		switch inv.Kind() {
		case reflect.String:
			out.SetString(inv.String())
			return true
		case reflect.Slice:
			if b, ok := in.([]byte); ok {
				out.SetString(string(b))
				return true
			}
		case reflect.Int, reflect.Int64:
			if outt == typeJSONNumber {
				out.SetString(strconv.FormatInt(inv.Int(), 10))
				return true
			}
		case reflect.Float64:
			if outt == typeJSONNumber {
				out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
				return true
			}
		}
	case reflect.Slice, reflect.Array:
		// Remember, array (0x04) slices are built with the correct
		// element type. If we are here, must be a cross BSON kind
		// conversion (e.g. 0x05 unmarshalling on string).
		if outt.Elem().Kind() != reflect.Uint8 {
			break
		}
		switch inv.Kind() {
		case reflect.String:
			slice := []byte(inv.String())
			out.Set(reflect.ValueOf(slice))
			return true
		case reflect.Slice:
			switch outt.Kind() {
			case reflect.Array:
				reflect.Copy(out, inv)
			case reflect.Slice:
				out.SetBytes(inv.Bytes())
			}
			return true
		}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch inv.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			out.SetInt(inv.Int())
			return true
		case reflect.Float32, reflect.Float64:
			out.SetInt(int64(inv.Float()))
			return true
		case reflect.Bool:
			if inv.Bool() {
				out.SetInt(1)
			} else {
				out.SetInt(0)
			}
			return true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			panic("can't happen: no uint types in BSON (!?)")
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		switch inv.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			out.SetUint(uint64(inv.Int()))
			return true
		case reflect.Float32, reflect.Float64:
			out.SetUint(uint64(inv.Float()))
			return true
		case reflect.Bool:
			if inv.Bool() {
				out.SetUint(1)
			} else {
				out.SetUint(0)
			}
			return true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			panic("Can't happen. No uint types in BSON.")
		}
	case reflect.Float32, reflect.Float64:
		switch inv.Kind() {
		case reflect.Float32, reflect.Float64:
			out.SetFloat(inv.Float())
			return true
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			out.SetFloat(float64(inv.Int()))
			return true
		case reflect.Bool:
			if inv.Bool() {
				out.SetFloat(1)
			} else {
				out.SetFloat(0)
			}
			return true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			panic("Can't happen. No uint types in BSON.")
		}
	case reflect.Bool:
		switch inv.Kind() {
		case reflect.Bool:
			out.SetBool(inv.Bool())
			return true
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			out.SetBool(inv.Int() != 0)
			return true
		case reflect.Float32, reflect.Float64:
			out.SetBool(inv.Float() != 0)
			return true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			panic("Can't happen. No uint types in BSON.")
		}
	case reflect.Struct:
		if outt == typeURL && inv.Kind() == reflect.String {
			u, err := url.Parse(inv.String())
			if err != nil {
				panic(err)
			}
			out.Set(reflect.ValueOf(u).Elem())
			return true
		}
	}

	return false
}

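readElemTo is what makes the loose typing visible to callers: a BSON double can land in a Go int field, a bool in a float field, and so on. A minimal round-trip sketch of that cross-kind conversion, assuming this vendored package is importable as gopkg.in/mgo.v2/bson:

package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

type T struct {
	N int // filled from a BSON double via readElemTo's cross-kind conversion
}

func main() {
	data, err := bson.Marshal(bson.M{"n": 3.0}) // encodes as BSON 0x01 (float64)
	if err != nil {
		panic(err)
	}
	var t T
	if err := bson.Unmarshal(data, &t); err != nil {
		panic(err)
	}
	fmt.Println(t.N) // 3
}
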
// --------------------------------------------------------------------------
// Parsers of basic types.

func (d *decoder) readRegEx() RegEx {
	re := RegEx{}
	re.Pattern = d.readCStr()
	re.Options = d.readCStr()
	return re
}

func (d *decoder) readBinary() Binary {
	l := d.readInt32()
	b := Binary{}
	b.Kind = d.readByte()
	b.Data = d.readBytes(l)
	if b.Kind == 0x02 && len(b.Data) >= 4 {
		// Weird obsolete format with redundant length.
		b.Data = b.Data[4:]
	}
	return b
}

func (d *decoder) readStr() string {
	l := d.readInt32()
	b := d.readBytes(l - 1)
	if d.readByte() != '\x00' {
		corrupted()
	}
	return string(b)
}

func (d *decoder) readCStr() string {
	start := d.i
	end := start
	l := len(d.in)
	for ; end != l; end++ {
		if d.in[end] == '\x00' {
			break
		}
	}
	d.i = end + 1
	if d.i > l {
		corrupted()
	}
	return string(d.in[start:end])
}

func (d *decoder) readBool() bool {
	if d.readByte() == 1 {
		return true
	}
	return false
}

func (d *decoder) readFloat64() float64 {
	return math.Float64frombits(uint64(d.readInt64()))
}

func (d *decoder) readInt32() int32 {
	b := d.readBytes(4)
	return int32((uint32(b[0]) << 0) |
		(uint32(b[1]) << 8) |
		(uint32(b[2]) << 16) |
		(uint32(b[3]) << 24))
}

func (d *decoder) readInt64() int64 {
	b := d.readBytes(8)
	return int64((uint64(b[0]) << 0) |
		(uint64(b[1]) << 8) |
		(uint64(b[2]) << 16) |
		(uint64(b[3]) << 24) |
		(uint64(b[4]) << 32) |
		(uint64(b[5]) << 40) |
		(uint64(b[6]) << 48) |
		(uint64(b[7]) << 56))
}

func (d *decoder) readByte() byte {
	i := d.i
	d.i++
	if d.i > len(d.in) {
		corrupted()
	}
	return d.in[i]
}

func (d *decoder) readBytes(length int32) []byte {
	start := d.i
	d.i += int(length)
	if d.i > len(d.in) {
		corrupted()
	}
	return d.in[start : start+int(length)]
}
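The hand-rolled shift-and-or in readInt32/readInt64 is exactly little-endian decoding; the standard library expresses the same thing more compactly. A quick equivalence check:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x0C, 0x00, 0x00, 0x00}
	// Hand-rolled, as in readInt32:
	manual := int32(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
	// Equivalent stdlib call:
	stdlib := int32(binary.LittleEndian.Uint32(b))
	fmt.Println(manual, stdlib) // 12 12
}
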
503	vendor/gopkg.in/mgo.v2/bson/encode.go	generated vendored Normal file
@ -0,0 +1,503 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// gobson - BSON library for Go.

package bson

import (
	"encoding/json"
	"fmt"
	"math"
	"net/url"
	"reflect"
	"strconv"
	"time"
)

// --------------------------------------------------------------------------
// Some internal infrastructure.

var (
	typeBinary         = reflect.TypeOf(Binary{})
	typeObjectId       = reflect.TypeOf(ObjectId(""))
	typeDBPointer      = reflect.TypeOf(DBPointer{"", ObjectId("")})
	typeSymbol         = reflect.TypeOf(Symbol(""))
	typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
	typeOrderKey       = reflect.TypeOf(MinKey)
	typeDocElem        = reflect.TypeOf(DocElem{})
	typeRawDocElem     = reflect.TypeOf(RawDocElem{})
	typeRaw            = reflect.TypeOf(Raw{})
	typeURL            = reflect.TypeOf(url.URL{})
	typeTime           = reflect.TypeOf(time.Time{})
	typeString         = reflect.TypeOf("")
	typeJSONNumber     = reflect.TypeOf(json.Number(""))
)

const itoaCacheSize = 32

var itoaCache []string

func init() {
	itoaCache = make([]string, itoaCacheSize)
	for i := 0; i != itoaCacheSize; i++ {
		itoaCache[i] = strconv.Itoa(i)
	}
}

func itoa(i int) string {
	if i < itoaCacheSize {
		return itoaCache[i]
	}
	return strconv.Itoa(i)
}

// --------------------------------------------------------------------------
// Marshaling of the document value itself.

type encoder struct {
	out []byte
}

func (e *encoder) addDoc(v reflect.Value) {
	for {
		if vi, ok := v.Interface().(Getter); ok {
			getv, err := vi.GetBSON()
			if err != nil {
				panic(err)
			}
			v = reflect.ValueOf(getv)
			continue
		}
		if v.Kind() == reflect.Ptr {
			v = v.Elem()
			continue
		}
		break
	}

	if v.Type() == typeRaw {
		raw := v.Interface().(Raw)
		if raw.Kind != 0x03 && raw.Kind != 0x00 {
			panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
		}
		e.addBytes(raw.Data...)
		return
	}

	start := e.reserveInt32()

	switch v.Kind() {
	case reflect.Map:
		e.addMap(v)
	case reflect.Struct:
		e.addStruct(v)
	case reflect.Array, reflect.Slice:
		e.addSlice(v)
	default:
		panic("Can't marshal " + v.Type().String() + " as a BSON document")
	}

	e.addBytes(0)
	e.setInt32(start, int32(len(e.out)-start))
}

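addDoc cannot know a document's byte length up front, so it reserves four bytes, writes the body, and patches the length in place afterwards. A minimal standalone sketch of that reserve-then-patch pattern:

package main

import "fmt"

type buf struct{ out []byte }

func (b *buf) reserveInt32() int {
	pos := len(b.out)
	b.out = append(b.out, 0, 0, 0, 0) // placeholder for the length
	return pos
}

func (b *buf) setInt32(pos int, v int32) {
	b.out[pos+0] = byte(v)
	b.out[pos+1] = byte(v >> 8)
	b.out[pos+2] = byte(v >> 16)
	b.out[pos+3] = byte(v >> 24)
}

func main() {
	var b buf
	start := b.reserveInt32()
	b.out = append(b.out, 0x10, 'a', 0x00, 1, 0, 0, 0) // body: {"a": int32(1)}
	b.out = append(b.out, 0)                           // terminator
	b.setInt32(start, int32(len(b.out)-start))         // patch total length
	fmt.Println(b.out[0]) // 12
}
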
func (e *encoder) addMap(v reflect.Value) {
	for _, k := range v.MapKeys() {
		e.addElem(k.String(), v.MapIndex(k), false)
	}
}

func (e *encoder) addStruct(v reflect.Value) {
	sinfo, err := getStructInfo(v.Type())
	if err != nil {
		panic(err)
	}
	var value reflect.Value
	if sinfo.InlineMap >= 0 {
		m := v.Field(sinfo.InlineMap)
		if m.Len() > 0 {
			for _, k := range m.MapKeys() {
				ks := k.String()
				if _, found := sinfo.FieldsMap[ks]; found {
					panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
				}
				e.addElem(ks, m.MapIndex(k), false)
			}
		}
	}
	for _, info := range sinfo.FieldsList {
		if info.Inline == nil {
			value = v.Field(info.Num)
		} else {
			value = v.FieldByIndex(info.Inline)
		}
		if info.OmitEmpty && isZero(value) {
			continue
		}
		e.addElem(info.Key, value, info.MinSize)
	}
}

func isZero(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.String:
		return len(v.String()) == 0
	case reflect.Ptr, reflect.Interface:
		return v.IsNil()
	case reflect.Slice:
		return v.Len() == 0
	case reflect.Map:
		return v.Len() == 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Struct:
		vt := v.Type()
		if vt == typeTime {
			return v.Interface().(time.Time).IsZero()
		}
		for i := 0; i < v.NumField(); i++ {
			if vt.Field(i).PkgPath != "" {
				continue // Private field
			}
			if !isZero(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}

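isZero backs the ",omitempty" struct-tag option used by addStruct: zero-valued fields are simply skipped from the output document. A quick sketch, assuming the package is importable as gopkg.in/mgo.v2/bson:

package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

type Event struct {
	Name string `bson:"name"`
	Note string `bson:"note,omitempty"` // dropped when empty
}

func main() {
	data, err := bson.Marshal(Event{Name: "start"})
	if err != nil {
		panic(err)
	}
	var m bson.M
	if err := bson.Unmarshal(data, &m); err != nil {
		panic(err)
	}
	_, hasNote := m["note"]
	fmt.Println(hasNote) // false: the zero-valued field was omitted
}
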
func (e *encoder) addSlice(v reflect.Value) {
	vi := v.Interface()
	if d, ok := vi.(D); ok {
		for _, elem := range d {
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	if d, ok := vi.(RawD); ok {
		for _, elem := range d {
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	l := v.Len()
	et := v.Type().Elem()
	if et == typeDocElem {
		for i := 0; i < l; i++ {
			elem := v.Index(i).Interface().(DocElem)
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	if et == typeRawDocElem {
		for i := 0; i < l; i++ {
			elem := v.Index(i).Interface().(RawDocElem)
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	for i := 0; i < l; i++ {
		e.addElem(itoa(i), v.Index(i), false)
	}
}

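The D and DocElem branches above are what let callers control element order: Go maps iterate in random order, but some server commands (index specs, for instance) care about it. A small sketch using the ordered form:

package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

func main() {
	// bson.D preserves element order, unlike bson.M.
	doc := bson.D{{Name: "unique", Value: true}, {Name: "background", Value: true}}
	data, err := bson.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data) > 0) // elements are encoded exactly in this order
}
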
// --------------------------------------------------------------------------
// Marshaling of elements in a document.

func (e *encoder) addElemName(kind byte, name string) {
	e.addBytes(kind)
	e.addBytes([]byte(name)...)
	e.addBytes(0)
}

func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {

	if !v.IsValid() {
		e.addElemName('\x0A', name)
		return
	}

	if getter, ok := v.Interface().(Getter); ok {
		getv, err := getter.GetBSON()
		if err != nil {
			panic(err)
		}
		e.addElem(name, reflect.ValueOf(getv), minSize)
		return
	}

	switch v.Kind() {

	case reflect.Interface:
		e.addElem(name, v.Elem(), minSize)

	case reflect.Ptr:
		e.addElem(name, v.Elem(), minSize)

	case reflect.String:
		s := v.String()
		switch v.Type() {
		case typeObjectId:
			if len(s) != 12 {
				panic("ObjectIDs must be exactly 12 bytes long (got " +
					strconv.Itoa(len(s)) + ")")
			}
			e.addElemName('\x07', name)
			e.addBytes([]byte(s)...)
		case typeSymbol:
			e.addElemName('\x0E', name)
			e.addStr(s)
		case typeJSONNumber:
			n := v.Interface().(json.Number)
			if i, err := n.Int64(); err == nil {
				e.addElemName('\x12', name)
				e.addInt64(i)
			} else if f, err := n.Float64(); err == nil {
				e.addElemName('\x01', name)
				e.addFloat64(f)
			} else {
				panic("failed to convert json.Number to a number: " + s)
			}
		default:
			e.addElemName('\x02', name)
			e.addStr(s)
		}

	case reflect.Float32, reflect.Float64:
		e.addElemName('\x01', name)
		e.addFloat64(v.Float())

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		u := v.Uint()
		if int64(u) < 0 {
			panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
		} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
			e.addElemName('\x10', name)
			e.addInt32(int32(u))
		} else {
			e.addElemName('\x12', name)
			e.addInt64(int64(u))
		}

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch v.Type() {
		case typeMongoTimestamp:
			e.addElemName('\x11', name)
			e.addInt64(v.Int())

		case typeOrderKey:
			if v.Int() == int64(MaxKey) {
				e.addElemName('\x7F', name)
			} else {
				e.addElemName('\xFF', name)
			}

		default:
			i := v.Int()
			if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
				// It fits into an int32, encode as such.
				e.addElemName('\x10', name)
				e.addInt32(int32(i))
			} else {
				e.addElemName('\x12', name)
				e.addInt64(i)
			}
		}

	case reflect.Bool:
		e.addElemName('\x08', name)
		if v.Bool() {
			e.addBytes(1)
		} else {
			e.addBytes(0)
		}

	case reflect.Map:
		e.addElemName('\x03', name)
		e.addDoc(v)

	case reflect.Slice:
		vt := v.Type()
		et := vt.Elem()
		if et.Kind() == reflect.Uint8 {
			e.addElemName('\x05', name)
			e.addBinary('\x00', v.Bytes())
		} else if et == typeDocElem || et == typeRawDocElem {
			e.addElemName('\x03', name)
			e.addDoc(v)
		} else {
			e.addElemName('\x04', name)
			e.addDoc(v)
		}

	case reflect.Array:
		et := v.Type().Elem()
		if et.Kind() == reflect.Uint8 {
			e.addElemName('\x05', name)
			if v.CanAddr() {
				e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte))
			} else {
				n := v.Len()
				e.addInt32(int32(n))
				e.addBytes('\x00')
				for i := 0; i < n; i++ {
					el := v.Index(i)
					e.addBytes(byte(el.Uint()))
				}
			}
		} else {
			e.addElemName('\x04', name)
			e.addDoc(v)
		}

	case reflect.Struct:
		switch s := v.Interface().(type) {

		case Raw:
			kind := s.Kind
			if kind == 0x00 {
				kind = 0x03
			}
			e.addElemName(kind, name)
			e.addBytes(s.Data...)

		case Binary:
			e.addElemName('\x05', name)
			e.addBinary(s.Kind, s.Data)

		case DBPointer:
			e.addElemName('\x0C', name)
			e.addStr(s.Namespace)
			if len(s.Id) != 12 {
				panic("ObjectIDs must be exactly 12 bytes long (got " +
					strconv.Itoa(len(s.Id)) + ")")
			}
			e.addBytes([]byte(s.Id)...)

		case RegEx:
			e.addElemName('\x0B', name)
			e.addCStr(s.Pattern)
			e.addCStr(s.Options)

		case JavaScript:
			if s.Scope == nil {
				e.addElemName('\x0D', name)
				e.addStr(s.Code)
			} else {
				e.addElemName('\x0F', name)
				start := e.reserveInt32()
				e.addStr(s.Code)
				e.addDoc(reflect.ValueOf(s.Scope))
				e.setInt32(start, int32(len(e.out)-start))
			}

		case time.Time:
			// MongoDB handles timestamps as milliseconds.
			e.addElemName('\x09', name)
			e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))

		case url.URL:
			e.addElemName('\x02', name)
			e.addStr(s.String())

		case undefined:
			e.addElemName('\x06', name)

		default:
			e.addElemName('\x03', name)
			e.addDoc(v)
		}

	default:
		panic("Can't marshal " + v.Type().String() + " in a BSON document")
	}
}

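The time.Time case above stores milliseconds since the epoch (Unix()*1000 plus the millisecond part of Nanosecond()), so sub-millisecond precision is lost on a round trip. A quick check of that arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2015, 12, 1, 0, 0, 0, 123456789, time.UTC)
	// Same computation as the encoder's time.Time case:
	ms := t.Unix()*1000 + int64(t.Nanosecond()/1e6)
	// Same reconstruction as the decoder's 0x09 case:
	back := time.Unix(ms/1e3, ms%1e3*1e6).UTC()
	fmt.Println(back.Nanosecond()) // 123000000: truncated to the millisecond
}
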
// --------------------------------------------------------------------------
// Marshaling of base types.

func (e *encoder) addBinary(subtype byte, v []byte) {
	if subtype == 0x02 {
		// Wonder how that brilliant idea came to life. Obsolete, luckily.
		e.addInt32(int32(len(v) + 4))
		e.addBytes(subtype)
		e.addInt32(int32(len(v)))
	} else {
		e.addInt32(int32(len(v)))
		e.addBytes(subtype)
	}
	e.addBytes(v...)
}

func (e *encoder) addStr(v string) {
	e.addInt32(int32(len(v) + 1))
	e.addCStr(v)
}

func (e *encoder) addCStr(v string) {
	e.addBytes([]byte(v)...)
	e.addBytes(0)
}

func (e *encoder) reserveInt32() (pos int) {
	pos = len(e.out)
	e.addBytes(0, 0, 0, 0)
	return pos
}

func (e *encoder) setInt32(pos int, v int32) {
	e.out[pos+0] = byte(v)
	e.out[pos+1] = byte(v >> 8)
	e.out[pos+2] = byte(v >> 16)
	e.out[pos+3] = byte(v >> 24)
}

func (e *encoder) addInt32(v int32) {
	u := uint32(v)
	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}

func (e *encoder) addInt64(v int64) {
	u := uint64(v)
	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}

func (e *encoder) addFloat64(v float64) {
	e.addInt64(int64(math.Float64bits(v)))
}

func (e *encoder) addBytes(v ...byte) {
	e.out = append(e.out, v...)
}
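addBinary shows why subtype 0x02 is obsolete: it repeats the payload length, so every 0x02 value is four bytes larger on the wire than the same data as subtype 0x00. A sketch of both layouts for a two-byte payload, written out by hand:

package main

import "fmt"

func main() {
	payload := []byte{0xAA, 0xBB}
	// Subtype 0x00 ("generic"): int32 length, subtype byte, data.
	generic := []byte{2, 0, 0, 0, 0x00, 0xAA, 0xBB}
	// Subtype 0x02 ("old binary"): the outer length is len(data)+4 because a
	// redundant inner int32 length prefix precedes the data.
	old := []byte{6, 0, 0, 0, 0x02, 2, 0, 0, 0, 0xAA, 0xBB}
	fmt.Println(len(generic), len(old), len(payload)) // 7 11 2
}
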
71	vendor/gopkg.in/mgo.v2/bulk.go	generated vendored Normal file
@ -0,0 +1,71 @@
package mgo

// Bulk represents an operation that can be prepared with several
// orthogonal changes before being delivered to the server.
//
// WARNING: This API is still experimental.
//
// Relevant documentation:
//
//   http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
//
type Bulk struct {
	c       *Collection
	ordered bool
	inserts []interface{}
}

// BulkError holds an error returned from running a Bulk operation.
//
// TODO: This is private for the moment, until we understand exactly how
// to report these multi-errors in a useful and convenient way.
type bulkError struct {
	err error
}

// BulkResult holds the results for a bulk operation.
type BulkResult struct {
	// Be conservative while we understand exactly how to report these
	// results in a useful and convenient way, and also how to emulate
	// them with prior servers.
	private bool
}

func (e *bulkError) Error() string {
	return e.err.Error()
}

// Bulk returns a value to prepare the execution of a bulk operation.
//
// WARNING: This API is still experimental.
//
func (c *Collection) Bulk() *Bulk {
	return &Bulk{c: c, ordered: true}
}

// Unordered puts the bulk operation in unordered mode.
//
// In unordered mode the individual operations may be sent
// out of order, which means latter operations may proceed
// even if prior ones have failed.
func (b *Bulk) Unordered() {
	b.ordered = false
}

// Insert queues up the provided documents for insertion.
func (b *Bulk) Insert(docs ...interface{}) {
	b.inserts = append(b.inserts, docs...)
}

// Run runs all the operations queued up.
func (b *Bulk) Run() (*BulkResult, error) {
	op := &insertOp{b.c.FullName, b.inserts, 0}
	if !b.ordered {
		op.flags = 1 // ContinueOnError
	}
	_, err := b.c.writeQuery(op)
	if err != nil {
		return nil, &bulkError{err}
	}
	return &BulkResult{}, nil
}
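The intended call pattern, mirrored from the tests below (a sketch assuming a reachable mongod; the address is hypothetical and the driver is importable as gopkg.in/mgo.v2):

package main

import (
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	session, err := mgo.Dial("localhost:27017") // hypothetical address
	if err != nil {
		panic(err)
	}
	defer session.Close()

	bulk := session.DB("mydb").C("mycoll").Bulk()
	bulk.Unordered()                            // keep going past individual failures
	bulk.Insert(bson.M{"n": 1}, bson.M{"n": 2}) // queue documents for one delivery
	if _, err := bulk.Run(); err != nil {
		panic(err)
	}
}
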
131	vendor/gopkg.in/mgo.v2/bulk_test.go	generated vendored Normal file
@ -0,0 +1,131 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo_test

import (
	. "gopkg.in/check.v1"
	"gopkg.in/mgo.v2"
)

func (s *S) TestBulkInsert(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Insert(M{"n": 1})
	bulk.Insert(M{"n": 2}, M{"n": 3})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}

func (s *S) TestBulkInsertError(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")

	type doc struct {
		N int `_id`
	}
	var res []doc
	err = coll.Find(nil).Sort("_id").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {2}})
}

func (s *S) TestBulkInsertErrorUnordered(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")

	type doc struct {
		N int `_id`
	}
	var res []doc
	err = coll.Find(nil).Sort("_id").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}

func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) {
	// The server has a batch limit of 1000 documents when using write commands.
	// This artificial limit did not exist with the old wire protocol, so to
	// avoid compatibility issues the implementation internally splits batches
	// into the proper size and delivers them one by one. This test ensures that
	// the behavior of unordered (that is, continue on error) remains correct
	// when errors happen and there are batches left.
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Unordered()

	const total = 4096
	type doc struct {
		Id int `_id`
	}
	docs := make([]interface{}, total)
	for i := 0; i < total; i++ {
		docs[i] = doc{i}
	}
	docs[1] = doc{0}
	bulk.Insert(docs...)
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")

	n, err := coll.Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, total-1)

	var res doc
	err = coll.FindId(1500).One(&res)
	c.Assert(err, IsNil)
	c.Assert(res.Id, Equals, 1500)
}
632	vendor/gopkg.in/mgo.v2/cluster.go	generated vendored Normal file
@ -0,0 +1,632 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"errors"
	"fmt"
	"net"
	"sync"
	"time"

	"gopkg.in/mgo.v2/bson"
)

// ---------------------------------------------------------------------------
// Mongo cluster encapsulation.
//
// A cluster enables the communication with one or more servers participating
// in a mongo cluster. This works with individual servers, a replica set,
// a replica pair, one or multiple mongos routers, etc.

type mongoCluster struct {
	sync.RWMutex
	serverSynced sync.Cond
	userSeeds    []string
	dynaSeeds    []string
	servers      mongoServers
	masters      mongoServers
	references   int
	syncing      bool
	direct       bool
	failFast     bool
	syncCount    uint
	setName      string
	cachedIndex  map[string]bool
	sync         chan bool
	dial         dialer
}

func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster {
	cluster := &mongoCluster{
		userSeeds:  userSeeds,
		references: 1,
		direct:     direct,
		failFast:   failFast,
		dial:       dial,
		setName:    setName,
	}
	cluster.serverSynced.L = cluster.RWMutex.RLocker()
	cluster.sync = make(chan bool, 1)
	stats.cluster(+1)
	go cluster.syncServersLoop()
	return cluster
}

// Acquire increases the reference count for the cluster.
func (cluster *mongoCluster) Acquire() {
	cluster.Lock()
	cluster.references++
	debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
	cluster.Unlock()
}

// Release decreases the reference count for the cluster. Once
// it reaches zero, all servers will be closed.
func (cluster *mongoCluster) Release() {
	cluster.Lock()
	if cluster.references == 0 {
		panic("cluster.Release() with references == 0")
	}
	cluster.references--
	debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
	if cluster.references == 0 {
		for _, server := range cluster.servers.Slice() {
			server.Close()
		}
		// Wake up the sync loop so it can die.
		cluster.syncServers()
		stats.cluster(-1)
	}
	cluster.Unlock()
}

func (cluster *mongoCluster) LiveServers() (servers []string) {
	cluster.RLock()
	for _, serv := range cluster.servers.Slice() {
		servers = append(servers, serv.Addr)
	}
	cluster.RUnlock()
	return servers
}

func (cluster *mongoCluster) removeServer(server *mongoServer) {
	cluster.Lock()
	cluster.masters.Remove(server)
	other := cluster.servers.Remove(server)
	cluster.Unlock()
	if other != nil {
		other.Close()
		log("Removed server ", server.Addr, " from cluster.")
	}
	server.Close()
}

type isMasterResult struct {
	IsMaster       bool
	Secondary      bool
	Primary        string
	Hosts          []string
	Passives       []string
	Tags           bson.D
	Msg            string
	SetName        string `bson:"setName"`
	MaxWireVersion int    `bson:"maxWireVersion"`
}

func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
	// Monotonic mode lets it talk to a slave and still hold the socket.
	session := newSession(Monotonic, cluster, 10*time.Second)
	session.setSocket(socket)
	err := session.Run("ismaster", result)
	session.Close()
	return err
}

type possibleTimeout interface {
	Timeout() bool
}

var syncSocketTimeout = 5 * time.Second

func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
	var syncTimeout time.Duration
	if raceDetector {
		// This variable is only ever touched by tests.
		globalMutex.Lock()
		syncTimeout = syncSocketTimeout
		globalMutex.Unlock()
	} else {
		syncTimeout = syncSocketTimeout
	}

	addr := server.Addr
	log("SYNC Processing ", addr, "...")

	// Retry a few times to avoid knocking a server down for a hiccup.
	var result isMasterResult
	var tryerr error
	for retry := 0; ; retry++ {
		if retry == 3 || retry == 1 && cluster.failFast {
			return nil, nil, tryerr
		}
		if retry > 0 {
			// Don't abuse the server needlessly if there's something actually wrong.
			if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
				// Give a chance for waiters to timeout as well.
				cluster.serverSynced.Broadcast()
			}
			time.Sleep(syncShortDelay)
		}

		// It's not clear what would be a good timeout here. Is it
		// better to wait longer or to retry?
		socket, _, err := server.AcquireSocket(0, syncTimeout)
		if err != nil {
			tryerr = err
			logf("SYNC Failed to get socket to %s: %v", addr, err)
			continue
		}
		err = cluster.isMaster(socket, &result)
		socket.Release()
		if err != nil {
			tryerr = err
			logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
			continue
		}
		debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
		break
	}

	if cluster.setName != "" && result.SetName != cluster.setName {
		logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName)
		return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName)
	}

	if result.IsMaster {
		debugf("SYNC %s is a master.", addr)
		if !server.info.Master {
			// Made an incorrect assumption above, so fix stats.
			stats.conn(-1, false)
			stats.conn(+1, true)
		}
	} else if result.Secondary {
		debugf("SYNC %s is a slave.", addr)
	} else if cluster.direct {
		logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr)
	} else {
		logf("SYNC %s is neither a master nor a slave.", addr)
		// Let stats track it as whatever was known before.
		return nil, nil, errors.New(addr + " is not a master nor slave")
	}

	info = &mongoServerInfo{
		Master:         result.IsMaster,
		Mongos:         result.Msg == "isdbgrid",
		Tags:           result.Tags,
		SetName:        result.SetName,
		MaxWireVersion: result.MaxWireVersion,
	}

	hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
	if result.Primary != "" {
		// First in the list to speed up master discovery.
		hosts = append(hosts, result.Primary)
	}
	hosts = append(hosts, result.Hosts...)
	hosts = append(hosts, result.Passives...)

	debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
	return info, hosts, nil
}

type syncKind bool

const (
	completeSync syncKind = true
	partialSync  syncKind = false
)

func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
	cluster.Lock()
	current := cluster.servers.Search(server.ResolvedAddr)
	if current == nil {
		if syncKind == partialSync {
			cluster.Unlock()
			server.Close()
			log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
			return
		}
		cluster.servers.Add(server)
		if info.Master {
			cluster.masters.Add(server)
			log("SYNC Adding ", server.Addr, " to cluster as a master.")
		} else {
			log("SYNC Adding ", server.Addr, " to cluster as a slave.")
		}
	} else {
		if server != current {
			panic("addServer attempting to add duplicated server")
		}
		if server.Info().Master != info.Master {
			if info.Master {
				log("SYNC Server ", server.Addr, " is now a master.")
				cluster.masters.Add(server)
			} else {
				log("SYNC Server ", server.Addr, " is now a slave.")
				cluster.masters.Remove(server)
			}
		}
	}
	server.SetInfo(info)
	debugf("SYNC Broadcasting availability of server %s", server.Addr)
	cluster.serverSynced.Broadcast()
	cluster.Unlock()
}

func (cluster *mongoCluster) getKnownAddrs() []string {
	cluster.RLock()
	max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
	seen := make(map[string]bool, max)
	known := make([]string, 0, max)

	add := func(addr string) {
		if _, found := seen[addr]; !found {
			seen[addr] = true
			known = append(known, addr)
		}
	}

	for _, addr := range cluster.userSeeds {
		add(addr)
	}
	for _, addr := range cluster.dynaSeeds {
		add(addr)
	}
	for _, serv := range cluster.servers.Slice() {
		add(serv.Addr)
	}
	cluster.RUnlock()

	return known
}

// syncServers injects a value into the cluster.sync channel to force
// an iteration of the syncServersLoop function.
func (cluster *mongoCluster) syncServers() {
	select {
	case cluster.sync <- true:
	default:
	}
}

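syncServers relies on a buffered channel of capacity one plus a non-blocking send: any number of concurrent wake-up requests collapse into at most one pending synchronization. A standalone sketch of the same coalescing idiom:

package main

import "fmt"

func main() {
	wake := make(chan bool, 1) // capacity 1: at most one pending request

	request := func() {
		select {
		case wake <- true: // first request queues the wake-up
		default: // later requests coalesce into the pending one
		}
	}

	request()
	request()
	request()
	fmt.Println(len(wake)) // 1: three requests, one pending sync
}
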
// How long to wait for a checkup of the cluster topology if nothing
// else kicks a synchronization before that.
const syncServersDelay = 30 * time.Second
const syncShortDelay = 500 * time.Millisecond

// syncServersLoop loops while the cluster is alive to keep its idea of
// the server topology up-to-date. It must be called just once from
// newCluster. The loop iterates once syncServersDelay has passed, or
// if somebody injects a value into the cluster.sync channel to force a
// synchronization. A loop iteration will contact all servers in
// parallel, ask them about known peers and their own role within the
// cluster, and then attempt to do the same with all the peers
// retrieved.
func (cluster *mongoCluster) syncServersLoop() {
	for {
		debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)

		cluster.Lock()
		if cluster.references == 0 {
			cluster.Unlock()
			break
		}
		cluster.references++ // Keep alive while syncing.
		direct := cluster.direct
		cluster.Unlock()

		cluster.syncServersIteration(direct)

		// We just synchronized, so consume any outstanding requests.
		select {
		case <-cluster.sync:
		default:
		}

		cluster.Release()

		// Hold off before allowing another sync. No point in
		// burning CPU looking for down servers.
		if !cluster.failFast {
			time.Sleep(syncShortDelay)
		}

		cluster.Lock()
		if cluster.references == 0 {
			cluster.Unlock()
			break
		}
		cluster.syncCount++
		// Poke all waiters so they have a chance to timeout or
		// restart syncing if they wish to.
		cluster.serverSynced.Broadcast()
		// Check if we have to restart immediately either way.
		restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
		cluster.Unlock()

		if restart {
			log("SYNC No masters found. Will synchronize again.")
			time.Sleep(syncShortDelay)
			continue
		}

		debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)

		// Hold off until somebody explicitly requests a synchronization
		// or it's time to check for a cluster topology change again.
		select {
		case <-cluster.sync:
		case <-time.After(syncServersDelay):
		}
	}
	debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
}

func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
	cluster.RLock()
	server := cluster.servers.Search(tcpaddr.String())
	cluster.RUnlock()
	if server != nil {
		return server
	}
	return newServer(addr, tcpaddr, cluster.sync, cluster.dial)
}

func resolveAddr(addr string) (*net.TCPAddr, error) {
	// This hack allows having a timeout on resolution.
	conn, err := net.DialTimeout("udp4", addr, 10*time.Second)
	if err != nil {
		log("SYNC Failed to resolve server address: ", addr)
		return nil, errors.New("failed to resolve server address: " + addr)
	}
	tcpaddr := (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
	conn.Close()
	if tcpaddr.String() != addr {
		debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
	}
	return tcpaddr, nil
}

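The hack in resolveAddr deserves a note: net.ResolveTCPAddr offers no deadline, so the code "dials" UDP instead, which performs the same DNS lookup but sends nothing on the wire (UDP is connectionless), purely to get DialTimeout's deadline. The resolved UDP address is then reinterpreted as a TCP one, which works because net.TCPAddr and net.UDPAddr share the same field layout. A standalone sketch of the same trick:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Dialing UDP only resolves the name; no packets are sent,
	// and unlike net.ResolveTCPAddr this honors a timeout.
	conn, err := net.DialTimeout("udp4", "localhost:27017", 2*time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	tcpaddr := (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
	fmt.Println(tcpaddr.String()) // e.g. 127.0.0.1:27017
}
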
type pendingAdd struct {
	server *mongoServer
	info   *mongoServerInfo
}

func (cluster *mongoCluster) syncServersIteration(direct bool) {
	log("SYNC Starting full topology synchronization...")

	var wg sync.WaitGroup
	var m sync.Mutex
	notYetAdded := make(map[string]pendingAdd)
	addIfFound := make(map[string]bool)
	seen := make(map[string]bool)
	syncKind := partialSync

	var spawnSync func(addr string, byMaster bool)
	spawnSync = func(addr string, byMaster bool) {
		wg.Add(1)
		go func() {
			defer wg.Done()

			tcpaddr, err := resolveAddr(addr)
			if err != nil {
				log("SYNC Failed to start sync of ", addr, ": ", err.Error())
				return
			}
			resolvedAddr := tcpaddr.String()

			m.Lock()
			if byMaster {
				if pending, ok := notYetAdded[resolvedAddr]; ok {
					delete(notYetAdded, resolvedAddr)
					m.Unlock()
					cluster.addServer(pending.server, pending.info, completeSync)
					return
				}
				addIfFound[resolvedAddr] = true
			}
			if seen[resolvedAddr] {
				m.Unlock()
				return
			}
			seen[resolvedAddr] = true
			m.Unlock()

			server := cluster.server(addr, tcpaddr)
			info, hosts, err := cluster.syncServer(server)
			if err != nil {
				cluster.removeServer(server)
				return
			}

			m.Lock()
			add := direct || info.Master || addIfFound[resolvedAddr]
			if add {
				syncKind = completeSync
			} else {
				notYetAdded[resolvedAddr] = pendingAdd{server, info}
			}
			m.Unlock()
			if add {
				cluster.addServer(server, info, completeSync)
			}
			if !direct {
				for _, addr := range hosts {
					spawnSync(addr, info.Master)
				}
			}
		}()
	}

	knownAddrs := cluster.getKnownAddrs()
	for _, addr := range knownAddrs {
		spawnSync(addr, false)
	}
	wg.Wait()

	if syncKind == completeSync {
		logf("SYNC Synchronization was complete (got data from primary).")
		for _, pending := range notYetAdded {
			cluster.removeServer(pending.server)
		}
	} else {
		logf("SYNC Synchronization was partial (cannot talk to primary).")
		for _, pending := range notYetAdded {
			cluster.addServer(pending.server, pending.info, partialSync)
		}
	}

	cluster.Lock()
	ml := cluster.masters.Len()
	logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", ml, cluster.servers.Len()-ml)

	// Update dynamic seeds, but only if we have any good servers. Otherwise,
	// leave them alone for better chances of a successful sync in the future.
	if syncKind == completeSync {
		dynaSeeds := make([]string, cluster.servers.Len())
		for i, server := range cluster.servers.Slice() {
			dynaSeeds[i] = server.Addr
		}
		cluster.dynaSeeds = dynaSeeds
		debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
	}
	cluster.Unlock()
}

// AcquireSocket returns a socket to a server in the cluster. If slaveOk is
// true, it will attempt to return a socket to a slave server. If it is
// false, the socket will necessarily be to a master server.
func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) {
	var started time.Time
	var syncCount uint
	warnedLimit := false
	for {
		cluster.RLock()
		for {
			ml := cluster.masters.Len()
			sl := cluster.servers.Len()
			debugf("Cluster has %d known masters and %d known slaves.", ml, sl-ml)
			if ml > 0 || slaveOk && sl > 0 {
				break
			}
			if started.IsZero() {
				// Initialize after fast path above.
				started = time.Now()
				syncCount = cluster.syncCount
			} else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
				cluster.RUnlock()
				return nil, errors.New("no reachable servers")
			}
			log("Waiting for servers to synchronize...")
			cluster.syncServers()

			// Remember: this will release and reacquire the lock.
			cluster.serverSynced.Wait()
		}

		var server *mongoServer
		if slaveOk {
			server = cluster.servers.BestFit(serverTags)
		} else {
			server = cluster.masters.BestFit(nil)
		}
		cluster.RUnlock()

		if server == nil {
			// Must have failed the requested tags. Sleep to avoid spinning.
			time.Sleep(1e8)
			continue
		}

		s, abended, err := server.AcquireSocket(poolLimit, socketTimeout)
		if err == errPoolLimit {
			if !warnedLimit {
				warnedLimit = true
				log("WARNING: Per-server connection limit reached.")
			}
			time.Sleep(100 * time.Millisecond)
			continue
		}
		if err != nil {
			cluster.removeServer(server)
			cluster.syncServers()
			continue
		}
		if abended && !slaveOk {
			var result isMasterResult
			err := cluster.isMaster(s, &result)
			if err != nil || !result.IsMaster {
				logf("Cannot confirm server %s as master (%v)", server.Addr, err)
				s.Release()
				cluster.syncServers()
				time.Sleep(100 * time.Millisecond)
				continue
			}
		}
		return s, nil
	}
	panic("unreached")
}

func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
	cluster.Lock()
	if cluster.cachedIndex == nil {
		cluster.cachedIndex = make(map[string]bool)
	}
	if exists {
		cluster.cachedIndex[cacheKey] = true
	} else {
		delete(cluster.cachedIndex, cacheKey)
	}
	cluster.Unlock()
}

func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
	cluster.RLock()
	if cluster.cachedIndex != nil {
		result = cluster.cachedIndex[cacheKey]
	}
	cluster.RUnlock()
	return
}

func (cluster *mongoCluster) ResetIndexCache() {
	cluster.Lock()
	cluster.cachedIndex = make(map[string]bool)
	cluster.Unlock()
}
1657	vendor/gopkg.in/mgo.v2/cluster_test.go	generated vendored Normal file
File diff suppressed because it is too large
31	vendor/gopkg.in/mgo.v2/doc.go	generated vendored Normal file
@ -0,0 +1,31 @@
// Package mgo offers a rich MongoDB driver for Go.
//
// Details about the mgo project (pronounced as "mango") are found
// in its web page:
//
//     http://labix.org/mgo
//
// Usage of the driver revolves around the concept of sessions. To
// get started, obtain a session using the Dial function:
//
//     session, err := mgo.Dial(url)
//
// This will establish one or more connections with the cluster of
// servers defined by the url parameter. From then on, the cluster
// may be queried with multiple consistency rules (see SetMode) and
// documents retrieved with statements such as:
//
//     c := session.DB(database).C(collection)
//     err := c.Find(query).One(&result)
//
// New sessions are typically created by calling session.Copy on the
// initial session obtained at dial time. These new sessions will share
// the same cluster information and connection cache, and may be easily
// handed into other methods and functions for organizing logic.
// Every session created must have its Close method called at the end
// of its life time, so its resources may be put back in the pool or
// collected, depending on the case.
//
// For more details, see the documentation for the types and methods.
//
package mgo
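The copy-per-unit-of-work pattern the package documentation describes, sketched as an HTTP handler; the handler, database name, and address are illustrative, not from the source:

package main

import (
	"fmt"
	"net/http"

	"gopkg.in/mgo.v2"
)

func main() {
	root, err := mgo.Dial("localhost:27017") // hypothetical address
	if err != nil {
		panic(err)
	}
	defer root.Close()

	http.HandleFunc("/count", func(w http.ResponseWriter, r *http.Request) {
		s := root.Copy() // shares cluster info and the connection cache
		defer s.Close()  // returns resources to the pool
		n, err := s.DB("mydb").C("mycoll").Count()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Fprintln(w, n)
	})
	http.ListenAndServe(":8080", nil)
}
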
33	vendor/gopkg.in/mgo.v2/export_test.go	generated vendored Normal file
@ -0,0 +1,33 @@
package mgo

import (
	"time"
)

func HackPingDelay(newDelay time.Duration) (restore func()) {
	globalMutex.Lock()
	defer globalMutex.Unlock()

	oldDelay := pingDelay
	restore = func() {
		globalMutex.Lock()
		pingDelay = oldDelay
		globalMutex.Unlock()
	}
	pingDelay = newDelay
	return
}

func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
	globalMutex.Lock()
	defer globalMutex.Unlock()

	oldTimeout := syncSocketTimeout
	restore = func() {
		globalMutex.Lock()
		syncSocketTimeout = oldTimeout
		globalMutex.Unlock()
	}
	syncSocketTimeout = newTimeout
	return
}
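Each hack returns a closure that undoes it, so a test can shorten a timeout for exactly its own duration with a single deferred call. A sketch of how a test in the external mgo_test package could presumably use it (the test itself is hypothetical):

package mgo_test

import (
	"testing"
	"time"

	"gopkg.in/mgo.v2"
)

func TestWithShortTimeout(t *testing.T) { // hypothetical test
	// Shorten the sync socket timeout, and restore it when the test ends.
	defer mgo.HackSyncSocketTimeout(100 * time.Millisecond)()
	// ... exercise behavior that depends on the shorter timeout ...
}
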
755	vendor/gopkg.in/mgo.v2/gridfs.go	generated vendored Normal file
@ -0,0 +1,755 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"crypto/md5"
	"encoding/hex"
	"errors"
	"hash"
	"io"
	"os"
	"sync"
	"time"

	"gopkg.in/mgo.v2/bson"
)

type GridFS struct {
	Files  *Collection
	Chunks *Collection
}

type gfsFileMode int

const (
	gfsClosed  gfsFileMode = 0
	gfsReading gfsFileMode = 1
	gfsWriting gfsFileMode = 2
)

type GridFile struct {
	m    sync.Mutex
	c    sync.Cond
	gfs  *GridFS
	mode gfsFileMode
	err  error

	chunk  int
	offset int64

	wpending int
	wbuf     []byte
	wsum     hash.Hash

	rbuf   []byte
	rcache *gfsCachedChunk

	doc gfsFile
}

type gfsFile struct {
	Id          interface{} "_id"
	ChunkSize   int         "chunkSize"
	UploadDate  time.Time   "uploadDate"
	Length      int64       ",minsize"
	MD5         string
	Filename    string    ",omitempty"
	ContentType string    "contentType,omitempty"
	Metadata    *bson.Raw ",omitempty"
}

type gfsChunk struct {
	Id      interface{} "_id"
	FilesId interface{} "files_id"
	N       int
	Data    []byte
}

type gfsCachedChunk struct {
	wait sync.Mutex
	n    int
	data []byte
	err  error
}

func newGridFS(db *Database, prefix string) *GridFS {
	return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
}

func (gfs *GridFS) newFile() *GridFile {
	file := &GridFile{gfs: gfs}
	file.c.L = &file.m
	//runtime.SetFinalizer(file, finalizeFile)
	return file
}

func finalizeFile(file *GridFile) {
	file.Close()
}

// Create creates a new file with the provided name in the GridFS. If the file
// name already exists, a new version will be inserted with an up-to-date
// uploadDate that will cause it to be atomically visible to the Open and
// OpenId methods. If the file name is not important, an empty name may be
// provided and the file Id used instead.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// A simple example inserting a new file:
//
//     func check(err error) {
//         if err != nil {
//             panic(err.Error())
//         }
//     }
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     n, err := file.Write([]byte("Hello world!"))
//     check(err)
//     err = file.Close()
//     check(err)
//     fmt.Printf("%d bytes written\n", n)
//
// The io.Writer interface is implemented by *GridFile and may be used to
// help on the file creation. For example:
//
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     messages, err := os.Open("/var/log/messages")
//     check(err)
//     defer messages.Close()
//     _, err = io.Copy(file, messages)
//     check(err)
//     err = file.Close()
//     check(err)
//
func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
	file = gfs.newFile()
	file.mode = gfsWriting
	file.wsum = md5.New()
	file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
|
||||
return
|
||||
}
|
||||
|
||||
// OpenId returns the file with the provided id, for reading.
|
||||
// If the file isn't found, err will be set to mgo.ErrNotFound.
|
||||
//
|
||||
// It's important to Close files whether they are being written to
|
||||
// or read from, and to check the err result to ensure the operation
|
||||
// completed successfully.
|
||||
//
|
||||
// The following example will print the first 8192 bytes from the file:
|
||||
//
|
||||
// func check(err error) {
|
||||
// if err != nil {
|
||||
// panic(err.String())
|
||||
// }
|
||||
// }
|
||||
// file, err := db.GridFS("fs").OpenId(objid)
|
||||
// check(err)
|
||||
// b := make([]byte, 8192)
|
||||
// n, err := file.Read(b)
|
||||
// check(err)
|
||||
// fmt.Println(string(b))
|
||||
// check(err)
|
||||
// err = file.Close()
|
||||
// check(err)
|
||||
// fmt.Printf("%d bytes read\n", n)
|
||||
//
|
||||
// The io.Reader interface is implemented by *GridFile and may be used to
|
||||
// deal with it. As an example, the following snippet will dump the whole
|
||||
// file into the standard output:
|
||||
//
|
||||
// file, err := db.GridFS("fs").OpenId(objid)
|
||||
// check(err)
|
||||
// err = io.Copy(os.Stdout, file)
|
||||
// check(err)
|
||||
// err = file.Close()
|
||||
// check(err)
|
||||
//
|
||||
func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
|
||||
var doc gfsFile
|
||||
err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
file = gfs.newFile()
|
||||
file.mode = gfsReading
|
||||
file.doc = doc
|
||||
return
|
||||
}
|
||||
|
||||
// Open returns the most recently uploaded file with the provided
|
||||
// name, for reading. If the file isn't found, err will be set
|
||||
// to mgo.ErrNotFound.
|
||||
//
|
||||
// It's important to Close files whether they are being written to
|
||||
// or read from, and to check the err result to ensure the operation
|
||||
// completed successfully.
|
||||
//
|
||||
// The following example will print the first 8192 bytes from the file:
|
||||
//
|
||||
// file, err := db.GridFS("fs").Open("myfile.txt")
|
||||
// check(err)
|
||||
// b := make([]byte, 8192)
|
||||
// n, err := file.Read(b)
|
||||
// check(err)
|
||||
// fmt.Println(string(b))
|
||||
// check(err)
|
||||
// err = file.Close()
|
||||
// check(err)
|
||||
// fmt.Printf("%d bytes read\n", n)
|
||||
//
|
||||
// The io.Reader interface is implemented by *GridFile and may be used to
|
||||
// deal with it. As an example, the following snippet will dump the whole
|
||||
// file into the standard output:
|
||||
//
|
||||
// file, err := db.GridFS("fs").Open("myfile.txt")
|
||||
// check(err)
|
||||
// err = io.Copy(os.Stdout, file)
|
||||
// check(err)
|
||||
// err = file.Close()
|
||||
// check(err)
|
||||
//
|
||||
func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
|
||||
var doc gfsFile
|
||||
err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
file = gfs.newFile()
|
||||
file.mode = gfsReading
|
||||
file.doc = doc
|
||||
return
|
||||
}
|
||||
|
||||
// OpenNext opens the next file from iter for reading, sets *file to it,
|
||||
// and returns true on the success case. If no more documents are available
|
||||
// on iter or an error occurred, *file is set to nil and the result is false.
|
||||
// Errors will be available via iter.Err().
|
||||
//
|
||||
// The iter parameter must be an iterator on the GridFS files collection.
|
||||
// Using the GridFS.Find method is an easy way to obtain such an iterator,
|
||||
// but any iterator on the collection will work.
|
||||
//
|
||||
// If the provided *file is non-nil, OpenNext will close it before attempting
|
||||
// to iterate to the next element. This means that in a loop one only
|
||||
// has to worry about closing files when breaking out of the loop early
|
||||
// (break, return, or panic).
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// gfs := db.GridFS("fs")
|
||||
// query := gfs.Find(nil).Sort("filename")
|
||||
// iter := query.Iter()
|
||||
// var f *mgo.GridFile
|
||||
// for gfs.OpenNext(iter, &f) {
|
||||
// fmt.Printf("Filename: %s\n", f.Name())
|
||||
// }
|
||||
// if iter.Close() != nil {
|
||||
// panic(iter.Close())
|
||||
// }
|
||||
//
|
||||
func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
|
||||
if *file != nil {
|
||||
// Ignoring the error here shouldn't be a big deal
|
||||
// as we're reading the file and the loop iteration
|
||||
// for this file is finished.
|
||||
_ = (*file).Close()
|
||||
}
|
||||
var doc gfsFile
|
||||
if !iter.Next(&doc) {
|
||||
*file = nil
|
||||
return false
|
||||
}
|
||||
f := gfs.newFile()
|
||||
f.mode = gfsReading
|
||||
f.doc = doc
|
||||
*file = f
|
||||
return true
|
||||
}
|
||||
|
||||
// Find runs query on GridFS's files collection and returns
|
||||
// the resulting Query.
|
||||
//
|
||||
// This logic:
|
||||
//
|
||||
// gfs := db.GridFS("fs")
|
||||
// iter := gfs.Find(nil).Iter()
|
||||
//
|
||||
// Is equivalent to:
|
||||
//
|
||||
// files := db.C("fs" + ".files")
|
||||
// iter := files.Find(nil).Iter()
|
||||
//
|
||||
func (gfs *GridFS) Find(query interface{}) *Query {
|
||||
return gfs.Files.Find(query)
|
||||
}
|
||||
|
||||
// RemoveId deletes the file with the provided id from the GridFS.
|
||||
func (gfs *GridFS) RemoveId(id interface{}) error {
|
||||
err := gfs.Files.Remove(bson.M{"_id": id})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
|
||||
return err
|
||||
}
|
||||
|
||||
type gfsDocId struct {
|
||||
Id interface{} "_id"
|
||||
}
|
||||
|
||||
// Remove deletes all files with the provided name from the GridFS.
|
||||
func (gfs *GridFS) Remove(name string) (err error) {
|
||||
iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
|
||||
var doc gfsDocId
|
||||
for iter.Next(&doc) {
|
||||
if e := gfs.RemoveId(doc.Id); e != nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
err = iter.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (file *GridFile) assertMode(mode gfsFileMode) {
|
||||
switch file.mode {
|
||||
case mode:
|
||||
return
|
||||
case gfsWriting:
|
||||
panic("GridFile is open for writing")
|
||||
case gfsReading:
|
||||
panic("GridFile is open for reading")
|
||||
case gfsClosed:
|
||||
panic("GridFile is closed")
|
||||
default:
|
||||
panic("internal error: missing GridFile mode")
|
||||
}
|
||||
}
|
||||
|
||||
// SetChunkSize sets size of saved chunks. Once the file is written to, it
|
||||
// will be split in blocks of that size and each block saved into an
|
||||
// independent chunk document. The default chunk size is 256kb.
|
||||
//
|
||||
// It is a runtime error to call this function once the file has started
|
||||
// being written to.
|
||||
func (file *GridFile) SetChunkSize(bytes int) {
|
||||
file.assertMode(gfsWriting)
|
||||
debugf("GridFile %p: setting chunk size to %d", file, bytes)
|
||||
file.m.Lock()
|
||||
file.doc.ChunkSize = bytes
|
||||
file.m.Unlock()
|
||||
}
|
||||
|
||||
// Id returns the current file Id.
|
||||
func (file *GridFile) Id() interface{} {
|
||||
return file.doc.Id
|
||||
}
|
||||
|
||||
// SetId changes the current file Id.
|
||||
//
|
||||
// It is a runtime error to call this function once the file has started
|
||||
// being written to, or when the file is not open for writing.
|
||||
func (file *GridFile) SetId(id interface{}) {
|
||||
file.assertMode(gfsWriting)
|
||||
file.m.Lock()
|
||||
file.doc.Id = id
|
||||
file.m.Unlock()
|
||||
}
|
||||
|
||||
// Name returns the optional file name. An empty string will be returned
|
||||
// in case it is unset.
|
||||
func (file *GridFile) Name() string {
|
||||
return file.doc.Filename
|
||||
}
|
||||
|
||||
// SetName changes the optional file name. An empty string may be used to
|
||||
// unset it.
|
||||
//
|
||||
// It is a runtime error to call this function when the file is not open
|
||||
// for writing.
|
||||
func (file *GridFile) SetName(name string) {
|
||||
file.assertMode(gfsWriting)
|
||||
file.m.Lock()
|
||||
file.doc.Filename = name
|
||||
file.m.Unlock()
|
||||
}
|
||||
|
||||
// ContentType returns the optional file content type. An empty string will be
|
||||
// returned in case it is unset.
|
||||
func (file *GridFile) ContentType() string {
|
||||
return file.doc.ContentType
|
||||
}
|
||||
|
||||
// ContentType changes the optional file content type. An empty string may be
|
||||
// used to unset it.
|
||||
//
|
||||
// It is a runtime error to call this function when the file is not open
|
||||
// for writing.
|
||||
func (file *GridFile) SetContentType(ctype string) {
|
||||
file.assertMode(gfsWriting)
|
||||
file.m.Lock()
|
||||
file.doc.ContentType = ctype
|
||||
file.m.Unlock()
|
||||
}
|
||||
|
||||
// GetMeta unmarshals the optional "metadata" field associated with the
|
||||
// file into the result parameter. The meaning of keys under that field
|
||||
// is user-defined. For example:
|
||||
//
|
||||
// result := struct{ INode int }{}
|
||||
// err = file.GetMeta(&result)
|
||||
// if err != nil {
|
||||
// panic(err.String())
|
||||
// }
|
||||
// fmt.Printf("inode: %d\n", result.INode)
|
||||
//
|
||||
func (file *GridFile) GetMeta(result interface{}) (err error) {
|
||||
file.m.Lock()
|
||||
if file.doc.Metadata != nil {
|
||||
err = bson.Unmarshal(file.doc.Metadata.Data, result)
|
||||
}
|
||||
file.m.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// SetMeta changes the optional "metadata" field associated with the
|
||||
// file. The meaning of keys under that field is user-defined.
|
||||
// For example:
|
||||
//
|
||||
// file.SetMeta(bson.M{"inode": inode})
|
||||
//
|
||||
// It is a runtime error to call this function when the file is not open
|
||||
// for writing.
|
||||
func (file *GridFile) SetMeta(metadata interface{}) {
|
||||
file.assertMode(gfsWriting)
|
||||
data, err := bson.Marshal(metadata)
|
||||
file.m.Lock()
|
||||
if err != nil && file.err == nil {
|
||||
file.err = err
|
||||
} else {
|
||||
file.doc.Metadata = &bson.Raw{Data: data}
|
||||
}
|
||||
file.m.Unlock()
|
||||
}
|
||||
|
||||
// Size returns the file size in bytes.
|
||||
func (file *GridFile) Size() (bytes int64) {
|
||||
file.m.Lock()
|
||||
bytes = file.doc.Length
|
||||
file.m.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// MD5 returns the file MD5 as a hex-encoded string.
|
||||
func (file *GridFile) MD5() (md5 string) {
|
||||
return file.doc.MD5
|
||||
}
|
||||
|
||||
// UploadDate returns the file upload time.
|
||||
func (file *GridFile) UploadDate() time.Time {
|
||||
return file.doc.UploadDate
|
||||
}
|
||||
|
||||
// SetUploadDate changes the file upload time.
|
||||
//
|
||||
// It is a runtime error to call this function when the file is not open
|
||||
// for writing.
|
||||
func (file *GridFile) SetUploadDate(t time.Time) {
|
||||
file.assertMode(gfsWriting)
|
||||
file.m.Lock()
|
||||
file.doc.UploadDate = t
|
||||
file.m.Unlock()
|
||||
}
|
||||
|
||||
// Close flushes any pending changes in case the file is being written
|
||||
// to, waits for any background operations to finish, and closes the file.
|
||||
//
|
||||
// It's important to Close files whether they are being written to
|
||||
// or read from, and to check the err result to ensure the operation
|
||||
// completed successfully.
|
||||
func (file *GridFile) Close() (err error) {
|
||||
file.m.Lock()
|
||||
defer file.m.Unlock()
|
||||
if file.mode == gfsWriting {
|
||||
if len(file.wbuf) > 0 && file.err == nil {
|
||||
file.insertChunk(file.wbuf)
|
||||
file.wbuf = file.wbuf[0:0]
|
||||
}
|
||||
file.completeWrite()
|
||||
} else if file.mode == gfsReading && file.rcache != nil {
|
||||
file.rcache.wait.Lock()
|
||||
file.rcache = nil
|
||||
}
|
||||
file.mode = gfsClosed
|
||||
debugf("GridFile %p: closed", file)
|
||||
return file.err
|
||||
}
|
||||
|
||||
func (file *GridFile) completeWrite() {
|
||||
for file.wpending > 0 {
|
||||
debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
|
||||
file.c.Wait()
|
||||
}
|
||||
if file.err == nil {
|
||||
hexsum := hex.EncodeToString(file.wsum.Sum(nil))
|
||||
if file.doc.UploadDate.IsZero() {
|
||||
file.doc.UploadDate = bson.Now()
|
||||
}
|
||||
file.doc.MD5 = hexsum
|
||||
file.err = file.gfs.Files.Insert(file.doc)
|
||||
file.gfs.Chunks.EnsureIndexKey("files_id", "n")
|
||||
}
|
||||
if file.err != nil {
|
||||
file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
|
||||
}
|
||||
}
|
||||
|
||||
// Abort cancels an in-progress write, preventing the file from being
|
||||
// automically created and ensuring previously written chunks are
|
||||
// removed when the file is closed.
|
||||
//
|
||||
// It is a runtime error to call Abort when the file was not opened
|
||||
// for writing.
|
||||
func (file *GridFile) Abort() {
|
||||
if file.mode != gfsWriting {
|
||||
panic("file.Abort must be called on file opened for writing")
|
||||
}
|
||||
file.err = errors.New("write aborted")
|
||||
}
|
||||
|
||||
// Write writes the provided data to the file and returns the
|
||||
// number of bytes written and an error in case something
|
||||
// wrong happened.
|
||||
//
|
||||
// The file will internally cache the data so that all but the last
|
||||
// chunk sent to the database have the size defined by SetChunkSize.
|
||||
// This also means that errors may be deferred until a future call
|
||||
// to Write or Close.
|
||||
//
|
||||
// The parameters and behavior of this function turn the file
|
||||
// into an io.Writer.
|
||||
func (file *GridFile) Write(data []byte) (n int, err error) {
|
||||
file.assertMode(gfsWriting)
|
||||
file.m.Lock()
|
||||
debugf("GridFile %p: writing %d bytes", file, len(data))
|
||||
defer file.m.Unlock()
|
||||
|
||||
if file.err != nil {
|
||||
return 0, file.err
|
||||
}
|
||||
|
||||
n = len(data)
|
||||
file.doc.Length += int64(n)
|
||||
chunkSize := file.doc.ChunkSize
|
||||
|
||||
if len(file.wbuf)+len(data) < chunkSize {
|
||||
file.wbuf = append(file.wbuf, data...)
|
||||
return
|
||||
}
|
||||
|
||||
// First, flush file.wbuf complementing with data.
|
||||
if len(file.wbuf) > 0 {
|
||||
missing := chunkSize - len(file.wbuf)
|
||||
if missing > len(data) {
|
||||
missing = len(data)
|
||||
}
|
||||
file.wbuf = append(file.wbuf, data[:missing]...)
|
||||
data = data[missing:]
|
||||
file.insertChunk(file.wbuf)
|
||||
file.wbuf = file.wbuf[0:0]
|
||||
}
|
||||
|
||||
// Then, flush all chunks from data without copying.
|
||||
for len(data) > chunkSize {
|
||||
size := chunkSize
|
||||
if size > len(data) {
|
||||
size = len(data)
|
||||
}
|
||||
file.insertChunk(data[:size])
|
||||
data = data[size:]
|
||||
}
|
||||
|
||||
// And append the rest for a future call.
|
||||
file.wbuf = append(file.wbuf, data...)
|
||||
|
||||
return n, file.err
|
||||
}
|
||||
|
||||
func (file *GridFile) insertChunk(data []byte) {
|
||||
n := file.chunk
|
||||
file.chunk++
|
||||
debugf("GridFile %p: adding to checksum: %q", file, string(data))
|
||||
file.wsum.Write(data)
|
||||
|
||||
for file.doc.ChunkSize*file.wpending >= 1024*1024 {
|
||||
// Hold on.. we got a MB pending.
|
||||
file.c.Wait()
|
||||
if file.err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
file.wpending++
|
||||
|
||||
debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))
|
||||
|
||||
// We may not own the memory of data, so rather than
|
||||
// simply copying it, we'll marshal the document ahead of time.
|
||||
data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
|
||||
if err != nil {
|
||||
file.err = err
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
|
||||
file.m.Lock()
|
||||
file.wpending--
|
||||
if err != nil && file.err == nil {
|
||||
file.err = err
|
||||
}
|
||||
file.c.Broadcast()
|
||||
file.m.Unlock()
|
||||
}()
|
||||
}
|
||||
|
||||
// Seek sets the offset for the next Read or Write on file to
|
||||
// offset, interpreted according to whence: 0 means relative to
|
||||
// the origin of the file, 1 means relative to the current offset,
|
||||
// and 2 means relative to the end. It returns the new offset and
|
||||
// an error, if any.
|
||||
func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
|
||||
file.m.Lock()
|
||||
debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence)
|
||||
defer file.m.Unlock()
|
||||
switch whence {
|
||||
case os.SEEK_SET:
|
||||
case os.SEEK_CUR:
|
||||
offset += file.offset
|
||||
case os.SEEK_END:
|
||||
offset += file.doc.Length
|
||||
default:
|
||||
panic("unsupported whence value")
|
||||
}
|
||||
if offset > file.doc.Length {
|
||||
return file.offset, errors.New("seek past end of file")
|
||||
}
|
||||
if offset == file.doc.Length {
|
||||
// If we're seeking to the end of the file,
|
||||
// no need to read anything. This enables
|
||||
// a client to find the size of the file using only the
|
||||
// io.ReadSeeker interface with low overhead.
|
||||
file.offset = offset
|
||||
return file.offset, nil
|
||||
}
|
||||
chunk := int(offset / int64(file.doc.ChunkSize))
|
||||
if chunk+1 == file.chunk && offset >= file.offset {
|
||||
file.rbuf = file.rbuf[int(offset-file.offset):]
|
||||
file.offset = offset
|
||||
return file.offset, nil
|
||||
}
|
||||
file.offset = offset
|
||||
file.chunk = chunk
|
||||
file.rbuf = nil
|
||||
file.rbuf, err = file.getChunk()
|
||||
if err == nil {
|
||||
file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
|
||||
}
|
||||
return file.offset, err
|
||||
}
|
||||
|
||||
// Read reads into b the next available data from the file and
|
||||
// returns the number of bytes written and an error in case
|
||||
// something wrong happened. At the end of the file, n will
|
||||
// be zero and err will be set to os.EOF.
|
||||
//
|
||||
// The parameters and behavior of this function turn the file
|
||||
// into an io.Reader.
|
||||
func (file *GridFile) Read(b []byte) (n int, err error) {
|
||||
file.assertMode(gfsReading)
|
||||
file.m.Lock()
|
||||
debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
|
||||
defer file.m.Unlock()
|
||||
if file.offset == file.doc.Length {
|
||||
return 0, io.EOF
|
||||
}
|
||||
for err == nil {
|
||||
i := copy(b, file.rbuf)
|
||||
n += i
|
||||
file.offset += int64(i)
|
||||
file.rbuf = file.rbuf[i:]
|
||||
if i == len(b) || file.offset == file.doc.Length {
|
||||
break
|
||||
}
|
||||
b = b[i:]
|
||||
file.rbuf, err = file.getChunk()
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (file *GridFile) getChunk() (data []byte, err error) {
|
||||
cache := file.rcache
|
||||
file.rcache = nil
|
||||
if cache != nil && cache.n == file.chunk {
|
||||
debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
|
||||
cache.wait.Lock()
|
||||
data, err = cache.data, cache.err
|
||||
} else {
|
||||
debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
|
||||
var doc gfsChunk
|
||||
err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
|
||||
data = doc.Data
|
||||
}
|
||||
file.chunk++
|
||||
if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
|
||||
// Read the next one in background.
|
||||
cache = &gfsCachedChunk{n: file.chunk}
|
||||
cache.wait.Lock()
|
||||
debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
|
||||
// Clone the session to avoid having it closed in between.
|
||||
chunks := file.gfs.Chunks
|
||||
session := chunks.Database.Session.Clone()
|
||||
go func(id interface{}, n int) {
|
||||
defer session.Close()
|
||||
chunks = chunks.With(session)
|
||||
var doc gfsChunk
|
||||
cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
|
||||
cache.data = doc.Data
|
||||
cache.wait.Unlock()
|
||||
}(file.doc.Id, file.chunk)
|
||||
file.rcache = cache
|
||||
}
|
||||
debugf("Returning err: %#v", err)
|
||||
return
|
||||
}
|
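The GridFS API above composes into a short upload/download round trip: Create and Write buffer data into chunk-sized inserts, Close flushes and records the file document, and the returned *GridFile is an io.ReadSeeker on the way back out. A minimal sketch of that flow, assuming a MongoDB server on localhost:27017; the database, prefix, and file names are illustrative, not taken from this commit:

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "os"

    "gopkg.in/mgo.v2"
)

func main() {
    session, err := mgo.Dial("localhost:27017")
    if err != nil {
        log.Fatal(err)
    }
    defer session.Close()

    gfs := session.DB("mydb").GridFS("fs")

    // Write a file; chunks are inserted by background goroutines and
    // Close reports any deferred write error.
    file, err := gfs.Create("hello.txt")
    if err != nil {
        log.Fatal(err)
    }
    if _, err = file.Write([]byte("Hello, GridFS!")); err != nil {
        log.Fatal(err)
    }
    if err = file.Close(); err != nil {
        log.Fatal(err)
    }

    // Read it back, skipping the first 7 bytes via the io.ReadSeeker side.
    file, err = gfs.Open("hello.txt")
    if err != nil {
        log.Fatal(err)
    }
    defer file.Close()
    if _, err = file.Seek(7, os.SEEK_SET); err != nil {
        log.Fatal(err)
    }
    rest, err := ioutil.ReadAll(file)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s\n", rest) // prints: GridFS!
}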
708
vendor/gopkg.in/mgo.v2/gridfs_test.go
generated vendored Normal file
@@ -0,0 +1,708 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo_test

import (
    "io"
    "os"
    "time"

    . "gopkg.in/check.v1"
    "gopkg.in/mgo.v2"
    "gopkg.in/mgo.v2/bson"
)

func (s *S) TestGridFSCreate(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    before := bson.Now()

    gfs := db.GridFS("fs")
    file, err := gfs.Create("")
    c.Assert(err, IsNil)

    n, err := file.Write([]byte("some data"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 9)

    err = file.Close()
    c.Assert(err, IsNil)

    after := bson.Now()

    // Check the file information.
    result := M{}
    err = db.C("fs.files").Find(nil).One(result)
    c.Assert(err, IsNil)

    fileId, ok := result["_id"].(bson.ObjectId)
    c.Assert(ok, Equals, true)
    c.Assert(fileId.Valid(), Equals, true)
    result["_id"] = "<id>"

    ud, ok := result["uploadDate"].(time.Time)
    c.Assert(ok, Equals, true)
    c.Assert(ud.After(before) && ud.Before(after), Equals, true)
    result["uploadDate"] = "<timestamp>"

    expected := M{
        "_id":        "<id>",
        "length":     9,
        "chunkSize":  255 * 1024,
        "uploadDate": "<timestamp>",
        "md5":        "1e50210a0202497fb79bc38b6ade6c34",
    }
    c.Assert(result, DeepEquals, expected)

    // Check the chunk.
    result = M{}
    err = db.C("fs.chunks").Find(nil).One(result)
    c.Assert(err, IsNil)

    chunkId, ok := result["_id"].(bson.ObjectId)
    c.Assert(ok, Equals, true)
    c.Assert(chunkId.Valid(), Equals, true)
    result["_id"] = "<id>"

    expected = M{
        "_id":      "<id>",
        "files_id": fileId,
        "n":        0,
        "data":     []byte("some data"),
    }
    c.Assert(result, DeepEquals, expected)

    // Check that an index was created.
    indexes, err := db.C("fs.chunks").Indexes()
    c.Assert(err, IsNil)
    c.Assert(len(indexes), Equals, 2)
    c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"})
}

func (s *S) TestGridFSFileDetails(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")

    file, err := gfs.Create("myfile1.txt")
    c.Assert(err, IsNil)

    n, err := file.Write([]byte("some"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 4)

    c.Assert(file.Size(), Equals, int64(4))

    n, err = file.Write([]byte(" data"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 5)

    c.Assert(file.Size(), Equals, int64(9))

    id, _ := file.Id().(bson.ObjectId)
    c.Assert(id.Valid(), Equals, true)
    c.Assert(file.Name(), Equals, "myfile1.txt")
    c.Assert(file.ContentType(), Equals, "")

    var info interface{}
    err = file.GetMeta(&info)
    c.Assert(err, IsNil)
    c.Assert(info, IsNil)

    file.SetId("myid")
    file.SetName("myfile2.txt")
    file.SetContentType("text/plain")
    file.SetMeta(M{"any": "thing"})

    c.Assert(file.Id(), Equals, "myid")
    c.Assert(file.Name(), Equals, "myfile2.txt")
    c.Assert(file.ContentType(), Equals, "text/plain")

    err = file.GetMeta(&info)
    c.Assert(err, IsNil)
    c.Assert(info, DeepEquals, bson.M{"any": "thing"})

    err = file.Close()
    c.Assert(err, IsNil)

    c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")

    ud := file.UploadDate()
    now := time.Now()
    c.Assert(ud.Before(now), Equals, true)
    c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true)

    result := M{}
    err = db.C("fs.files").Find(nil).One(result)
    c.Assert(err, IsNil)

    result["uploadDate"] = "<timestamp>"

    expected := M{
        "_id":         "myid",
        "length":      9,
        "chunkSize":   255 * 1024,
        "uploadDate":  "<timestamp>",
        "md5":         "1e50210a0202497fb79bc38b6ade6c34",
        "filename":    "myfile2.txt",
        "contentType": "text/plain",
        "metadata":    M{"any": "thing"},
    }
    c.Assert(result, DeepEquals, expected)
}

func (s *S) TestGridFSSetUploadDate(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")
    file, err := gfs.Create("")
    c.Assert(err, IsNil)

    t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local)
    file.SetUploadDate(t)

    err = file.Close()
    c.Assert(err, IsNil)

    // Check the file information.
    result := M{}
    err = db.C("fs.files").Find(nil).One(result)
    c.Assert(err, IsNil)

    ud := result["uploadDate"].(time.Time)
    if !ud.Equal(t) {
        c.Fatalf("want upload date %s, got %s", t, ud)
    }
}

func (s *S) TestGridFSCreateWithChunking(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")

    file, err := gfs.Create("")
    c.Assert(err, IsNil)

    file.SetChunkSize(5)

    // Smaller than the chunk size.
    n, err := file.Write([]byte("abc"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 3)

    // Boundary in the middle.
    n, err = file.Write([]byte("defg"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 4)

    // Boundary at the end.
    n, err = file.Write([]byte("hij"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 3)

    // Larger than the chunk size, with 3 chunks.
    n, err = file.Write([]byte("klmnopqrstuv"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 12)

    err = file.Close()
    c.Assert(err, IsNil)

    // Check the file information.
    result := M{}
    err = db.C("fs.files").Find(nil).One(result)
    c.Assert(err, IsNil)

    fileId, _ := result["_id"].(bson.ObjectId)
    c.Assert(fileId.Valid(), Equals, true)
    result["_id"] = "<id>"
    result["uploadDate"] = "<timestamp>"

    expected := M{
        "_id":        "<id>",
        "length":     22,
        "chunkSize":  5,
        "uploadDate": "<timestamp>",
        "md5":        "44a66044834cbe55040089cabfc102d5",
    }
    c.Assert(result, DeepEquals, expected)

    // Check the chunks.
    iter := db.C("fs.chunks").Find(nil).Sort("n").Iter()
    dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}
    for i := 0; ; i++ {
        result = M{}
        if !iter.Next(result) {
            if i != 5 {
                c.Fatalf("Expected 5 chunks, got %d", i)
            }
            break
        }
        c.Assert(iter.Close(), IsNil)

        result["_id"] = "<id>"

        expected = M{
            "_id":      "<id>",
            "files_id": fileId,
            "n":        i,
            "data":     []byte(dataChunks[i]),
        }
        c.Assert(result, DeepEquals, expected)
    }
}

func (s *S) TestGridFSAbort(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")
    file, err := gfs.Create("")
    c.Assert(err, IsNil)

    file.SetChunkSize(5)

    n, err := file.Write([]byte("some data"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 9)

    var count int
    for i := 0; i < 10; i++ {
        count, err = db.C("fs.chunks").Count()
        if count > 0 || err != nil {
            break
        }
    }
    c.Assert(err, IsNil)
    c.Assert(count, Equals, 1)

    file.Abort()

    err = file.Close()
    c.Assert(err, ErrorMatches, "write aborted")

    count, err = db.C("fs.chunks").Count()
    c.Assert(err, IsNil)
    c.Assert(count, Equals, 0)
}

func (s *S) TestGridFSCloseConflict(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true})

    // For a closing-time conflict
    err = db.C("fs.files").Insert(M{"filename": "foo.txt"})
    c.Assert(err, IsNil)

    gfs := db.GridFS("fs")
    file, err := gfs.Create("foo.txt")
    c.Assert(err, IsNil)

    _, err = file.Write([]byte("some data"))
    c.Assert(err, IsNil)

    err = file.Close()
    c.Assert(mgo.IsDup(err), Equals, true)

    count, err := db.C("fs.chunks").Count()
    c.Assert(err, IsNil)
    c.Assert(count, Equals, 0)
}

func (s *S) TestGridFSOpenNotFound(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")
    file, err := gfs.OpenId("non-existent")
    c.Assert(err == mgo.ErrNotFound, Equals, true)
    c.Assert(file, IsNil)

    file, err = gfs.Open("non-existent")
    c.Assert(err == mgo.ErrNotFound, Equals, true)
    c.Assert(file, IsNil)
}

func (s *S) TestGridFSReadAll(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")
    file, err := gfs.Create("")
    c.Assert(err, IsNil)
    id := file.Id()

    file.SetChunkSize(5)

    n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 22)

    err = file.Close()
    c.Assert(err, IsNil)

    file, err = gfs.OpenId(id)
    c.Assert(err, IsNil)

    b := make([]byte, 30)
    n, err = file.Read(b)
    c.Assert(n, Equals, 22)
    c.Assert(err, IsNil)

    n, err = file.Read(b)
    c.Assert(n, Equals, 0)
    c.Assert(err == io.EOF, Equals, true)

    err = file.Close()
    c.Assert(err, IsNil)
}

func (s *S) TestGridFSReadChunking(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")

    file, err := gfs.Create("")
    c.Assert(err, IsNil)

    id := file.Id()

    file.SetChunkSize(5)

    n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 22)

    err = file.Close()
    c.Assert(err, IsNil)

    file, err = gfs.OpenId(id)
    c.Assert(err, IsNil)

    b := make([]byte, 30)

    // Smaller than the chunk size.
    n, err = file.Read(b[:3])
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 3)
    c.Assert(b[:3], DeepEquals, []byte("abc"))

    // Boundary in the middle.
    n, err = file.Read(b[:4])
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 4)
    c.Assert(b[:4], DeepEquals, []byte("defg"))

    // Boundary at the end.
    n, err = file.Read(b[:3])
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 3)
    c.Assert(b[:3], DeepEquals, []byte("hij"))

    // Larger than the chunk size, with 3 chunks.
    n, err = file.Read(b)
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 12)
    c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv"))

    n, err = file.Read(b)
    c.Assert(n, Equals, 0)
    c.Assert(err == io.EOF, Equals, true)

    err = file.Close()
    c.Assert(err, IsNil)
}

func (s *S) TestGridFSOpen(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")

    file, err := gfs.Create("myfile.txt")
    c.Assert(err, IsNil)
    file.Write([]byte{'1'})
    file.Close()

    file, err = gfs.Create("myfile.txt")
    c.Assert(err, IsNil)
    file.Write([]byte{'2'})
    file.Close()

    file, err = gfs.Open("myfile.txt")
    c.Assert(err, IsNil)
    defer file.Close()

    var b [1]byte

    _, err = file.Read(b[:])
    c.Assert(err, IsNil)
    c.Assert(string(b[:]), Equals, "2")
}

func (s *S) TestGridFSSeek(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")
    file, err := gfs.Create("")
    c.Assert(err, IsNil)
    id := file.Id()

    file.SetChunkSize(5)

    n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 22)

    err = file.Close()
    c.Assert(err, IsNil)

    b := make([]byte, 5)

    file, err = gfs.OpenId(id)
    c.Assert(err, IsNil)

    o, err := file.Seek(3, os.SEEK_SET)
    c.Assert(err, IsNil)
    c.Assert(o, Equals, int64(3))
    _, err = file.Read(b)
    c.Assert(err, IsNil)
    c.Assert(b, DeepEquals, []byte("defgh"))

    o, err = file.Seek(5, os.SEEK_CUR)
    c.Assert(err, IsNil)
    c.Assert(o, Equals, int64(13))
    _, err = file.Read(b)
    c.Assert(err, IsNil)
    c.Assert(b, DeepEquals, []byte("nopqr"))

    o, err = file.Seek(0, os.SEEK_END)
    c.Assert(err, IsNil)
    c.Assert(o, Equals, int64(22))
    n, err = file.Read(b)
    c.Assert(err, Equals, io.EOF)
    c.Assert(n, Equals, 0)

    o, err = file.Seek(-10, os.SEEK_END)
    c.Assert(err, IsNil)
    c.Assert(o, Equals, int64(12))
    _, err = file.Read(b)
    c.Assert(err, IsNil)
    c.Assert(b, DeepEquals, []byte("mnopq"))

    o, err = file.Seek(8, os.SEEK_SET)
    c.Assert(err, IsNil)
    c.Assert(o, Equals, int64(8))
    _, err = file.Read(b)
    c.Assert(err, IsNil)
    c.Assert(b, DeepEquals, []byte("ijklm"))

    // Trivial seek forward within same chunk. Already
    // got the data, shouldn't touch the database.
    sent := mgo.GetStats().SentOps
    o, err = file.Seek(1, os.SEEK_CUR)
    c.Assert(err, IsNil)
    c.Assert(o, Equals, int64(14))
    c.Assert(mgo.GetStats().SentOps, Equals, sent)
    _, err = file.Read(b)
    c.Assert(err, IsNil)
    c.Assert(b, DeepEquals, []byte("opqrs"))

    // Try seeking past end of file.
    file.Seek(3, os.SEEK_SET)
    o, err = file.Seek(23, os.SEEK_SET)
    c.Assert(err, ErrorMatches, "seek past end of file")
    c.Assert(o, Equals, int64(3))
}

func (s *S) TestGridFSRemoveId(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")

    file, err := gfs.Create("myfile.txt")
    c.Assert(err, IsNil)
    file.Write([]byte{'1'})
    file.Close()

    file, err = gfs.Create("myfile.txt")
    c.Assert(err, IsNil)
    file.Write([]byte{'2'})
    id := file.Id()
    file.Close()

    err = gfs.RemoveId(id)
    c.Assert(err, IsNil)

    file, err = gfs.Open("myfile.txt")
    c.Assert(err, IsNil)
    defer file.Close()

    var b [1]byte

    _, err = file.Read(b[:])
    c.Assert(err, IsNil)
    c.Assert(string(b[:]), Equals, "1")

    n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 0)
}

func (s *S) TestGridFSRemove(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")

    file, err := gfs.Create("myfile.txt")
    c.Assert(err, IsNil)
    file.Write([]byte{'1'})
    file.Close()

    file, err = gfs.Create("myfile.txt")
    c.Assert(err, IsNil)
    file.Write([]byte{'2'})
    file.Close()

    err = gfs.Remove("myfile.txt")
    c.Assert(err, IsNil)

    _, err = gfs.Open("myfile.txt")
    c.Assert(err == mgo.ErrNotFound, Equals, true)

    n, err := db.C("fs.chunks").Find(nil).Count()
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 0)
}

func (s *S) TestGridFSOpenNext(c *C) {
    session, err := mgo.Dial("localhost:40011")
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("mydb")

    gfs := db.GridFS("fs")

    file, err := gfs.Create("myfile1.txt")
    c.Assert(err, IsNil)
    file.Write([]byte{'1'})
    file.Close()

    file, err = gfs.Create("myfile2.txt")
    c.Assert(err, IsNil)
    file.Write([]byte{'2'})
    file.Close()

    var f *mgo.GridFile
    var b [1]byte

    iter := gfs.Find(nil).Sort("-filename").Iter()

    ok := gfs.OpenNext(iter, &f)
    c.Assert(ok, Equals, true)
    c.Check(f.Name(), Equals, "myfile2.txt")

    _, err = f.Read(b[:])
    c.Assert(err, IsNil)
    c.Assert(string(b[:]), Equals, "2")

    ok = gfs.OpenNext(iter, &f)
    c.Assert(ok, Equals, true)
    c.Check(f.Name(), Equals, "myfile1.txt")

    _, err = f.Read(b[:])
    c.Assert(err, IsNil)
    c.Assert(string(b[:]), Equals, "1")

    ok = gfs.OpenNext(iter, &f)
    c.Assert(ok, Equals, false)
    c.Assert(iter.Close(), IsNil)
    c.Assert(f, IsNil)

    // Do it again with a more restrictive query to make sure
    // it's actually taken into account.
    iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter()

    ok = gfs.OpenNext(iter, &f)
    c.Assert(ok, Equals, true)
    c.Check(f.Name(), Equals, "myfile1.txt")

    ok = gfs.OpenNext(iter, &f)
    c.Assert(ok, Equals, false)
    c.Assert(iter.Close(), IsNil)
    c.Assert(f, IsNil)
}
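The chunking tests above rely on the arithmetic that a file of length L stored with chunk size S occupies ceil(L / S) chunk documents, so the 22-byte write with SetChunkSize(5) yields ceil(22/5) = 5 chunks, the last holding only the 2-byte remainder "uv". A small helper expressing that invariant; the function is a hypothetical illustration, not part of the test suite:

package mgo_test

// expectedChunks returns the number of chunk documents GridFS needs
// for a file of the given length at the given chunk size: ceil(L/S).
func expectedChunks(length, chunkSize int64) int64 {
    if length <= 0 {
        return 0
    }
    return (length + chunkSize - 1) / chunkSize
}

// expectedChunks(22, 5) == 5, matching TestGridFSCreateWithChunking.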
77
vendor/gopkg.in/mgo.v2/internal/sasl/sasl.c
generated vendored Normal file
@@ -0,0 +1,77 @@
// +build !windows

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sasl/sasl.h>

static int mgo_sasl_simple(void *context, int id, const char **result, unsigned int *len)
{
    if (!result) {
        return SASL_BADPARAM;
    }
    switch (id) {
    case SASL_CB_USER:
        *result = (char *)context;
        break;
    case SASL_CB_AUTHNAME:
        *result = (char *)context;
        break;
    case SASL_CB_LANGUAGE:
        *result = NULL;
        break;
    default:
        return SASL_BADPARAM;
    }
    if (len) {
        *len = *result ? strlen(*result) : 0;
    }
    return SASL_OK;
}

typedef int (*callback)(void);

static int mgo_sasl_secret(sasl_conn_t *conn, void *context, int id, sasl_secret_t **result)
{
    if (!conn || !result || id != SASL_CB_PASS) {
        return SASL_BADPARAM;
    }
    *result = (sasl_secret_t *)context;
    return SASL_OK;
}

sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password)
{
    sasl_callback_t *cb = malloc(4 * sizeof(sasl_callback_t));
    int n = 0;

    size_t len = strlen(password);
    sasl_secret_t *secret = (sasl_secret_t*)malloc(sizeof(sasl_secret_t) + len);
    if (!secret) {
        free(cb);
        return NULL;
    }
    strcpy((char *)secret->data, password);
    secret->len = len;

    cb[n].id = SASL_CB_PASS;
    cb[n].proc = (callback)&mgo_sasl_secret;
    cb[n].context = secret;
    n++;

    cb[n].id = SASL_CB_USER;
    cb[n].proc = (callback)&mgo_sasl_simple;
    cb[n].context = (char*)username;
    n++;

    cb[n].id = SASL_CB_AUTHNAME;
    cb[n].proc = (callback)&mgo_sasl_simple;
    cb[n].context = (char*)username;
    n++;

    cb[n].id = SASL_CB_LIST_END;
    cb[n].proc = NULL;
    cb[n].context = NULL;

    return cb;
}
138
vendor/gopkg.in/mgo.v2/internal/sasl/sasl.go
generated vendored Normal file
@@ -0,0 +1,138 @@
// Package sasl is an implementation detail of the mgo package.
//
// This package is not meant to be used by itself.
//

// +build !windows

package sasl

// #cgo LDFLAGS: -lsasl2
//
// struct sasl_conn {};
//
// #include <stdlib.h>
// #include <sasl/sasl.h>
//
// sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password);
//
import "C"

import (
    "fmt"
    "strings"
    "sync"
    "unsafe"
)

type saslStepper interface {
    Step(serverData []byte) (clientData []byte, done bool, err error)
    Close()
}

type saslSession struct {
    conn *C.sasl_conn_t
    step int
    mech string

    cstrings  []*C.char
    callbacks *C.sasl_callback_t
}

var initError error
var initOnce sync.Once

func initSASL() {
    rc := C.sasl_client_init(nil)
    if rc != C.SASL_OK {
        initError = saslError(rc, nil, "cannot initialize SASL library")
    }
}

func New(username, password, mechanism, service, host string) (saslStepper, error) {
    initOnce.Do(initSASL)
    if initError != nil {
        return nil, initError
    }

    ss := &saslSession{mech: mechanism}
    if service == "" {
        service = "mongodb"
    }
    if i := strings.Index(host, ":"); i >= 0 {
        host = host[:i]
    }
    ss.callbacks = C.mgo_sasl_callbacks(ss.cstr(username), ss.cstr(password))
    rc := C.sasl_client_new(ss.cstr(service), ss.cstr(host), nil, nil, ss.callbacks, 0, &ss.conn)
    if rc != C.SASL_OK {
        ss.Close()
        return nil, saslError(rc, nil, "cannot create new SASL client")
    }
    return ss, nil
}

func (ss *saslSession) cstr(s string) *C.char {
    cstr := C.CString(s)
    ss.cstrings = append(ss.cstrings, cstr)
    return cstr
}

func (ss *saslSession) Close() {
    for _, cstr := range ss.cstrings {
        C.free(unsafe.Pointer(cstr))
    }
    ss.cstrings = nil

    if ss.callbacks != nil {
        C.free(unsafe.Pointer(ss.callbacks))
    }

    // The documentation of SASL dispose makes it clear that this should only
    // be done when the connection is done, not when the authentication phase
    // is done, because an encryption layer may have been negotiated.
    // Even then, we'll do this for now, because it's simpler and prevents
    // keeping track of this state for every socket. If it breaks, we'll fix it.
    C.sasl_dispose(&ss.conn)
}

func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
    ss.step++
    if ss.step > 10 {
        return nil, false, fmt.Errorf("too many SASL steps without authentication")
    }
    var cclientData *C.char
    var cclientDataLen C.uint
    var rc C.int
    if ss.step == 1 {
        var mechanism *C.char // ignored - must match cred
        rc = C.sasl_client_start(ss.conn, ss.cstr(ss.mech), nil, &cclientData, &cclientDataLen, &mechanism)
    } else {
        var cserverData *C.char
        var cserverDataLen C.uint
        if len(serverData) > 0 {
            cserverData = (*C.char)(unsafe.Pointer(&serverData[0]))
            cserverDataLen = C.uint(len(serverData))
        }
        rc = C.sasl_client_step(ss.conn, cserverData, cserverDataLen, nil, &cclientData, &cclientDataLen)
    }
    if cclientData != nil && cclientDataLen > 0 {
        clientData = C.GoBytes(unsafe.Pointer(cclientData), C.int(cclientDataLen))
    }
    if rc == C.SASL_OK {
        return clientData, true, nil
    }
    if rc == C.SASL_CONTINUE {
        return clientData, false, nil
    }
    return nil, false, saslError(rc, ss.conn, "cannot establish SASL session")
}

func saslError(rc C.int, conn *C.sasl_conn_t, msg string) error {
    var detail string
    if conn == nil {
        detail = C.GoString(C.sasl_errstring(rc, nil, nil))
    } else {
        detail = C.GoString(C.sasl_errdetail(conn))
    }
    return fmt.Errorf(msg + ": " + detail)
}
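New returns a saslStepper whose Step method is meant to be driven in a challenge-response loop until done is true. A minimal sketch of such a loop, assuming it lives alongside this package; exchangeWithServer is a hypothetical stand-in for the driver's saslStart/saslContinue wire exchange and is not part of this commit:

package sasl

// runConversation (hypothetical) drives a SASL conversation to completion.
func runConversation(user, pass, host string, exchangeWithServer func(clientData []byte) (serverData []byte, err error)) error {
    stepper, err := New(user, pass, "GSSAPI", "mongodb", host)
    if err != nil {
        return err
    }
    defer stepper.Close()

    var serverData []byte
    for {
        clientData, done, err := stepper.Step(serverData)
        if err != nil {
            return err
        }
        // Each client token, including the final one, goes to the server.
        serverData, err = exchangeWithServer(clientData)
        if err != nil {
            return err
        }
        if done {
            return nil
        }
    }
}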
118
vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c
generated vendored Normal file
@@ -0,0 +1,118 @@
#include "sasl_windows.h"

static const LPSTR SSPI_PACKAGE_NAME = "kerberos";

SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle *cred_handle, char *username, char *password, char *domain)
{
    SEC_WINNT_AUTH_IDENTITY auth_identity;
    SECURITY_INTEGER ignored;

    auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
    auth_identity.User = (LPSTR) username;
    auth_identity.UserLength = strlen(username);
    auth_identity.Password = (LPSTR) password;
    auth_identity.PasswordLength = strlen(password);
    auth_identity.Domain = (LPSTR) domain;
    auth_identity.DomainLength = strlen(domain);
    return call_sspi_acquire_credentials_handle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, cred_handle, &ignored);
}

int sspi_step(CredHandle *cred_handle, int has_context, CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *target)
{
    SecBufferDesc inbuf;
    SecBuffer in_bufs[1];
    SecBufferDesc outbuf;
    SecBuffer out_bufs[1];

    if (has_context > 0) {
        // If we already have a context, we now have data to send.
        // Put this data in an inbuf.
        inbuf.ulVersion = SECBUFFER_VERSION;
        inbuf.cBuffers = 1;
        inbuf.pBuffers = in_bufs;
        in_bufs[0].pvBuffer = *buffer;
        in_bufs[0].cbBuffer = *buffer_length;
        in_bufs[0].BufferType = SECBUFFER_TOKEN;
    }

    outbuf.ulVersion = SECBUFFER_VERSION;
    outbuf.cBuffers = 1;
    outbuf.pBuffers = out_bufs;
    out_bufs[0].pvBuffer = NULL;
    out_bufs[0].cbBuffer = 0;
    out_bufs[0].BufferType = SECBUFFER_TOKEN;

    ULONG context_attr = 0;

    int ret = call_sspi_initialize_security_context(cred_handle,
            has_context > 0 ? context : NULL,
            (LPSTR) target,
            ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH,
            0,
            SECURITY_NETWORK_DREP,
            has_context > 0 ? &inbuf : NULL,
            0,
            context,
            &outbuf,
            &context_attr,
            NULL);

    *buffer = malloc(out_bufs[0].cbBuffer);
    *buffer_length = out_bufs[0].cbBuffer;
    memcpy(*buffer, out_bufs[0].pvBuffer, *buffer_length);

    return ret;
}

int sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *user_plus_realm)
{
    SecPkgContext_Sizes sizes;
    SECURITY_STATUS status = call_sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes);

    if (status != SEC_E_OK) {
        return status;
    }

    size_t user_plus_realm_length = strlen(user_plus_realm);
    int msgSize = 4 + user_plus_realm_length;
    char *msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char));
    msg[sizes.cbSecurityTrailer + 0] = 1;
    msg[sizes.cbSecurityTrailer + 1] = 0;
    msg[sizes.cbSecurityTrailer + 2] = 0;
    msg[sizes.cbSecurityTrailer + 3] = 0;
    memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, user_plus_realm_length);

    SecBuffer wrapBufs[3];
    SecBufferDesc wrapBufDesc;
    wrapBufDesc.cBuffers = 3;
    wrapBufDesc.pBuffers = wrapBufs;
    wrapBufDesc.ulVersion = SECBUFFER_VERSION;

    wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer;
    wrapBufs[0].BufferType = SECBUFFER_TOKEN;
    wrapBufs[0].pvBuffer = msg;

    wrapBufs[1].cbBuffer = msgSize;
    wrapBufs[1].BufferType = SECBUFFER_DATA;
    wrapBufs[1].pvBuffer = msg + sizes.cbSecurityTrailer;

    wrapBufs[2].cbBuffer = sizes.cbBlockSize;
    wrapBufs[2].BufferType = SECBUFFER_PADDING;
    wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize;

    status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0);
    if (status != SEC_E_OK) {
        free(msg);
        return status;
    }

    *buffer_length = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer;
    *buffer = malloc(*buffer_length);

    memcpy(*buffer, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer);
    memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer);
    memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer);

    free(msg);
    return SEC_E_OK;
}
140
vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go
generated
vendored
Normal file
@@ -0,0 +1,140 @@
package sasl

// #include "sasl_windows.h"
import "C"

import (
	"fmt"
	"strings"
	"sync"
	"unsafe"
)

type saslStepper interface {
	Step(serverData []byte) (clientData []byte, done bool, err error)
	Close()
}

type saslSession struct {
	// Credentials
	mech          string
	service       string
	host          string
	userPlusRealm string
	target        string
	domain        string

	// Internal state
	authComplete bool
	errored      bool
	step         int

	// C internal state
	credHandle C.CredHandle
	context    C.CtxtHandle
	hasContext C.int

	// Keep track of pointers we need to explicitly free
	stringsToFree []*C.char
}

var initError error
var initOnce sync.Once

func initSSPI() {
	rc := C.load_secur32_dll()
	if rc != 0 {
		initError = fmt.Errorf("Error loading libraries: %v", rc)
	}
}

func New(username, password, mechanism, service, host string) (saslStepper, error) {
	initOnce.Do(initSSPI)
	ss := &saslSession{mech: mechanism, hasContext: 0, userPlusRealm: username}
	if service == "" {
		service = "mongodb"
	}
	if i := strings.Index(host, ":"); i >= 0 {
		host = host[:i]
	}
	ss.service = service
	ss.host = host

	usernameComponents := strings.Split(username, "@")
	if len(usernameComponents) < 2 {
		return nil, fmt.Errorf("Username '%v' doesn't contain a realm!", username)
	}
	user := usernameComponents[0]
	ss.domain = usernameComponents[1]
	ss.target = fmt.Sprintf("%s/%s", ss.service, ss.host)

	var status C.SECURITY_STATUS
	// Step 0: call AcquireCredentialsHandle to get a nice SSPI CredHandle
	if len(password) > 0 {
		status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(ss.domain))
	} else {
		status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(ss.domain))
	}
	if status != C.SEC_E_OK {
		ss.errored = true
		return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status)
	}
	return ss, nil
}

func (ss *saslSession) cstr(s string) *C.char {
	cstr := C.CString(s)
	ss.stringsToFree = append(ss.stringsToFree, cstr)
	return cstr
}

func (ss *saslSession) Close() {
	for _, cstr := range ss.stringsToFree {
		C.free(unsafe.Pointer(cstr))
	}
}

func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
	ss.step++
	if ss.step > 10 {
		return nil, false, fmt.Errorf("too many SSPI steps without authentication")
	}
	var buffer C.PVOID
	var bufferLength C.ULONG
	if len(serverData) > 0 {
		buffer = (C.PVOID)(unsafe.Pointer(&serverData[0]))
		bufferLength = C.ULONG(len(serverData))
	}
	var status C.int
	if ss.authComplete {
		// Step 3: last bit of magic to use the correct server credentials
		status = C.sspi_send_client_authz_id(&ss.context, &buffer, &bufferLength, ss.cstr(ss.userPlusRealm))
	} else {
		// Step 1 + Step 2: set up security context with the server and TGT
		status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, &buffer, &bufferLength, ss.cstr(ss.target))
	}
	if buffer != C.PVOID(nil) {
		defer C.free(unsafe.Pointer(buffer))
	}
	if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED {
		ss.errored = true
		return nil, false, ss.handleSSPIErrorCode(status)
	}

	clientData = C.GoBytes(unsafe.Pointer(buffer), C.int(bufferLength))
	if status == C.SEC_E_OK {
		ss.authComplete = true
		return clientData, true, nil
	} else {
		ss.hasContext = 1
		return clientData, false, nil
	}
}

func (ss *saslSession) handleSSPIErrorCode(code C.int) error {
	switch {
	case code == C.SEC_E_TARGET_UNKNOWN:
		return fmt.Errorf("Target %v@%v not found", ss.target, ss.domain)
	}
	return fmt.Errorf("Unknown error doing step %v, error code %v", ss.step, code)
}
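A minimal sketch of how a saslStepper like the session above is driven during authentication (not part of the diff; the roundTrip helper and the credential values are hypothetical stand-ins for the saslStart/saslContinue commands mgo actually issues):

// Illustrative driver loop for the saslStepper interface defined above,
// written as if it lived inside this sasl package.
func authenticate(host string, roundTrip func([]byte) []byte) error {
	stepper, err := New("user@REALM", "password", "GSSAPI", "mongodb", host)
	if err != nil {
		return err
	}
	defer stepper.Close()

	var serverData []byte
	for {
		clientData, done, err := stepper.Step(serverData)
		if err != nil {
			return err
		}
		// Exchange payloads with the server; hypothetical transport call.
		serverData = roundTrip(clientData)
		if done {
			return nil
		}
	}
}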
7
vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h
generated
vendored
Normal file
@@ -0,0 +1,7 @@
#include <windows.h>

#include "sspi_windows.h"

SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain);
int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* target);
int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm);
96
vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c
generated
vendored
Normal file
@@ -0,0 +1,96 @@
// Code adapted from the NodeJS kerberos library:
//
//   https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.c
//
// Under the terms of the Apache License, Version 2.0:
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
#include <stdlib.h>

#include "sspi_windows.h"

static HINSTANCE sspi_secur32_dll = NULL;

int load_secur32_dll()
{
    sspi_secur32_dll = LoadLibrary("secur32.dll");
    if (sspi_secur32_dll == NULL) {
        return GetLastError();
    }
    return 0;
}

SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo)
{
    if (sspi_secur32_dll == NULL) {
        return -1;
    }
    encryptMessage_fn pfn_encryptMessage = (encryptMessage_fn) GetProcAddress(sspi_secur32_dll, "EncryptMessage");
    if (!pfn_encryptMessage) {
        return -2;
    }
    return (*pfn_encryptMessage)(phContext, fQOP, pMessage, MessageSeqNo);
}

SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
    LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
    void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
    PCredHandle phCredential, PTimeStamp ptsExpiry)
{
    if (sspi_secur32_dll == NULL) {
        return -1;
    }
    acquireCredentialsHandle_fn pfn_acquireCredentialsHandle;
#ifdef _UNICODE
    pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleW");
#else
    pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleA");
#endif
    if (!pfn_acquireCredentialsHandle) {
        return -2;
    }
    return (*pfn_acquireCredentialsHandle)(
        pszPrincipal, pszPackage, fCredentialUse, pvLogonId, pAuthData,
        pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry);
}

SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
    PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName,
    unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep,
    PSecBufferDesc pInput, unsigned long Reserved2, PCtxtHandle phNewContext,
    PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry)
{
    if (sspi_secur32_dll == NULL) {
        return -1;
    }
    initializeSecurityContext_fn pfn_initializeSecurityContext;
#ifdef _UNICODE
    pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextW");
#else
    pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextA");
#endif
    if (!pfn_initializeSecurityContext) {
        return -2;
    }
    return (*pfn_initializeSecurityContext)(
        phCredential, phContext, pszTargetName, fContextReq, Reserved1, TargetDataRep,
        pInput, Reserved2, phNewContext, pOutput, pfContextAttr, ptsExpiry);
}

SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer)
{
    if (sspi_secur32_dll == NULL) {
        return -1;
    }
    queryContextAttributes_fn pfn_queryContextAttributes;
#ifdef _UNICODE
    pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesW");
#else
    pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesA");
#endif
    if (!pfn_queryContextAttributes) {
        return -2;
    }
    return (*pfn_queryContextAttributes)(phContext, ulAttribute, pBuffer);
}
70
vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h
generated
vendored
Normal file
@@ -0,0 +1,70 @@
// Code adapted from the NodeJS kerberos library:
//
//   https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.h
//
// Under the terms of the Apache License, Version 2.0:
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
#ifndef SSPI_WINDOWS_H
#define SSPI_WINDOWS_H

#define SECURITY_WIN32 1

#include <windows.h>
#include <sspi.h>

int load_secur32_dll();

SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo);

typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo);

SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
    LPSTR pszPrincipal,             // Name of principal
    LPSTR pszPackage,               // Name of package
    unsigned long fCredentialUse,   // Flags indicating use
    void *pvLogonId,                // Pointer to logon ID
    void *pAuthData,                // Package specific data
    SEC_GET_KEY_FN pGetKeyFn,       // Pointer to GetKey() func
    void *pvGetKeyArgument,         // Value to pass to GetKey()
    PCredHandle phCredential,       // (out) Cred Handle
    PTimeStamp ptsExpiry            // (out) Lifetime (optional)
);

typedef DWORD (WINAPI *acquireCredentialsHandle_fn)(
    LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
    void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
    PCredHandle phCredential, PTimeStamp ptsExpiry
);

SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
    PCredHandle phCredential,       // Cred to base context
    PCtxtHandle phContext,          // Existing context (OPT)
    LPSTR pszTargetName,            // Name of target
    unsigned long fContextReq,      // Context Requirements
    unsigned long Reserved1,        // Reserved, MBZ
    unsigned long TargetDataRep,    // Data rep of target
    PSecBufferDesc pInput,          // Input Buffers
    unsigned long Reserved2,        // Reserved, MBZ
    PCtxtHandle phNewContext,       // (out) New Context handle
    PSecBufferDesc pOutput,         // (inout) Output Buffers
    unsigned long *pfContextAttr,   // (out) Context attrs
    PTimeStamp ptsExpiry            // (out) Life span (OPT)
);

typedef DWORD (WINAPI *initializeSecurityContext_fn)(
    PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq,
    unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2,
    PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry);

SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(
    PCtxtHandle phContext,          // Context to query
    unsigned long ulAttribute,      // Attribute to query
    void *pBuffer                   // Buffer for attributes
);

typedef DWORD (WINAPI *queryContextAttributes_fn)(
    PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer);

#endif // SSPI_WINDOWS_H
266
vendor/gopkg.in/mgo.v2/internal/scram/scram.go
generated
vendored
Normal file
@@ -0,0 +1,266 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
//
//   http://tools.ietf.org/html/rfc5802
//
package scram

import (
	"bytes"
	"crypto/hmac"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"hash"
	"strconv"
	"strings"
)

// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
//
// A Client may be used within a SASL conversation with logic resembling:
//
//    var in []byte
//    var client = scram.NewClient(sha1.New, user, pass)
//    for client.Step(in) {
//            out := client.Out()
//            // send out to server
//            in = serverOut
//    }
//    if client.Err() != nil {
//            // auth failed
//    }
//
type Client struct {
	newHash func() hash.Hash

	user string
	pass string
	step int
	out  bytes.Buffer
	err  error

	clientNonce []byte
	serverNonce []byte
	saltedPass  []byte
	authMsg     bytes.Buffer
}

// NewClient returns a new SCRAM-* client with the provided hash algorithm.
//
// For SCRAM-SHA-1, for example, use:
//
//    client := scram.NewClient(sha1.New, user, pass)
//
func NewClient(newHash func() hash.Hash, user, pass string) *Client {
	c := &Client{
		newHash: newHash,
		user:    user,
		pass:    pass,
	}
	c.out.Grow(256)
	c.authMsg.Grow(256)
	return c
}

// Out returns the data to be sent to the server in the current step.
func (c *Client) Out() []byte {
	if c.out.Len() == 0 {
		return nil
	}
	return c.out.Bytes()
}

// Err returns the error that occurred, or nil if there were no errors.
func (c *Client) Err() error {
	return c.err
}

// SetNonce sets the client nonce to the provided value.
// If not set, the nonce is generated automatically out of crypto/rand on the first step.
func (c *Client) SetNonce(nonce []byte) {
	c.clientNonce = nonce
}

var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")

// Step processes the incoming data from the server and makes the
// next round of data for the server available via Client.Out.
// Step returns false if there are no errors and more data is
// still expected.
func (c *Client) Step(in []byte) bool {
	c.out.Reset()
	if c.step > 2 || c.err != nil {
		return false
	}
	c.step++
	switch c.step {
	case 1:
		c.err = c.step1(in)
	case 2:
		c.err = c.step2(in)
	case 3:
		c.err = c.step3(in)
	}
	return c.step > 2 || c.err != nil
}

func (c *Client) step1(in []byte) error {
	if len(c.clientNonce) == 0 {
		const nonceLen = 6
		buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
		if _, err := rand.Read(buf[:nonceLen]); err != nil {
			return fmt.Errorf("cannot read random SCRAM-SHA-1 nonce from operating system: %v", err)
		}
		c.clientNonce = buf[nonceLen:]
		b64.Encode(c.clientNonce, buf[:nonceLen])
	}
	c.authMsg.WriteString("n=")
	escaper.WriteString(&c.authMsg, c.user)
	c.authMsg.WriteString(",r=")
	c.authMsg.Write(c.clientNonce)

	c.out.WriteString("n,,")
	c.out.Write(c.authMsg.Bytes())
	return nil
}

var b64 = base64.StdEncoding

func (c *Client) step2(in []byte) error {
	c.authMsg.WriteByte(',')
	c.authMsg.Write(in)

	fields := bytes.Split(in, []byte(","))
	if len(fields) != 3 {
		return fmt.Errorf("expected 3 fields in first SCRAM-SHA-1 server message, got %d: %q", len(fields), in)
	}
	if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-1 nonce: %q", fields[0])
	}
	if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-1 salt: %q", fields[1])
	}
	if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2])
	}

	c.serverNonce = fields[0][2:]
	if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
		return fmt.Errorf("server SCRAM-SHA-1 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
	}

	salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
	n, err := b64.Decode(salt, fields[1][2:])
	if err != nil {
		return fmt.Errorf("cannot decode SCRAM-SHA-1 salt sent by server: %q", fields[1])
	}
	salt = salt[:n]
	iterCount, err := strconv.Atoi(string(fields[2][2:]))
	if err != nil {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2])
	}
	c.saltPassword(salt, iterCount)

	c.authMsg.WriteString(",c=biws,r=")
	c.authMsg.Write(c.serverNonce)

	c.out.WriteString("c=biws,r=")
	c.out.Write(c.serverNonce)
	c.out.WriteString(",p=")
	c.out.Write(c.clientProof())
	return nil
}

func (c *Client) step3(in []byte) error {
	var isv, ise bool
	var fields = bytes.Split(in, []byte(","))
	if len(fields) == 1 {
		isv = bytes.HasPrefix(fields[0], []byte("v="))
		ise = bytes.HasPrefix(fields[0], []byte("e="))
	}
	if ise {
		return fmt.Errorf("SCRAM-SHA-1 authentication error: %s", fields[0][2:])
	} else if !isv {
		return fmt.Errorf("unsupported SCRAM-SHA-1 final message from server: %q", in)
	}
	if !bytes.Equal(c.serverSignature(), fields[0][2:]) {
		return fmt.Errorf("cannot authenticate SCRAM-SHA-1 server signature: %q", fields[0][2:])
	}
	return nil
}

func (c *Client) saltPassword(salt []byte, iterCount int) {
	mac := hmac.New(c.newHash, []byte(c.pass))
	mac.Write(salt)
	mac.Write([]byte{0, 0, 0, 1})
	ui := mac.Sum(nil)
	hi := make([]byte, len(ui))
	copy(hi, ui)
	for i := 1; i < iterCount; i++ {
		mac.Reset()
		mac.Write(ui)
		mac.Sum(ui[:0])
		for j, b := range ui {
			hi[j] ^= b
		}
	}
	c.saltedPass = hi
}

func (c *Client) clientProof() []byte {
	mac := hmac.New(c.newHash, c.saltedPass)
	mac.Write([]byte("Client Key"))
	clientKey := mac.Sum(nil)
	hash := c.newHash()
	hash.Write(clientKey)
	storedKey := hash.Sum(nil)
	mac = hmac.New(c.newHash, storedKey)
	mac.Write(c.authMsg.Bytes())
	clientProof := mac.Sum(nil)
	for i, b := range clientKey {
		clientProof[i] ^= b
	}
	clientProof64 := make([]byte, b64.EncodedLen(len(clientProof)))
	b64.Encode(clientProof64, clientProof)
	return clientProof64
}

func (c *Client) serverSignature() []byte {
	mac := hmac.New(c.newHash, c.saltedPass)
	mac.Write([]byte("Server Key"))
	serverKey := mac.Sum(nil)

	mac = hmac.New(c.newHash, serverKey)
	mac.Write(c.authMsg.Bytes())
	serverSignature := mac.Sum(nil)

	encoded := make([]byte, b64.EncodedLen(len(serverSignature)))
	b64.Encode(encoded, serverSignature)
	return encoded
}
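A compact sketch of a full SCRAM-SHA-1 conversation with the client above, mirroring the flow the tests below exercise (not part of the diff; roundTrip is a hypothetical transport helper, and this would have to live inside the mgo tree since the package is internal):

// Sketch: drive one complete SCRAM-SHA-1 exchange. roundTrip delivers a
// payload to the server and returns its reply (in mgo this corresponds
// to the saslStart/saslContinue command pair). Requires "crypto/sha1".
func authenticate(user, pass string, roundTrip func([]byte) []byte) error {
	client := scram.NewClient(sha1.New, user, pass)
	// The first Step takes no input and yields the client-first message.
	done := client.Step(nil)
	for !done && client.Err() == nil {
		done = client.Step(roundTrip(client.Out()))
	}
	return client.Err()
}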
67
vendor/gopkg.in/mgo.v2/internal/scram/scram_test.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
package scram_test

import (
	"crypto/sha1"
	"testing"

	. "gopkg.in/check.v1"
	"gopkg.in/mgo.v2/internal/scram"
	"strings"
)

var _ = Suite(&S{})

func Test(t *testing.T) { TestingT(t) }

type S struct{}

var tests = [][]string{{
	"U: user pencil",
	"N: fyko+d2lbbFgONRv9qkxdawL",
	"C: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL",
	"S: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096",
	"C: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=",
	"S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ=",
}, {
	"U: root fe8c89e308ec08763df36333cbf5d3a2",
	"N: OTcxNDk5NjM2MzE5",
	"C: n,,n=root,r=OTcxNDk5NjM2MzE5",
	"S: r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,s=XRDkVrFC9JuL7/F4tG0acQ==,i=10000",
	"C: c=biws,r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,p=6y1jp9R7ETyouTXS9fW9k5UHdBc=",
	"S: v=LBnd9dUJRxdqZiEq91NKP3z/bHA=",
}}

func (s *S) TestExamples(c *C) {
	for _, steps := range tests {
		if len(steps) < 2 || len(steps[0]) < 3 || !strings.HasPrefix(steps[0], "U: ") {
			c.Fatalf("Invalid test: %#v", steps)
		}
		auth := strings.Fields(steps[0][3:])
		client := scram.NewClient(sha1.New, auth[0], auth[1])
		first, done := true, false
		c.Logf("-----")
		c.Logf("%s", steps[0])
		for _, step := range steps[1:] {
			c.Logf("%s", step)
			switch step[:3] {
			case "N: ":
				client.SetNonce([]byte(step[3:]))
			case "C: ":
				if first {
					first = false
					done = client.Step(nil)
				}
				c.Assert(done, Equals, false)
				c.Assert(client.Err(), IsNil)
				c.Assert(string(client.Out()), Equals, step[3:])
			case "S: ":
				first = false
				done = client.Step([]byte(step[3:]))
			default:
				panic("invalid test line: " + step)
			}
		}
		c.Assert(done, Equals, true)
		c.Assert(client.Err(), IsNil)
	}
}
133
vendor/gopkg.in/mgo.v2/log.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"fmt"
	"sync"
)

// ---------------------------------------------------------------------------
// Logging integration.

// Avoid importing the log type information unnecessarily. There's a small cost
// associated with using an interface rather than the type. Depending on how
// often the logger is plugged in, it would be worth using the type instead.
type log_Logger interface {
	Output(calldepth int, s string) error
}

var (
	globalLogger log_Logger
	globalDebug  bool
	globalMutex  sync.Mutex
)

// RACE WARNING: There are known data races when logging, which are manually
// silenced when the race detector is in use. These data races won't be
// observed in typical use, because logging is supposed to be set up once when
// the application starts. Having raceDetector as a constant, the compiler
// should elide the locks altogether in actual use.

// Specify the *log.Logger object where log messages should be sent to.
func SetLogger(logger log_Logger) {
	if raceDetector {
		globalMutex.Lock()
		defer globalMutex.Unlock()
	}
	globalLogger = logger
}

// Enable the delivery of debug messages to the logger. Only meaningful
// if a logger is also set.
func SetDebug(debug bool) {
	if raceDetector {
		globalMutex.Lock()
		defer globalMutex.Unlock()
	}
	globalDebug = debug
}

func log(v ...interface{}) {
	if raceDetector {
		globalMutex.Lock()
		defer globalMutex.Unlock()
	}
	if globalLogger != nil {
		globalLogger.Output(2, fmt.Sprint(v...))
	}
}

func logln(v ...interface{}) {
	if raceDetector {
		globalMutex.Lock()
		defer globalMutex.Unlock()
	}
	if globalLogger != nil {
		globalLogger.Output(2, fmt.Sprintln(v...))
	}
}

func logf(format string, v ...interface{}) {
	if raceDetector {
		globalMutex.Lock()
		defer globalMutex.Unlock()
	}
	if globalLogger != nil {
		globalLogger.Output(2, fmt.Sprintf(format, v...))
	}
}

func debug(v ...interface{}) {
	if raceDetector {
		globalMutex.Lock()
		defer globalMutex.Unlock()
	}
	if globalDebug && globalLogger != nil {
		globalLogger.Output(2, fmt.Sprint(v...))
	}
}

func debugln(v ...interface{}) {
	if raceDetector {
		globalMutex.Lock()
		defer globalMutex.Unlock()
	}
	if globalDebug && globalLogger != nil {
		globalLogger.Output(2, fmt.Sprintln(v...))
	}
}

func debugf(format string, v ...interface{}) {
	if raceDetector {
		globalMutex.Lock()
		defer globalMutex.Unlock()
	}
	if globalDebug && globalLogger != nil {
		globalLogger.Output(2, fmt.Sprintf(format, v...))
	}
}
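The log_Logger interface above is satisfied by the standard library's *log.Logger, whose Output method has exactly that signature. A small usage sketch (not part of the diff):

package main

import (
	"log"
	"os"

	mgo "gopkg.in/mgo.v2"
)

func main() {
	// *log.Logger provides Output(calldepth int, s string) error, so it
	// can be handed to SetLogger directly.
	mgo.SetLogger(log.New(os.Stderr, "[mgo] ", log.LstdFlags))
	mgo.SetDebug(true) // also deliver debugf/debugln messages
}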
91
vendor/gopkg.in/mgo.v2/queue.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

type queue struct {
	elems               []interface{}
	nelems, popi, pushi int
}

func (q *queue) Len() int {
	return q.nelems
}

func (q *queue) Push(elem interface{}) {
	//debugf("Pushing(pushi=%d popi=%d cap=%d): %#v\n",
	//       q.pushi, q.popi, len(q.elems), elem)
	if q.nelems == len(q.elems) {
		q.expand()
	}
	q.elems[q.pushi] = elem
	q.nelems++
	q.pushi = (q.pushi + 1) % len(q.elems)
	//debugf(" Pushed(pushi=%d popi=%d cap=%d): %#v\n",
	//       q.pushi, q.popi, len(q.elems), elem)
}

func (q *queue) Pop() (elem interface{}) {
	//debugf("Popping(pushi=%d popi=%d cap=%d)\n",
	//       q.pushi, q.popi, len(q.elems))
	if q.nelems == 0 {
		return nil
	}
	elem = q.elems[q.popi]
	q.elems[q.popi] = nil // Help GC.
	q.nelems--
	q.popi = (q.popi + 1) % len(q.elems)
	//debugf(" Popped(pushi=%d popi=%d cap=%d): %#v\n",
	//       q.pushi, q.popi, len(q.elems), elem)
	return elem
}

func (q *queue) expand() {
	curcap := len(q.elems)
	var newcap int
	if curcap == 0 {
		newcap = 8
	} else if curcap < 1024 {
		newcap = curcap * 2
	} else {
		newcap = curcap + (curcap / 4)
	}
	elems := make([]interface{}, newcap)

	if q.popi == 0 {
		copy(elems, q.elems)
		q.pushi = curcap
	} else {
		newpopi := newcap - (curcap - q.popi)
		copy(elems, q.elems[:q.popi])
		copy(elems[newpopi:], q.elems[q.popi:])
		q.popi = newpopi
	}
	for i := range q.elems {
		q.elems[i] = nil // Help GC.
	}
	q.elems = elems
}
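To make expand's wrap-around copy concrete, here is a standalone re-enactment with plain ints (hypothetical values; the real queue stores interface{} elements):

package main

import "fmt"

func main() {
	// Full ring of 8 slots with popi == 2: the logical order is
	// elems[2:] (oldest) followed by elems[:2] (newest): 2..9.
	elems := []int{8, 9, 2, 3, 4, 5, 6, 7}
	popi := 2
	newcap := len(elems) * 2 // curcap < 1024, so capacity doubles

	grown := make([]int, newcap)
	newpopi := newcap - (len(elems) - popi)
	copy(grown, elems[:popi])           // newest run stays at the front
	copy(grown[newpopi:], elems[popi:]) // oldest run moves to the tail
	// grown: [8 9 0 0 0 0 0 0 0 0 2 3 4 5 6 7] with newpopi == 10, so
	// pops resume at 2,3,... and the next push lands at index 2 as before.
	fmt.Println(grown, newpopi)
}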
101
vendor/gopkg.in/mgo.v2/queue_test.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	. "gopkg.in/check.v1"
)

type QS struct{}

var _ = Suite(&QS{})

func (s *QS) TestSequentialGrowth(c *C) {
	q := queue{}
	n := 2048
	for i := 0; i != n; i++ {
		q.Push(i)
	}
	for i := 0; i != n; i++ {
		c.Assert(q.Pop(), Equals, i)
	}
}

var queueTestLists = [][]int{
	// {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},

	// {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7}
	{0, 1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11},

	// {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7}
	{0, 1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11},

	// {0, 1, 2, 3, 4, 5, 6, 7, 8}
	{0, 1, 2, 3, 4, 5, 6, 7, 8,
		-1, -1, -1, -1, -1, -1, -1, -1, -1,
		0, 1, 2, 3, 4, 5, 6, 7, 8},
}

func (s *QS) TestQueueTestLists(c *C) {
	test := []int{}
	testi := 0
	reset := func() {
		test = test[0:0]
		testi = 0
	}
	push := func(i int) {
		test = append(test, i)
	}
	pop := func() (i int) {
		if testi == len(test) {
			return -1
		}
		i = test[testi]
		testi++
		return
	}

	for _, list := range queueTestLists {
		reset()
		q := queue{}
		for _, n := range list {
			if n == -1 {
				c.Assert(q.Pop(), Equals, pop(), Commentf("With list %#v", list))
			} else {
				q.Push(n)
				push(n)
			}
		}

		for n := pop(); n != -1; n = pop() {
			c.Assert(q.Pop(), Equals, n, Commentf("With list %#v", list))
		}

		c.Assert(q.Pop(), Equals, nil, Commentf("With list %#v", list))
	}
}
5
vendor/gopkg.in/mgo.v2/raceoff.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
// +build !race

package mgo

const raceDetector = false
5
vendor/gopkg.in/mgo.v2/raceon.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
// +build race

package mgo

const raceDetector = true
11
vendor/gopkg.in/mgo.v2/saslimpl.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
//+build sasl

package mgo

import (
	"gopkg.in/mgo.v2/internal/sasl"
)

func saslNew(cred Credential, host string) (saslStepper, error) {
	return sasl.New(cred.Username, cred.Password, cred.Mechanism, cred.Service, host)
}
11
vendor/gopkg.in/mgo.v2/saslstub.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
//+build !sasl

package mgo

import (
	"fmt"
)

func saslNew(cred Credential, host string) (saslStepper, error) {
	return nil, fmt.Errorf("SASL support not enabled during build (-tags sasl)")
}
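saslimpl.go and saslstub.go above are selected at build time: go build -tags sasl compiles the cgo-backed implementation, while a plain go build compiles the stub. The same pattern in a minimal standalone form (hypothetical file names, tag, and constant):

// feature_on.go -- compiled only with: go build -tags demo
//+build demo

package feature

const enabled = true

// feature_off.go -- compiled when the tag is absent
//+build !demo

package feature

const enabled = false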
448
vendor/gopkg.in/mgo.v2/server.go
generated
vendored
Normal file
@@ -0,0 +1,448 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"errors"
	"net"
	"sort"
	"sync"
	"time"

	"gopkg.in/mgo.v2/bson"
)

// ---------------------------------------------------------------------------
// Mongo server encapsulation.

type mongoServer struct {
	sync.RWMutex
	Addr          string
	ResolvedAddr  string
	tcpaddr       *net.TCPAddr
	unusedSockets []*mongoSocket
	liveSockets   []*mongoSocket
	closed        bool
	abended       bool
	sync          chan bool
	dial          dialer
	pingValue     time.Duration
	pingIndex     int
	pingCount     uint32
	pingWindow    [6]time.Duration
	info          *mongoServerInfo
}

type dialer struct {
	old func(addr net.Addr) (net.Conn, error)
	new func(addr *ServerAddr) (net.Conn, error)
}

func (dial dialer) isSet() bool {
	return dial.old != nil || dial.new != nil
}

type mongoServerInfo struct {
	Master         bool
	Mongos         bool
	Tags           bson.D
	MaxWireVersion int
	SetName        string
}

var defaultServerInfo mongoServerInfo

func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) *mongoServer {
	server := &mongoServer{
		Addr:         addr,
		ResolvedAddr: tcpaddr.String(),
		tcpaddr:      tcpaddr,
		sync:         sync,
		dial:         dial,
		info:         &defaultServerInfo,
	}
	// Once so the server gets a ping value, then loop in background.
	server.pinger(false)
	go server.pinger(true)
	return server
}

var errPoolLimit = errors.New("per-server connection limit reached")
var errServerClosed = errors.New("server was closed")

// AcquireSocket returns a socket for communicating with the server.
// This will attempt to reuse an old connection, if one is available. Otherwise,
// it will establish a new one. The returned socket is owned by the call site,
// and will return to the cache when the socket has its Release method called
// the same number of times as AcquireSocket + Acquire were called for it.
// If the poolLimit argument is greater than zero and the number of sockets in
// use in this server is greater than the provided limit, errPoolLimit is
// returned.
func (server *mongoServer) AcquireSocket(poolLimit int, timeout time.Duration) (socket *mongoSocket, abended bool, err error) {
	for {
		server.Lock()
		abended = server.abended
		if server.closed {
			server.Unlock()
			return nil, abended, errServerClosed
		}
		n := len(server.unusedSockets)
		if poolLimit > 0 && len(server.liveSockets)-n >= poolLimit {
			server.Unlock()
			return nil, false, errPoolLimit
		}
		if n > 0 {
			socket = server.unusedSockets[n-1]
			server.unusedSockets[n-1] = nil // Help GC.
			server.unusedSockets = server.unusedSockets[:n-1]
			info := server.info
			server.Unlock()
			err = socket.InitialAcquire(info, timeout)
			if err != nil {
				continue
			}
		} else {
			server.Unlock()
			socket, err = server.Connect(timeout)
			if err == nil {
				server.Lock()
				// We've waited for the Connect, see if we got
				// closed in the meantime
				if server.closed {
					server.Unlock()
					socket.Release()
					socket.Close()
					return nil, abended, errServerClosed
				}
				server.liveSockets = append(server.liveSockets, socket)
				server.Unlock()
			}
		}
		return
	}
	panic("unreachable")
}

// Connect establishes a new connection to the server. This should
// generally be done through server.AcquireSocket().
func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) {
	server.RLock()
	master := server.info.Master
	dial := server.dial
	server.RUnlock()

	logf("Establishing new connection to %s (timeout=%s)...", server.Addr, timeout)
	var conn net.Conn
	var err error
	switch {
	case !dial.isSet():
		// Cannot do this because it lacks timeout support. :-(
		//conn, err = net.DialTCP("tcp", nil, server.tcpaddr)
		conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout)
	case dial.old != nil:
		conn, err = dial.old(server.tcpaddr)
	case dial.new != nil:
		conn, err = dial.new(&ServerAddr{server.Addr, server.tcpaddr})
	default:
		panic("dialer is set, but both dial.old and dial.new are nil")
	}
	if err != nil {
		logf("Connection to %s failed: %v", server.Addr, err.Error())
		return nil, err
	}
	logf("Connection to %s established.", server.Addr)

	stats.conn(+1, master)
	return newSocket(server, conn, timeout), nil
}

// Close forces closing all sockets that are alive, whether
// they're currently in use or not.
func (server *mongoServer) Close() {
	server.Lock()
	server.closed = true
	liveSockets := server.liveSockets
	unusedSockets := server.unusedSockets
	server.liveSockets = nil
	server.unusedSockets = nil
	server.Unlock()
	logf("Connections to %s closing (%d live sockets).", server.Addr, len(liveSockets))
	for i, s := range liveSockets {
		s.Close()
		liveSockets[i] = nil
	}
	for i := range unusedSockets {
		unusedSockets[i] = nil
	}
}

// RecycleSocket puts socket back into the unused cache.
func (server *mongoServer) RecycleSocket(socket *mongoSocket) {
	server.Lock()
	if !server.closed {
		server.unusedSockets = append(server.unusedSockets, socket)
	}
	server.Unlock()
}

func removeSocket(sockets []*mongoSocket, socket *mongoSocket) []*mongoSocket {
	for i, s := range sockets {
		if s == socket {
			copy(sockets[i:], sockets[i+1:])
			n := len(sockets) - 1
			sockets[n] = nil
			sockets = sockets[:n]
			break
		}
	}
	return sockets
}

// AbendSocket notifies the server that the given socket has terminated
// abnormally, and thus should be discarded rather than cached.
func (server *mongoServer) AbendSocket(socket *mongoSocket) {
	server.Lock()
	server.abended = true
	if server.closed {
		server.Unlock()
		return
	}
	server.liveSockets = removeSocket(server.liveSockets, socket)
	server.unusedSockets = removeSocket(server.unusedSockets, socket)
	server.Unlock()
	// Maybe just a timeout, but suggest a cluster sync up just in case.
	select {
	case server.sync <- true:
	default:
	}
}

func (server *mongoServer) SetInfo(info *mongoServerInfo) {
	server.Lock()
	server.info = info
	server.Unlock()
}

func (server *mongoServer) Info() *mongoServerInfo {
	server.Lock()
	info := server.info
	server.Unlock()
	return info
}

func (server *mongoServer) hasTags(serverTags []bson.D) bool {
NextTagSet:
	for _, tags := range serverTags {
	NextReqTag:
		for _, req := range tags {
			for _, has := range server.info.Tags {
				if req.Name == has.Name {
					if req.Value == has.Value {
						continue NextReqTag
					}
					continue NextTagSet
				}
			}
			continue NextTagSet
		}
		return true
	}
	return false
}

var pingDelay = 5 * time.Second

func (server *mongoServer) pinger(loop bool) {
	var delay time.Duration
	if raceDetector {
		// This variable is only ever touched by tests.
		globalMutex.Lock()
		delay = pingDelay
		globalMutex.Unlock()
	} else {
		delay = pingDelay
	}
	op := queryOp{
		collection: "admin.$cmd",
		query:      bson.D{{"ping", 1}},
		flags:      flagSlaveOk,
		limit:      -1,
	}
	for {
		if loop {
			time.Sleep(delay)
		}
		op := op
		socket, _, err := server.AcquireSocket(0, 3*delay)
		if err == nil {
			start := time.Now()
			_, _ = socket.SimpleQuery(&op)
			delay := time.Now().Sub(start)

			server.pingWindow[server.pingIndex] = delay
			server.pingIndex = (server.pingIndex + 1) % len(server.pingWindow)
			server.pingCount++
			var max time.Duration
			for i := 0; i < len(server.pingWindow) && uint32(i) < server.pingCount; i++ {
				if server.pingWindow[i] > max {
					max = server.pingWindow[i]
				}
			}
			socket.Release()
			server.Lock()
			if server.closed {
				loop = false
			}
			server.pingValue = max
			server.Unlock()
			logf("Ping for %s is %d ms", server.Addr, max/time.Millisecond)
		} else if err == errServerClosed {
			return
		}
		if !loop {
			return
		}
	}
}

type mongoServerSlice []*mongoServer

func (s mongoServerSlice) Len() int {
	return len(s)
}

func (s mongoServerSlice) Less(i, j int) bool {
	return s[i].ResolvedAddr < s[j].ResolvedAddr
}

func (s mongoServerSlice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s mongoServerSlice) Sort() {
	sort.Sort(s)
}

func (s mongoServerSlice) Search(resolvedAddr string) (i int, ok bool) {
	n := len(s)
	i = sort.Search(n, func(i int) bool {
		return s[i].ResolvedAddr >= resolvedAddr
	})
	return i, i != n && s[i].ResolvedAddr == resolvedAddr
}

type mongoServers struct {
	slice mongoServerSlice
}

func (servers *mongoServers) Search(resolvedAddr string) (server *mongoServer) {
	if i, ok := servers.slice.Search(resolvedAddr); ok {
		return servers.slice[i]
	}
	return nil
}

func (servers *mongoServers) Add(server *mongoServer) {
	servers.slice = append(servers.slice, server)
	servers.slice.Sort()
}

func (servers *mongoServers) Remove(other *mongoServer) (server *mongoServer) {
	if i, found := servers.slice.Search(other.ResolvedAddr); found {
		server = servers.slice[i]
		copy(servers.slice[i:], servers.slice[i+1:])
		n := len(servers.slice) - 1
		servers.slice[n] = nil // Help GC.
		servers.slice = servers.slice[:n]
	}
	return
}

func (servers *mongoServers) Slice() []*mongoServer {
	return ([]*mongoServer)(servers.slice)
}

func (servers *mongoServers) Get(i int) *mongoServer {
	return servers.slice[i]
}

func (servers *mongoServers) Len() int {
	return len(servers.slice)
}

func (servers *mongoServers) Empty() bool {
	return len(servers.slice) == 0
}

// BestFit returns the best guess of what would be the most interesting
// server to perform operations on at this point in time.
func (servers *mongoServers) BestFit(serverTags []bson.D) *mongoServer {
	var best *mongoServer
	for _, next := range servers.slice {
		if best == nil {
			best = next
			best.RLock()
			if serverTags != nil && !next.info.Mongos && !best.hasTags(serverTags) {
				best.RUnlock()
				best = nil
			}
			continue
		}
		next.RLock()
		swap := false
		switch {
		case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags):
			// Must have requested tags.
		case next.info.Master != best.info.Master:
			// Prefer slaves.
			swap = best.info.Master
		case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond:
			// Prefer nearest server.
			swap = next.pingValue < best.pingValue
		case len(next.liveSockets)-len(next.unusedSockets) < len(best.liveSockets)-len(best.unusedSockets):
			// Prefer servers with less connections.
			swap = true
		}
		if swap {
			best.RUnlock()
			best = next
		} else {
			next.RUnlock()
		}
	}
	if best != nil {
		best.RUnlock()
	}
	return best
}

func absDuration(d time.Duration) time.Duration {
	if d < 0 {
		return -d
	}
	return d
}
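The pool limit checked in AcquireSocket above is exposed to applications through the session API; a brief sketch (not part of the diff; the address and limit are placeholders):

package main

import (
	"log"
	"time"

	mgo "gopkg.in/mgo.v2"
)

func main() {
	session, err := mgo.DialWithTimeout("localhost:27017", 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Cap sockets per server; once len(liveSockets)-len(unusedSockets)
	// reaches this limit, AcquireSocket reports errPoolLimit internally.
	session.SetPoolLimit(64)
}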
4224
vendor/gopkg.in/mgo.v2/session.go
generated
vendored
Normal file
File diff suppressed because it is too large
3704
vendor/gopkg.in/mgo.v2/session_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
677
vendor/gopkg.in/mgo.v2/socket.go
generated
vendored
Normal file
677
vendor/gopkg.in/mgo.v2/socket.go
generated
vendored
Normal file
@ -0,0 +1,677 @@
|
||||
// mgo - MongoDB driver for Go
|
||||
//
|
||||
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||
//
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||
// list of conditions and the following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package mgo
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gopkg.in/mgo.v2/bson"
|
||||
)
|
||||
|
||||
type replyFunc func(err error, reply *replyOp, docNum int, docData []byte)
|
||||
|
||||
type mongoSocket struct {
|
||||
sync.Mutex
|
||||
server *mongoServer // nil when cached
|
||||
conn net.Conn
|
||||
timeout time.Duration
|
||||
addr string // For debugging only.
|
||||
nextRequestId uint32
|
||||
replyFuncs map[uint32]replyFunc
|
||||
references int
|
||||
creds []Credential
|
||||
logout []Credential
|
||||
cachedNonce string
|
||||
gotNonce sync.Cond
|
||||
dead error
|
||||
serverInfo *mongoServerInfo
|
||||
}
|
||||
|
||||
type queryOpFlags uint32
|
||||
|
||||
const (
|
||||
_ queryOpFlags = 1 << iota
|
||||
flagTailable
|
||||
flagSlaveOk
|
||||
flagLogReplay
|
||||
flagNoCursorTimeout
|
||||
flagAwaitData
|
||||
)
|
||||
|
||||
type queryOp struct {
|
||||
collection string
|
||||
query interface{}
|
||||
skip int32
|
||||
limit int32
|
||||
selector interface{}
|
||||
flags queryOpFlags
|
||||
replyFunc replyFunc
|
||||
|
||||
options queryWrapper
|
||||
hasOptions bool
|
||||
serverTags []bson.D
|
||||
}
|
||||
|
||||
type queryWrapper struct {
|
||||
Query interface{} "$query"
|
||||
OrderBy interface{} "$orderby,omitempty"
|
||||
Hint interface{} "$hint,omitempty"
|
||||
Explain bool "$explain,omitempty"
|
||||
Snapshot bool "$snapshot,omitempty"
|
||||
ReadPreference bson.D "$readPreference,omitempty"
|
||||
MaxScan int "$maxScan,omitempty"
|
||||
MaxTimeMS int "$maxTimeMS,omitempty"
|
||||
Comment string "$comment,omitempty"
|
||||
}
|
||||
|
||||
func (op *queryOp) finalQuery(socket *mongoSocket) interface{} {
|
||||
if op.flags&flagSlaveOk != 0 && len(op.serverTags) > 0 && socket.ServerInfo().Mongos {
|
||||
op.hasOptions = true
|
||||
op.options.ReadPreference = bson.D{{"mode", "secondaryPreferred"}, {"tags", op.serverTags}}
|
||||
}
|
||||
if op.hasOptions {
|
||||
if op.query == nil {
|
||||
var empty bson.D
|
||||
op.options.Query = empty
|
||||
} else {
|
||||
op.options.Query = op.query
|
||||
}
|
||||
debugf("final query is %#v\n", &op.options)
|
||||
return &op.options
|
||||
}
|
||||
return op.query
|
||||
}

type getMoreOp struct {
	collection string
	limit      int32
	cursorId   int64
	replyFunc  replyFunc
}

type replyOp struct {
	flags     uint32
	cursorId  int64
	firstDoc  int32
	replyDocs int32
}

type insertOp struct {
	collection string        // "database.collection"
	documents  []interface{} // One or more documents to insert
	flags      uint32
}

type updateOp struct {
	collection string // "database.collection"
	selector   interface{}
	update     interface{}
	flags      uint32
}

type deleteOp struct {
	collection string // "database.collection"
	selector   interface{}
	flags      uint32
}

type killCursorsOp struct {
	cursorIds []int64
}

type requestInfo struct {
	bufferPos int
	replyFunc replyFunc
}

func newSocket(server *mongoServer, conn net.Conn, timeout time.Duration) *mongoSocket {
	socket := &mongoSocket{
		conn:       conn,
		addr:       server.Addr,
		server:     server,
		replyFuncs: make(map[uint32]replyFunc),
	}
	socket.gotNonce.L = &socket.Mutex
	if err := socket.InitialAcquire(server.Info(), timeout); err != nil {
		panic("newSocket: InitialAcquire returned error: " + err.Error())
	}
	stats.socketsAlive(+1)
	debugf("Socket %p to %s: initialized", socket, socket.addr)
	socket.resetNonce()
	go socket.readLoop()
	return socket
}

// Server returns the server that the socket is associated with.
// It returns nil while the socket is cached in its respective server.
func (socket *mongoSocket) Server() *mongoServer {
	socket.Lock()
	server := socket.server
	socket.Unlock()
	return server
}

// ServerInfo returns details for the server at the time the socket
// was initially acquired.
func (socket *mongoSocket) ServerInfo() *mongoServerInfo {
	socket.Lock()
	serverInfo := socket.serverInfo
	socket.Unlock()
	return serverInfo
}

// InitialAcquire obtains the first reference to the socket, either
// right after the connection is made or once a recycled socket is
// being put back in use.
func (socket *mongoSocket) InitialAcquire(serverInfo *mongoServerInfo, timeout time.Duration) error {
	socket.Lock()
	if socket.references > 0 {
		panic("Socket acquired out of cache with references")
	}
	if socket.dead != nil {
		dead := socket.dead
		socket.Unlock()
		return dead
	}
	socket.references++
	socket.serverInfo = serverInfo
	socket.timeout = timeout
	stats.socketsInUse(+1)
	stats.socketRefs(+1)
	socket.Unlock()
	return nil
}

// Acquire obtains an additional reference to the socket.
// The socket will only be recycled when it's released as many
// times as it's been acquired.
func (socket *mongoSocket) Acquire() (info *mongoServerInfo) {
	socket.Lock()
	if socket.references == 0 {
		panic("Socket got non-initial acquire with references == 0")
	}
	// We'll track references to dead sockets as well.
	// Caller is still supposed to release the socket.
	socket.references++
	stats.socketRefs(+1)
	serverInfo := socket.serverInfo
	socket.Unlock()
	return serverInfo
}

// Release decrements a socket reference. The socket will be
// recycled once it's released as many times as it's been acquired.
func (socket *mongoSocket) Release() {
	socket.Lock()
	if socket.references == 0 {
		panic("socket.Release() with references == 0")
	}
	socket.references--
	stats.socketRefs(-1)
	if socket.references == 0 {
		stats.socketsInUse(-1)
		server := socket.server
		socket.Unlock()
		socket.LogoutAll()
		// If the socket is dead, server is nil.
		if server != nil {
			server.RecycleSocket(socket)
		}
	} else {
		socket.Unlock()
	}
}
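
InitialAcquire/Acquire/Release above are plain mutex-guarded reference counting, with the recycle step deferred until the count drops back to zero and run outside the lock. A minimal standalone sketch of the same discipline; refCounted and its methods are hypothetical names, not the driver's API:

package main

import (
	"fmt"
	"sync"
)

// refCounted recycles itself once every acquire is matched by a release.
type refCounted struct {
	mu      sync.Mutex
	refs    int
	recycle func() // runs when refs drops back to zero
}

func (r *refCounted) acquire() {
	r.mu.Lock()
	r.refs++
	r.mu.Unlock()
}

func (r *refCounted) release() {
	r.mu.Lock()
	if r.refs == 0 {
		panic("release without acquire")
	}
	r.refs--
	done := r.refs == 0
	r.mu.Unlock()
	if done {
		r.recycle() // outside the lock, like RecycleSocket above
	}
}

func main() {
	r := &refCounted{recycle: func() { fmt.Println("recycled") }}
	r.acquire()
	r.acquire()
	r.release()
	r.release() // prints "recycled"
}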

// SetTimeout changes the timeout used on socket operations.
func (socket *mongoSocket) SetTimeout(d time.Duration) {
	socket.Lock()
	socket.timeout = d
	socket.Unlock()
}

type deadlineType int

const (
	readDeadline  deadlineType = 1
	writeDeadline deadlineType = 2
)

func (socket *mongoSocket) updateDeadline(which deadlineType) {
	var when time.Time
	if socket.timeout > 0 {
		when = time.Now().Add(socket.timeout)
	}
	whichstr := ""
	switch which {
	case readDeadline | writeDeadline:
		whichstr = "read/write"
		socket.conn.SetDeadline(when)
	case readDeadline:
		whichstr = "read"
		socket.conn.SetReadDeadline(when)
	case writeDeadline:
		whichstr = "write"
		socket.conn.SetWriteDeadline(when)
	default:
		panic("invalid parameter to updateDeadline")
	}
	debugf("Socket %p to %s: updated %s deadline to %s ahead (%s)", socket, socket.addr, whichstr, socket.timeout, when)
}
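
The whole mechanism updateDeadline leans on is net.Conn's deadline contract: a zero time.Time clears the deadline, and a future deadline makes a blocked Read or Write fail with a timeout error once it passes. A small self-contained demonstration using only the standard library:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	go func() {
		c, _ := ln.Accept() // accept but never write anything
		_ = c
	}()

	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
	_, err = conn.Read(make([]byte, 1))
	if ne, ok := err.(net.Error); ok && ne.Timeout() {
		fmt.Println("read timed out, as expected")
	}
}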

// Close terminates the socket use.
func (socket *mongoSocket) Close() {
	socket.kill(errors.New("Closed explicitly"), false)
}

func (socket *mongoSocket) kill(err error, abend bool) {
	socket.Lock()
	if socket.dead != nil {
		debugf("Socket %p to %s: killed again: %s (previously: %s)", socket, socket.addr, err.Error(), socket.dead.Error())
		socket.Unlock()
		return
	}
	logf("Socket %p to %s: closing: %s (abend=%v)", socket, socket.addr, err.Error(), abend)
	socket.dead = err
	socket.conn.Close()
	stats.socketsAlive(-1)
	replyFuncs := socket.replyFuncs
	socket.replyFuncs = make(map[uint32]replyFunc)
	server := socket.server
	socket.server = nil
	socket.gotNonce.Broadcast()
	socket.Unlock()
	for _, replyFunc := range replyFuncs {
		logf("Socket %p to %s: notifying replyFunc of closed socket: %s", socket, socket.addr, err.Error())
		replyFunc(err, nil, -1, nil)
	}
	if abend {
		server.AbendSocket(socket)
	}
}

func (socket *mongoSocket) SimpleQuery(op *queryOp) (data []byte, err error) {
	var wait, change sync.Mutex
	var replyDone bool
	var replyData []byte
	var replyErr error
	wait.Lock()
	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
		change.Lock()
		if !replyDone {
			replyDone = true
			replyErr = err
			if err == nil {
				replyData = docData
			}
		}
		change.Unlock()
		wait.Unlock()
	}
	err = socket.Query(op)
	if err != nil {
		return nil, err
	}
	wait.Lock()
	change.Lock()
	data = replyData
	err = replyErr
	change.Unlock()
	return data, err
}
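
SimpleQuery blocks by taking `wait` before sending and taking it a second time afterwards, so the second Lock only proceeds once the reply callback calls Unlock: a locked sync.Mutex doubling as a one-shot completion signal (unlocking a mutex locked by another goroutine is legal in Go). The idiom in isolation:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wait sync.Mutex
	result := 0

	wait.Lock() // held by the requester before the work starts
	go func() {
		result = 42   // stand-in for the reply callback's work
		wait.Unlock() // signal: the reply has arrived
	}()
	wait.Lock()         // blocks here until the goroutine unlocks
	fmt.Println(result) // 42
}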

func (socket *mongoSocket) Query(ops ...interface{}) (err error) {

	if lops := socket.flushLogout(); len(lops) > 0 {
		ops = append(lops, ops...)
	}

	buf := make([]byte, 0, 256)

	// Serialize operations synchronously to avoid interrupting
	// other goroutines while we can't really be sending data.
	// Also, record id positions so that we can compute request
	// ids at once later with the lock already held.
	requests := make([]requestInfo, len(ops))
	requestCount := 0

	for _, op := range ops {
		debugf("Socket %p to %s: serializing op: %#v", socket, socket.addr, op)
		start := len(buf)
		var replyFunc replyFunc
		switch op := op.(type) {

		case *updateOp:
			buf = addHeader(buf, 2001)
			buf = addInt32(buf, 0) // Reserved
			buf = addCString(buf, op.collection)
			buf = addInt32(buf, int32(op.flags))
			debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector)
			buf, err = addBSON(buf, op.selector)
			if err != nil {
				return err
			}
			debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.update)
			buf, err = addBSON(buf, op.update)
			if err != nil {
				return err
			}

		case *insertOp:
			buf = addHeader(buf, 2002)
			buf = addInt32(buf, int32(op.flags))
			buf = addCString(buf, op.collection)
			for _, doc := range op.documents {
				debugf("Socket %p to %s: serializing document for insertion: %#v", socket, socket.addr, doc)
				buf, err = addBSON(buf, doc)
				if err != nil {
					return err
				}
			}

		case *queryOp:
			buf = addHeader(buf, 2004)
			buf = addInt32(buf, int32(op.flags))
			buf = addCString(buf, op.collection)
			buf = addInt32(buf, op.skip)
			buf = addInt32(buf, op.limit)
			buf, err = addBSON(buf, op.finalQuery(socket))
			if err != nil {
				return err
			}
			if op.selector != nil {
				buf, err = addBSON(buf, op.selector)
				if err != nil {
					return err
				}
			}
			replyFunc = op.replyFunc

		case *getMoreOp:
			buf = addHeader(buf, 2005)
			buf = addInt32(buf, 0) // Reserved
			buf = addCString(buf, op.collection)
			buf = addInt32(buf, op.limit)
			buf = addInt64(buf, op.cursorId)
			replyFunc = op.replyFunc

		case *deleteOp:
			buf = addHeader(buf, 2006)
			buf = addInt32(buf, 0) // Reserved
			buf = addCString(buf, op.collection)
			buf = addInt32(buf, int32(op.flags))
			debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector)
			buf, err = addBSON(buf, op.selector)
			if err != nil {
				return err
			}

		case *killCursorsOp:
			buf = addHeader(buf, 2007)
			buf = addInt32(buf, 0) // Reserved
			buf = addInt32(buf, int32(len(op.cursorIds)))
			for _, cursorId := range op.cursorIds {
				buf = addInt64(buf, cursorId)
			}

		default:
			panic("internal error: unknown operation type")
		}

		setInt32(buf, start, int32(len(buf)-start))

		if replyFunc != nil {
			request := &requests[requestCount]
			request.replyFunc = replyFunc
			request.bufferPos = start
			requestCount++
		}
	}

	// Buffer is ready for the pipe. Lock, allocate ids, and enqueue.

	socket.Lock()
	if socket.dead != nil {
		dead := socket.dead
		socket.Unlock()
		debugf("Socket %p to %s: failing query, already closed: %s", socket, socket.addr, socket.dead.Error())
		// XXX This seems necessary in case the session is closed concurrently
		// with a query being performed, but it's not yet tested:
		for i := 0; i != requestCount; i++ {
			request := &requests[i]
			if request.replyFunc != nil {
				request.replyFunc(dead, nil, -1, nil)
			}
		}
		return dead
	}

	wasWaiting := len(socket.replyFuncs) > 0

	// Reserve id 0 for requests which should have no responses.
	requestId := socket.nextRequestId + 1
	if requestId == 0 {
		requestId++
	}
	socket.nextRequestId = requestId + uint32(requestCount)
	for i := 0; i != requestCount; i++ {
		request := &requests[i]
		setInt32(buf, request.bufferPos+4, int32(requestId))
		socket.replyFuncs[requestId] = request.replyFunc
		requestId++
	}

	debugf("Socket %p to %s: sending %d op(s) (%d bytes)", socket, socket.addr, len(ops), len(buf))
	stats.sentOps(len(ops))

	socket.updateDeadline(writeDeadline)
	_, err = socket.conn.Write(buf)
	if !wasWaiting && requestCount > 0 {
		socket.updateDeadline(readDeadline)
	}
	socket.Unlock()
	return err
}
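
Each serialized op starts with the standard 16-byte MongoDB wire header: four little-endian int32s for messageLength, requestID, responseTo, and opCode. That is why the length is patched in at `start` and the request id at `start+4` above, and why the read loop later pulls responseTo from offset 8 and opCode from offset 12. A quick sketch of the layout using only the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	header := make([]byte, 16)
	binary.LittleEndian.PutUint32(header[0:4], 60)     // messageLength: whole message, header included
	binary.LittleEndian.PutUint32(header[4:8], 7)      // requestID, patched in at bufferPos+4 above
	binary.LittleEndian.PutUint32(header[8:12], 0)     // responseTo: zero on requests
	binary.LittleEndian.PutUint32(header[12:16], 2004) // opCode: OP_QUERY

	fmt.Printf("% x\n", header)
}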

func fill(r net.Conn, b []byte) error {
	l := len(b)
	n, err := r.Read(b)
	for n != l && err == nil {
		var ni int
		ni, err = r.Read(b[n:])
		n += ni
	}
	return err
}
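
fill is essentially io.ReadFull specialized to net.Conn: keep reading until the buffer is full or the connection errors (ReadFull additionally maps a short read ending in EOF to io.ErrUnexpectedEOF, while fill returns Read's error as-is). The same contract via the standard library:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	src := bytes.NewReader([]byte{0x10, 0x00, 0x00, 0x00})
	header := make([]byte, 4)
	if _, err := io.ReadFull(src, header); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", header) // 10 00 00 00
}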

// Estimated minimum cost per socket: 1 goroutine + memory for the largest
// document ever seen.
func (socket *mongoSocket) readLoop() {
	p := make([]byte, 36) // 16 from header + 20 from OP_REPLY fixed fields
	s := make([]byte, 4)
	conn := socket.conn // No locking, conn never changes.
	for {
		// XXX Handle timeouts, etc.
		err := fill(conn, p)
		if err != nil {
			socket.kill(err, true)
			return
		}

		totalLen := getInt32(p, 0)
		responseTo := getInt32(p, 8)
		opCode := getInt32(p, 12)

		// Don't use socket.server.Addr here. socket is not
		// locked and socket.server may go away.
		debugf("Socket %p to %s: got reply (%d bytes)", socket, socket.addr, totalLen)

		_ = totalLen

		if opCode != 1 {
			socket.kill(errors.New("opcode != 1, corrupted data?"), true)
			return
		}

		reply := replyOp{
			flags:     uint32(getInt32(p, 16)),
			cursorId:  getInt64(p, 20),
			firstDoc:  getInt32(p, 28),
			replyDocs: getInt32(p, 32),
		}

		stats.receivedOps(+1)
		stats.receivedDocs(int(reply.replyDocs))

		socket.Lock()
		replyFunc, ok := socket.replyFuncs[uint32(responseTo)]
		if ok {
			delete(socket.replyFuncs, uint32(responseTo))
		}
		socket.Unlock()

		if replyFunc != nil && reply.replyDocs == 0 {
			replyFunc(nil, &reply, -1, nil)
		} else {
			for i := 0; i != int(reply.replyDocs); i++ {
				err := fill(conn, s)
				if err != nil {
					if replyFunc != nil {
						replyFunc(err, nil, -1, nil)
					}
					socket.kill(err, true)
					return
				}

				b := make([]byte, int(getInt32(s, 0)))

				// copy(b, s) in an efficient way.
				b[0] = s[0]
				b[1] = s[1]
				b[2] = s[2]
				b[3] = s[3]

				err = fill(conn, b[4:])
				if err != nil {
					if replyFunc != nil {
						replyFunc(err, nil, -1, nil)
					}
					socket.kill(err, true)
					return
				}

				if globalDebug && globalLogger != nil {
					m := bson.M{}
					if err := bson.Unmarshal(b, m); err == nil {
						debugf("Socket %p to %s: received document: %#v", socket, socket.addr, m)
					}
				}

				if replyFunc != nil {
					replyFunc(nil, &reply, i, b)
				}

				// XXX Do bound checking against totalLen.
			}
		}

		socket.Lock()
		if len(socket.replyFuncs) == 0 {
			// Nothing else to read for now. Disable deadline.
			socket.conn.SetReadDeadline(time.Time{})
		} else {
			socket.updateDeadline(readDeadline)
		}
		socket.Unlock()

		// XXX Do bound checking against totalLen.
	}
}

var emptyHeader = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

func addHeader(b []byte, opcode int) []byte {
	i := len(b)
	b = append(b, emptyHeader...)
	// Enough for current opcodes.
	b[i+12] = byte(opcode)
	b[i+13] = byte(opcode >> 8)
	return b
}

func addInt32(b []byte, i int32) []byte {
	return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24))
}

func addInt64(b []byte, i int64) []byte {
	return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24),
		byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56))
}

func addCString(b []byte, s string) []byte {
	b = append(b, []byte(s)...)
	b = append(b, 0)
	return b
}

func addBSON(b []byte, doc interface{}) ([]byte, error) {
	if doc == nil {
		return append(b, 5, 0, 0, 0, 0), nil
	}
	data, err := bson.Marshal(doc)
	if err != nil {
		return b, err
	}
	return append(b, data...), nil
}

func setInt32(b []byte, pos int, i int32) {
	b[pos] = byte(i)
	b[pos+1] = byte(i >> 8)
	b[pos+2] = byte(i >> 16)
	b[pos+3] = byte(i >> 24)
}

func getInt32(b []byte, pos int) int32 {
	return (int32(b[pos+0])) |
		(int32(b[pos+1]) << 8) |
		(int32(b[pos+2]) << 16) |
		(int32(b[pos+3]) << 24)
}

func getInt64(b []byte, pos int) int64 {
	return (int64(b[pos+0])) |
		(int64(b[pos+1]) << 8) |
		(int64(b[pos+2]) << 16) |
		(int64(b[pos+3]) << 24) |
		(int64(b[pos+4]) << 32) |
		(int64(b[pos+5]) << 40) |
		(int64(b[pos+6]) << 48) |
		(int64(b[pos+7]) << 56)
}
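
The encoders and decoders above are hand-rolled little-endian codecs; byte conversions truncate and the shifts wrap in two's complement, so negative values survive. A self-contained round-trip check using copies of the int32 pair:

package main

import "fmt"

func addInt32(b []byte, i int32) []byte {
	return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24))
}

func getInt32(b []byte, pos int) int32 {
	return int32(b[pos]) |
		int32(b[pos+1])<<8 |
		int32(b[pos+2])<<16 |
		int32(b[pos+3])<<24
}

func main() {
	for _, v := range []int32{0, 1, -1, 2004, -123456789} {
		b := addInt32(nil, v)
		if got := getInt32(b, 0); got != v {
			panic(fmt.Sprintf("round-trip failed: %d != %d", got, v))
		}
	}
	fmt.Println("int32 round-trips hold, negatives included")
}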

147
vendor/gopkg.in/mgo.v2/stats.go
generated vendored Normal file
@ -0,0 +1,147 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"sync"
)

var stats *Stats
var statsMutex sync.Mutex

func SetStats(enabled bool) {
	statsMutex.Lock()
	if enabled {
		if stats == nil {
			stats = &Stats{}
		}
	} else {
		stats = nil
	}
	statsMutex.Unlock()
}

func GetStats() (snapshot Stats) {
	statsMutex.Lock()
	snapshot = *stats
	statsMutex.Unlock()
	return
}

func ResetStats() {
	statsMutex.Lock()
	debug("Resetting stats")
	old := stats
	stats = &Stats{}
	// These are absolute values:
	stats.Clusters = old.Clusters
	stats.SocketsInUse = old.SocketsInUse
	stats.SocketsAlive = old.SocketsAlive
	stats.SocketRefs = old.SocketRefs
	statsMutex.Unlock()
	return
}

type Stats struct {
	Clusters     int
	MasterConns  int
	SlaveConns   int
	SentOps      int
	ReceivedOps  int
	ReceivedDocs int
	SocketsAlive int
	SocketsInUse int
	SocketRefs   int
}
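
From application code this facility is opt-in; note that GetStats dereferences the package-level pointer, so it assumes SetStats(true) was called first. A typical-use sketch against the public mgo API (the localhost address assumes a running mongod):

package main

import (
	"fmt"

	mgo "gopkg.in/mgo.v2"
)

func main() {
	mgo.SetStats(true) // start collecting

	session, err := mgo.Dial("localhost:27017") // assumes a local mongod
	if err != nil {
		panic(err)
	}
	defer session.Close()

	s := mgo.GetStats() // snapshot taken under the stats mutex
	fmt.Printf("conns: %d master / %d slave, ops sent: %d\n",
		s.MasterConns, s.SlaveConns, s.SentOps)

	mgo.ResetStats() // zero the counters, keep the absolute gauges
}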

func (stats *Stats) cluster(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.Clusters += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) conn(delta int, master bool) {
	if stats != nil {
		statsMutex.Lock()
		if master {
			stats.MasterConns += delta
		} else {
			stats.SlaveConns += delta
		}
		statsMutex.Unlock()
	}
}

func (stats *Stats) sentOps(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.SentOps += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) receivedOps(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.ReceivedOps += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) receivedDocs(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.ReceivedDocs += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) socketsInUse(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.SocketsInUse += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) socketsAlive(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.SocketsAlive += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) socketRefs(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.SocketRefs += delta
		statsMutex.Unlock()
	}
}

254
vendor/gopkg.in/mgo.v2/suite_test.go
generated vendored Normal file
@ -0,0 +1,254 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo_test

import (
	"errors"
	"flag"
	"fmt"
	"net"
	"os/exec"
	"runtime"
	"strconv"
	"testing"
	"time"

	. "gopkg.in/check.v1"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

var fast = flag.Bool("fast", false, "Skip slow tests")

type M bson.M

type cLogger C

func (c *cLogger) Output(calldepth int, s string) error {
	ns := time.Now().UnixNano()
	t := float64(ns%100e9) / 1e9
	((*C)(c)).Logf("[LOG] %.05f %s", t, s)
	return nil
}

func TestAll(t *testing.T) {
	TestingT(t)
}

type S struct {
	session *mgo.Session
	stopped bool
	build   mgo.BuildInfo
	frozen  []string
}

func (s *S) versionAtLeast(v ...int) (result bool) {
	for i := range v {
		if i == len(s.build.VersionArray) {
			return false
		}
		if s.build.VersionArray[i] != v[i] {
			return s.build.VersionArray[i] >= v[i]
		}
	}
	return true
}
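
versionAtLeast compares the requested prefix element by element and only answers false when a component is smaller or the build's version array runs out. A standalone copy with the edge cases spelled out:

package main

import "fmt"

func versionAtLeast(version []int, v ...int) bool {
	for i := range v {
		if i == len(version) {
			return false
		}
		if version[i] != v[i] {
			return version[i] >= v[i]
		}
	}
	return true
}

func main() {
	build := []int{2, 4, 1} // e.g. a mongod reporting 2.4.1
	fmt.Println(versionAtLeast(build, 2, 4))       // true
	fmt.Println(versionAtLeast(build, 2, 5))       // false
	fmt.Println(versionAtLeast(build, 2, 4, 1, 1)) // false: version array exhausted
}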

var _ = Suite(&S{})

func (s *S) SetUpSuite(c *C) {
	mgo.SetDebug(true)
	mgo.SetStats(true)
	s.StartAll()

	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	s.build, err = session.BuildInfo()
	c.Check(err, IsNil)
	session.Close()
}

func (s *S) SetUpTest(c *C) {
	err := run("mongo --nodb testdb/dropall.js")
	if err != nil {
		panic(err.Error())
	}
	mgo.SetLogger((*cLogger)(c))
	mgo.ResetStats()
}

func (s *S) TearDownTest(c *C) {
	if s.stopped {
		s.StartAll()
	}
	for _, host := range s.frozen {
		if host != "" {
			s.Thaw(host)
		}
	}
	var stats mgo.Stats
	for i := 0; ; i++ {
		stats = mgo.GetStats()
		if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
			break
		}
		if i == 20 {
			c.Fatal("Test left sockets in a dirty state")
		}
		c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
		time.Sleep(500 * time.Millisecond)
	}
	for i := 0; ; i++ {
		stats = mgo.GetStats()
		if stats.Clusters == 0 {
			break
		}
		if i == 60 {
			c.Fatal("Test left clusters alive")
		}
		c.Logf("Waiting for clusters to die: %d alive", stats.Clusters)
		time.Sleep(1 * time.Second)
	}
}

func (s *S) Stop(host string) {
	// Give a moment for slaves to sync and avoid getting rollback issues.
	panicOnWindows()
	time.Sleep(2 * time.Second)
	err := run("cd _testdb && supervisorctl stop " + supvName(host))
	if err != nil {
		panic(err)
	}
	s.stopped = true
}

func (s *S) pid(host string) int {
	output, err := exec.Command("lsof", "-iTCP:"+hostPort(host), "-sTCP:LISTEN", "-Fp").CombinedOutput()
	if err != nil {
		panic(err)
	}
	pidstr := string(output[1 : len(output)-1])
	pid, err := strconv.Atoi(pidstr)
	if err != nil {
		panic("cannot convert pid to int: " + pidstr)
	}
	return pid
}

func (s *S) Freeze(host string) {
	err := stop(s.pid(host))
	if err != nil {
		panic(err)
	}
	s.frozen = append(s.frozen, host)
}

func (s *S) Thaw(host string) {
	err := cont(s.pid(host))
	if err != nil {
		panic(err)
	}
	for i, frozen := range s.frozen {
		if frozen == host {
			s.frozen[i] = ""
		}
	}
}

func (s *S) StartAll() {
	// Restart any stopped nodes.
	run("cd _testdb && supervisorctl start all")
	err := run("cd testdb && mongo --nodb wait.js")
	if err != nil {
		panic(err)
	}
	s.stopped = false
}

func run(command string) error {
	var output []byte
	var err error
	if runtime.GOOS == "windows" {
		output, err = exec.Command("cmd", "/C", command).CombinedOutput()
	} else {
		output, err = exec.Command("/bin/sh", "-c", command).CombinedOutput()
	}

	if err != nil {
		msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output))
		return errors.New(msg)
	}
	return nil
}

var supvNames = map[string]string{
	"40001": "db1",
	"40002": "db2",
	"40011": "rs1a",
	"40012": "rs1b",
	"40013": "rs1c",
	"40021": "rs2a",
	"40022": "rs2b",
	"40023": "rs2c",
	"40031": "rs3a",
	"40032": "rs3b",
	"40033": "rs3c",
	"40041": "rs4a",
	"40101": "cfg1",
	"40102": "cfg2",
	"40103": "cfg3",
	"40201": "s1",
	"40202": "s2",
	"40203": "s3",
}

// supvName returns the supervisord name for the given host address.
func supvName(host string) string {
	host, port, err := net.SplitHostPort(host)
	if err != nil {
		panic(err)
	}
	name, ok := supvNames[port]
	if !ok {
		panic("Unknown host: " + host)
	}
	return name
}

func hostPort(host string) string {
	_, port, err := net.SplitHostPort(host)
	if err != nil {
		panic(err)
	}
	return port
}

func panicOnWindows() {
	if runtime.GOOS == "windows" {
		panic("the test suite is not yet fully supported on Windows")
	}
}

15
vendor/gopkg.in/mgo.v2/syscall_test.go
generated vendored Normal file
@ -0,0 +1,15 @@
// +build !windows

package mgo_test

import (
	"syscall"
)

func stop(pid int) (err error) {
	return syscall.Kill(pid, syscall.SIGSTOP)
}

func cont(pid int) (err error) {
	return syscall.Kill(pid, syscall.SIGCONT)
}
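
The suite freezes a mongod by sending SIGSTOP (the kernel stops scheduling the process, so it keeps its sockets but answers nothing) and resumes it with SIGCONT. A Unix-only sketch of the same technique against a throwaway child process, mirroring the build tag above:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "10")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	pid := cmd.Process.Pid

	syscall.Kill(pid, syscall.SIGSTOP) // freeze: process stops being scheduled
	time.Sleep(100 * time.Millisecond)
	syscall.Kill(pid, syscall.SIGCONT) // thaw: resumes exactly where it left off

	fmt.Println("froze and thawed pid", pid)
	cmd.Process.Kill()
	cmd.Wait()
}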

11
vendor/gopkg.in/mgo.v2/syscall_windows_test.go
generated vendored Normal file
@ -0,0 +1,11 @@
package mgo_test

func stop(pid int) (err error) {
	panicOnWindows() // Always does.
	return nil
}

func cont(pid int) (err error) {
	panicOnWindows() // Always does.
	return nil
}

@ -23,7 +23,11 @@ import (
	. "gopkg.in/check.v1"
)

func (s *TestSuite) TestVersion(c *C) {
type VersionSuite struct{}

var _ = Suite(&VersionSuite{})

func (s *VersionSuite) TestVersion(c *C) {
	_, err := time.Parse(minioVersion, http.TimeFormat)
	c.Assert(err, NotNil)
}