config/main: Re-write config files - add to new config v3
- New config format.

```
{
	"version": "3",
	"address": ":9000",
	"backend": {
		"type": "fs",
		"disk": "/path"
	},
	"credential": {
		"accessKey": "WLGDGYAQYIGI833EV05A",
		"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
	},
	"region": "us-east-1",
	"logger": {
		"file": {
			"enable": false,
			"fileName": "",
			"level": "error"
		},
		"syslog": {
			"enable": false,
			"address": "",
			"level": "debug"
		},
		"console": {
			"enable": true,
			"level": "fatal"
		}
	}
}
```

- New command-line flow to support XL.

Initialize the filesystem backend:

~~~
$ minio init fs <path>
~~~

Initialize the XL backend:

~~~
$ minio init xl <url1>...<url16>
~~~

For the 'fs' backend, 'server' starts immediately:

~~~
$ minio server
~~~

For the 'xl' backend, 'server' waits for the other servers to join:

~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~

Now execute 'join' on the other servers and they connect:

~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
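The JSON above maps field-for-field onto the serverConfigV3, backend, credential, and logger structs added in config-v3.go further down. As a minimal, self-contained sketch of that mapping (struct definitions trimmed from this commit; the logger section is omitted for brevity):

```
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copies of the structs this commit introduces in
// access-key.go and config-v3.go; logger fields omitted.
type credential struct {
	AccessKeyID     string `json:"accessKey"`
	SecretAccessKey string `json:"secretKey"`
}

type backend struct {
	Type string `json:"type"`
	Disk string `json:"disk,omitempty"`
}

type serverConfigV3 struct {
	Version    string     `json:"version"`
	Addr       string     `json:"address"`
	Backend    backend    `json:"backend"`
	Credential credential `json:"credential"`
	Region     string     `json:"region"`
}

func main() {
	raw := `{
		"version": "3",
		"address": ":9000",
		"backend": {"type": "fs", "disk": "/path"},
		"credential": {
			"accessKey": "WLGDGYAQYIGI833EV05A",
			"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
		},
		"region": "us-east-1"
	}`

	var cfg serverConfigV3
	if e := json.Unmarshal([]byte(raw), &cfg); e != nil {
		panic(e)
	}
	fmt.Println(cfg.Backend.Type, cfg.Addr, cfg.Region) // fs :9000 us-east-1
}
```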
This commit is contained in:
parent 85e50f2bb9
commit aaf97ea02c
36 README.md
@@ -84,46 +84,24 @@ $ make
### How to use Minio?

```
NAME:
  minio server - Start Minio cloud storage server.

USAGE:
  minio server [OPTION VALUE] PATH

  OPTION = expiry VALUE = NN[h|m|s] [DEFAULT=Unlimited]
  OPTION = min-free-disk VALUE = NN% [DEFAULT: 10%]

EXAMPLES:
  1. Start minio server on Linux.
      $ minio server /home/shared

  2. Start minio server on Windows.
      $ minio server C:\MyShare

  3. Start minio server bound to a specific IP:PORT, when you have multiple network interfaces.
      $ minio --address 192.168.1.101:9000 server /home/shared

  4. Start minio server with minimum free disk threshold to 5%
      $ minio server min-free-disk 5% /home/shared/Pictures

  5. Start minio server with minimum free disk threshold to 15% with auto expiration set to 1h
      $ minio server min-free-disk 15% expiry 1h /home/shared/Documents
```

~~~
$ minio init ~/Photos
~~~

#### Start Minio server.

~~~
$ minio server ~/Photos
$ minio server

AccessKey: WLGDGYAQYIGI833EV05A SecretKey: BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF Region: us-east-1

Minio Object Storage:
    http://127.0.0.1:9000
    http://10.0.0.3:9000
    http://10.1.10.177:9000

Minio Browser:
    http://127.0.0.1:9000
    http://10.0.0.3:9000
    http://10.1.10.177:9000

To configure Minio Client:
    $ wget https://dl.minio.io/client/mc/release/darwin-amd64/mc
~~~
96 access-key.go Normal file
@@ -0,0 +1,96 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"regexp"

	"github.com/minio/minio/pkg/probe"
)

// credential container for access and secret keys.
type credential struct {
	AccessKeyID     string `json:"accessKey"`
	SecretAccessKey string `json:"secretKey"`
}

// stringer colorized access keys.
func (a credential) String() string {
	accessStr := colorMagenta("AccessKey: ") + colorWhite(a.AccessKeyID)
	secretStr := colorMagenta("SecretKey: ") + colorWhite(a.SecretAccessKey)
	return fmt.Sprint(accessStr + " " + secretStr)
}

const (
	minioAccessID = 20
	minioSecretID = 40
)

// isValidSecretKey - validate secret key.
var isValidSecretKey = regexp.MustCompile("^.{40}$")

// isValidAccessKey - validate access key.
var isValidAccessKey = regexp.MustCompile("^[A-Z0-9\\-\\.\\_\\~]{20}$")

// mustGenAccessKeys - must generate access credentials.
func mustGenAccessKeys() (creds credential) {
	creds, err := genAccessKeys()
	fatalIf(err.Trace(), "Unable to generate access keys.", nil)
	return creds
}

// genAccessKeys - generate access credentials.
func genAccessKeys() (credential, *probe.Error) {
	accessKeyID, err := genAccessKeyID()
	if err != nil {
		return credential{}, err.Trace()
	}
	secretAccessKey, err := genSecretAccessKey()
	if err != nil {
		return credential{}, err.Trace()
	}
	creds := credential{
		AccessKeyID:     string(accessKeyID),
		SecretAccessKey: string(secretAccessKey),
	}
	return creds, nil
}

// genAccessKeyID - generate random alpha numeric value using only uppercase characters
// takes input as size in integer
func genAccessKeyID() ([]byte, *probe.Error) {
	alpha := make([]byte, minioAccessID)
	if _, e := rand.Read(alpha); e != nil {
		return nil, probe.NewError(e)
	}
	for i := 0; i < minioAccessID; i++ {
		alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))]
	}
	return alpha, nil
}

// genSecretAccessKey - generate random base64 numeric value from a random seed.
func genSecretAccessKey() ([]byte, *probe.Error) {
	rb := make([]byte, minioSecretID)
	if _, e := rand.Read(rb); e != nil {
		return nil, probe.NewError(e)
	}
	return []byte(base64.StdEncoding.EncodeToString(rb))[:minioSecretID], nil
}
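For readers skimming the diff, the generation scheme above is small enough to demonstrate standalone. A hedged sketch of the same technique (alphaNumericTable is defined elsewhere in package main; its contents here are an assumption):

```
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// Assumed table; the real alphaNumericTable lives elsewhere in package main.
const alphaNumericTable = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

func main() {
	// Access key: 20 random bytes folded onto the uppercase table.
	access := make([]byte, 20)
	if _, e := rand.Read(access); e != nil {
		panic(e)
	}
	for i := range access {
		access[i] = alphaNumericTable[access[i]%byte(len(alphaNumericTable))]
	}

	// Secret key: 40 random bytes, base64-encoded, truncated to 40 chars.
	rb := make([]byte, 40)
	if _, e := rand.Read(rb); e != nil {
		panic(e)
	}
	secret := base64.StdEncoding.EncodeToString(rb)[:40]

	fmt.Println(string(access), secret)
}
```

Note the modulo fold is slightly biased because 256 is not a multiple of 36; that is acceptable for an identifier like the access key, while the secret key is drawn from base64 over the full 40 random bytes.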
|
@ -1,82 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"regexp"
|
||||
|
||||
"github.com/minio/minio/pkg/probe"
|
||||
)
|
||||
|
||||
const (
|
||||
minioAccessID = 20
|
||||
minioSecretID = 40
|
||||
)
|
||||
|
||||
// isValidAccessKey - validate access key
|
||||
func isValidAccessKey(accessKeyID string) bool {
|
||||
if accessKeyID == "" {
|
||||
return true
|
||||
}
|
||||
regex := regexp.MustCompile("^[A-Z0-9\\-\\.\\_\\~]{20}$")
|
||||
return regex.MatchString(accessKeyID)
|
||||
}
|
||||
|
||||
// isValidSecretKey - validate secret key
|
||||
func isValidSecretKey(secretKeyID string) bool {
|
||||
regex := regexp.MustCompile("^.{40}$")
|
||||
return regex.MatchString(secretKeyID)
|
||||
}
|
||||
|
||||
// generateAccessKeyID - generate random alpha numeric value using only uppercase characters
|
||||
// takes input as size in integer
|
||||
func generateAccessKeyID() ([]byte, *probe.Error) {
|
||||
alpha := make([]byte, minioAccessID)
|
||||
if _, e := rand.Read(alpha); e != nil {
|
||||
return nil, probe.NewError(e)
|
||||
}
|
||||
for i := 0; i < minioAccessID; i++ {
|
||||
alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))]
|
||||
}
|
||||
return alpha, nil
|
||||
}
|
||||
|
||||
// generateSecretAccessKey - generate random base64 numeric value from a random seed.
|
||||
func generateSecretAccessKey() ([]byte, *probe.Error) {
|
||||
rb := make([]byte, minioSecretID)
|
||||
if _, e := rand.Read(rb); e != nil {
|
||||
return nil, probe.NewError(e)
|
||||
}
|
||||
return []byte(base64.StdEncoding.EncodeToString(rb))[:minioSecretID], nil
|
||||
}
|
||||
|
||||
// mustGenerateAccessKeyID - must generate random alpha numeric value using only uppercase characters
|
||||
// takes input as size in integer
|
||||
func mustGenerateAccessKeyID() []byte {
|
||||
alpha, err := generateAccessKeyID()
|
||||
fatalIf(err.Trace(), "Unable to generate accessKeyID.", nil)
|
||||
return alpha
|
||||
}
|
||||
|
||||
// mustGenerateSecretAccessKey - generate random base64 numeric value from a random seed.
|
||||
func mustGenerateSecretAccessKey() []byte {
|
||||
secretKey, err := generateSecretAccessKey()
|
||||
fatalIf(err.Trace(), "Unable to generate secretAccessKey.", nil)
|
||||
return secretKey
|
||||
}
|
@@ -116,9 +116,11 @@ func (api storageAPI) GetBucketLocationHandler(w http.ResponseWriter, r *http.Re

	// Generate response.
	encodedSuccessResponse := encodeResponse(LocationResponse{})
	if api.Region != "us-east-1" {
	// Get current region.
	region := serverConfig.GetRegion()
	if region != "us-east-1" {
		encodedSuccessResponse = encodeResponse(LocationResponse{
			Location: api.Region,
			Location: region,
		})
	}
	setCommonHeaders(w) // write headers.
94 certs.go Normal file
@@ -0,0 +1,94 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"os"
	"path/filepath"

	"github.com/minio/go-homedir"
	"github.com/minio/minio/pkg/probe"
)

// createCertsPath create certs path.
func createCertsPath() *probe.Error {
	certsPath, err := getCertsPath()
	if err != nil {
		return err.Trace()
	}
	if err := os.MkdirAll(certsPath, 0700); err != nil {
		return probe.NewError(err)
	}
	return nil
}

// getCertsPath get certs path.
func getCertsPath() (string, *probe.Error) {
	homeDir, e := homedir.Dir()
	if e != nil {
		return "", probe.NewError(e)
	}
	certsPath := filepath.Join(homeDir, globalMinioCertsDir)
	return certsPath, nil
}

// mustGetCertsPath must get certs path.
func mustGetCertsPath() string {
	certsPath, err := getCertsPath()
	fatalIf(err.Trace(), "Unable to retrieve certs path.", nil)
	return certsPath
}

// mustGetCertFile must get cert file.
func mustGetCertFile() string {
	return filepath.Join(mustGetCertsPath(), globalMinioCertFile)
}

// mustGetKeyFile must get key file.
func mustGetKeyFile() string {
	return filepath.Join(mustGetCertsPath(), globalMinioKeyFile)
}

// isCertFileExists verifies if cert file exists, returns true if
// found, false otherwise.
func isCertFileExists() bool {
	st, e := os.Stat(filepath.Join(mustGetCertsPath(), globalMinioCertFile))
	// If file exists and is regular return true.
	if e == nil && st.Mode().IsRegular() {
		return true
	}
	return false
}

// isKeyFileExists verifies if key file exists, returns true if found,
// false otherwise.
func isKeyFileExists() bool {
	st, e := os.Stat(filepath.Join(mustGetCertsPath(), globalMinioKeyFile))
	// If file exists and is regular return true.
	if e == nil && st.Mode().IsRegular() {
		return true
	}
	return false
}

// isSSL - returns true with both cert and key exists.
func isSSL() bool {
	if isCertFileExists() && isKeyFileExists() {
		return true
	}
	return false
}
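These helpers are consumed during TLS setup; configureServer in minio-main.go below does essentially the following (condensed excerpt of code from this commit, shown here for orientation):

```
// Condensed from configureServer in minio-main.go below: TLS is
// enabled only when both public.crt and private.key exist under
// ~/.minio/certs (see isSSL above).
if isSSL() {
	var e error
	apiServer.TLSConfig = &tls.Config{}
	apiServer.TLSConfig.Certificates = make([]tls.Certificate, 1)
	apiServer.TLSConfig.Certificates[0], e = tls.LoadX509KeyPair(mustGetCertFile(), mustGetKeyFile())
	fatalIf(probe.NewError(e), "Unable to load certificates.", nil)
}
```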
@@ -1,164 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"runtime"

	"github.com/minio/cli"
	"github.com/minio/mc/pkg/console"
	"github.com/minio/minio/pkg/probe"
)

// Configure logger
var configLoggerCmd = cli.Command{
	Name:   "logger",
	Usage:  "Configure logger.",
	Action: mainConfigLogger,
	CustomHelpTemplate: `NAME:
   minio config {{.Name}} - {{.Usage}}

USAGE:
   minio config {{.Name}} OPERATION [ARGS...]

   OPERATION = add | list | remove

EXAMPLES:
   1. Configure new mongo logger.
      $ minio config {{.Name}} add mongo localhost:28710 mydb mylogger

   2. Configure new syslog logger. NOTE: syslog logger is not supported on windows.
      $ minio config {{.Name}} add syslog localhost:554 udp

   3. Configure new file logger. "/var/log" should be writable by user.
      $ minio config {{.Name}} add file /var/log/minio.log

   4. List currently configured logger.
      $ minio config {{.Name}} list

   5. Remove/Reset a configured logger.
      $ minio config {{.Name}} remove mongo

`,
}

// Inherit at one place
type config struct {
	*configV2
}

func mainConfigLogger(ctx *cli.Context) {
	if !ctx.Args().Present() || ctx.Args().First() == "help" {
		cli.ShowCommandHelpAndExit(ctx, "logger", 1) // last argument is exit code
	}
	conf, err := loadConfigV2()
	fatalIf(err.Trace(), "Unable to load config", nil)

	if ctx.Args().Get(0) == "add" {
		args := ctx.Args().Tail()
		if args.Get(0) == "mongo" {
			enableLog2Mongo(&config{conf}, args.Tail())
		}
		if args.Get(0) == "syslog" {
			if runtime.GOOS == "windows" {
				fatalIf(probe.NewError(errInvalidArgument), "Syslog is not supported on windows.", nil)
			}
			enableLog2Syslog(&config{conf}, args.Tail())
		}
		if args.Get(0) == "file" {
			enableLog2File(&config{conf}, args.Tail())
		}
	}
	if ctx.Args().Get(0) == "remove" {
		args := ctx.Args().Tail()
		if args.Get(0) == "mongo" {
			conf.MongoLogger.Addr = ""
			conf.MongoLogger.DB = ""
			conf.MongoLogger.Collection = ""
			err := saveConfig(conf)
			fatalIf(err.Trace(), "Unable to save config.", nil)
		}
		if args.Get(0) == "syslog" {
			if runtime.GOOS == "windows" {
				fatalIf(probe.NewError(errInvalidArgument), "Syslog is not supported on windows.", nil)
			}
			conf.SyslogLogger.Network = ""
			conf.SyslogLogger.Addr = ""
			err := saveConfig(conf)
			fatalIf(err.Trace(), "Unable to save config.", nil)
		}
		if args.Get(0) == "file" {
			conf.FileLogger.Filename = ""
			err := saveConfig(conf)
			fatalIf(err.Trace(), "Unable to save config.", nil)
		}
	}
	if ctx.Args().Get(0) == "list" {
		console.Println(conf)
	}
}

func enableLog2Mongo(conf *config, args cli.Args) {
	if conf.IsFileLoggingEnabled() {
		console.Infoln("File logging already enabled. Removing automatically by enabling mongo.")
		conf.FileLogger.Filename = ""
	}
	if conf.IsSysloggingEnabled() {
		console.Infoln("Syslog logging already enabled. Removing automatically by enabling mongo.")
		conf.SyslogLogger.Addr = ""
		conf.SyslogLogger.Network = ""
	}
	conf.MongoLogger.Addr = args.Get(0)
	conf.MongoLogger.DB = args.Get(1)
	conf.MongoLogger.Collection = args.Get(2)

	err := saveConfig(conf.configV2)
	fatalIf(err.Trace(), "Unable to save mongo logging config.", nil)
}

func enableLog2Syslog(conf *config, args cli.Args) {
	if conf.IsFileLoggingEnabled() {
		console.Infoln("File logging already enabled. Removing automatically by enabling syslog.")
		conf.FileLogger.Filename = ""
	}
	if conf.IsMongoLoggingEnabled() {
		console.Infoln("Mongo logging already enabled. Removing automatically by enabling syslog.")
		conf.MongoLogger.Addr = ""
		conf.MongoLogger.DB = ""
		conf.MongoLogger.Collection = ""
	}
	conf.SyslogLogger.Addr = args.Get(0)
	conf.SyslogLogger.Network = args.Get(1)
	err := saveConfig(conf.configV2)
	fatalIf(err.Trace(), "Unable to save syslog config.", nil)
}

func enableLog2File(conf *config, args cli.Args) {
	if conf.IsSysloggingEnabled() {
		console.Infoln("Syslog logging already enabled. Removing automatically by enabling file logging.")
		conf.SyslogLogger.Addr = ""
		conf.SyslogLogger.Network = ""
	}
	if conf.IsMongoLoggingEnabled() {
		console.Infoln("Mongo logging already enabled. Removing automatically by enabling file logging.")
		conf.MongoLogger.Addr = ""
		conf.MongoLogger.DB = ""
		conf.MongoLogger.Collection = ""
	}
	conf.FileLogger.Filename = args.Get(0)
	err := saveConfig(conf.configV2)
	fatalIf(err.Trace(), "Unable to save file logging config.", nil)
}
@@ -1,53 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import "github.com/minio/cli"

// Configure minio server
//
// ----
// NOTE: that the configure command only writes values to the config file.
// It does not use any configuration values from the environment variables.
// ----
//
var configCmd = cli.Command{
	Name:   "config",
	Usage:  "Collection of config management commands.",
	Action: mainConfig,
	Subcommands: []cli.Command{
		configLoggerCmd,
		configVersionCmd,
	},
	CustomHelpTemplate: `NAME:
   {{.Name}} - {{.Usage}}

USAGE:
   {{.Name}} {{if .Flags}}[global flags] {{end}}command{{if .Flags}} [command flags]{{end}} [arguments...]

COMMANDS:
   {{range .Commands}}{{ .Name }}{{ "\t" }}{{.Usage}}
   {{end}}
`,
}

// mainConfig is the handle for "minio config" command. provides sub-commands which write configuration data in json format to config file.
func mainConfig(ctx *cli.Context) {
	if !ctx.Args().Present() || ctx.Args().First() == "help" {
		cli.ShowAppHelp(ctx)
	}
}
98 config-migrate.go Normal file
@@ -0,0 +1,98 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"errors"
	"os"
	"path/filepath"

	"github.com/minio/mc/pkg/console"
	"github.com/minio/minio/pkg/probe"
)

func migrateConfig() {
	// Purge all configs with version '1'.
	purgeV1()
	// Migrate version '2' to '3'.
	migrateV2ToV3()
}

// Version '1' is not supported anymore and deprecated, safe to delete.
func purgeV1() {
	cv1, err := loadConfigV1()
	if err != nil {
		if os.IsNotExist(err.ToGoError()) {
			return
		}
	}
	fatalIf(err.Trace(), "Unable to load config version ‘1’.", nil)

	if cv1.Version == "1" {
		console.Println("Unsupported config version ‘1’ found, removed successfully.")
		/// Purge old fsUsers.json file
		configPath, err := getConfigPath()
		fatalIf(err.Trace(), "Unable to retrieve config path.", nil)

		configFile := filepath.Join(configPath, "fsUsers.json")
		os.RemoveAll(configFile)
		// Done purging; only unexpected versions fall through to the fatal below.
		return
	}
	fatalIf(probe.NewError(errors.New("")), "Unexpected version found ‘"+cv1.Version+"’, cannot migrate.", nil)
}

func migrateV2ToV3() {
	cv2, err := loadConfigV2()
	if err != nil {
		if os.IsNotExist(err.ToGoError()) {
			return
		}
	}
	fatalIf(err.Trace(), "Unable to load config version ‘2’.", nil)
	if cv2.Version != "2" {
		return
	}
	serverConfig.SetAddr(":9000")
	serverConfig.SetCredential(credential{
		AccessKeyID:     cv2.Credentials.AccessKeyID,
		SecretAccessKey: cv2.Credentials.SecretAccessKey,
	})
	serverConfig.SetRegion(cv2.Credentials.Region)
	serverConfig.SetConsoleLogger(consoleLogger{
		Enable: true,
		Level:  "fatal",
	})
	flogger := fileLogger{}
	flogger.Level = "error"
	if cv2.FileLogger.Filename != "" {
		flogger.Enable = true
		flogger.Filename = cv2.FileLogger.Filename
	}
	serverConfig.SetFileLogger(flogger)

	slogger := syslogLogger{}
	slogger.Level = "debug"
	if cv2.SyslogLogger.Addr != "" {
		slogger.Enable = true
		slogger.Addr = cv2.SyslogLogger.Addr
	}
	serverConfig.SetSyslogLogger(slogger)

	err = serverConfig.Save()
	fatalIf(err.Trace(), "Migrating from version ‘"+cv2.Version+"’ to ‘"+serverConfig.GetVersion()+"’ failed.", nil)

	console.Println("Migration from version ‘" + cv2.Version + "’ to ‘" + serverConfig.GetVersion() + "’ completed successfully.")
}
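Concretely, the v2-to-v3 migration rewrites the on-disk config roughly as follows (field names come from the configV2 and serverConfigV3 structs in this commit; the values are illustrative):

```
{
	"version": "2",
	"credentials": {
		"accessKeyId": "WLGDGYAQYIGI833EV05A",
		"secretAccessKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF",
		"region": "us-east-1"
	},
	"mongoLogger": {"addr": "", "db": "", "collection": ""},
	"syslogLogger": {"network": "", "addr": ""},
	"fileLogger": {"filename": ""}
}
```

This becomes the v3 document shown in the commit message: "credentials" moves to "credential" with "accessKey"/"secretKey", "region" is hoisted to the top level, "address" defaults to ":9000", the mongo logger is dropped entirely, and the console/file/syslog loggers gain "enable" and "level" fields.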
81 config-old.go Normal file
@@ -0,0 +1,81 @@
package main

import (
	"os"
	"path/filepath"

	"github.com/minio/minio/pkg/probe"
	"github.com/minio/minio/pkg/quick"
)

/////////////////// Config V1 ///////////////////
type configV1 struct {
	Version         string `json:"version"`
	AccessKeyID     string `json:"accessKeyId"`
	SecretAccessKey string `json:"secretAccessKey"`
}

// loadConfigV1 load config
func loadConfigV1() (*configV1, *probe.Error) {
	configPath, err := getConfigPath()
	if err != nil {
		return nil, err.Trace()
	}
	configFile := filepath.Join(configPath, "fsUsers.json")
	if _, err := os.Stat(configFile); err != nil {
		return nil, probe.NewError(err)
	}
	a := &configV1{}
	a.Version = "1"
	qc, err := quick.New(a)
	if err != nil {
		return nil, err.Trace()
	}
	if err := qc.Load(configFile); err != nil {
		return nil, err.Trace()
	}
	return qc.Data().(*configV1), nil
}

/////////////////// Config V2 ///////////////////
type configV2 struct {
	Version     string `json:"version"`
	Credentials struct {
		AccessKeyID     string `json:"accessKeyId"`
		SecretAccessKey string `json:"secretAccessKey"`
		Region          string `json:"region"`
	} `json:"credentials"`
	MongoLogger struct {
		Addr       string `json:"addr"`
		DB         string `json:"db"`
		Collection string `json:"collection"`
	} `json:"mongoLogger"`
	SyslogLogger struct {
		Network string `json:"network"`
		Addr    string `json:"addr"`
	} `json:"syslogLogger"`
	FileLogger struct {
		Filename string `json:"filename"`
	} `json:"fileLogger"`
}

// loadConfigV2 load config version '2'.
func loadConfigV2() (*configV2, *probe.Error) {
	configFile, err := getConfigFile()
	if err != nil {
		return nil, err.Trace()
	}
	if _, err := os.Stat(configFile); err != nil {
		return nil, probe.NewError(err)
	}
	a := &configV2{}
	a.Version = "2"
	qc, err := quick.New(a)
	if err != nil {
		return nil, err.Trace()
	}
	if err := qc.Load(configFile); err != nil {
		return nil, err.Trace()
	}
	return qc.Data().(*configV2), nil
}
240 config-v3.go Normal file
@@ -0,0 +1,240 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"os"
	"sync"

	"github.com/minio/minio/pkg/probe"
	"github.com/minio/minio/pkg/quick"
)

// serverConfigV3 server configuration version '3'.
type serverConfigV3 struct {
	Version string `json:"version"`

	// Backend configuration.
	Backend backend `json:"backend"`

	// http Server configuration.
	Addr string `json:"address"`

	// S3 API configuration.
	Credential credential `json:"credential"`
	Region     string     `json:"region"`

	// Additional error logging configuration.
	Logger logger `json:"logger"`

	// Read Write mutex.
	rwMutex *sync.RWMutex
}

// backend type.
type backend struct {
	Type  string   `json:"type"`
	Disk  string   `json:"disk,omitempty"`
	Disks []string `json:"disks,omitempty"`
}

// initConfig - initialize server config. config version (called only once).
func initConfig() *probe.Error {
	if !isConfigFileExists() {
		srvCfg := &serverConfigV3{}
		srvCfg.Version = globalMinioConfigVersion
		srvCfg.Region = "us-east-1"
		srvCfg.Credential = mustGenAccessKeys()
		srvCfg.rwMutex = &sync.RWMutex{}
		// Create config path.
		err := createConfigPath()
		if err != nil {
			return err.Trace()
		}

		// Create certs path.
		err = createCertsPath()
		if err != nil {
			return err.Trace()
		}

		// Save the new config globally.
		serverConfig = srvCfg

		// Save config into file.
		err = serverConfig.Save()
		if err != nil {
			return err.Trace()
		}
		return nil
	}
	configFile, err := getConfigFile()
	if err != nil {
		return err.Trace()
	}
	if _, e := os.Stat(configFile); e != nil {
		return probe.NewError(e)
	}
	srvCfg := &serverConfigV3{}
	srvCfg.Version = globalMinioConfigVersion
	srvCfg.rwMutex = &sync.RWMutex{}
	qc, err := quick.New(srvCfg)
	if err != nil {
		return err.Trace()
	}
	if err := qc.Load(configFile); err != nil {
		return err.Trace()
	}
	// Save the loaded config globally.
	serverConfig = qc.Data().(*serverConfigV3)
	return nil
}

// serverConfig server config.
var serverConfig *serverConfigV3

// GetVersion get current config version.
func (s serverConfigV3) GetVersion() string {
	s.rwMutex.RLock()
	defer s.rwMutex.RUnlock()
	return s.Version
}

/// Logger related.

// SetFileLogger set new file logger.
func (s *serverConfigV3) SetFileLogger(flogger fileLogger) {
	s.rwMutex.Lock()
	defer s.rwMutex.Unlock()
	s.Logger.File = flogger
}

// GetFileLogger get current file logger.
func (s serverConfigV3) GetFileLogger() fileLogger {
	s.rwMutex.RLock()
	defer s.rwMutex.RUnlock()
	return s.Logger.File
}

// SetConsoleLogger set new console logger.
func (s *serverConfigV3) SetConsoleLogger(clogger consoleLogger) {
	s.rwMutex.Lock()
	defer s.rwMutex.Unlock()
	s.Logger.Console = clogger
}

// GetConsoleLogger get current console logger.
func (s serverConfigV3) GetConsoleLogger() consoleLogger {
	s.rwMutex.RLock()
	defer s.rwMutex.RUnlock()
	return s.Logger.Console
}

// SetSyslogLogger set new syslog logger.
func (s *serverConfigV3) SetSyslogLogger(slogger syslogLogger) {
	s.rwMutex.Lock()
	defer s.rwMutex.Unlock()
	s.Logger.Syslog = slogger
}

// GetSyslogLogger get current syslog logger.
func (s *serverConfigV3) GetSyslogLogger() syslogLogger {
	s.rwMutex.RLock()
	defer s.rwMutex.RUnlock()
	return s.Logger.Syslog
}

// SetRegion set new region.
func (s *serverConfigV3) SetRegion(region string) {
	s.rwMutex.Lock()
	defer s.rwMutex.Unlock()
	s.Region = region
}

// GetRegion get current region.
func (s serverConfigV3) GetRegion() string {
	s.rwMutex.RLock()
	defer s.rwMutex.RUnlock()
	return s.Region
}

// SetAddr set new address.
func (s *serverConfigV3) SetAddr(addr string) {
	s.rwMutex.Lock()
	defer s.rwMutex.Unlock()
	s.Addr = addr
}

// GetAddr get current address.
func (s serverConfigV3) GetAddr() string {
	s.rwMutex.RLock()
	defer s.rwMutex.RUnlock()
	return s.Addr
}

// SetCredential set new credentials.
func (s *serverConfigV3) SetCredential(creds credential) {
	s.rwMutex.Lock()
	defer s.rwMutex.Unlock()
	s.Credential = creds
}

// GetCredential get current credentials.
func (s serverConfigV3) GetCredential() credential {
	s.rwMutex.RLock()
	defer s.rwMutex.RUnlock()
	return s.Credential
}

// SetBackend set new backend.
func (s *serverConfigV3) SetBackend(bknd backend) {
	s.rwMutex.Lock()
	defer s.rwMutex.Unlock()
	s.Backend = bknd
}

// Save config.
func (s serverConfigV3) Save() *probe.Error {
	s.rwMutex.RLock()
	defer s.rwMutex.RUnlock()

	// get config file.
	configFile, err := getConfigFile()
	if err != nil {
		return err.Trace()
	}

	// initialize quick.
	qc, err := quick.New(&s)
	if err != nil {
		return err.Trace()
	}

	// Save config file.
	if err := qc.Save(configFile); err != nil {
		return err.Trace()
	}

	// Return success.
	return nil
}

// GetBackend get current backend.
func (s serverConfigV3) GetBackend() backend {
	s.rwMutex.RLock()
	defer s.rwMutex.RUnlock()
	return s.Backend
}
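A short usage sketch of the accessors above, mirroring how main.go and config-migrate.go in this commit call them (same-package sketch, shown only for orientation):

```
// Same-package sketch; mirrors calls made in main.go and
// config-migrate.go in this commit.
err := initConfig()
fatalIf(err.Trace(), "Unable to initialize minio config.", nil)

// All reads and writes go through the mutex-guarded accessors.
serverConfig.SetBackend(backend{Type: "fs", Disk: "/path"})
serverConfig.SetRegion("us-east-1")

// Persist the updated config to ~/.minio/config.json.
serr := serverConfig.Save()
fatalIf(serr.Trace(), "Unable to save config.", nil)
```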
@@ -1,50 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"github.com/minio/cli"
	"github.com/minio/mc/pkg/console"
)

// Print config version.
var configVersionCmd = cli.Command{
	Name:   "version",
	Usage:  "Print config version.",
	Action: mainConfigVersion,
	CustomHelpTemplate: `NAME:
   minio config {{.Name}} - {{.Usage}}

USAGE:
   minio config {{.Name}}

`,
}

func mainConfigVersion(ctx *cli.Context) {
	if ctx.Args().First() == "help" {
		cli.ShowCommandHelpAndExit(ctx, "version", 1) // last argument is exit code
	}

	config, err := loadConfigV2()
	fatalIf(err.Trace(), "Unable to load config", nil)

	// convert interface{} back to its original struct
	newConf := config
	type Version string
	console.Println(newConf.Version)
}
92 config.go Normal file
@@ -0,0 +1,92 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"os"
	"path/filepath"

	"github.com/minio/go-homedir"
	"github.com/minio/minio/pkg/probe"
)

// customConfigPath holds a custom config path, only for testing purposes.
var customConfigPath string

// Sets a new config path.
func setGlobalConfigPath(configPath string) {
	customConfigPath = configPath
}

// getConfigPath get server config path
func getConfigPath() (string, *probe.Error) {
	if customConfigPath != "" {
		return customConfigPath, nil
	}
	homeDir, e := homedir.Dir()
	if e != nil {
		return "", probe.NewError(e)
	}
	configPath := filepath.Join(homeDir, globalMinioConfigDir)
	return configPath, nil
}

// mustGetConfigPath must get server config path.
func mustGetConfigPath() string {
	configPath, err := getConfigPath()
	fatalIf(err.Trace(), "Unable to get config path.", nil)
	return configPath
}

// createConfigPath create server config path.
func createConfigPath() *probe.Error {
	configPath, err := getConfigPath()
	if err != nil {
		return err.Trace()
	}
	if err := os.MkdirAll(configPath, 0700); err != nil {
		return probe.NewError(err)
	}
	return nil
}

// isConfigFileExists - returns true if config file exists.
func isConfigFileExists() bool {
	st, e := os.Stat(mustGetConfigFile())
	// If file exists and is regular return true.
	if e == nil && st.Mode().IsRegular() {
		return true
	}
	return false
}

// mustGetConfigFile must get server config file.
func mustGetConfigFile() string {
	configFile, err := getConfigFile()
	fatalIf(err.Trace(), "Unable to get config file.", nil)

	return configFile
}

// getConfigFile get server config file.
func getConfigFile() (string, *probe.Error) {
	configPath, err := getConfigPath()
	if err != nil {
		return "", err.Trace()
	}
	return filepath.Join(configPath, globalMinioConfigFile), nil
}
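setGlobalConfigPath exists so tests can point everything above at a throwaway directory instead of $HOME/.minio. A hypothetical test sketch (not part of this commit; assumes imports of testing, io/ioutil, os, and strings in a same-package test file):

```
// Hypothetical same-package test; illustrates setGlobalConfigPath only.
func TestConfigPathOverride(t *testing.T) {
	tmp, e := ioutil.TempDir("", "minio-config-")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(tmp)

	setGlobalConfigPath(tmp)
	if err := createConfigPath(); err != nil {
		t.Fatal(err)
	}

	// config.json now resolves under the temporary directory.
	configFile, err := getConfigFile()
	if err != nil {
		t.Fatal(err)
	}
	if !strings.HasPrefix(configFile, tmp) {
		t.Fatalf("config file %s not under %s", configFile, tmp)
	}
}
```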
24 flags.go
@@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -27,28 +27,6 @@ var (
		Value: mustGetConfigPath(),
		Usage: "Path to configuration folder.",
	}

	addressFlag = cli.StringFlag{
		Name:  "address",
		Value: ":9000",
		Usage: "ADDRESS:PORT for cloud storage access.",
	}

	accessLogFlag = cli.BoolFlag{
		Name:  "enable-accesslog",
		Hide:  true,
		Usage: "Enable access logs for all incoming HTTP request.",
	}

	certFlag = cli.StringFlag{
		Name:  "cert",
		Usage: "Provide your domain certificate.",
	}

	keyFlag = cli.StringFlag{
		Name:  "key",
		Usage: "Provide your domain private key.",
	}
)

// registerFlag registers a cli flag
@@ -178,7 +178,7 @@ func setTimeValidityHandler(h http.Handler) http.Handler {

func (h timeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Verify if date headers are set, if not reject the request
	if r.Header.Get("Authorization") != "" {
	if _, ok := r.Header["Authorization"]; ok {
		date, e := parseDateHeader(r)
		if e != nil {
			// All our internal APIs are sensitive towards Date
19 globals.go
@@ -16,7 +16,26 @@
package main

import "github.com/fatih/color"

// Global constants for Minio.
const (
	minGoVersion = ">= 1.6" // Minio requires at least Go v1.6
)

// minio configuration related constants.
const (
	globalMinioConfigVersion = "3"
	globalMinioConfigDir     = ".minio"
	globalMinioCertsDir      = ".minio/certs"
	globalMinioCertFile      = "public.crt"
	globalMinioKeyFile       = "private.key"
	globalMinioConfigFile    = "config.json"
)

// global colors.
var (
	colorMagenta = color.New(color.FgMagenta, color.Bold).SprintfFunc()
	colorWhite   = color.New(color.FgWhite, color.Bold).SprintfFunc()
	colorGreen   = color.New(color.FgGreen, color.Bold).SprintfFunc()
)
18 jwt.go
@@ -17,7 +17,6 @@
package main

import (
	"bytes"
	"strings"
	"time"

@@ -28,8 +27,7 @@ import (

// JWT - jwt auth backend
type JWT struct {
	accessKeyID     []byte
	secretAccessKey []byte
	credential
}

// Default - each token expires in 10hrs.

@@ -40,13 +38,11 @@ const (
// initJWT - initialize.
func initJWT() *JWT {
	jwt := &JWT{}
	// Load credentials.
	config, err := loadConfigV2()
	fatalIf(err.Trace("JWT"), "Unable to load configuration file.", nil)

	// Save access, secret keys.
	jwt.accessKeyID = []byte(config.Credentials.AccessKeyID)
	jwt.secretAccessKey = []byte(config.Credentials.SecretAccessKey)
	jwt.credential = serverConfig.GetCredential()

	// Return.
	return jwt
}

@@ -57,7 +53,7 @@ func (jwt *JWT) GenerateToken(userName string) (string, *probe.Error) {
	token.Claims["exp"] = time.Now().Add(time.Hour * tokenExpires).Unix()
	token.Claims["iat"] = time.Now().Unix()
	token.Claims["sub"] = userName
	tokenString, e := token.SignedString(jwt.secretAccessKey)
	tokenString, e := token.SignedString([]byte(jwt.SecretAccessKey))
	if e != nil {
		return "", probe.NewError(e)
	}

@@ -68,9 +64,9 @@ func (jwt *JWT) GenerateToken(userName string) (string, *probe.Error) {
func (jwt *JWT) Authenticate(userName, password string) bool {
	userName = strings.TrimSpace(userName)
	password = strings.TrimSpace(password)
	if !bytes.Equal([]byte(userName), jwt.accessKeyID) {
	if userName != jwt.AccessKeyID {
		return false
	}
	hashedPassword, _ := bcrypt.GenerateFromPassword(jwt.secretAccessKey, bcrypt.DefaultCost)
	hashedPassword, _ := bcrypt.GenerateFromPassword([]byte(jwt.SecretAccessKey), bcrypt.DefaultCost)
	return bcrypt.CompareHashAndPassword(hashedPassword, []byte(password)) == nil
}
46 logger-console-hook.go Normal file
@@ -0,0 +1,46 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"io/ioutil"

	"github.com/Sirupsen/logrus"
	"github.com/minio/minio/pkg/probe"
)

// consoleLogger - default logger if no other logging is enabled.
type consoleLogger struct {
	Enable bool   `json:"enable"`
	Level  string `json:"level"`
}

// enable console logger.
func enableConsoleLogger() {
	clogger := serverConfig.GetConsoleLogger()
	if !clogger.Enable {
		// Disable console logger if asked for.
		log.Out = ioutil.Discard
		return
	}
	// log.Out and log.Formatter use the default versions.
	// Only set specific log level.
	lvl, e := logrus.ParseLevel(clogger.Level)
	fatalIf(probe.NewError(e), "Unknown log level detected, please fix your console logger configuration.", nil)

	log.Level = lvl
}
@@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -24,29 +24,28 @@ import (
	"github.com/minio/minio/pkg/probe"
)

type fileLogger struct {
	Enable   bool   `json:"enable"`
	Filename string `json:"fileName"`
	Level    string `json:"level"`
}

type localFile struct {
	*os.File
}

func log2File(filename string) *probe.Error {
	fileHook, e := newFile(filename)
	if e != nil {
		return probe.NewError(e)
	}
	log.Hooks.Add(fileHook)                 // Add a local file hook.
	log.Formatter = &logrus.JSONFormatter{} // JSON formatted log.
	log.Level = logrus.InfoLevel            // Minimum log level.
	return nil
}

func newFile(filename string) (*localFile, error) {
	file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
	if err != nil {
		return nil, err
	}
	return &localFile{file}, nil
func enableFileLogger(filename string) {
	file, e := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
	fatalIf(probe.NewError(e), "Unable to open log file.", nil)

	// Add a local file hook.
	log.Hooks.Add(&localFile{file})
	// Set default JSON formatter.
	log.Formatter = new(logrus.JSONFormatter)
	log.Level = logrus.InfoLevel // Minimum log level.
}

// Fire fires the file logger hook and logs to the file.
func (l *localFile) Fire(entry *logrus.Entry) error {
	line, err := entry.String()
	if err != nil {
@@ -1,75 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/minio/minio/pkg/probe"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

// mongoDB collection
type mongoDB struct {
	c *mgo.Collection
}

func log2Mongo(url, db, collection string) *probe.Error {
	mongoHook, e := newMongo(url, db, collection)
	if e != nil {
		return probe.NewError(e)
	}
	log.Hooks.Add(mongoHook)                // Add mongodb hook.
	log.Formatter = &logrus.JSONFormatter{} // JSON formatted log.
	log.Level = logrus.InfoLevel            // Minimum log level.
	return nil
}

// newMongo -
func newMongo(mgoEndpoint, db, collection string) (*mongoDB, error) {
	session, err := mgo.Dial(mgoEndpoint)
	if err != nil {
		return nil, err
	}
	return &mongoDB{c: session.DB(db).C(collection)}, nil
}

// Fire - the log event
func (h *mongoDB) Fire(entry *logrus.Entry) error {
	entry.Data["Level"] = entry.Level.String()
	entry.Data["Time"] = entry.Time
	entry.Data["Message"] = entry.Message
	mgoErr := h.c.Insert(bson.M(entry.Data))
	if mgoErr != nil {
		return fmt.Errorf("Failed to send log entry to mongodb: %s", mgoErr)
	}
	return nil
}

// Levels -
func (h *mongoDB) Levels() []logrus.Level {
	return []logrus.Level{
		logrus.PanicLevel,
		logrus.FatalLevel,
		logrus.ErrorLevel,
		logrus.WarnLevel,
		logrus.InfoLevel,
		logrus.DebugLevel,
	}
}
@@ -1,7 +1,7 @@
// +build !windows

/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -26,6 +26,12 @@ import (
	"github.com/minio/minio/pkg/probe"
)

type syslogLogger struct {
	Enable bool   `json:"enable"`
	Addr   string `json:"address"`
	Level  string `json:"level"`
}

// syslogHook to send logs via syslog.
type syslogHook struct {
	writer *syslog.Writer

@@ -33,28 +39,27 @@ type syslogHook struct {
	syslogRaddr string
}

func log2Syslog(network, raddr string) *probe.Error {
	syslogHook, e := newSyslog(network, raddr, syslog.LOG_ERR, "MINIO")
	if e != nil {
		return probe.NewError(e)
	}
// enableSyslogLogger - enable logger at raddr.
func enableSyslogLogger(raddr string) {
	syslogHook, e := newSyslog("udp", raddr, syslog.LOG_ERR, "MINIO")
	fatalIf(probe.NewError(e), "Unable to instantiate syslog.", nil)

	log.Hooks.Add(syslogHook)               // Add syslog hook.
	log.Formatter = &logrus.JSONFormatter{} // JSON formatted log.
	log.Level = logrus.InfoLevel            // Minimum log level.
	return nil
}

// newSyslog - Creates a hook to be added to an instance of logger.
func newSyslog(network, raddr string, priority syslog.Priority, tag string) (*syslogHook, error) {
	w, err := syslog.Dial(network, raddr, priority, tag)
	return &syslogHook{w, network, raddr}, err
	w, e := syslog.Dial(network, raddr, priority, tag)
	return &syslogHook{w, network, raddr}, e
}

// Fire - fire the log event
func (hook *syslogHook) Fire(entry *logrus.Entry) error {
	line, err := entry.String()
	if err != nil {
		return fmt.Errorf("Unable to read entry, %v", err)
	line, e := entry.String()
	if e != nil {
		return fmt.Errorf("Unable to read entry, %v", e)
	}
	switch entry.Level {
	case logrus.PanicLevel:
@@ -1,7 +1,7 @@
// +build windows

/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -20,6 +20,13 @@ package main

import "github.com/minio/minio/pkg/probe"

func log2Syslog(network, raddr string) *probe.Error {
	return probe.NewError(errSysLogNotSupported)
type syslogLogger struct {
	Enable bool   `json:"enable"`
	Addr   string `json:"address"`
	Level  string `json:"level"`
}

// enableSyslogLogger - unsupported on windows.
func enableSyslogLogger(raddr string) {
	fatalIf(probe.NewError(errSyslogNotSupported), "Unable to enable syslog.", nil)
}
18 logger.go
@@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -28,6 +28,21 @@ type fields map[string]interface{}

var log = logrus.New() // Default console logger.

// logger carries logging configuration for various supported loggers.
// Currently supported loggers are
//
//   - console [default]
//   - file
//   - syslog
//
type logger struct {
	Console consoleLogger `json:"console"`
	File    fileLogger    `json:"file"`
	Syslog  syslogLogger  `json:"syslog"`
	// Add new loggers here.
}

// errorIf synonymous with fatalIf but doesn't exit on error != nil
func errorIf(err *probe.Error, msg string, fields map[string]interface{}) {
	if err == nil {
		return

@@ -49,6 +64,7 @@ func errorIf(err *probe.Error, msg string, fields map[string]interface{}) {
	log.WithFields(fields).Error(msg)
}

// fatalIf wrapper function which takes error and prints jsonic error messages.
func fatalIf(err *probe.Error, msg string, fields map[string]interface{}) {
	if err == nil {
		return
31 main.go
@@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -63,11 +63,23 @@ func init() {
		console.Fatalln("Please run ‘minio’ as a non-root user.")
	}

	// Initialize config.
	err := initConfig()
	fatalIf(err.Trace(), "Unable to initialize minio config.", nil)
}

func migrate() {
	// Migrate config file
	migrateConfig()

	// Migrate other configs here.
}

func enableLoggers() {
	// Enable all loggers here.
	enableConsoleLogger()

	// Add your logger here.
}

// Tries to get os/arch/platform specific information

@@ -118,24 +130,20 @@ func findClosestCommands(command string) []string {
}

func registerApp() *cli.App {
	// register all commands
	// Register all commands.
	registerCommand(initCmd)
	registerCommand(serverCmd)
	registerCommand(configCmd)
	registerCommand(versionCmd)
	registerCommand(updateCmd)

	// register all flags
	// Register all flags.
	registerFlag(configFolderFlag)
	registerFlag(addressFlag)
	registerFlag(accessLogFlag)
	registerFlag(certFlag)
	registerFlag(keyFlag)

	// set up app
	// Set up app.
	app := cli.NewApp()
	app.Name = "Minio"
	app.Author = "Minio.io"
	app.Usage = "Cloud Storage Server for Micro Services."
	app.Usage = "Distributed Object Storage Server for Micro Services."
	app.Description = `Micro services environment provisions one Minio server per application instance. Scalability is achieved through large number of smaller personalized instances. This version of the Minio binary is built using Filesystem storage backend for magnetic and solid state disks.`
	app.Flags = flags
	app.Commands = commands

@@ -180,6 +188,9 @@ func main() {
	// Migrate any old version of config / state files to newer format.
	migrate()

	// Enable all loggers by now.
	enableLoggers()

	return nil
}
app.ExtraInfo = func() map[string]string {
352 minio-main.go Normal file
@@ -0,0 +1,352 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"net/http"
	"os"
	"runtime"
	"strconv"
	"strings"
	"syscall"

	"github.com/minio/cli"
	"github.com/minio/mc/pkg/console"
	"github.com/minio/minio/pkg/fs"
	"github.com/minio/minio/pkg/minhttp"
	"github.com/minio/minio/pkg/probe"
)

var initCmd = cli.Command{
	Name:  "init",
	Usage: "Initialize Minio cloud storage server.",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "address",
			Value: ":9000",
		},
	},
	Action: initMain,
	CustomHelpTemplate: `NAME:
   minio {{.Name}} - {{.Usage}}

USAGE:
   minio {{.Name}} [OPTION VALUE] PATH

OPTIONS:
   {{range .Flags}}{{.}}
   {{end}}
ENVIRONMENT VARIABLES:
   MINIO_ACCESS_KEY, MINIO_SECRET_KEY: Access and secret key to use.

EXAMPLES:
   1. Start minio server on Linux.
      $ minio {{.Name}} fs /home/shared

   2. Start minio server on Windows.
      $ minio {{.Name}} fs C:\MyShare

   3. Start minio server bound to a specific IP:PORT, when you have multiple network interfaces.
      $ minio {{.Name}} --address 192.168.1.101:9000 fs /home/shared

   4. Start minio server with minimum free disk threshold to 5%
      $ minio {{.Name}} fs /home/shared/Pictures

`,
}

var serverCmd = cli.Command{
	Name:   "server",
	Usage:  "Start Minio cloud storage server.",
	Flags:  []cli.Flag{},
	Action: serverMain,
	CustomHelpTemplate: `NAME:
   minio {{.Name}} - {{.Usage}}

USAGE:
   minio {{.Name}}

EXAMPLES:
   1. Start minio server.
      $ minio {{.Name}}

`,
}

// configureServer configure a new server instance
func configureServer(filesystem fs.Filesystem) *http.Server {
	// Minio server config
	apiServer := &http.Server{
		Addr:           serverConfig.GetAddr(),
		Handler:        configureServerHandler(filesystem),
		MaxHeaderBytes: 1 << 20,
	}

	// Configure TLS if certs are available.
	if isSSL() {
		var e error
		apiServer.TLSConfig = &tls.Config{}
		apiServer.TLSConfig.Certificates = make([]tls.Certificate, 1)
		apiServer.TLSConfig.Certificates[0], e = tls.LoadX509KeyPair(mustGetCertFile(), mustGetKeyFile())
		fatalIf(probe.NewError(e), "Unable to load certificates.", nil)
	}

	// Returns configured HTTP server.
	return apiServer
}

// Print listen ips.
func printListenIPs(httpServerConf *http.Server) {
	host, port, e := net.SplitHostPort(httpServerConf.Addr)
	fatalIf(probe.NewError(e), "Unable to split host port.", nil)

	var hosts []string
	switch {
	case host != "":
		hosts = append(hosts, host)
	default:
		addrs, e := net.InterfaceAddrs()
		fatalIf(probe.NewError(e), "Unable to get interface address.", nil)
		for _, addr := range addrs {
			if addr.Network() == "ip+net" {
				host := strings.Split(addr.String(), "/")[0]
				if ip := net.ParseIP(host); ip.To4() != nil {
					hosts = append(hosts, host)
				}
			}
		}
	}
	for _, host := range hosts {
		if httpServerConf.TLSConfig != nil {
			console.Printf("    https://%s:%s\n", host, port)
		} else {
			console.Printf("    http://%s:%s\n", host, port)
		}
	}
}

// initServer initialize server
func initServer(c *cli.Context) {
	host, port, _ := net.SplitHostPort(c.String("address"))
	// If port empty, default to port '80'
	if port == "" {
		port = "80"
||||
// if SSL is enabled, choose port as "443" instead.
|
||||
if isSSL() {
|
||||
port = "443"
|
||||
}
|
||||
}
|
||||
|
||||
// Join host and port.
|
||||
serverConfig.SetAddr(net.JoinHostPort(host, port))
|
||||
|
||||
// Set backend FS type.
|
||||
if c.Args().Get(0) == "fs" {
|
||||
fsPath := strings.TrimSpace(c.Args().Get(1))
|
||||
// Last argument is always a file system path, verify if it exists and is accessible.
|
||||
_, e := os.Stat(fsPath)
|
||||
fatalIf(probe.NewError(e), "Unable to validate the path", nil)
|
||||
|
||||
serverConfig.SetBackend(backend{
|
||||
Type: "fs",
|
||||
Disk: fsPath,
|
||||
})
|
||||
} // else { Add backend XL type here.
|
||||
|
||||
// Fetch access keys from environment variables if any and update the config.
|
||||
accessKey := os.Getenv("MINIO_ACCESS_KEY")
|
||||
secretKey := os.Getenv("MINIO_SECRET_KEY")
|
||||
|
||||
// Validate if both keys are specified and they are valid save them.
|
||||
if accessKey != "" && secretKey != "" {
|
||||
if !isValidAccessKey.MatchString(accessKey) {
|
||||
fatalIf(probe.NewError(errInvalidArgument), "Access key does not have required length", nil)
|
||||
}
|
||||
if !isValidSecretKey.MatchString(secretKey) {
|
||||
fatalIf(probe.NewError(errInvalidArgument), "Secret key does not have required length", nil)
|
||||
}
|
||||
serverConfig.SetCredential(credential{
|
||||
AccessKeyID: accessKey,
|
||||
SecretAccessKey: secretKey,
|
||||
})
|
||||
}
|
||||
|
||||
// Save new config.
|
||||
err := serverConfig.Save()
|
||||
fatalIf(err.Trace(), "Unable to save config.", nil)
|
||||
|
||||
// Successfully written.
|
||||
backend := serverConfig.GetBackend()
|
||||
if backend.Type == "fs" {
|
||||
console.Println(colorGreen("Successfully initialized Minio at %s", backend.Disk))
|
||||
}
|
||||
}
|
||||
|
||||
// check init arguments.
|
||||
func checkInitSyntax(c *cli.Context) {
|
||||
if !c.Args().Present() || c.Args().First() == "help" {
|
||||
cli.ShowCommandHelpAndExit(c, "init", 1)
|
||||
}
|
||||
if len(c.Args()) > 2 {
|
||||
fatalIf(probe.NewError(errInvalidArgument), "Unnecessary arguments passed. Please refer ‘minio init --help’.", nil)
|
||||
}
|
||||
path := strings.TrimSpace(c.Args().Last())
|
||||
if path == "" {
|
||||
fatalIf(probe.NewError(errInvalidArgument), "Path argument cannot be empty.", nil)
|
||||
}
|
||||
}
|
||||
|
||||
// extract port number from address.
|
||||
// address should be of the form host:port
|
||||
func getPort(address string) int {
|
||||
_, portStr, e := net.SplitHostPort(address)
|
||||
fatalIf(probe.NewError(e), "Unable to split host port.", nil)
|
||||
portInt, e := strconv.Atoi(portStr)
|
||||
fatalIf(probe.NewError(e), "Invalid port number.", nil)
|
||||
return portInt
|
||||
}
|
||||
|
||||
// Make sure that none of the other processes are listening on the
|
||||
// specified port on any of the interfaces.
|
||||
//
|
||||
// On linux if a process is listening on 127.0.0.1:9000 then Listen()
|
||||
// on ":9000" fails with the error "port already in use".
|
||||
// However on Mac OSX Listen() on ":9000" falls back to the IPv6 address.
|
||||
// This causes confusion on Mac OSX that minio server is not reachable
|
||||
// on 127.0.0.1 even though minio server is running. So before we start
|
||||
// the minio server we make sure that the port is free on all the IPs.
|
||||
func checkPortAvailability(port int) {
|
||||
isAddrInUse := func(e error) bool {
|
||||
// Check if the syscall error is EADDRINUSE.
|
||||
// EADDRINUSE is the system call error if another process is
|
||||
// already listening at the specified port.
|
||||
neterr, ok := e.(*net.OpError)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
osErr, ok := neterr.Err.(*os.SyscallError)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
sysErr, ok := osErr.Err.(syscall.Errno)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if sysErr != syscall.EADDRINUSE {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
ifcs, e := net.Interfaces()
|
||||
if e != nil {
|
||||
fatalIf(probe.NewError(e), "Unable to list interfaces.", nil)
|
||||
}
|
||||
for _, ifc := range ifcs {
|
||||
addrs, e := ifc.Addrs()
|
||||
if e != nil {
|
||||
fatalIf(probe.NewError(e), fmt.Sprintf("Unable to list addresses on interface %s.", ifc.Name), nil)
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
ipnet, ok := addr.(*net.IPNet)
|
||||
if !ok {
|
||||
errorIf(probe.NewError(errors.New("")), "Interface type assertion to (*net.IPNet) failed.", nil)
|
||||
continue
|
||||
}
|
||||
ip := ipnet.IP
|
||||
network := "tcp4"
|
||||
if ip.To4() == nil {
|
||||
network = "tcp6"
|
||||
}
|
||||
tcpAddr := net.TCPAddr{IP: ip, Port: port, Zone: ifc.Name}
|
||||
l, e := net.ListenTCP(network, &tcpAddr)
|
||||
if e != nil {
|
||||
if isAddrInUse(e) {
|
||||
// Fail if port is already in use.
|
||||
fatalIf(probe.NewError(e), fmt.Sprintf("Unable to listen on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
|
||||
} else {
|
||||
// Ignore other errors.
|
||||
continue
|
||||
}
|
||||
}
|
||||
e = l.Close()
|
||||
if e != nil {
|
||||
fatalIf(probe.NewError(e), fmt.Sprintf("Unable to close listener on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func initMain(c *cli.Context) {
|
||||
// check 'init' cli arguments.
|
||||
checkInitSyntax(c)
|
||||
|
||||
// Initialize server.
|
||||
initServer(c)
|
||||
}
|
||||
|
||||
func serverMain(c *cli.Context) {
|
||||
if c.Args().Present() || c.Args().First() == "help" {
|
||||
cli.ShowCommandHelpAndExit(c, "server", 1)
|
||||
}
|
||||
|
||||
backend := serverConfig.GetBackend()
|
||||
if backend.Type == "fs" {
|
||||
// Initialize file system.
|
||||
filesystem, err := fs.New(backend.Disk)
|
||||
fatalIf(err.Trace(backend.Type, backend.Disk), "Initializing filesystem failed.", nil)
|
||||
|
||||
// Configure server.
|
||||
apiServer := configureServer(filesystem)
|
||||
|
||||
// Credential.
|
||||
cred := serverConfig.GetCredential()
|
||||
|
||||
// Region.
|
||||
region := serverConfig.GetRegion()
|
||||
|
||||
// Print credentials and region.
|
||||
console.Println("\n" + cred.String() + " " + colorMagenta("Region: ") + colorWhite(region))
|
||||
|
||||
console.Println("\nMinio Object Storage:")
|
||||
// Print api listen ips.
|
||||
printListenIPs(apiServer)
|
||||
|
||||
console.Println("\nMinio Browser:")
|
||||
// Print browser listen ips.
|
||||
printListenIPs(apiServer)
|
||||
|
||||
console.Println("\nTo configure Minio Client:")
|
||||
|
||||
// Download 'mc' links.
|
||||
if runtime.GOOS == "windows" {
|
||||
console.Println(" Download 'mc' from https://dl.minio.io/client/mc/release/" + runtime.GOOS + "-" + runtime.GOARCH + "/mc.exe")
|
||||
console.Println(" $ mc.exe config host add myminio http://localhost:9000 " + cred.AccessKeyID + " " + cred.SecretAccessKey)
|
||||
} else {
|
||||
console.Println(" $ wget https://dl.minio.io/client/mc/release/" + runtime.GOOS + "-" + runtime.GOARCH + "/mc")
|
||||
console.Println(" $ chmod 755 mc")
|
||||
console.Println(" $ ./mc config host add myminio http://localhost:9000 " + cred.AccessKeyID + " " + cred.SecretAccessKey)
|
||||
}
|
||||
|
||||
// Start server.
|
||||
err = minhttp.ListenAndServe(apiServer)
|
||||
errorIf(err.Trace(), "Failed to start the minio server.", nil)
|
||||
}
|
||||
}
|
32
minio.md
Normal file
@ -0,0 +1,32 @@
# Server command line SPEC. - Tue Mar 22 06:04:42 UTC 2016

Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~

Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~

For 'fs' backend it starts the server.
~~~
$ minio server
~~~

For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~

Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
@ -36,7 +36,7 @@ func TestListObjects(t *testing.T) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    fs, err := New(directory, 0)
    fs, err := New(directory)
    if err != nil {
        t.Fatal(err)
    }
@ -579,7 +579,7 @@ func BenchmarkListObjects(b *testing.B) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    filesystem, err := New(directory, 0)
    filesystem, err := New(directory)
    if err != nil {
        b.Fatal(err)
    }

@ -36,7 +36,7 @@ func TestGetBucketInfo(t *testing.T) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    filesystem, err := New(directory, 0)
    filesystem, err := New(directory)
    if err != nil {
        t.Fatal(err)
    }
@ -104,7 +104,7 @@ func TestListBuckets(t *testing.T) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    filesystem, err := New(directory, 0)
    filesystem, err := New(directory)
    if err != nil {
        t.Fatal(err)
    }
@ -144,7 +144,7 @@ func TestDeleteBucket(t *testing.T) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    filesystem, err := New(directory, 0)
    filesystem, err := New(directory)
    if err != nil {
        t.Fatal(err)
    }
@ -165,7 +165,7 @@ func BenchmarkListBuckets(b *testing.B) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    filesystem, err := New(directory, 0)
    filesystem, err := New(directory)
    if err != nil {
        b.Fatal(err)
    }
@ -198,7 +198,7 @@ func BenchmarkDeleteBucket(b *testing.B) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    filesystem, err := New(directory, 0)
    filesystem, err := New(directory)
    if err != nil {
        b.Fatal(err)
    }
@ -233,7 +233,7 @@ func BenchmarkGetBucketInfo(b *testing.B) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    filesystem, err := New(directory, 0)
    filesystem, err := New(directory)
    if err != nil {
        b.Fatal(err)
    }

@ -38,7 +38,7 @@ func TestGetObjectInfo(t *testing.T) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    fs, err := New(directory, 0)
    fs, err := New(directory)
    if err != nil {
        t.Fatal(err)
    }
@ -129,7 +129,7 @@ func TestGetObjectInfoCore(t *testing.T) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    fs, err := New(directory, 0)
    fs, err := New(directory)
    if err != nil {
        t.Fatal(err)
    }
@ -214,7 +214,7 @@ func BenchmarkGetObject(b *testing.B) {
    defer os.RemoveAll(directory)

    // Create the filesystem.
    filesystem, err := New(directory, 0)
    filesystem, err := New(directory)
    if err != nil {
        b.Fatal(err)
    }
11
pkg/fs/fs.go
@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -96,7 +96,7 @@ type Multiparts struct {
}

// New instantiate a new donut
func New(rootPath string, minFreeDisk int64) (Filesystem, *probe.Error) {
func New(rootPath string) (Filesystem, *probe.Error) {
    setFSMultipartsMetadataPath(filepath.Join(rootPath, "$multiparts-session.json"))

    var err *probe.Error
@ -126,7 +126,7 @@ func New(rootPath string, minFreeDisk int64) (Filesystem, *probe.Error) {
    /// Defaults

    // minimum free disk required for i/o operations to succeed.
    fs.minFreeDisk = minFreeDisk
    fs.minFreeDisk = 5

    fs.listObjectMap = make(map[ListObjectParams][]ObjectInfoChannel)
    fs.listObjectMapMutex = &sync.Mutex{}
@ -134,3 +134,8 @@ func New(rootPath string, minFreeDisk int64) (Filesystem, *probe.Error) {
    // Return here.
    return fs, nil
}

// GetRootPath - get root path.
func (fs Filesystem) GetRootPath() string {
    return fs.path
}
@ -36,7 +36,7 @@ func (s *MySuite) TestAPISuite(c *C) {
    path, e := ioutil.TempDir(os.TempDir(), "minio-")
    c.Check(e, IsNil)
    storageList = append(storageList, path)
    store, err := New(path, 0)
    store, err := New(path)
    c.Check(err, IsNil)
    return store
}
79
routers.go
@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -35,28 +35,22 @@ import (

// storageAPI container for S3 compatible API.
type storageAPI struct {
    // Once true log all incoming requests.
    AccessLog bool
    // Filesystem instance.
    Filesystem fs.Filesystem
    // Signature instance.
    Signature *signature4.Sign
    // Region instance.
    Region string
}

// webAPI container for Web API.
type webAPI struct {
    // FSPath filesystem path.
    FSPath string
    // Once true log all incoming request.
    AccessLog bool
    // Minio client instance.
    Client *minio.Client

    // private params.
    apiAddress string // api destination address.
    // accessKeys kept to be used internally.
    // credential kept to be used internally.
    accessKeyID     string
    secretAccessKey string
}
@ -164,11 +158,28 @@ func registerAPIHandlers(mux *router.Router, a storageAPI, w *webAPI) {
    api.Methods("GET").HandlerFunc(a.ListBucketsHandler)
}

// initWeb instantiate a new Web.
func initWeb(conf cloudServerConfig) *webAPI {
// configureServer handler returns final handler for the http server.
func configureServerHandler(filesystem fs.Filesystem) http.Handler {
    // Access credentials.
    cred := serverConfig.GetCredential()

    // Server region.
    region := serverConfig.GetRegion()

    // Server addr.
    addr := serverConfig.GetAddr()

    sign, err := signature4.New(cred.AccessKeyID, cred.SecretAccessKey, region)
    fatalIf(err.Trace(cred.AccessKeyID, cred.SecretAccessKey, region), "Initializing signature version '4' failed.", nil)

    // Initialize API.
    api := storageAPI{
        Filesystem: filesystem,
        Signature:  sign,
    }

    // Split host port.
    host, port, e := net.SplitHostPort(conf.Address)
    fatalIf(probe.NewError(e), "Unable to parse web addess.", nil)
    host, port, _ := net.SplitHostPort(addr)

    // Default host is 'localhost', if no host present.
    if host == "" {
@ -176,44 +187,18 @@ func initWeb(conf cloudServerConfig) *webAPI {
    }

    // Initialize minio client for AWS Signature Version '4'
    inSecure := !conf.TLS // Insecure true when TLS is false.
    client, e := minio.NewV4(net.JoinHostPort(host, port), conf.AccessKeyID, conf.SecretAccessKey, inSecure)
    disableSSL := !isSSL() // Insecure true when SSL is false.
    client, e := minio.NewV4(net.JoinHostPort(host, port), cred.AccessKeyID, cred.SecretAccessKey, disableSSL)
    fatalIf(probe.NewError(e), "Unable to initialize minio client", nil)

    w := &webAPI{
        FSPath:          conf.Path,
        AccessLog:       conf.AccessLog,
        Client:          client,
        apiAddress:      conf.Address,
        accessKeyID:     conf.AccessKeyID,
        secretAccessKey: conf.SecretAccessKey,
    }
    return w
}

// initAPI instantiate a new StorageAPI.
func initAPI(conf cloudServerConfig) storageAPI {
    fs, err := fs.New(conf.Path, conf.MinFreeDisk)
    fatalIf(err.Trace(), "Initializing filesystem failed.", nil)

    sign, err := signature4.New(conf.AccessKeyID, conf.SecretAccessKey, conf.Region)
    fatalIf(err.Trace(conf.AccessKeyID, conf.SecretAccessKey, conf.Region), "Initializing signature version '4' failed.", nil)

    return storageAPI{
        AccessLog:  conf.AccessLog,
        Filesystem: fs,
        Signature:  sign,
        Region:     conf.Region,
    }
}

// server handler returns final handler before initializing server.
func serverHandler(conf cloudServerConfig) http.Handler {
    // Initialize API.
    api := initAPI(conf)

    // Initialize Web.
    web := initWeb(conf)
    web := &webAPI{
        FSPath:          filesystem.GetRootPath(),
        Client:          client,
        apiAddress:      addr,
        accessKeyID:     cred.AccessKeyID,
        secretAccessKey: cred.SecretAccessKey,
    }

    var handlerFns = []HandlerFunc{
        // Redirect some pre-defined browser request paths to a static
277
server-config.go
@ -1,277 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "encoding/json"
    "errors"
    "fmt"
    "os"
    "path/filepath"

    "github.com/fatih/color"
    "github.com/minio/go-homedir"
    "github.com/minio/mc/pkg/console"
    "github.com/minio/minio/pkg/probe"
    "github.com/minio/minio/pkg/quick"
)

// configV1
type configV1 struct {
    Version         string `json:"version"`
    AccessKeyID     string `json:"accessKeyId"`
    SecretAccessKey string `json:"secretAccessKey"`
}

// configV2
type configV2 struct {
    Version     string `json:"version"`
    Credentials struct {
        AccessKeyID     string `json:"accessKeyId"`
        SecretAccessKey string `json:"secretAccessKey"`
        Region          string `json:"region"`
    } `json:"credentials"`
    MongoLogger struct {
        Addr       string `json:"addr"`
        DB         string `json:"db"`
        Collection string `json:"collection"`
    } `json:"mongoLogger"`
    SyslogLogger struct {
        Network string `json:"network"`
        Addr    string `json:"addr"`
    } `json:"syslogLogger"`
    FileLogger struct {
        Filename string `json:"filename"`
    } `json:"fileLogger"`
}

func (c *configV2) IsFileLoggingEnabled() bool {
    if c.FileLogger.Filename != "" {
        return true
    }
    return false
}

func (c *configV2) IsSysloggingEnabled() bool {
    if c.SyslogLogger.Network != "" && c.SyslogLogger.Addr != "" {
        return true
    }
    return false
}

func (c *configV2) IsMongoLoggingEnabled() bool {
    if c.MongoLogger.Addr != "" && c.MongoLogger.DB != "" && c.MongoLogger.Collection != "" {
        return true
    }
    return false
}

func (c *configV2) String() string {
    white := color.New(color.FgWhite, color.Bold).SprintfFunc()
    var str string
    if c.IsMongoLoggingEnabled() {
        str = fmt.Sprintf("Mongo -> %s", white("Addr: %s, DB: %s, Collection: %s",
            c.MongoLogger.Addr, c.MongoLogger.DB, c.MongoLogger.Collection))
    }
    if c.IsSysloggingEnabled() {
        str = fmt.Sprintf("Syslog -> %s", white("Addr: %s, Network: %s",
            c.SyslogLogger.Addr, c.SyslogLogger.Network))
    }
    if c.IsFileLoggingEnabled() {
        str = fmt.Sprintf("File -> %s", white("Filename: %s", c.FileLogger.Filename))
    }
    return str
}

func (c *configV2) JSON() string {
    type logger struct {
        MongoLogger struct {
            Addr       string `json:"addr"`
            DB         string `json:"db"`
            Collection string `json:"collection"`
        } `json:"mongoLogger"`
        SyslogLogger struct {
            Network string `json:"network"`
            Addr    string `json:"addr"`
        } `json:"syslogLogger"`
        FileLogger struct {
            Filename string `json:"filename"`
        } `json:"fileLogger"`
    }
    loggerBytes, err := json.Marshal(logger{
        MongoLogger:  c.MongoLogger,
        SyslogLogger: c.SyslogLogger,
        FileLogger:   c.FileLogger,
    })
    fatalIf(probe.NewError(err), "Unable to marshal logger struct into JSON.", nil)
    return string(loggerBytes)
}

// configPath for custom config path only for testing purposes
var customConfigPath string

// Sets a new config path.
func setGlobalConfigPath(configPath string) {
    customConfigPath = configPath
}

// getConfigPath get users config path
func getConfigPath() (string, *probe.Error) {
    if customConfigPath != "" {
        return customConfigPath, nil
    }
    homeDir, e := homedir.Dir()
    if e != nil {
        return "", probe.NewError(e)
    }
    configPath := filepath.Join(homeDir, ".minio")
    return configPath, nil
}

func mustGetConfigPath() string {
    configPath, err := getConfigPath()
    fatalIf(err.Trace(), "Unable to get config path.", nil)
    return configPath
}

// createConfigPath create users config path
func createConfigPath() *probe.Error {
    configPath, err := getConfigPath()
    if err != nil {
        return err.Trace()
    }
    if e := os.MkdirAll(configPath, 0700); e != nil {
        return probe.NewError(e)
    }
    return nil
}

// getConfigFile get users config file
func getConfigFile() (string, *probe.Error) {
    configPath, err := getConfigPath()
    if err != nil {
        return "", err.Trace()
    }
    return filepath.Join(configPath, "config.json"), nil
}

// saveConfig save config
func saveConfig(a *configV2) *probe.Error {
    configFile, err := getConfigFile()
    if err != nil {
        return err.Trace()
    }
    qc, err := quick.New(a)
    if err != nil {
        return err.Trace()
    }
    if err := qc.Save(configFile); err != nil {
        return err.Trace()
    }
    return nil
}

// loadConfigV2 load config
func loadConfigV2() (*configV2, *probe.Error) {
    configFile, err := getConfigFile()
    if err != nil {
        return nil, err.Trace()
    }
    if _, err := os.Stat(configFile); err != nil {
        return nil, probe.NewError(err)
    }
    a := &configV2{}
    a.Version = "2"
    qc, err := quick.New(a)
    if err != nil {
        return nil, err.Trace()
    }
    if err := qc.Load(configFile); err != nil {
        return nil, err.Trace()
    }
    return qc.Data().(*configV2), nil
}

// loadConfigV1 load config
func loadConfigV1() (*configV1, *probe.Error) {
    configPath, err := getConfigPath()
    if err != nil {
        return nil, err.Trace()
    }
    configFile := filepath.Join(configPath, "fsUsers.json")
    if _, err := os.Stat(configFile); err != nil {
        return nil, probe.NewError(err)
    }
    a := &configV1{}
    a.Version = "1"
    qc, err := quick.New(a)
    if err != nil {
        return nil, err.Trace()
    }
    if err := qc.Load(configFile); err != nil {
        return nil, err.Trace()
    }
    return qc.Data().(*configV1), nil
}

func newConfigV2() *configV2 {
    config := &configV2{}
    config.Version = "2"
    config.Credentials.AccessKeyID = ""
    config.Credentials.SecretAccessKey = ""
    config.Credentials.Region = "us-east-1"
    config.MongoLogger.Addr = ""
    config.MongoLogger.DB = ""
    config.MongoLogger.Collection = ""
    config.SyslogLogger.Network = ""
    config.SyslogLogger.Addr = ""
    config.FileLogger.Filename = ""
    return config
}

func migrateConfig() {
    migrateV1ToV2()
}

func migrateV1ToV2() {
    cv1, err := loadConfigV1()
    if err != nil {
        if os.IsNotExist(err.ToGoError()) {
            return
        }
    }
    fatalIf(err.Trace(), "Unable to load config version ‘1’.", nil)

    if cv1.Version != "1" {
        fatalIf(probe.NewError(errors.New("")), "Invalid version loaded ‘"+cv1.Version+"’.", nil)
    }

    cv2 := newConfigV2()
    cv2.Credentials.AccessKeyID = cv1.AccessKeyID
    cv2.Credentials.SecretAccessKey = cv1.SecretAccessKey
    err = saveConfig(cv2)
    fatalIf(err.Trace(), "Unable to save config version ‘2’.", nil)

    console.Println("Migration from version ‘1’ to ‘2’ completed successfully.")

    /// Purge old fsUsers.json file
    configPath, err := getConfigPath()
    fatalIf(err.Trace(), "Unable to retrieve config path.", nil)

    configFile := filepath.Join(configPath, "fsUsers.json")
    os.RemoveAll(configFile)
}
420
server-main.go
@ -1,420 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "crypto/tls"
    "encoding/json"
    "errors"
    "fmt"
    "net"
    "net/http"
    "os"
    "runtime"
    "strconv"
    "strings"
    "syscall"

    "github.com/fatih/color"
    "github.com/minio/cli"
    "github.com/minio/mc/pkg/console"
    "github.com/minio/minio/pkg/minhttp"
    "github.com/minio/minio/pkg/probe"
)

var serverCmd = cli.Command{
    Name:  "server",
    Usage: "Start Minio cloud storage server.",
    Flags: []cli.Flag{
        cli.StringFlag{
            Name:  "min-free-disk, M",
            Value: "5%",
        },
    },
    Action: serverMain,
    CustomHelpTemplate: `NAME:
  minio {{.Name}} - {{.Usage}}

USAGE:
  minio {{.Name}} [OPTION VALUE] PATH

OPTIONS:
  {{range .Flags}}{{.}}
  {{end}}

ENVIRONMENT VARIABLES:
  MINIO_ACCESS_KEY, MINIO_SECRET_KEY: Access and secret key to use.

EXAMPLES:
  1. Start minio server on Linux.
      $ minio {{.Name}} /home/shared

  2. Start minio server on Windows.
      $ minio {{.Name}} C:\MyShare

  3. Start minio server bound to a specific IP:PORT, when you have multiple network interfaces.
      $ minio --address 192.168.1.101:9000 {{.Name}} /home/shared

  4. Start minio server with minimum free disk threshold to 5%
      $ minio {{.Name}} --min-free-disk 5% /home/shared/Pictures

`,
}

// cloudServerConfig - http server config
type cloudServerConfig struct {
    /// HTTP server options
    Address   string // Address:Port listening
    AccessLog bool   // Enable access log handler

    // Credentials.
    AccessKeyID     string // Access key id.
    SecretAccessKey string // Secret access key.
    Region          string // Region string.

    /// FS options
    Path        string // Path to export for cloud storage
    MinFreeDisk int64  // Minimum free disk space for filesystem
    MaxBuckets  int    // Maximum number of buckets suppported by filesystem.

    /// TLS service
    TLS      bool   // TLS on when certs are specified
    CertFile string // Domain certificate
    KeyFile  string // Domain key
}

// configureServer configure a new server instance
func configureServer(conf cloudServerConfig) (*http.Server, *probe.Error) {
    // Minio server config
    apiServer := &http.Server{
        Addr:           conf.Address,
        Handler:        serverHandler(conf),
        MaxHeaderBytes: 1 << 20,
    }

    if conf.TLS {
        var err error
        apiServer.TLSConfig = &tls.Config{}
        apiServer.TLSConfig.Certificates = make([]tls.Certificate, 1)
        apiServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(conf.CertFile, conf.KeyFile)
        if err != nil {
            return nil, probe.NewError(err)
        }
    }
    return apiServer, nil
}

func printServerMsg(serverConf *http.Server) {
    host, port, e := net.SplitHostPort(serverConf.Addr)
    fatalIf(probe.NewError(e), "Unable to split host port.", nil)

    var hosts []string
    switch {
    case host != "":
        hosts = append(hosts, host)
    default:
        addrs, e := net.InterfaceAddrs()
        fatalIf(probe.NewError(e), "Unable to get interface address.", nil)

        for _, addr := range addrs {
            if addr.Network() == "ip+net" {
                host := strings.Split(addr.String(), "/")[0]
                if ip := net.ParseIP(host); ip.To4() != nil {
                    hosts = append(hosts, host)
                }
            }
        }
    }
    for _, host := range hosts {
        if serverConf.TLSConfig != nil {
            console.Printf(" https://%s:%s\n", host, port)
        } else {
            console.Printf(" http://%s:%s\n", host, port)
        }
    }
}

// parse input string with percent to int64
func parsePercentToInt(s string, bitSize int) (int64, *probe.Error) {
    i := strings.Index(s, "%")
    if i < 0 {
        // no percentage string found try to parse the whole string anyways
        p, e := strconv.ParseInt(s, 10, bitSize)
        if e != nil {
            return 0, probe.NewError(e)
        }
        return p, nil
    }
    p, e := strconv.ParseInt(s[:i], 10, bitSize)
    if e != nil {
        return 0, probe.NewError(e)
    }
    return p, nil
}

func setLogger(conf *configV2) *probe.Error {
    if conf.IsMongoLoggingEnabled() {
        if err := log2Mongo(conf.MongoLogger.Addr, conf.MongoLogger.DB, conf.MongoLogger.Collection); err != nil {
            return err.Trace(conf.MongoLogger.Addr, conf.MongoLogger.DB, conf.MongoLogger.Collection)
        }
    }
    if conf.IsSysloggingEnabled() {
        if err := log2Syslog(conf.SyslogLogger.Network, conf.SyslogLogger.Addr); err != nil {
            return err.Trace(conf.SyslogLogger.Network, conf.SyslogLogger.Addr)
        }
    }
    if conf.IsFileLoggingEnabled() {
        if err := log2File(conf.FileLogger.Filename); err != nil {
            return err.Trace(conf.FileLogger.Filename)
        }
    }
    return nil
}

// Generates config if it doesn't exist, otherwise returns back the saved ones.
func getConfig() (*configV2, *probe.Error) {
    if err := createConfigPath(); err != nil {
        return nil, err.Trace()
    }
    if err := createBucketsConfigPath(); err != nil {
        return nil, err.Trace()
    }
    config, err := loadConfigV2()
    if err != nil {
        if os.IsNotExist(err.ToGoError()) {
            // Initialize new config, since config file doesn't exist yet
            config = &configV2{}
            config.Version = "2"
            config.Credentials.AccessKeyID = string(mustGenerateAccessKeyID())
            config.Credentials.SecretAccessKey = string(mustGenerateSecretAccessKey())
            config.Credentials.Region = "us-east-1"
            if err = saveConfig(config); err != nil {
                return nil, err.Trace()
            }
            return config, nil
        }
        return nil, err.Trace()
    }
    return config, nil
}

type accessKeys struct {
    *configV2
}

func (a accessKeys) String() string {
    magenta := color.New(color.FgMagenta, color.Bold).SprintFunc()
    white := color.New(color.FgWhite, color.Bold).SprintfFunc()
    return fmt.Sprint(magenta("AccessKey: ") + white(a.Credentials.AccessKeyID) + " " + magenta("SecretKey: ") + white(a.Credentials.SecretAccessKey) + " " + magenta("Region: ") + white(a.Credentials.Region))
}

// JSON - json formatted output
func (a accessKeys) JSON() string {
    b, e := json.Marshal(a)
    errorIf(probe.NewError(e), "Unable to marshal json", nil)
    return string(b)
}

// initServer initialize server
func initServer() (*configV2, *probe.Error) {
    conf, err := getConfig()
    if err != nil {
        return nil, err.Trace()
    }
    if err := setLogger(conf); err != nil {
        return nil, err.Trace()
    }
    return conf, nil
}

func checkServerSyntax(c *cli.Context) {
    if !c.Args().Present() || c.Args().First() == "help" {
        cli.ShowCommandHelpAndExit(c, "server", 1)
    }
    if len(c.Args()) > 1 {
        fatalIf(probe.NewError(errInvalidArgument), "Unnecessary arguments passed. Please refer ‘mc server help’", nil)
    }
    path := strings.TrimSpace(c.Args().Last())
    if path == "" {
        fatalIf(probe.NewError(errInvalidArgument), "Path argument cannot be empty.", nil)
    }
}

// extract port number from address.
// address should be of the form host:port
func getPort(address string) int {
    _, portStr, e := net.SplitHostPort(address)
    fatalIf(probe.NewError(e), "Unable to split host port.", nil)
    portInt, e := strconv.Atoi(portStr)
    fatalIf(probe.NewError(e), "Invalid port number.", nil)
    return portInt
}

// Make sure that none of the other processes are listening on the
// specified port on any of the interfaces.
//
// On linux if a process is listening on 127.0.0.1:9000 then Listen()
// on ":9000" fails with the error "port already in use".
// However on Mac OSX Listen() on ":9000" falls back to the IPv6 address.
// This causes confusion on Mac OSX that minio server is not reachable
// on 127.0.0.1 even though minio server is running. So before we start
// the minio server we make sure that the port is free on all the IPs.
func checkPortAvailability(port int) {
    isAddrInUse := func(e error) bool {
        // Check if the syscall error is EADDRINUSE.
        // EADDRINUSE is the system call error if another process is
        // already listening at the specified port.
        neterr, ok := e.(*net.OpError)
        if !ok {
            return false
        }
        osErr, ok := neterr.Err.(*os.SyscallError)
        if !ok {
            return false
        }
        sysErr, ok := osErr.Err.(syscall.Errno)
        if !ok {
            return false
        }
        if sysErr != syscall.EADDRINUSE {
            return false
        }
        return true
    }
    ifcs, e := net.Interfaces()
    if e != nil {
        fatalIf(probe.NewError(e), "Unable to list interfaces.", nil)
    }
    for _, ifc := range ifcs {
        addrs, e := ifc.Addrs()
        if e != nil {
            fatalIf(probe.NewError(e), fmt.Sprintf("Unable to list addresses on interface %s.", ifc.Name), nil)
        }
        for _, addr := range addrs {
            ipnet, ok := addr.(*net.IPNet)
            if !ok {
                errorIf(probe.NewError(errors.New("")), "Interface type assertion to (*net.IPNet) failed.", nil)
                continue
            }
            ip := ipnet.IP
            network := "tcp4"
            if ip.To4() == nil {
                network = "tcp6"
            }
            tcpAddr := net.TCPAddr{IP: ip, Port: port, Zone: ifc.Name}
            l, e := net.ListenTCP(network, &tcpAddr)
            if e != nil {
                if isAddrInUse(e) {
                    // Fail if port is already in use.
                    fatalIf(probe.NewError(e), fmt.Sprintf("Unable to listen on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
                } else {
                    // Ignore other errors.
                    continue
                }
            }
            e = l.Close()
            if e != nil {
                fatalIf(probe.NewError(e), fmt.Sprintf("Unable to close listener on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
            }
        }
    }
}

func serverMain(c *cli.Context) {
    checkServerSyntax(c)
    address := c.GlobalString("address")
    checkPortAvailability(getPort(address))

    certFile := c.GlobalString("cert")
    keyFile := c.GlobalString("key")
    if (certFile != "" && keyFile == "") || (certFile == "" && keyFile != "") {
        fatalIf(probe.NewError(errInvalidArgument), "Both certificate and key are required to enable https.", nil)
    }

    conf, err := initServer()
    fatalIf(err.Trace(), "Failed to read config for minio.", nil)

    accessKey := os.Getenv("MINIO_ACCESS_KEY")
    secretKey := os.Getenv("MINIO_SECRET_KEY")
    if accessKey != "" && secretKey != "" {
        if !isValidAccessKey(accessKey) {
            fatalIf(probe.NewError(errInvalidArgument), "Access key does not have required length", nil)
        }
        if !isValidSecretKey(secretKey) {
            fatalIf(probe.NewError(errInvalidArgument), "Secret key does not have required length", nil)
        }

        conf.Credentials.AccessKeyID = accessKey
        conf.Credentials.SecretAccessKey = secretKey

        err = saveConfig(conf)
        fatalIf(err.Trace(), "Unable to save credentials to config.", nil)
    }

    minFreeDisk, err := parsePercentToInt(c.String("min-free-disk"), 64)
    fatalIf(err.Trace(c.String("min-free-disk")), "Invalid minium free disk size "+c.String("min-free-disk")+" passed.", nil)

    path := strings.TrimSpace(c.Args().Last())
    // Last argument is always path
    if _, err := os.Stat(path); err != nil {
        fatalIf(probe.NewError(err), "Unable to validate the path", nil)
    }
    region := conf.Credentials.Region
    if region == "" {
        region = "us-east-1"
    }
    tls := (certFile != "" && keyFile != "")
    serverConfig := cloudServerConfig{
        Address:         address,
        AccessLog:       c.GlobalBool("enable-accesslog"),
        AccessKeyID:     conf.Credentials.AccessKeyID,
        SecretAccessKey: conf.Credentials.SecretAccessKey,
        Region:          region,
        Path:            path,
        MinFreeDisk:     minFreeDisk,
        TLS:             tls,
        CertFile:        certFile,
        KeyFile:         keyFile,
    }

    // configure server.
    apiServer, err := configureServer(serverConfig)
    errorIf(err.Trace(), "Failed to configure API server.", nil)

    console.Println()
    // Print access keys and region.
    console.Println(accessKeys{conf})

    console.Println("\nMinio Object Storage:")
    printServerMsg(apiServer)

    console.Println("\nMinio Browser:")
    printServerMsg(apiServer)

    console.Println("\nTo configure Minio Client:")
    if runtime.GOOS == "windows" {
        console.Println(" Download \"mc\" from https://dl.minio.io/client/mc/release/" + runtime.GOOS + "-" + runtime.GOARCH + "/mc.exe")
        console.Println(" $ mc.exe config host add myminio http://localhost:9000 " + conf.Credentials.AccessKeyID + " " + conf.Credentials.SecretAccessKey)
    } else {
        console.Println(" $ wget https://dl.minio.io/client/mc/release/" + runtime.GOOS + "-" + runtime.GOARCH + "/mc")
        console.Println(" $ chmod 755 mc")
        console.Println(" $ ./mc config host add myminio http://localhost:9000 " + conf.Credentials.AccessKeyID + " " + conf.Credentials.SecretAccessKey)
    }

    // Start server.
    err = minhttp.ListenAndServe(apiServer)
    errorIf(err.Trace(), "Failed to start the minio server.", nil)
}
@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -43,11 +43,10 @@ const (
)

type MyAPIFSCacheSuite struct {
    root            string
    req             *http.Request
    body            io.ReadSeeker
    accessKeyID     string
    secretAccessKey string
    root       string
    req        *http.Request
    body       io.ReadSeeker
    credential credential
}

var _ = Suite(&MyAPIFSCacheSuite{})
@ -77,31 +76,28 @@ func (s *MyAPIFSCacheSuite) SetUpSuite(c *C) {
    fsroot, e := ioutil.TempDir(os.TempDir(), "api-")
    c.Assert(e, IsNil)

    accessKeyID, err := generateAccessKeyID()
    c.Assert(err, IsNil)
    secretAccessKey, err := generateSecretAccessKey()
    c.Assert(err, IsNil)
    // Initialize server config.
    initConfig()

    conf := newConfigV2()
    conf.Credentials.AccessKeyID = string(accessKeyID)
    conf.Credentials.SecretAccessKey = string(secretAccessKey)
    s.accessKeyID = string(accessKeyID)
    s.secretAccessKey = string(secretAccessKey)
    // Get credential.
    s.credential = serverConfig.GetCredential()

    // do this only once here
    // Set a default region.
    serverConfig.SetRegion("us-east-1")

    // Set a new address.
    serverConfig.SetAddr(":" + strconv.Itoa(getFreePort()))

    // Do this only once here
    setGlobalConfigPath(root)

    c.Assert(saveConfig(conf), IsNil)
    // Save config.
    c.Assert(serverConfig.Save(), IsNil)

    cloudServer := cloudServerConfig{
        Address:         ":" + strconv.Itoa(getFreePort()),
        Path:            fsroot,
        MinFreeDisk:     0,
        AccessKeyID:     s.accessKeyID,
        SecretAccessKey: s.secretAccessKey,
        Region:          "us-east-1",
    }
    httpHandler := serverHandler(cloudServer)
    fs, err := fs.New(fsroot)
    c.Assert(err, IsNil)

    httpHandler := configureServerHandler(fs)
    testAPIFSCacheServer = httptest.NewServer(httpHandler)
}

@ -249,7 +245,7 @@ func (s *MyAPIFSCacheSuite) newRequest(method, urlStr string, contentLength int6
    stringToSign = stringToSign + scope + "\n"
    stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))

    date := sumHMAC([]byte("AWS4"+s.secretAccessKey), []byte(t.Format(yyyymmdd)))
    date := sumHMAC([]byte("AWS4"+s.credential.SecretAccessKey), []byte(t.Format(yyyymmdd)))
    region := sumHMAC(date, []byte("us-east-1"))
    service := sumHMAC(region, []byte("s3"))
    signingKey := sumHMAC(service, []byte("aws4_request"))
@ -258,7 +254,7 @@ func (s *MyAPIFSCacheSuite) newRequest(method, urlStr string, contentLength int6

    // final Authorization header
    parts := []string{
        "AWS4-HMAC-SHA256" + " Credential=" + s.accessKeyID + "/" + scope,
        "AWS4-HMAC-SHA256" + " Credential=" + s.credential.AccessKeyID + "/" + scope,
        "SignedHeaders=" + signedHeaders,
        "Signature=" + signature,
    }
@ -269,10 +265,10 @@ func (s *MyAPIFSCacheSuite) newRequest(method, urlStr string, contentLength int6
}

func (s *MyAPIFSCacheSuite) TestAuth(c *C) {
    secretID, err := generateSecretAccessKey()
    secretID, err := genSecretAccessKey()
    c.Assert(err, IsNil)

    accessID, err := generateAccessKeyID()
    accessID, err := genAccessKeyID()
    c.Assert(err, IsNil)

    c.Assert(len(secretID), Equals, minioSecretID)
@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -18,8 +18,8 @@ package main

import "errors"

// errSysLogNotSupported - this message is only meaningful on windows
var errSysLogNotSupported = errors.New("Syslog logger not supported on windows")
// errSyslogNotSupported - this message is only meaningful on windows
var errSyslogNotSupported = errors.New("Syslog logger not supported on windows")

// errInvalidArgument means that input argument is invalid.
var errInvalidArgument = errors.New("Invalid arguments specified")
499
vendor/github.com/minio/minio-go/API.md
generated
vendored
Normal file
@ -0,0 +1,499 @@
## API Documentation

### Minio client object creation
Minio client object is created using minio-go:
```go
package main

import (
    "fmt"

    "github.com/minio/minio-go"
)

func main() {
    s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
    if err != nil {
        fmt.Println(err)
        return
    }
}
```

s3Client can be used to perform operations on S3 storage. APIs are described below.
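
To see how the calls listed below compose, here is a minimal end-to-end sketch (not part of the original document) that creates a bucket and uploads a file with the client from above; the bucket and object names are illustrative, and the credentials are placeholders:

```go
package main

import (
    "fmt"
    "os"

    "github.com/minio/minio-go"
)

func main() {
    // Create a client as shown above; endpoint and credentials are placeholders.
    s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
    if err != nil {
        fmt.Println(err)
        return
    }

    // Create a bucket (see MakeBucket below).
    if err = s3Client.MakeBucket("my-example-bucket", "us-east-1"); err != nil {
        fmt.Println(err)
        return
    }

    // Stream a local file into the bucket (see PutObject below).
    file, err := os.Open("my-testfile")
    if err != nil {
        fmt.Println(err)
        return
    }
    defer file.Close()

    n, err := s3Client.PutObject("my-example-bucket", "my-objectname", file, "application/octet-stream")
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("Uploaded", n, "bytes")
}
```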
### Bucket operations

* [`MakeBucket`](#MakeBucket)
* [`ListBuckets`](#ListBuckets)
* [`BucketExists`](#BucketExists)
* [`RemoveBucket`](#RemoveBucket)
* [`ListObjects`](#ListObjects)
* [`ListIncompleteUploads`](#ListIncompleteUploads)

### Object operations

* [`GetObject`](#GetObject)
* [`PutObject`](#PutObject)
* [`StatObject`](#StatObject)
* [`RemoveObject`](#RemoveObject)
* [`RemoveIncompleteUpload`](#RemoveIncompleteUpload)

### File operations.

* [`FPutObject`](#FPutObject)
* [`FGetObject`](#FGetObject)

### Bucket policy operations.

* [`SetBucketPolicy`](#SetBucketPolicy)
* [`GetBucketPolicy`](#GetBucketPolicy)
* [`RemoveBucketPolicy`](#RemoveBucketPolicy)

### Presigned operations

* [`PresignedGetObject`](#PresignedGetObject)
* [`PresignedPutObject`](#PresignedPutObject)
* [`PresignedPostPolicy`](#PresignedPostPolicy)
### Bucket operations
---------------------------------------
<a name="MakeBucket">
#### MakeBucket(bucketName, location)
Create a new bucket.

__Arguments__
* `bucketName` _string_ - Name of the bucket.
* `location` _string_ - region valid values are _us-west-1_, _us-west-2_, _eu-west-1_, _eu-central-1_, _ap-southeast-1_, _ap-northeast-1_, _ap-southeast-2_, _sa-east-1_

__Example__
```go
err := s3Client.MakeBucket("mybucket", "us-west-1")
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println("Successfully created mybucket.")
```
---------------------------------------
<a name="ListBuckets">
#### ListBuckets()
List all buckets.

`bucketList` emits bucket with the format:
* `bucket.Name` _string_: bucket name
* `bucket.CreationDate` time.Time : date when bucket was created

__Example__
```go
buckets, err := s3Client.ListBuckets()
if err != nil {
    fmt.Println(err)
    return
}
for _, bucket := range buckets {
    fmt.Println(bucket)
}
```
---------------------------------------
<a name="BucketExists">
#### BucketExists(bucketName)
Check if bucket exists.

__Arguments__
* `bucketName` _string_ : name of the bucket

__Example__
```go
err := s3Client.BucketExists("mybucket")
if err != nil {
    fmt.Println(err)
    return
}
```
---------------------------------------
<a name="RemoveBucket">
#### RemoveBucket(bucketName)
Remove a bucket.

__Arguments__
* `bucketName` _string_ : name of the bucket

__Example__
```go
err := s3Client.RemoveBucket("mybucket")
if err != nil {
    fmt.Println(err)
    return
}
```
---------------------------------------
<a name="GetBucketPolicy">
#### GetBucketPolicy(bucketName, objectPrefix)
Get access permissions on a bucket or a prefix.

__Arguments__
* `bucketName` _string_ : name of the bucket
* `objectPrefix` _string_ : name of the object prefix

__Example__
```go
bucketPolicy, err := s3Client.GetBucketPolicy("mybucket")
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println("Access permissions for mybucket is", bucketPolicy)
```
---------------------------------------
<a name="SetBucketPolicy">
#### SetBucketPolicy(bucketname, objectPrefix, policy)
Set access permissions on bucket or an object prefix.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectPrefix` _string_ : name of the object prefix
* `policy` _BucketPolicy_: policy can be _none_, _readonly_, _readwrite_, _writeonly_

__Example__
```go
err := s3Client.SetBucketPolicy("mybucket", "myprefix", "readwrite")
if err != nil {
    fmt.Println(err)
    return
}
```
---------------------------------------
<a name="RemoveBucketPolicy">
#### RemoveBucketPolicy(bucketname, objectPrefix)
Remove existing permissions on bucket or an object prefix.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectPrefix` _string_ : name of the object prefix

__Example__
```go
err := s3Client.RemoveBucketPolicy("mybucket", "myprefix")
if err != nil {
    fmt.Println(err)
    return
}
```

---------------------------------------
<a name="ListObjects">
#### ListObjects(bucketName, prefix, recursive, doneCh)
List objects in a bucket.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectPrefix` _string_: the prefix of the objects that should be listed
* `recursive` _bool_: `true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'
* `doneCh` chan struct{} : channel for pro-actively closing the internal go routine

__Return Value__
* `<-chan ObjectInfo` _chan ObjectInfo_: Read channel for all the objects in the bucket, the object is of the format:
  * `objectInfo.Key` _string_: name of the object
  * `objectInfo.Size` _int64_: size of the object
  * `objectInfo.ETag` _string_: etag of the object
  * `objectInfo.LastModified` _time.Time_: modified time stamp

__Example__
```go
// Create a done channel to control 'ListObjects' go routine.
doneCh := make(chan struct{})

// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)

isRecursive := true
objectCh := s3Client.ListObjects("mybucket", "myprefix", isRecursive, doneCh)
for object := range objectCh {
    if object.Err != nil {
        fmt.Println(object.Err)
        return
    }
    fmt.Println(object)
}

```
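The streaming channel also makes simple aggregations straightforward. As an illustration (not part of the original document), here is a minimal sketch that totals the size of every object under a prefix using only the `ObjectInfo` fields documented above; it assumes the `s3Client` from the creation example, and the bucket and prefix names are hypothetical:

```go
// Count objects and total their sizes under "myprefix".
doneCh := make(chan struct{})
defer close(doneCh)

var count, totalSize int64
for object := range s3Client.ListObjects("mybucket", "myprefix", true, doneCh) {
    if object.Err != nil {
        fmt.Println(object.Err)
        return
    }
    count++
    totalSize += object.Size
}
fmt.Println(count, "objects,", totalSize, "bytes under myprefix")
```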
---------------------------------------
|
||||
<a name="ListIncompleteUploads">
|
||||
#### ListIncompleteUploads(bucketName, prefix, recursive)
|
||||
List partially uploaded objects in a bucket.
|
||||
|
||||
__Arguments__
|
||||
* `bucketname` _string_: name of the bucket
|
||||
* `prefix` _string_: prefix of the object names that are partially uploaded
|
||||
* `recursive` bool: directory style listing when false, recursive listing when true
|
||||
* `doneCh` chan struct{} : channel for pro-actively closing the internal go routine
|
||||
|
||||
__Return Value__
|
||||
* `<-chan ObjectMultipartInfo` _chan ObjectMultipartInfo_ : emits multipart objects of the format:
|
||||
* `multiPartObjInfo.Key` _string_: name of the incomplete object
|
||||
* `multiPartObjInfo.UploadID` _string_: upload ID of the incomplete object
|
||||
* `multiPartObjInfo.Size` _int64_: size of the incompletely uploaded object
|
||||
|
||||
__Example__
|
||||
```go
|
||||
// Create a done channel to control 'ListObjects' go routine.
|
||||
doneCh := make(chan struct{})
|
||||
|
||||
// Indicate to our routine to exit cleanly upon return.
|
||||
defer close(doneCh)
|
||||
|
||||
isRecursive := true
|
||||
multiPartObjectCh := s3Client.ListIncompleteUploads("mybucket", "myprefix", isRecursive, doneCh)
|
||||
for multiPartObject := range multiPartObjectCh {
|
||||
if multiPartObject.Err != nil {
|
||||
fmt.Println(multiPartObject.Err)
|
||||
return
|
||||
}
|
||||
fmt.Println(multiPartObject)
|
||||
}
|
||||
```
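
Each emitted `multiPartObjInfo.Key` can be passed to [RemoveIncompleteUpload](#RemoveIncompleteUpload) below to clean up the corresponding partial upload.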

---------------------------------------
### Object operations
<a name="GetObject">
#### GetObject(bucketName, objectName)
Download an object.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object

__Return Value__
* `object` _*minio.Object_: _minio.Object_ represents an object reader.

__Example__
```go
object, err := s3Client.GetObject("mybucket", "photo.jpg")
if err != nil {
    fmt.Println(err)
    return
}
localFile, err := os.Create("/tmp/local-file")
if err != nil {
    fmt.Println(err)
    return
}
if _, err := io.Copy(localFile, object); err != nil {
    fmt.Println(err)
    return
}
```
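
`minio.Object` also implements `Read`, `ReadAt`, and `Seek` (see the `api-get.go` diff below), so ranged reads are possible without downloading the whole object. A minimal sketch, reusing `object` from the example above; the offset and buffer size are arbitrary illustration values:

```go
// Seek to byte offset 1024 from the start (whence 0) and read up to 512 bytes.
buf := make([]byte, 512)
if _, err := object.Seek(1024, 0); err != nil {
    fmt.Println(err)
    return
}
n, err := object.Read(buf)
if err != nil && err != io.EOF {
    fmt.Println(err)
    return
}
fmt.Println("read", n, "bytes at offset 1024")
```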
---------------------------------------
<a name="FGetObject">
#### FGetObject(bucketName, objectName, filePath)
Download an object and save it as a file in the local filesystem.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
* `filePath` _string_: path to which the object data will be written

__Example__
```go
err := s3Client.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
if err != nil {
    fmt.Println(err)
    return
}
```

---------------------------------------
<a name="PutObject">
#### PutObject(bucketName, objectName, reader, contentType)
Upload an object from a stream.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
* `reader` _io.Reader_: any golang object implementing io.Reader
* `contentType` _string_: content type of the object

__Example__
```go
file, err := os.Open("my-testfile")
if err != nil {
    fmt.Println(err)
    return
}
defer file.Close()

n, err := s3Client.PutObject("my-bucketname", "my-objectname", file, "application/octet-stream")
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println("Successfully uploaded bytes:", n)
```
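
Since `reader` is just an `io.Reader`, in-memory data can be uploaded the same way. A minimal sketch, assuming the same `s3Client` and a `bytes` import:

```go
// Upload an in-memory payload; any io.Reader works, not only files.
data := bytes.NewReader([]byte("hello, minio"))
n, err := s3Client.PutObject("my-bucketname", "my-objectname.txt", data, "text/plain")
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println("Successfully uploaded bytes:", n)
```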

---------------------------------------
<a name="FPutObject">
#### FPutObject(bucketName, objectName, filePath, contentType)
Upload the contents of a file as an object.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
* `filePath` _string_: file path of the file to be uploaded
* `contentType` _string_: content type of the object

__Example__
```go
n, err := s3Client.FPutObject("my-bucketname", "my-objectname", "/tmp/my-filename.csv", "application/csv")
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println("Successfully uploaded bytes:", n)
```
---------------------------------------
<a name="StatObject">
#### StatObject(bucketName, objectName)
Get metadata of an object.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object

__Return Value__
`objInfo` _ObjectInfo_: object stat info of the following format:
* `objInfo.Size` _int64_: size of the object
* `objInfo.ETag` _string_: etag of the object
* `objInfo.ContentType` _string_: Content-Type of the object
* `objInfo.LastModified` _time.Time_: modified time stamp

__Example__
```go
objInfo, err := s3Client.StatObject("mybucket", "photo.jpg")
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println(objInfo)
```
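
The individual fields can of course be used directly, for example:

```go
fmt.Println("size:", objInfo.Size, "etag:", objInfo.ETag, "content-type:", objInfo.ContentType)
```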
---------------------------------------
<a name="RemoveObject">
#### RemoveObject(bucketName, objectName)
Remove an object.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object

__Example__
```go
err := s3Client.RemoveObject("mybucket", "photo.jpg")
if err != nil {
    fmt.Println(err)
    return
}
```
---------------------------------------
<a name="RemoveIncompleteUpload">
#### RemoveIncompleteUpload(bucketName, objectName)
Remove a partially uploaded object.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object

__Example__
```go
err := s3Client.RemoveIncompleteUpload("mybucket", "photo.jpg")
if err != nil {
    fmt.Println(err)
    return
}
```

### Presigned operations

---------------------------------------
<a name="PresignedGetObject">
#### PresignedGetObject(bucketName, objectName, expiry, reqParams)
Generate a presigned URL for GET.

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
* `expiry` _time.Duration_: expiry of the presigned URL
* `reqParams` _url.Values_: additional response header overrides; supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_

__Example__
```go
// Set request parameters for content-disposition.
reqParams := make(url.Values)
reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")

// Generates a presigned url which expires in a day.
presignedURL, err := s3Client.PresignedGetObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60, reqParams)
if err != nil {
    fmt.Println(err)
    return
}
```
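
The returned `presignedURL` is a plain string, so any HTTP client can use it. A minimal sketch, assuming a `net/http` import:

```go
// Fetch the object anonymously through the presigned URL.
resp, err := http.Get(presignedURL)
if err != nil {
    fmt.Println(err)
    return
}
defer resp.Body.Close()
fmt.Println("GET status:", resp.Status)
```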

---------------------------------------
<a name="PresignedPutObject">
#### PresignedPutObject(bucketName, objectName, expiry)
Generate a presigned URL for PUT.
<blockquote>
NOTE: the URL can only be used to upload an object with the specified object name.
</blockquote>

__Arguments__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
* `expiry` _time.Duration_: expiry of the presigned URL

__Example__
```go
// Generates a url which expires in a day.
presignedURL, err := s3Client.PresignedPutObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60)
if err != nil {
    fmt.Println(err)
    return
}
```

---------------------------------------
<a name="PresignedPostPolicy">
#### PresignedPostPolicy
With PresignedPostPolicy you can provide policies specifying conditions restricting
what you want to allow in a POST request, such as the bucket name where objects can be
uploaded, key name prefixes that you want to allow for the object being created, and more.

We need to create our policy first:
```go
policy := minio.NewPostPolicy()
```
Apply upload policy restrictions:
```go
policy.SetBucket("my-bucketname")
policy.SetKey("my-objectname")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days

// Only allow 'png' images.
policy.SetContentType("image/png")

// Only allow content size in range 1KB to 1MB.
policy.SetContentLengthRange(1024, 1024*1024)
```
Get the POST form key/value object:
```go
formData, err := s3Client.PresignedPostPolicy(policy)
if err != nil {
    fmt.Println(err)
    return
}
```

POST your content from the command line using `curl`:
```go
fmt.Printf("curl ")
for k, v := range formData {
    fmt.Printf("-F %s=%s ", k, v)
}
fmt.Printf("-F file=@/etc/bash.bashrc ")
fmt.Printf("https://my-bucketname.s3.amazonaws.com\n")
```
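
The same `formData` map can also be POSTed programmatically. A minimal sketch using `mime/multipart` (the endpoint URL is the same one shown in the `curl` example; `bytes`, `io`, `net/http`, and `os` imports are assumed):

```go
// Build a multipart/form-data body: policy fields first, then the file part.
var body bytes.Buffer
writer := multipart.NewWriter(&body)
for k, v := range formData {
    if err := writer.WriteField(k, v); err != nil {
        fmt.Println(err)
        return
    }
}
fileWriter, err := writer.CreateFormFile("file", "bash.bashrc")
if err != nil {
    fmt.Println(err)
    return
}
f, err := os.Open("/etc/bash.bashrc")
if err != nil {
    fmt.Println(err)
    return
}
defer f.Close()
if _, err = io.Copy(fileWriter, f); err != nil {
    fmt.Println(err)
    return
}
writer.Close()

resp, err := http.Post("https://my-bucketname.s3.amazonaws.com", writer.FormDataContentType(), &body)
if err != nil {
    fmt.Println(err)
    return
}
defer resp.Body.Close()
fmt.Println("POST status:", resp.Status)
```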
2 vendor/github.com/minio/minio-go/INSTALLGO.md generated vendored
@@ -71,7 +71,7 @@ export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
export PATH=$PATH:${GOPATH}/bin
```

##### Source the new enviornment
##### Source the new environment

```sh
$ source ~/.bash_profile
13 vendor/github.com/minio/minio-go/README.md generated vendored
@@ -61,12 +61,14 @@ func main() {

## Documentation

[API documentation](./API.md)

## Examples

### Bucket Operations.
* [MakeBucket(bucketName, BucketACL, location) error](examples/s3/makebucket.go)
* [MakeBucket(bucketName, location) error](examples/s3/makebucket.go)
* [BucketExists(bucketName) error](examples/s3/bucketexists.go)
* [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
* [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go)
* [SetBucketACL(bucketName, BucketACL) error)](examples/s3/setbucketacl.go)
* [ListBuckets() []BucketInfo](examples/s3/listbuckets.go)
* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectInfo](examples/s3/listobjects.go)
* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go)
@@ -87,6 +89,11 @@ func main() {
* [PresignedPutObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedputobject.go)
* [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go)

### Bucket Policy Operations.
* [SetBucketPolicy(bucketName, objectPrefix, BucketPolicy) error](examples/s3/setbucketpolicy.go)
* [GetBucketPolicy(bucketName, objectPrefix) (BucketPolicy, error)](examples/s3/getbucketpolicy.go)
* [RemoveBucketPolicy(bucketName, objectPrefix) error](examples/s3/removebucketpolicy.go)

### API Reference

[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/minio/minio-go)
235 vendor/github.com/minio/minio-go/api-get.go generated vendored
@@ -20,115 +20,89 @@ import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/url"
    "sort"
    "strings"
    "sync"
    "time"
)

// GetBucketACL - Get the permissions on an existing bucket.
//
// Returned values are:
//
//  private - Owner gets full access.
//  public-read - Owner gets full access, others get read access.
//  public-read-write - Owner gets full access, others get full access
//  too.
//  authenticated-read - Owner gets full access, authenticated users
//  get read access.
func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
// GetBucketPolicy - get bucket policy at a given path.
func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy BucketPolicy, err error) {
    // Input validation.
    if err := isValidBucketName(bucketName); err != nil {
        return "", err
        return BucketPolicyNone, err
    }
    if err := isValidObjectPrefix(objectPrefix); err != nil {
        return BucketPolicyNone, err
    }
    policy, err := c.getBucketPolicy(bucketName, objectPrefix)
    if err != nil {
        return BucketPolicyNone, err
    }
    if policy.Statements == nil {
        return BucketPolicyNone, nil
    }
    if isBucketPolicyReadWrite(policy.Statements, bucketName, objectPrefix) {
        return BucketPolicyReadWrite, nil
    } else if isBucketPolicyWriteOnly(policy.Statements, bucketName, objectPrefix) {
        return BucketPolicyWriteOnly, nil
    } else if isBucketPolicyReadOnly(policy.Statements, bucketName, objectPrefix) {
        return BucketPolicyReadOnly, nil
    }
    return BucketPolicyNone, nil
}

// Set acl query.
func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (BucketAccessPolicy, error) {
    // Input validation.
    if err := isValidBucketName(bucketName); err != nil {
        return BucketAccessPolicy{}, err
    }
    if err := isValidObjectPrefix(objectPrefix); err != nil {
        return BucketAccessPolicy{}, err
    }
    // Get resources properly escaped and lined up before
    // using them in http request.
    urlValues := make(url.Values)
    urlValues.Set("acl", "")
    urlValues.Set("policy", "")

    // Execute GET acl on bucketName.
    // Execute GET on bucket to list objects.
    resp, err := c.executeMethod("GET", requestMetadata{
        bucketName:  bucketName,
        queryValues: urlValues,
    })
    defer closeResponse(resp)
    if err != nil {
        return "", err
        return BucketAccessPolicy{}, err
    }
    if resp != nil {
        if resp.StatusCode != http.StatusOK {
            return "", httpRespToErrorResponse(resp, bucketName, "")
        }
    }

    // Decode access control policy.
    policy := accessControlPolicy{}
    err = xmlDecoder(resp.Body, &policy)
    if err != nil {
        return "", err
    }

    // We need to avoid following de-serialization check for Google
    // Cloud Storage. On Google Cloud Storage "private" canned ACL's
    // policy do not have grant list. Treat it as a valid case, check
    // for all other vendors.
    if !isGoogleEndpoint(c.endpointURL) {
        if policy.AccessControlList.Grant == nil {
            errorResponse := ErrorResponse{
                Code:       "InternalError",
                Message:    "Access control Grant list is empty. " + reportIssue,
                BucketName: bucketName,
                RequestID:  resp.Header.Get("x-amz-request-id"),
                HostID:     resp.Header.Get("x-amz-id-2"),
                Region:     resp.Header.Get("x-amz-bucket-region"),
        errResponse := httpRespToErrorResponse(resp, bucketName, "")
        if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" {
            return BucketAccessPolicy{Version: "2012-10-17"}, nil
        }
        return "", errorResponse
        return BucketAccessPolicy{}, errResponse
        }
    }

    // Boolean cues to indentify right canned acls.
    var publicRead, publicWrite, authenticatedRead bool

    // Handle grants.
    grants := policy.AccessControlList.Grant
    for _, g := range grants {
        if g.Grantee.URI == "" && g.Permission == "FULL_CONTROL" {
            continue
        }
        if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
            authenticatedRead = true
            break
        } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
            publicWrite = true
        } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
            publicRead = true
        }
    // Read access policy up to maxAccessPolicySize.
    // http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
    // bucket policies are limited to 20KB in size, using a limit reader.
    bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(resp.Body, maxAccessPolicySize))
    if err != nil {
        return BucketAccessPolicy{}, err
    }

    // Verify if acl is authenticated read.
    if authenticatedRead {
        return BucketACL("authenticated-read"), nil
    policy, err := unMarshalBucketPolicy(bucketPolicyBuf)
    if err != nil {
        return BucketAccessPolicy{}, err
    }
    // Verify if acl is private.
    if !publicWrite && !publicRead {
        return BucketACL("private"), nil
    }
    // Verify if acl is public-read.
    if !publicWrite && publicRead {
        return BucketACL("public-read"), nil
    }
    // Verify if acl is public-read-write.
    if publicRead && publicWrite {
        return BucketACL("public-read-write"), nil
    }

    return "", ErrorResponse{
        Code:       "NoSuchBucketPolicy",
        Message:    "The specified bucket does not have a bucket policy.",
        BucketName: bucketName,
        RequestID:  "minio",
    // Sort the policy actions and resources for convenience.
    for _, statement := range policy.Statements {
        sort.Strings(statement.Actions)
        sort.Strings(statement.Resources)
    }
    return policy, nil
}

// GetObject - returns an seekable, readable object.
@@ -140,8 +114,9 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
    if err := isValidObjectName(objectName); err != nil {
        return nil, err
    }
    // Send an explicit info to get the actual object size.
    objectInfo, err := c.StatObject(bucketName, objectName)

    // Start the request as soon Get is initiated.
    httpReader, objectInfo, err := c.getObject(bucketName, objectName, 0, 0)
    if err != nil {
        return nil, err
    }
@@ -153,8 +128,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
    // Create done channel.
    doneCh := make(chan struct{})

    // This routine feeds partial object data as and when the caller
    // reads.
    // This routine feeds partial object data as and when the caller reads.
    go func() {
        defer close(reqCh)
        defer close(resCh)
@@ -167,27 +141,27 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
                return
            // Request message.
            case req := <-reqCh:
                // Get shortest length.
                // NOTE: Last remaining bytes are usually smaller than
                // req.Buffer size. Use that as the final length.
                // Don't use Math.min() here to avoid converting int64 to float64
                length := int64(len(req.Buffer))
                if objectInfo.Size-req.Offset < length {
                    length = objectInfo.Size - req.Offset
                }
                httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length))
                if err != nil {
                    resCh <- readResponse{
                        Error: err,
                // Offset changes fetch the new object at an Offset.
                if req.DidOffsetChange {
                    // Read from offset.
                    httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, 0)
                    if err != nil {
                        resCh <- readResponse{
                            Error: err,
                        }
                        return
                    }
                    return
                }

                // Read at least req.Buffer bytes, if not we have
                // reached our EOF.
                size, err := io.ReadFull(httpReader, req.Buffer)
                if err == io.ErrUnexpectedEOF {
                    // If an EOF happens after reading some but not
                    // all the bytes ReadFull returns ErrUnexpectedEOF
                    err = io.EOF
                }
                // Reply back how much was read.
                resCh <- readResponse{
                    Size:  int(size),
                    Error: err,
@@ -208,8 +182,9 @@ type readResponse struct {
// Read request message container to communicate with internal
// go-routine.
type readRequest struct {
    Buffer []byte
    Offset int64 // readAt offset.
    Buffer          []byte
    Offset          int64 // readAt offset.
    DidOffsetChange bool
}

// Object represents an open object. It implements Read, ReadAt,
@@ -222,6 +197,7 @@ type Object struct {
    reqCh      chan<- readRequest
    resCh      <-chan readResponse
    doneCh     chan<- struct{}
    prevOffset int64
    currOffset int64
    objectInfo ObjectInfo

@@ -244,7 +220,7 @@ func (o *Object) Read(b []byte) (n int, err error) {
    o.mutex.Lock()
    defer o.mutex.Unlock()

    // Previous prevErr is which was saved in previous operation.
    // prevErr is previous error saved from previous operation.
    if o.prevErr != nil || o.isClosed {
        return 0, o.prevErr
    }
@@ -254,13 +230,27 @@ func (o *Object) Read(b []byte) (n int, err error) {
        return 0, io.EOF
    }

    // Send current information over control channel to indicate we
    // are ready.
    // Send current information over control channel to indicate we are ready.
    reqMsg := readRequest{}

    // Send the offset and pointer to the buffer over the channel.
    // Send the pointer to the buffer over the channel.
    reqMsg.Buffer = b
    reqMsg.Offset = o.currOffset

    // Verify if offset has changed and currOffset is greater than
    // previous offset. Perhaps due to Seek().
    offsetChange := o.prevOffset - o.currOffset
    if offsetChange < 0 {
        offsetChange = -offsetChange
    }
    if offsetChange > 0 {
        // Fetch the new reader at the current offset again.
        reqMsg.Offset = o.currOffset
        reqMsg.DidOffsetChange = true
    } else {
        // No offset changes no need to fetch new reader, continue
        // reading.
        reqMsg.DidOffsetChange = false
        reqMsg.Offset = 0
    }

    // Send read request over the control channel.
    o.reqCh <- reqMsg
@@ -274,6 +264,9 @@ func (o *Object) Read(b []byte) (n int, err error) {
    // Update current offset.
    o.currOffset += bytesRead

    // Save the current offset as previous offset.
    o.prevOffset = o.currOffset

    if dataMsg.Error == nil {
        // If currOffset read is equal to objectSize
        // We have reached end of file, we return io.EOF.
@@ -317,7 +310,7 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
    o.mutex.Lock()
    defer o.mutex.Unlock()

    // prevErr is which was saved in previous operation.
    // prevErr is error which was saved in previous operation.
    if o.prevErr != nil || o.isClosed {
        return 0, o.prevErr
    }
@@ -334,7 +327,16 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {

    // Send the offset and pointer to the buffer over the channel.
    reqMsg.Buffer = b
    reqMsg.Offset = offset

    // For ReadAt offset always changes, minor optimization where
    // offset same as currOffset we don't change the offset.
    reqMsg.DidOffsetChange = offset != o.currOffset
    if reqMsg.DidOffsetChange {
        // Set new offset.
        reqMsg.Offset = offset
        // Save new offset as current offset.
        o.currOffset = offset
    }

    // Send read request over the control channel.
    o.reqCh <- reqMsg
@@ -345,10 +347,16 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
    // Bytes read.
    bytesRead := int64(dataMsg.Size)

    // Update current offset.
    o.currOffset += bytesRead

    // Save current offset as previous offset before returning.
    o.prevOffset = o.currOffset

    if dataMsg.Error == nil {
        // If offset+bytes read is equal to objectSize
        // If currentOffset is equal to objectSize
        // we have reached end of file, we return io.EOF.
        if offset+bytesRead == o.objectInfo.Size {
        if o.currOffset >= o.objectInfo.Size {
            return dataMsg.Size, io.EOF
        }
        return dataMsg.Size, nil
@@ -378,7 +386,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
    defer o.mutex.Unlock()

    if o.prevErr != nil {
        // At EOF seeking is legal, for any other errors we return.
        // At EOF seeking is legal allow only io.EOF, for any other errors we return.
        if o.prevErr != io.EOF {
            return 0, o.prevErr
        }
@@ -388,6 +396,11 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
    if offset < 0 && whence != 2 {
        return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
    }

    // Save current offset as previous offset.
    o.prevOffset = o.currOffset

    // Switch through whence.
    switch whence {
    default:
        return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
168 vendor/github.com/minio/minio-go/api-put-bucket.go generated vendored
@@ -18,8 +18,11 @@ package minio

import (
    "bytes"
    "encoding/base64"
    "encoding/hex"
    "encoding/json"
    "encoding/xml"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
@@ -27,28 +30,18 @@ import (

/// Bucket operations

// MakeBucket makes a new bucket.
// MakeBucket creates a new bucket with bucketName.
//
// Optional arguments are acl and location - by default all buckets are created
// with ``private`` acl and in US Standard region.
//
// ACL valid values - http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
//
//  private - owner gets full access [default].
//  public-read - owner gets full access, all others get read access.
//  public-read-write - owner gets full access, all others get full access too.
//  authenticated-read - owner gets full access, authenticated users get read access.
// Location is an optional argument, by default all buckets are
// created in US Standard Region.
//
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) error {
func (c Client) MakeBucket(bucketName string, location string) error {
    // Validate the input arguments.
    if err := isValidBucketName(bucketName); err != nil {
        return err
    }
    if !acl.isValidBucketACL() {
        return ErrInvalidArgument("Unrecognized ACL " + acl.String())
    }

    // If location is empty, treat is a default region 'us-east-1'.
    if location == "" {
@@ -56,7 +49,7 @@ func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) er
    }

    // Instantiate the request.
    req, err := c.makeBucketRequest(bucketName, acl, location)
    req, err := c.makeBucketRequest(bucketName, location)
    if err != nil {
        return err
    }
@@ -82,14 +75,11 @@ func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) er
}

// makeBucketRequest constructs request for makeBucket.
func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location string) (*http.Request, error) {
func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
    // Validate input arguments.
    if err := isValidBucketName(bucketName); err != nil {
        return nil, err
    }
    if !acl.isValidBucketACL() {
        return nil, ErrInvalidArgument("Unrecognized ACL " + acl.String())
    }

    // In case of Amazon S3. The make bucket issued on already
    // existing bucket would fail with 'AuthorizationMalformed' error
@@ -106,12 +96,6 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
        return nil, err
    }

    // by default bucket acl is set to private.
    req.Header.Set("x-amz-acl", "private")
    if acl != "" {
        req.Header.Set("x-amz-acl", string(acl))
    }

    // set UserAgent for the request.
    c.setUserAgent(req)

@@ -131,9 +115,12 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
        }
        createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
        req.Body = ioutil.NopCloser(createBucketConfigBuffer)
        req.ContentLength = int64(createBucketConfigBuffer.Len())
        req.ContentLength = int64(len(createBucketConfigBytes))
        // Set content-md5.
        req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
        if c.signature.isV4() {
            req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBuffer.Bytes())))
            // Set sha256.
            req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
        }
    }

@@ -150,57 +137,122 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
    return req, nil
}

// SetBucketACL set the permissions on an existing bucket using access control lists (ACL).
// SetBucketPolicy set the access permissions on an existing bucket.
//
// For example
//
//  private - owner gets full access [default].
//  public-read - owner gets full access, all others get read access.
//  public-read-write - owner gets full access, all others get full access too.
//  authenticated-read - owner gets full access, authenticated users get read access.
func (c Client) SetBucketACL(bucketName string, acl BucketACL) error {
//  none - owner gets full access [default].
//  readonly - anonymous get access for everyone at a given object prefix.
//  readwrite - anonymous list/put/delete access to a given object prefix.
//  writeonly - anonymous put/delete access to a given object prefix.
func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy BucketPolicy) error {
    // Input validation.
    if err := isValidBucketName(bucketName); err != nil {
        return err
    }
    if !acl.isValidBucketACL() {
        return ErrInvalidArgument("Unrecognized ACL " + acl.String())
    if err := isValidObjectPrefix(objectPrefix); err != nil {
        return err
    }
    if !bucketPolicy.isValidBucketPolicy() {
        return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
    }
    policy, err := c.getBucketPolicy(bucketName, objectPrefix)
    if err != nil {
        return err
    }
    // For bucket policy set to 'none' we need to remove the policy.
    if bucketPolicy == BucketPolicyNone && policy.Statements == nil {
        // No policies to set, return success.
        return nil
    }
    // Remove any previous policies at this path.
    policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)

    bucketResourceStatement := &Statement{}
    objectResourceStatement := &Statement{}
    if bucketPolicy == BucketPolicyReadWrite {
        // Read write policy.
        bucketResourceStatement.Effect = "Allow"
        bucketResourceStatement.Principal.AWS = []string{"*"}
        bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
        bucketResourceStatement.Actions = readWriteBucketActions
        objectResourceStatement.Effect = "Allow"
        objectResourceStatement.Principal.AWS = []string{"*"}
        objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
        objectResourceStatement.Actions = readWriteObjectActions
        // Save the read write policy.
        policy.Statements = append(policy.Statements, *bucketResourceStatement, *objectResourceStatement)
    } else if bucketPolicy == BucketPolicyReadOnly {
        // Read only policy.
        bucketResourceStatement.Effect = "Allow"
        bucketResourceStatement.Principal.AWS = []string{"*"}
        bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
        bucketResourceStatement.Actions = readOnlyBucketActions
        objectResourceStatement.Effect = "Allow"
        objectResourceStatement.Principal.AWS = []string{"*"}
        objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
        objectResourceStatement.Actions = readOnlyObjectActions
        // Save the read only policy.
        policy.Statements = append(policy.Statements, *bucketResourceStatement, *objectResourceStatement)
    } else if bucketPolicy == BucketPolicyWriteOnly {
        // Write only policy.
        bucketResourceStatement.Effect = "Allow"
        bucketResourceStatement.Principal.AWS = []string{"*"}
        bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
        bucketResourceStatement.Actions = writeOnlyBucketActions
        objectResourceStatement.Effect = "Allow"
        objectResourceStatement.Principal.AWS = []string{"*"}
        objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
        objectResourceStatement.Actions = writeOnlyObjectActions
        // Save the write only policy.
        policy.Statements = append(policy.Statements, *bucketResourceStatement, *objectResourceStatement)
    }
    // Save the updated policies.
    return c.putBucketPolicy(bucketName, policy)
}

// Saves a new bucket policy.
func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) error {
    // Input validation.
    if err := isValidBucketName(bucketName); err != nil {
        return err
    }

    // Set acl query.
    // If there are no policy statements, we should remove entire policy.
    if len(policy.Statements) == 0 {
        return c.removeBucketPolicy(bucketName)
    }

    // Get resources properly escaped and lined up before
    // using them in http request.
    urlValues := make(url.Values)
    urlValues.Set("acl", "")
    urlValues.Set("policy", "")

    // Add misc headers.
    customHeader := make(http.Header)

    if acl != "" {
        customHeader.Set("x-amz-acl", acl.String())
    } else {
        customHeader.Set("x-amz-acl", "private")
    policyBytes, err := json.Marshal(&policy)
    if err != nil {
        return err
    }

    // Execute PUT bucket.
    resp, err := c.executeMethod("PUT", requestMetadata{
        bucketName:   bucketName,
        queryValues:  urlValues,
        customHeader: customHeader,
    })
    policyBuffer := bytes.NewReader(policyBytes)
    reqMetadata := requestMetadata{
        bucketName:         bucketName,
        queryValues:        urlValues,
        contentBody:        policyBuffer,
        contentLength:      int64(len(policyBytes)),
        contentMD5Bytes:    sumMD5(policyBytes),
        contentSHA256Bytes: sum256(policyBytes),
    }

    // Execute PUT to upload a new bucket policy.
    resp, err := c.executeMethod("PUT", reqMetadata)
    defer closeResponse(resp)
    if err != nil {
        return err
    }
    if err != nil {
        return err
    }

    if resp != nil {
        // if error return.
        if resp.StatusCode != http.StatusOK {
        if resp.StatusCode != http.StatusNoContent {
            return httpRespToErrorResponse(resp, bucketName, "")
        }
    }

    // return
    return nil
}
139 vendor/github.com/minio/minio-go/api-put-object-common.go generated vendored
@@ -19,6 +19,7 @@ package minio
import (
    "crypto/md5"
    "crypto/sha256"
    "fmt"
    "hash"
    "io"
    "math"
@@ -94,62 +95,13 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
    return totalPartsCount, partSize, lastPartSize, nil
}

// Compatibility code for Golang < 1.5.x.
// copyBuffer is identical to io.CopyBuffer, since such a function is
// not available/implemented in Golang version < 1.5.x, we use a
// custom call exactly implementng io.CopyBuffer from Golang > 1.5.x
// version does.
// hashCopyBuffer is identical to hashCopyN except that it doesn't take
// any size argument but takes a buffer argument and reader should be
// of io.ReaderAt interface.
//
// copyBuffer stages through the provided buffer (if one is required)
// rather than allocating a temporary one. If buf is nil, one is
// allocated; otherwise if it has zero length, copyBuffer panics.
//
// FIXME: Remove this code when distributions move to newer Golang versions.
func copyBuffer(writer io.Writer, reader io.Reader, buf []byte) (written int64, err error) {
    // If the reader has a WriteTo method, use it to do the copy.
    // Avoids an allocation and a copy.
    if wt, ok := reader.(io.WriterTo); ok {
        return wt.WriteTo(writer)
    }
    // Similarly, if the writer has a ReadFrom method, use it to do
    // the copy.
    if rt, ok := writer.(io.ReaderFrom); ok {
        return rt.ReadFrom(reader)
    }
    if buf == nil {
        buf = make([]byte, 32*1024)
    }
    for {
        nr, er := reader.Read(buf)
        if nr > 0 {
            nw, ew := writer.Write(buf[0:nr])
            if nw > 0 {
                written += int64(nw)
            }
            if ew != nil {
                err = ew
                break
            }
            if nr != nw {
                err = io.ErrShortWrite
                break
            }
        }
        if er == io.EOF {
            break
        }
        if er != nil {
            err = er
            break
        }
    }
    return written, err
}

// hashCopyBuffer is identical to hashCopyN except that it stages
// through the provided buffer (if one is required) rather than
// allocating a temporary one. If buf is nil, one is allocated for 5MiB.
func (c Client) hashCopyBuffer(writer io.Writer, reader io.Reader, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
// Stages reads from offsets into the buffer, if buffer is nil it is
// initialized to optimalBufferSize.
func (c Client) hashCopyBuffer(writer io.Writer, reader io.ReaderAt, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
    // MD5 and SHA256 hasher.
    var hashMD5, hashSHA256 hash.Hash
    // MD5 and SHA256 hasher.
@@ -160,14 +112,61 @@ func (c Client) hashCopyBuffer(writer io.Writer, reader io.Reader, buf []byte) (
        hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
    }

    // Allocate buf if not initialized.
    // Buffer is nil, initialize.
    if buf == nil {
        buf = make([]byte, optimalReadBufferSize)
    }

    // Offset to start reading from.
    var readAtOffset int64

    // Following block reads data at an offset from the input
    // reader and copies data to into local temporary file.
    for {
        readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
        if rerr != nil {
            if rerr != io.EOF {
                return nil, nil, 0, rerr
            }
        }
        writeSize, werr := hashWriter.Write(buf[:readAtSize])
        if werr != nil {
            return nil, nil, 0, werr
        }
        if readAtSize != writeSize {
            return nil, nil, 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
        }
        readAtOffset += int64(writeSize)
        size += int64(writeSize)
        if rerr == io.EOF {
            break
        }
    }

    // Finalize md5 sum and sha256 sum.
    md5Sum = hashMD5.Sum(nil)
    if c.signature.isV4() {
        sha256Sum = hashSHA256.Sum(nil)
    }
    return md5Sum, sha256Sum, size, err
}

// hashCopy is identical to hashCopyN except that it doesn't take
// any size argument.
func (c Client) hashCopy(writer io.Writer, reader io.Reader) (md5Sum, sha256Sum []byte, size int64, err error) {
    // MD5 and SHA256 hasher.
    var hashMD5, hashSHA256 hash.Hash
    // MD5 and SHA256 hasher.
    hashMD5 = md5.New()
    hashWriter := io.MultiWriter(writer, hashMD5)
    if c.signature.isV4() {
        hashSHA256 = sha256.New()
        hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
    }

    // Using copyBuffer to copy in large buffers, default buffer
    // for io.Copy of 32KiB is too small.
    size, err = copyBuffer(hashWriter, reader, buf)
    size, err = io.Copy(hashWriter, reader)
    if err != nil {
        return nil, nil, 0, err
    }
@@ -244,12 +243,8 @@ func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadI
    return uploadID, isNew, nil
}

// computeHashBuffer - Calculates MD5 and SHA256 for an input read
// Seeker is identical to computeHash except that it stages
// through the provided buffer (if one is required) rather than
// allocating a temporary one. If buf is nil, it uses a temporary
// buffer.
func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
// computeHash - Calculates MD5 and SHA256 for an input read Seeker.
func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
    // MD5 and SHA256 hasher.
    var hashMD5, hashSHA256 hash.Hash
    // MD5 and SHA256 hasher.
@@ -261,16 +256,9 @@ func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha
    }

    // If no buffer is provided, no need to allocate just use io.Copy.
    if buf == nil {
        size, err = io.Copy(hashWriter, reader)
        if err != nil {
            return nil, nil, 0, err
        }
    } else {
        size, err = copyBuffer(hashWriter, reader, buf)
        if err != nil {
            return nil, nil, 0, err
        }
    size, err = io.Copy(hashWriter, reader)
    if err != nil {
        return nil, nil, 0, err
    }

    // Seek back reader to the beginning location.
@@ -285,8 +273,3 @@ func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha
    }
    return md5Sum, sha256Sum, size, nil
}

// computeHash - Calculates MD5 and SHA256 for an input read Seeker.
func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
    return c.computeHashBuffer(reader, nil)
}
4 vendor/github.com/minio/minio-go/api-put-object-readat.go generated vendored
@@ -97,7 +97,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
    tmpBuffer := new(bytes.Buffer)

    // Read defaults to reading at 5MiB buffer.
    readBuffer := make([]byte, optimalReadBufferSize)
    readAtBuffer := make([]byte, optimalReadBufferSize)

    // Upload all the missing parts.
    for partNumber <= lastPartNumber {
@@ -147,7 +147,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
        // Calculates MD5 and SHA256 sum for a section reader.
        var md5Sum, sha256Sum []byte
        var prtSize int64
        md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readBuffer)
        md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readAtBuffer)
        if err != nil {
            return 0, err
        }
48 vendor/github.com/minio/minio-go/api-remove.go generated vendored
@@ -50,6 +50,54 @@ func (c Client) RemoveBucket(bucketName string) error {
    return nil
}

// RemoveBucketPolicy remove a bucket policy on given path.
func (c Client) RemoveBucketPolicy(bucketName, objectPrefix string) error {
    // Input validation.
    if err := isValidBucketName(bucketName); err != nil {
        return err
    }
    if err := isValidObjectPrefix(objectPrefix); err != nil {
        return err
    }
    policy, err := c.getBucketPolicy(bucketName, objectPrefix)
    if err != nil {
        return err
    }
    // No bucket policy found, nothing to remove return success.
    if policy.Statements == nil {
        return nil
    }

    // Save new statements after removing requested bucket policy.
    policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)

    // Commit the update policy.
    return c.putBucketPolicy(bucketName, policy)
}

// Removes all policies on a bucket.
func (c Client) removeBucketPolicy(bucketName string) error {
    // Input validation.
    if err := isValidBucketName(bucketName); err != nil {
        return err
    }
    // Get resources properly escaped and lined up before
    // using them in http request.
    urlValues := make(url.Values)
    urlValues.Set("policy", "")

    // Execute DELETE on objectName.
    resp, err := c.executeMethod("DELETE", requestMetadata{
        bucketName:  bucketName,
        queryValues: urlValues,
    })
    defer closeResponse(resp)
    if err != nil {
        return err
    }
    return nil
}

// RemoveObject remove an object from a bucket.
func (c Client) RemoveObject(bucketName, objectName string) error {
    // Input validation.
@@ -171,27 +171,3 @@ type createBucketConfiguration struct {
    XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
    Location string   `xml:"LocationConstraint"`
}

// grant container for the grantee and his or her permissions.
type grant struct {
    // grantee container for DisplayName and ID of the person being
    // granted permissions.
    Grantee struct {
        ID           string
        DisplayName  string
        EmailAddress string
        Type         string
        URI          string
    }
    Permission string
}

// accessControlPolicy contains the elements providing ACL permissions
// for a bucket.
type accessControlPolicy struct {
    // accessControlList container for ACL information.
    AccessControlList struct {
        Grant []grant
    }
    Owner owner
}
57 vendor/github.com/minio/minio-go/api.go generated vendored
@@ -23,6 +23,7 @@ import (
    "fmt"
    "io"
    "io/ioutil"
    "math/rand"
    "net/http"
    "net/http/httputil"
    "net/url"
@@ -30,6 +31,7 @@ import (
    "regexp"
    "runtime"
    "strings"
    "sync"
    "time"
)

@@ -57,9 +59,12 @@ type Client struct {
    httpClient     *http.Client
    bucketLocCache *bucketLocationCache

    // Advanced functionality
    // Advanced functionality.
    isTraceEnabled bool
    traceOutput    io.Writer

    // Random seed.
    random *rand.Rand
}

// Global constants.
@@ -120,6 +125,29 @@ func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*
    return clnt, nil
}

// lockedRandSource provides protected rand source, implements rand.Source interface.
type lockedRandSource struct {
    lk  sync.Mutex
    src rand.Source
}

// Int63 returns a non-negative pseudo-random 63-bit integer as an
// int64.
func (r *lockedRandSource) Int63() (n int64) {
    r.lk.Lock()
    n = r.src.Int63()
    r.lk.Unlock()
    return
}

// Seed uses the provided seed value to initialize the generator to a
// deterministic state.
func (r *lockedRandSource) Seed(seed int64) {
    r.lk.Lock()
    r.src.Seed(seed)
    r.lk.Unlock()
}

func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
    // construct endpoint.
    endpointURL, err := getEndpointURL(endpoint, insecure)
@@ -147,8 +175,12 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*
        Transport: http.DefaultTransport,
    }

    // Instantiae bucket location cache.
    clnt.bucketLocCache = newBucketLocationCache()

    // Introduce a new locked random seed.
    clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})

    // Return.
    return clnt, nil
}
@@ -384,7 +416,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
    // error until maxRetries have been exhausted, retry attempts are
    // performed after waiting for a given period of time in a
    // binomial fashion.
    for range newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter) {
    for range c.newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter) {
        if isRetryable {
            // Seek back to beginning for each attempt.
            if _, err = bodySeeker.Seek(0, 0); err != nil {
@@ -422,6 +454,15 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
            }
        }

        // Read the body to be saved later.
        errBodyBytes, err := ioutil.ReadAll(res.Body)
        if err != nil {
            return nil, err
        }
        // Save the body.
        errBodySeeker := bytes.NewReader(errBodyBytes)
        res.Body = ioutil.NopCloser(errBodySeeker)

        // For errors verify if its retryable otherwise fail quickly.
        errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
        // Bucket region if set in error response, we can retry the
@@ -435,6 +476,16 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
        if isS3CodeRetryable(errResponse.Code) {
            continue // Retry.
        }

        // Verify if http status code is retryable.
        if isHTTPStatusRetryable(res.StatusCode) {
            continue // Retry.
        }

        // Save the body back again.
        errBodySeeker.Seek(0, 0) // Seek back to starting point.
        res.Body = ioutil.NopCloser(errBodySeeker)

        // For all other cases break out of the retry loop.
        break
    }
@@ -522,7 +573,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R

    // set md5Sum for content protection.
    if metadata.contentMD5Bytes != nil {
        req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
        req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
    }

    // Sign the request for all authenticated requests.
75 vendor/github.com/minio/minio-go/bucket-acl.go generated vendored
@@ -1,75 +0,0 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

// BucketACL - Bucket level access control.
type BucketACL string

// Different types of ACL's currently supported for buckets.
const (
    bucketPrivate       = BucketACL("private")
    bucketReadOnly      = BucketACL("public-read")
    bucketPublic        = BucketACL("public-read-write")
    bucketAuthenticated = BucketACL("authenticated-read")
)

// Stringify acl.
func (b BucketACL) String() string {
    if string(b) == "" {
        return "private"
    }
    return string(b)
}

// isValidBucketACL - Is provided acl string supported.
func (b BucketACL) isValidBucketACL() bool {
    switch true {
    case b.isPrivate():
        fallthrough
    case b.isReadOnly():
        fallthrough
    case b.isPublic():
        fallthrough
    case b.isAuthenticated():
        return true
    case b.String() == "private":
        // By default its "private"
        return true
    default:
        return false
    }
}

// isPrivate - Is acl Private.
func (b BucketACL) isPrivate() bool {
    return b == bucketPrivate
}

// isPublicRead - Is acl PublicRead.
func (b BucketACL) isReadOnly() bool {
    return b == bucketReadOnly
}

// isPublicReadWrite - Is acl PublicReadWrite.
func (b BucketACL) isPublic() bool {
    return b == bucketPublic
}

// isAuthenticated - Is acl AuthenticatedRead.
func (b BucketACL) isAuthenticated() bool {
    return b == bucketAuthenticated
}
19 vendor/github.com/minio/minio-go/bucket-cache.go generated vendored
@@ -21,6 +21,7 @@ import (
    "net/http"
    "net/url"
    "path"
    "strings"
    "sync"
)

@@ -67,11 +68,6 @@ func (r *bucketLocationCache) Delete(bucketName string) {

// getBucketLocation - Get location for the bucketName from location map cache.
func (c Client) getBucketLocation(bucketName string) (string, error) {
    // For anonymous requests, default to "us-east-1" and let other calls
    // move forward.
    if c.anonymous {
        return "us-east-1", nil
    }
    if location, ok := c.bucketLocCache.Get(bucketName); ok {
        return location, nil
    }
@@ -92,15 +88,10 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
        if resp.StatusCode != http.StatusOK {
            err = httpRespToErrorResponse(resp, bucketName, "")
            errResp := ToErrorResponse(err)
            // AccessDenied without a signature mismatch code,
            // usually means that the bucket policy has certain
            // restrictions where some API operations are not
            // allowed. Handle this case so that top level callers can
            // interpret this easily and fall back if needed to a
            // lower functionality call. Read each individual API
            // specific code for such fallbacks.
            if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied" {
                // In this case return as "us-east-1" and let the call fail.
            // For access denied error, it could be an anonymous
            // request. Move forward and let the top level callers
            // succeed if possible based on their policy.
            if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
                return "us-east-1", nil
            }
            return "", err
392
vendor/github.com/minio/minio-go/bucket-policy.go
generated
vendored
Normal file
392
vendor/github.com/minio/minio-go/bucket-policy.go
generated
vendored
Normal file
@@ -0,0 +1,392 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"encoding/json"
	"sort"
)

// maximum supported access policy size.
const maxAccessPolicySize = 20 * 1024 * 1024 // 20KiB.

// Resource prefix for all aws resources.
const awsResourcePrefix = "arn:aws:s3:::"

// BucketPolicy - Bucket level policy.
type BucketPolicy string

// Different types of Policies currently supported for buckets.
const (
	BucketPolicyNone      BucketPolicy = "none"
	BucketPolicyReadOnly               = "readonly"
	BucketPolicyReadWrite              = "readwrite"
	BucketPolicyWriteOnly              = "writeonly"
)

// isValidBucketPolicy - Is provided policy value supported.
func (p BucketPolicy) isValidBucketPolicy() bool {
	switch p {
	case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
		return true
	}
	return false
}

// User - canonical users list.
type User struct {
	AWS []string
}

// Statement - minio policy statement
type Statement struct {
	Sid        string
	Effect     string
	Principal  User                         `json:"Principal"`
	Actions    []string                     `json:"Action"`
	Resources  []string                     `json:"Resource"`
	Conditions map[string]map[string]string `json:"Condition,omitempty"`
}

// BucketAccessPolicy - minio policy collection
type BucketAccessPolicy struct {
	Version    string      // date in 0000-00-00 format
	Statements []Statement `json:"Statement"`
}

// Read write actions.
var (
	readWriteBucketActions = []string{
		"s3:GetBucketLocation",
		"s3:ListBucket",
		"s3:ListBucketMultipartUploads",
		// Add more bucket level read-write actions here.
	}
	readWriteObjectActions = []string{
		"s3:AbortMultipartUpload",
		"s3:DeleteObject",
		"s3:GetObject",
		"s3:ListMultipartUploadParts",
		"s3:PutObject",
		// Add more object level read-write actions here.
	}
)

// Write only actions.
var (
	writeOnlyBucketActions = []string{
		"s3:GetBucketLocation",
		"s3:ListBucketMultipartUploads",
		// Add more bucket level write actions here.
	}
	writeOnlyObjectActions = []string{
		"s3:AbortMultipartUpload",
		"s3:DeleteObject",
		"s3:ListMultipartUploadParts",
		"s3:PutObject",
		// Add more object level write actions here.
	}
)

// Read only actions.
var (
	readOnlyBucketActions = []string{
		"s3:GetBucketLocation",
		"s3:ListBucket",
		// Add more bucket level read actions here.
	}
	readOnlyObjectActions = []string{
		"s3:GetObject",
		// Add more object level read actions here.
	}
)

// subsetActions returns true if the first array is completely
// contained in the second array. There must be at least
// the same number of duplicate values in second as there
// are in first.
func subsetActions(first, second []string) bool {
	set := make(map[string]int)
	for _, value := range second {
		set[value]++
	}
	for _, value := range first {
		if count, found := set[value]; !found {
			return false
		} else if count < 1 {
			return false
		} else {
			set[value] = count - 1
		}
	}
	return true
}
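subsetActions is a multiset-containment test: every action in first must occur in second at least as often, so duplicates in first need matching duplicates in second. A condensed, behavior-equivalent copy for a runnable demo (the action lists below are examples, not fixed API values):

~~~go
package main

import "fmt"

// subsetActions, condensed from the diff: count occurrences in second,
// then consume one per element of first.
func subsetActions(first, second []string) bool {
	set := make(map[string]int)
	for _, v := range second {
		set[v]++
	}
	for _, v := range first {
		if set[v] < 1 {
			return false // missing, or duplicates in first exceed second
		}
		set[v]--
	}
	return true
}

func main() {
	required := []string{"s3:GetBucketLocation", "s3:ListBucket"}
	granted := []string{"s3:ListBucket", "s3:GetBucketLocation", "s3:PutObject"}
	fmt.Println(subsetActions(required, granted)) // true: extra grants are fine
	fmt.Println(subsetActions(granted, required)) // false: s3:PutObject not granted
}
~~~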

// Verifies if we have read/write policy set at bucketName, objectPrefix.
func isBucketPolicyReadWrite(statements []Statement, bucketName string, objectPrefix string) bool {
	var commonActions, readWrite bool
	sort.Strings(readWriteBucketActions)
	sort.Strings(readWriteObjectActions)
	for _, statement := range statements {
		for _, resource := range statement.Resources {
			if resource == awsResourcePrefix+bucketName {
				if subsetActions(readWriteBucketActions, statement.Actions) {
					commonActions = true
					continue
				}
			} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
				if subsetActions(readWriteObjectActions, statement.Actions) {
					readWrite = true
				}
			}
		}
	}
	return commonActions && readWrite
}

// Verifies if we have write only policy set at bucketName, objectPrefix.
func isBucketPolicyWriteOnly(statements []Statement, bucketName string, objectPrefix string) bool {
	var commonActions, writeOnly bool
	sort.Strings(writeOnlyBucketActions)
	sort.Strings(writeOnlyObjectActions)
	for _, statement := range statements {
		for _, resource := range statement.Resources {
			if resource == awsResourcePrefix+bucketName {
				if subsetActions(writeOnlyBucketActions, statement.Actions) {
					commonActions = true
					continue
				}
			} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
				if subsetActions(writeOnlyObjectActions, statement.Actions) {
					writeOnly = true
				}
			}
		}
	}
	return commonActions && writeOnly
}

// Verifies if we have read only policy set at bucketName, objectPrefix.
func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPrefix string) bool {
	var commonActions, readOnly bool
	sort.Strings(readOnlyBucketActions)
	sort.Strings(readOnlyObjectActions)
	for _, statement := range statements {
		for _, resource := range statement.Resources {
			if resource == awsResourcePrefix+bucketName {
				if subsetActions(readOnlyBucketActions, statement.Actions) {
					commonActions = true
					continue
				}
			} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
				if subsetActions(readOnlyObjectActions, statement.Actions) {
					readOnly = true
					break
				}
			}
		}
	}
	return commonActions && readOnly
}
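All three verifiers match statement resources against exactly two ARN forms built from the bucket name and object prefix, and a policy qualifies only when both forms carry the corresponding action sets. A minimal sketch of those two forms (the bucket and prefix are made-up values):

~~~go
package main

import "fmt"

const awsResourcePrefix = "arn:aws:s3:::"

func main() {
	bucketName, objectPrefix := "photos", "2016/" // illustrative only
	// Statements granting bucket-level actions must name this resource:
	fmt.Println(awsResourcePrefix + bucketName)
	// Statements granting object-level actions must name this one:
	fmt.Println(awsResourcePrefix + bucketName + "/" + objectPrefix + "*")
}
~~~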

// Removes read write bucket policy if found.
func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName string, objectPrefix string) []Statement {
	var newStatements []Statement
	for _, statement := range statements {
		for _, resource := range statement.Resources {
			if resource == awsResourcePrefix+bucketName {
				var newActions []string
				for _, action := range statement.Actions {
					switch action {
					case "s3:GetBucketLocation", "s3:ListBucket", "s3:ListBucketMultipartUploads":
						continue
					}
					newActions = append(newActions, action)
				}
				statement.Actions = newActions
			} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
				var newActions []string
				for _, action := range statement.Actions {
					switch action {
					case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject", "s3:GetObject":
						continue
					}
					newActions = append(newActions, action)
				}
				statement.Actions = newActions
			}
		}
		if len(statement.Actions) != 0 {
			newStatements = append(newStatements, statement)
		}
	}
	return newStatements
}

// Removes write only bucket policy if found.
func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
	var newStatements []Statement
	for _, statement := range statements {
		for _, resource := range statement.Resources {
			if resource == awsResourcePrefix+bucketName {
				var newActions []string
				for _, action := range statement.Actions {
					switch action {
					case "s3:GetBucketLocation", "s3:ListBucketMultipartUploads":
						continue
					}
					newActions = append(newActions, action)
				}
				statement.Actions = newActions
			} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
				var newActions []string
				for _, action := range statement.Actions {
					switch action {
					case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject":
						continue
					}
					newActions = append(newActions, action)
				}
				statement.Actions = newActions
			}
		}
		if len(statement.Actions) != 0 {
			newStatements = append(newStatements, statement)
		}
	}
	return newStatements
}

// Removes read only bucket policy if found.
func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
	var newStatements []Statement
	for _, statement := range statements {
		for _, resource := range statement.Resources {
			if resource == awsResourcePrefix+bucketName {
				var newActions []string
				for _, action := range statement.Actions {
					switch action {
					case "s3:GetBucketLocation", "s3:ListBucket":
						continue
					}
					newActions = append(newActions, action)
				}
				statement.Actions = newActions
			} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
				var newActions []string
				for _, action := range statement.Actions {
					if action == "s3:GetObject" {
						continue
					}
					newActions = append(newActions, action)
				}
				statement.Actions = newActions
			}
		}
		if len(statement.Actions) != 0 {
			newStatements = append(newStatements, statement)
		}
	}
	return newStatements
}

// Remove bucket policies based on the type.
func removeBucketPolicyStatement(statements []Statement, bucketName string, objectPrefix string) []Statement {
	// Verify type of policy to be removed.
	if isBucketPolicyReadWrite(statements, bucketName, objectPrefix) {
		statements = removeBucketPolicyStatementReadWrite(statements, bucketName, objectPrefix)
	} else if isBucketPolicyWriteOnly(statements, bucketName, objectPrefix) {
		statements = removeBucketPolicyStatementWriteOnly(statements, bucketName, objectPrefix)
	} else if isBucketPolicyReadOnly(statements, bucketName, objectPrefix) {
		statements = removeBucketPolicyStatementReadOnly(statements, bucketName, objectPrefix)
	}
	return statements
}

// Unmarshals bucket policy byte array into a structured bucket access policy.
func unMarshalBucketPolicy(bucketPolicyBuf []byte) (BucketAccessPolicy, error) {
	// Untyped lazy JSON struct.
	type bucketAccessPolicyUntyped struct {
		Version   string
		Statement []struct {
			Sid       string
			Effect    string
			Principal struct {
				AWS json.RawMessage
			}
			Action    json.RawMessage
			Resource  json.RawMessage
			Condition map[string]map[string]string
		}
	}
	var policyUntyped = bucketAccessPolicyUntyped{}
	// Unmarshal incoming policy into an untyped structure, to be
	// evaluated lazily later.
	err := json.Unmarshal(bucketPolicyBuf, &policyUntyped)
	if err != nil {
		return BucketAccessPolicy{}, err
	}
	var policy = BucketAccessPolicy{}
	policy.Version = policyUntyped.Version
	for _, stmtUntyped := range policyUntyped.Statement {
		statement := Statement{}
		// These are properly typed messages.
		statement.Sid = stmtUntyped.Sid
		statement.Effect = stmtUntyped.Effect
		statement.Conditions = stmtUntyped.Condition

		// AWS user can have two different types, either as []string
		// and either as regular 'string'. We fall back to doing this
		// since there is no other easier way to fix this.
		err = json.Unmarshal(stmtUntyped.Principal.AWS, &statement.Principal.AWS)
		if err != nil {
			var awsUser string
			err = json.Unmarshal(stmtUntyped.Principal.AWS, &awsUser)
			if err != nil {
				return BucketAccessPolicy{}, err
			}
			statement.Principal.AWS = []string{awsUser}
		}
		// Actions can have two different types, either as []string
		// and either as regular 'string'. We fall back to doing this
		// since there is no other easier way to fix this.
		err = json.Unmarshal(stmtUntyped.Action, &statement.Actions)
		if err != nil {
			var action string
			err = json.Unmarshal(stmtUntyped.Action, &action)
			if err != nil {
				return BucketAccessPolicy{}, err
			}
			statement.Actions = []string{action}
		}
		// Resources can have two different types, either as []string
		// and either as regular 'string'. We fall back to doing this
		// since there is no other easier way to fix this.
		err = json.Unmarshal(stmtUntyped.Resource, &statement.Resources)
		if err != nil {
			var resource string
			err = json.Unmarshal(stmtUntyped.Resource, &resource)
			if err != nil {
				return BucketAccessPolicy{}, err
			}
			statement.Resources = []string{resource}
		}
		// Append the typed policy.
		policy.Statements = append(policy.Statements, statement)
	}
	return policy, nil
}
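The lazy unmarshalling above exists because Principal.AWS, Action, and Resource may each arrive as either a bare JSON string or an array of strings. A self-contained sketch of that fallback (the helper name toStringSlice is mine, not from the diff):

~~~go
package main

import (
	"encoding/json"
	"fmt"
)

// toStringSlice mimics the fallback in unMarshalBucketPolicy: a JSON field
// that may be a string or a []string is always decoded into a []string.
func toStringSlice(raw json.RawMessage) ([]string, error) {
	var many []string
	if err := json.Unmarshal(raw, &many); err == nil {
		return many, nil
	}
	var one string
	if err := json.Unmarshal(raw, &one); err != nil {
		return nil, err
	}
	return []string{one}, nil
}

func main() {
	for _, raw := range []string{`"s3:GetObject"`, `["s3:GetObject","s3:PutObject"]`} {
		out, err := toStringSlice(json.RawMessage(raw))
		fmt.Println(out, err)
	}
}
~~~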
6  vendor/github.com/minio/minio-go/request-signature-v2.go (generated, vendored)
@@ -124,7 +124,7 @@ func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
 // Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
 //
 // StringToSign = HTTP-Verb + "\n" +
-//	 Content-MD5 + "\n" +
+//	 Content-Md5 + "\n" +
 //	 Content-Type + "\n" +
 //	 Date + "\n" +
 //	 CanonicalizedProtocolHeaders +

@@ -172,7 +172,7 @@ func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
 // From the Amazon docs:
 //
 // StringToSign = HTTP-Verb + "\n" +
-//	 Content-MD5 + "\n" +
+//	 Content-Md5 + "\n" +
 //	 Content-Type + "\n" +
 //	 Date + "\n" +
 //	 CanonicalizedProtocolHeaders +

@@ -192,7 +192,7 @@ func getStringToSignV2(req http.Request) string {
 func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) {
 	buf.WriteString(req.Method)
 	buf.WriteByte('\n')
-	buf.WriteString(req.Header.Get("Content-MD5"))
+	buf.WriteString(req.Header.Get("Content-Md5"))
 	buf.WriteByte('\n')
 	buf.WriteString(req.Header.Get("Content-Type"))
 	buf.WriteByte('\n')
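Go's http.Header stores keys in canonical MIME form, which renders "Content-MD5" as "Content-Md5"; that is presumably what this rename tracks, and since Header.Get canonicalizes its argument too, lookups behave the same either way. A sketch of how writeDefaultHeaders assembles the first lines of the V2 StringToSign (request URL and header values are made up):

~~~go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("PUT", "http://localhost:9000/bucket/object", nil)
	req.Header.Set("Content-Md5", "1B2M2Y8AsgTpgAmY7PhCfg==") // illustrative value
	req.Header.Set("Content-Type", "application/octet-stream")

	// First lines of the V2 StringToSign, built the same way as above.
	var buf bytes.Buffer
	buf.WriteString(req.Method)
	buf.WriteByte('\n')
	buf.WriteString(req.Header.Get("Content-Md5"))
	buf.WriteByte('\n')
	buf.WriteString(req.Header.Get("Content-Type"))
	buf.WriteByte('\n')
	fmt.Printf("%q\n", buf.String())
}
~~~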
98  vendor/github.com/minio/minio-go/retry.go (generated, vendored)
@@ -17,8 +17,10 @@
 package minio

 import (
 	"math/rand"
 	"net"
+	"net/http"
 	"net/url"
 	"strings"
 	"time"
 )

@@ -33,19 +35,36 @@ const NoJitter = 0.0

 // newRetryTimer creates a timer with exponentially increasing delays
 // until the maximum retry attempts are reached.
-func newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int {
+func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int {
 	attemptCh := make(chan int)

+	// Seed random function with current unix nano time.
+	rand.Seed(time.Now().UTC().UnixNano())
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// normalize jitter to the range [0, 1.0]
+		if jitter < NoJitter {
+			jitter = NoJitter
+		}
+		if jitter > MaxJitter {
+			jitter = MaxJitter
+		}
+
+		//sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
 	go func() {
 		defer close(attemptCh)
 		for i := 0; i < maxRetry; i++ {
 			attemptCh <- i + 1 // Attempts start from 1.
 			// Grow the interval at an exponential rate,
 			// starting at unit and capping at cap
-			time.Sleep(exponentialBackoffWait(unit, i, cap, jitter))
+			time.Sleep(exponentialBackoffWait(i))
 		}
 	}()
 	return attemptCh

@@ -56,40 +75,47 @@ func isNetErrorRetryable(err error) bool {
 	switch err.(type) {
 	case *net.DNSError, *net.OpError, net.UnknownNetworkError:
 		return true
 	case *url.Error:
 		// For a URL error, where it replies back "connection closed"
 		// retry again.
 		if strings.Contains(err.Error(), "Connection closed by foreign host") {
 			return true
 		}
 	}
 	return false
 }

-// computes the exponential backoff duration according to
-// https://www.awsarchitectureblog.com/2015/03/backoff.html
-func exponentialBackoffWait(base time.Duration, attempt int, cap time.Duration, jitter float64) time.Duration {
-	// normalize jitter to the range [0, 1.0]
-	if jitter < NoJitter {
-		jitter = NoJitter
-	}
-	if jitter > MaxJitter {
-		jitter = MaxJitter
-	}
-	//sleep = random_between(0, min(cap, base * 2 ** attempt))
-	sleep := base * time.Duration(1<<uint(attempt))
-	if sleep > cap {
-		sleep = cap
-	}
-	if jitter != NoJitter {
-		sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
-	}
-	return sleep
-}
+// List of AWS S3 error codes which are retryable.
+var retryableS3Codes = map[string]struct{}{
+	"RequestError":          {},
+	"RequestTimeout":        {},
+	"Throttling":            {},
+	"ThrottlingException":   {},
+	"RequestLimitExceeded":  {},
+	"RequestThrottled":      {},
+	"InternalError":         {},
+	"ExpiredToken":          {},
+	"ExpiredTokenException": {},
+	// Add more AWS S3 codes here.
+}

 // isS3CodeRetryable - is s3 error code retryable.
-func isS3CodeRetryable(s3Code string) bool {
-	switch s3Code {
-	case "RequestError", "RequestTimeout", "Throttling", "ThrottlingException":
-		fallthrough
-	case "RequestLimitExceeded", "RequestThrottled", "InternalError":
-		fallthrough
-	case "ExpiredToken", "ExpiredTokenException":
-		return true
-	}
-	return false
+func isS3CodeRetryable(s3Code string) (ok bool) {
+	_, ok = retryableS3Codes[s3Code]
+	return ok
 }

+// List of HTTP status codes which are retryable.
+var retryableHTTPStatusCodes = map[int]struct{}{
+	429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+	http.StatusInternalServerError: {},
+	http.StatusBadGateway:          {},
+	http.StatusServiceUnavailable:  {},
+	// Add more HTTP status codes here.
+}
+
+// isHTTPStatusRetryable - is HTTP error code retryable.
+func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
+	_, ok = retryableHTTPStatusCodes[httpStatusCode]
+	return ok
+}
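The inlined closure computes capped exponential backoff with proportional jitter: sleep = min(cap, unit * 2^attempt), reduced by a random fraction of up to jitter. A standalone sketch of the same formula (the constants in main are illustrative):

~~~go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff applies the formula from the closure above: double the unit per
// attempt, clamp at cap, then subtract a random jitter fraction.
func backoff(unit, cap time.Duration, attempt int, jitter float64) time.Duration {
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > cap {
		sleep = cap
	}
	sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
	return sleep
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(i, backoff(100*time.Millisecond, 2*time.Second, i, 0.5))
	}
}
~~~

Jitter matters here because many clients retrying on the same schedule would otherwise hit the server in synchronized waves.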
8  vendor/github.com/minio/minio-go/utils.go (generated, vendored)
@@ -19,6 +19,7 @@ package minio
 import (
 	"bytes"
 	"crypto/hmac"
+	"crypto/md5"
 	"crypto/sha256"
 	"encoding/hex"
 	"encoding/xml"

@@ -47,6 +48,13 @@ func sum256(data []byte) []byte {
 	return hash.Sum(nil)
 }

+// sumMD5 calculate sumMD5 sum for an input byte array.
+func sumMD5(data []byte) []byte {
+	hash := md5.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
 // sumHMAC calculate hmac between two input byte array.
 func sumHMAC(key []byte, data []byte) []byte {
 	hash := hmac.New(sha256.New, key)
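The new sumMD5 helper mirrors sum256: hash the byte slice, return the digest. Equivalent one-offs with just the standard library (key and data are placeholders):

~~~go
package main

import (
	"crypto/hmac"
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	data := []byte("hello")
	// MD5 digest, as sumMD5 above computes it.
	m := md5.Sum(data)
	fmt.Println("md5: ", hex.EncodeToString(m[:]))
	// HMAC-SHA256, as sumHMAC above computes it.
	h := hmac.New(sha256.New, []byte("secret"))
	h.Write(data)
	fmt.Println("hmac:", hex.EncodeToString(h.Sum(nil)))
}
~~~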
25  vendor/gopkg.in/mgo.v2/LICENSE (generated, vendored)
@@ -1,25 +0,0 @@
-mgo - MongoDB driver for Go
-
-Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5  vendor/gopkg.in/mgo.v2/Makefile (generated, vendored)
@@ -1,5 +0,0 @@
-startdb:
-	@testdb/setup.sh start
-
-stopdb:
-	@testdb/setup.sh stop
4  vendor/gopkg.in/mgo.v2/README.md (generated, vendored)
@@ -1,4 +0,0 @@
-The MongoDB driver for Go
--------------------------
-
-Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.
467  vendor/gopkg.in/mgo.v2/auth.go (generated, vendored)
@@ -1,467 +0,0 @@
File diff suppressed because it is too large.
1180  vendor/gopkg.in/mgo.v2/auth_test.go (generated, vendored)
File diff suppressed because it is too large.
25  vendor/gopkg.in/mgo.v2/bson/LICENSE (generated, vendored)
@@ -1,25 +0,0 @@
-BSON library for Go
-
-Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
716
vendor/gopkg.in/mgo.v2/bson/bson.go
generated
vendored
716
vendor/gopkg.in/mgo.v2/bson/bson.go
generated
vendored
@ -1,716 +0,0 @@
|
||||
// BSON library for Go
|
||||
//
|
||||
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||
//
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||
// list of conditions and the following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Package bson is an implementation of the BSON specification for Go:
|
||||
//
|
||||
// http://bsonspec.org
|
||||
//
|
||||
// It was created as part of the mgo MongoDB driver for Go, but is standalone
|
||||
// and may be used on its own without the driver.
|
||||
package bson
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// The public API.
|
||||
|
||||
// A value implementing the bson.Getter interface will have its GetBSON
|
||||
// method called when the given value has to be marshalled, and the result
|
||||
// of this method will be marshaled in place of the actual object.
|
||||
//
|
||||
// If GetBSON returns return a non-nil error, the marshalling procedure
|
||||
// will stop and error out with the provided value.
|
||||
type Getter interface {
|
||||
GetBSON() (interface{}, error)
|
||||
}
|
||||
|
||||
// A value implementing the bson.Setter interface will receive the BSON
|
||||
// value via the SetBSON method during unmarshaling, and the object
|
||||
// itself will not be changed as usual.
|
||||
//
|
||||
// If setting the value works, the method should return nil or alternatively
|
||||
// bson.SetZero to set the respective field to its zero value (nil for
|
||||
// pointer types). If SetBSON returns a value of type bson.TypeError, the
|
||||
// BSON value will be omitted from a map or slice being decoded and the
|
||||
// unmarshalling will continue. If it returns any other non-nil error, the
|
||||
// unmarshalling procedure will stop and error out with the provided value.
|
||||
//
|
||||
// This interface is generally useful in pointer receivers, since the method
|
||||
// will want to change the receiver. A type field that implements the Setter
|
||||
// interface doesn't have to be a pointer, though.
|
||||
//
|
||||
// Unlike the usual behavior, unmarshalling onto a value that implements a
|
||||
// Setter interface will NOT reset the value to its zero state. This allows
|
||||
// the value to decide by itself how to be unmarshalled.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// type MyString string
|
||||
//
|
||||
// func (s *MyString) SetBSON(raw bson.Raw) error {
|
||||
// return raw.Unmarshal(s)
|
||||
// }
|
||||
//
|
||||
type Setter interface {
|
||||
SetBSON(raw Raw) error
|
||||
}
|
||||
|
||||
// SetZero may be returned from a SetBSON method to have the value set to
|
||||
// its respective zero value. When used in pointer values, this will set the
|
||||
// field to nil rather than to the pre-allocated value.
|
||||
var SetZero = errors.New("set to zero")
|
||||
|
||||
// M is a convenient alias for a map[string]interface{} map, useful for
|
||||
// dealing with BSON in a native way. For instance:
|
||||
//
|
||||
// bson.M{"a": 1, "b": true}
|
||||
//
|
||||
// There's no special handling for this type in addition to what's done anyway
|
||||
// for an equivalent map type. Elements in the map will be dumped in an
|
||||
// undefined ordered. See also the bson.D type for an ordered alternative.
|
||||
type M map[string]interface{}
|
||||
|
||||
// D represents a BSON document containing ordered elements. For example:
|
||||
//
|
||||
// bson.D{{"a", 1}, {"b", true}}
|
||||
//
|
||||
// In some situations, such as when creating indexes for MongoDB, the order in
|
||||
// which the elements are defined is important. If the order is not important,
|
||||
// using a map is generally more comfortable. See bson.M and bson.RawD.
|
||||
type D []DocElem
|
||||
|
||||
// DocElem is an element of the bson.D document representation.
|
||||
type DocElem struct {
|
||||
Name string
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// Map returns a map out of the ordered element name/value pairs in d.
|
||||
func (d D) Map() (m M) {
|
||||
m = make(M, len(d))
|
||||
for _, item := range d {
|
||||
m[item.Name] = item.Value
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// The Raw type represents raw unprocessed BSON documents and elements.
|
||||
// Kind is the kind of element as defined per the BSON specification, and
|
||||
// Data is the raw unprocessed data for the respective element.
|
||||
// Using this type it is possible to unmarshal or marshal values partially.
|
||||
//
|
||||
// Relevant documentation:
|
||||
//
|
||||
// http://bsonspec.org/#/specification
|
||||
//
|
||||
type Raw struct {
|
||||
Kind byte
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// RawD represents a BSON document containing raw unprocessed elements.
|
||||
// This low-level representation may be useful when lazily processing
|
||||
// documents of uncertain content, or when manipulating the raw content
|
||||
// documents in general.
|
||||
type RawD []RawDocElem
|
||||
|
||||
// See the RawD type.
|
||||
type RawDocElem struct {
|
||||
Name string
|
||||
Value Raw
|
||||
}
|
||||
|
||||
// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
|
||||
// long. MongoDB objects by default have such a property set in their "_id"
|
||||
// property.
|
||||
//
|
||||
// http://www.mongodb.org/display/DOCS/Object+IDs
|
||||
type ObjectId string
|
||||
|
||||
// ObjectIdHex returns an ObjectId from the provided hex representation.
|
||||
// Calling this function with an invalid hex representation will
|
||||
// cause a runtime panic. See the IsObjectIdHex function.
|
||||
func ObjectIdHex(s string) ObjectId {
|
||||
d, err := hex.DecodeString(s)
|
||||
if err != nil || len(d) != 12 {
|
||||
panic(fmt.Sprintf("Invalid input to ObjectIdHex: %q", s))
|
||||
}
|
||||
return ObjectId(d)
|
||||
}
|
||||
|
||||
// IsObjectIdHex returns whether s is a valid hex representation of
|
||||
// an ObjectId. See the ObjectIdHex function.
|
||||
func IsObjectIdHex(s string) bool {
|
||||
if len(s) != 24 {
|
||||
return false
|
||||
}
|
||||
_, err := hex.DecodeString(s)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// objectIdCounter is atomically incremented when generating a new ObjectId
|
||||
// using NewObjectId() function. It's used as a counter part of an id.
|
||||
var objectIdCounter uint32 = readRandomUint32()
|
||||
|
||||
// readRandomUint32 returns a random objectIdCounter.
|
||||
func readRandomUint32() uint32 {
|
||||
var b [4]byte
|
||||
_, err := io.ReadFull(rand.Reader, b[:])
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot read random object id: %v", err))
|
||||
}
|
||||
return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
|
||||
}
|
||||
|
||||
|
||||
// machineId stores machine id generated once and used in subsequent calls
|
||||
// to NewObjectId function.
|
||||
var machineId = readMachineId()
|
||||
|
||||
// readMachineId generates and returns a machine id.
|
||||
// If this function fails to get the hostname it will cause a runtime error.
|
||||
func readMachineId() []byte {
|
||||
var sum [3]byte
|
||||
id := sum[:]
|
||||
hostname, err1 := os.Hostname()
|
||||
if err1 != nil {
|
||||
_, err2 := io.ReadFull(rand.Reader, id)
|
||||
if err2 != nil {
|
||||
panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
|
||||
}
|
||||
return id
|
||||
}
|
||||
hw := md5.New()
|
||||
hw.Write([]byte(hostname))
|
||||
copy(id, hw.Sum(nil))
|
||||
return id
|
||||
}
|
||||
|
||||
// NewObjectId returns a new unique ObjectId.
|
||||
func NewObjectId() ObjectId {
|
||||
var b [12]byte
|
||||
// Timestamp, 4 bytes, big endian
|
||||
binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
|
||||
// Machine, first 3 bytes of md5(hostname)
|
||||
b[4] = machineId[0]
|
||||
b[5] = machineId[1]
|
||||
b[6] = machineId[2]
|
||||
// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
|
||||
pid := os.Getpid()
|
||||
b[7] = byte(pid >> 8)
|
||||
b[8] = byte(pid)
|
||||
// Increment, 3 bytes, big endian
|
||||
i := atomic.AddUint32(&objectIdCounter, 1)
|
||||
b[9] = byte(i >> 16)
|
||||
b[10] = byte(i >> 8)
|
||||
b[11] = byte(i)
|
||||
return ObjectId(b[:])
|
||||
}
|
||||
|
||||
// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
|
||||
// with the provided number of seconds from epoch UTC, and all other parts
|
||||
// filled with zeroes. It's not safe to insert a document with an id generated
|
||||
// by this method, it is useful only for queries to find documents with ids
|
||||
// generated before or after the specified timestamp.
|
||||
func NewObjectIdWithTime(t time.Time) ObjectId {
|
||||
var b [12]byte
|
||||
binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
|
||||
return ObjectId(string(b[:]))
|
||||
}
|
||||
|
||||
// String returns a hex string representation of the id.
|
||||
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
|
||||
func (id ObjectId) String() string {
|
||||
return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
|
||||
}
|
||||
|
||||
// Hex returns a hex representation of the ObjectId.
|
||||
func (id ObjectId) Hex() string {
|
||||
return hex.EncodeToString([]byte(id))
|
||||
}
|
||||
|
||||
// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
|
||||
func (id ObjectId) MarshalJSON() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
|
||||
}
|
||||
|
||||
var nullBytes = []byte("null")
|
||||
|
||||
// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
|
||||
func (id *ObjectId) UnmarshalJSON(data []byte) error {
|
||||
if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
|
||||
*id = ""
|
||||
return nil
|
||||
}
|
||||
if len(data) != 26 || data[0] != '"' || data[25] != '"' {
|
||||
return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s", string(data)))
|
||||
}
|
||||
var buf [12]byte
|
||||
_, err := hex.Decode(buf[:], data[1:25])
|
||||
if err != nil {
|
||||
return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s (%s)", string(data), err))
|
||||
}
|
||||
*id = ObjectId(string(buf[:]))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
|
||||
func (id ObjectId) Valid() bool {
|
||||
return len(id) == 12
|
||||
}
|
||||
|
||||
// byteSlice returns byte slice of id from start to end.
|
||||
// Calling this function with an invalid id will cause a runtime panic.
|
||||
func (id ObjectId) byteSlice(start, end int) []byte {
|
||||
if len(id) != 12 {
|
||||
panic(fmt.Sprintf("Invalid ObjectId: %q", string(id)))
|
||||
}
|
||||
return []byte(string(id)[start:end])
|
||||
}
|
||||
|
||||
// Time returns the timestamp part of the id.
|
||||
// It's a runtime error to call this method with an invalid id.
|
||||
func (id ObjectId) Time() time.Time {
|
||||
// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
|
||||
secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
|
||||
return time.Unix(secs, 0)
|
||||
}
|
||||
|
||||
// Machine returns the 3-byte machine id part of the id.
|
||||
// It's a runtime error to call this method with an invalid id.
|
||||
func (id ObjectId) Machine() []byte {
|
||||
return id.byteSlice(4, 7)
|
||||
}
|
||||
|
||||
// Pid returns the process id part of the id.
|
||||
// It's a runtime error to call this method with an invalid id.
|
||||
func (id ObjectId) Pid() uint16 {
|
||||
return binary.BigEndian.Uint16(id.byteSlice(7, 9))
|
||||
}
|
||||
|
||||
// Counter returns the incrementing value part of the id.
|
||||
// It's a runtime error to call this method with an invalid id.
|
||||
func (id ObjectId) Counter() int32 {
|
||||
b := id.byteSlice(9, 12)
|
||||
// Counter is stored as big-endian 3-byte value
|
||||
return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
|
||||
}
|
||||
|
||||
// The Symbol type is similar to a string and is used in languages with a
|
||||
// distinct symbol type.
|
||||
type Symbol string
|
||||
|
||||
// Now returns the current time with millisecond precision. MongoDB stores
|
||||
// timestamps with the same precision, so a Time returned from this method
|
||||
// will not change after a roundtrip to the database. That's the only reason
|
||||
// why this function exists. Using the time.Now function also works fine
|
||||
// otherwise.
|
||||
func Now() time.Time {
|
||||
return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
|
||||
}
|
||||
|
||||
// MongoTimestamp is a special internal type used by MongoDB that for some
|
||||
// strange reason has its own datatype defined in BSON.
|
||||
type MongoTimestamp int64
|
||||
|
||||
type orderKey int64
|
||||
|
||||
// MaxKey is a special value that compares higher than all other possible BSON
|
||||
// values in a MongoDB database.
|
||||
var MaxKey = orderKey(1<<63 - 1)
|
||||
|
||||
// MinKey is a special value that compares lower than all other possible BSON
|
||||
// values in a MongoDB database.
|
||||
var MinKey = orderKey(-1 << 63)
|
||||
|
||||
type undefined struct{}
|
||||
|
||||
// Undefined represents the undefined BSON value.
|
||||
var Undefined undefined
|
||||
|
||||
// Binary is a representation for non-standard binary values. Any kind should
|
||||
// work, but the following are known as of this writing:
|
||||
//
|
||||
// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
|
||||
// 0x01 - Function (!?)
|
||||
// 0x02 - Obsolete generic.
|
||||
// 0x03 - UUID
|
||||
// 0x05 - MD5
|
||||
// 0x80 - User defined.
|
||||
//
|
||||
type Binary struct {
|
||||
Kind byte
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// RegEx represents a regular expression. The Options field may contain
|
||||
// individual characters defining the way in which the pattern should be
|
||||
// applied, and must be sorted. Valid options as of this writing are 'i' for
|
||||
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
|
||||
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
|
||||
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
|
||||
// unicode. The value of the Options parameter is not verified before being
|
||||
// marshaled into the BSON format.
|
||||
type RegEx struct {
|
||||
Pattern string
|
||||
Options string
|
||||
}
|
||||
|
||||
// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
|
||||
// will be marshaled as a mapping from identifiers to values that may be
|
||||
// used when evaluating the provided Code.
|
||||
type JavaScript struct {
|
||||
Code string
|
||||
Scope interface{}
|
||||
}
|
||||
|
||||
// DBPointer refers to a document id in a namespace.
|
||||
//
|
||||
// This type is deprecated in the BSON specification and should not be used
|
||||
// except for backwards compatibility with ancient applications.
|
||||
type DBPointer struct {
|
||||
Namespace string
|
||||
Id ObjectId
|
||||
}
|
||||
|
||||
const initialBufferSize = 64
|
||||
|
||||
func handleErr(err *error) {
|
||||
if r := recover(); r != nil {
|
||||
if _, ok := r.(runtime.Error); ok {
|
||||
panic(r)
|
||||
} else if _, ok := r.(externalPanic); ok {
|
||||
panic(r)
|
||||
} else if s, ok := r.(string); ok {
|
||||
*err = errors.New(s)
|
||||
} else if e, ok := r.(error); ok {
|
||||
*err = e
|
||||
} else {
|
||||
panic(r)
|
||||
}
|
||||
}
|
||||
}
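handleErr is the usual recover-into-named-return idiom: internal code panics with a string or an error, the deferred handler converts that into the function's error result, and runtime errors and externalPanic values keep unwinding. A minimal standalone sketch of the same pattern:

```
package main

import (
	"errors"
	"fmt"
)

func handle(err *error) {
	if r := recover(); r != nil {
		if s, ok := r.(string); ok {
			*err = errors.New(s) // turn internal string panics into errors
		} else {
			panic(r) // anything unexpected keeps unwinding
		}
	}
}

func parse(ok bool) (err error) {
	defer handle(&err)
	if !ok {
		panic("Document is corrupted")
	}
	return nil
}

func main() {
	fmt.Println(parse(false)) // Document is corrupted
	fmt.Println(parse(true))  // <nil>
}
```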
|
||||
|
||||
// Marshal serializes the in value, which may be a map or a struct value.
|
||||
// In the case of struct values, only exported fields will be serialized,
|
||||
// and the order of serialized fields will match that of the struct itself.
|
||||
// The lowercased field name is used as the key for each exported field,
|
||||
// but this behavior may be changed using the respective field tag.
|
||||
// The tag may also contain flags to tweak the marshalling behavior for
|
||||
// the field. The tag formats accepted are:
|
||||
//
|
||||
// "[<key>][,<flag1>[,<flag2>]]"
|
||||
//
|
||||
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||
//
|
||||
// The following flags are currently supported:
|
||||
//
|
||||
// omitempty Only include the field if it's not set to the zero
|
||||
// value for the type or to empty slices or maps.
|
||||
//
|
||||
// minsize Marshal an int64 value as an int32, if that's feasible
|
||||
// while preserving the numeric value.
|
||||
//
|
||||
// inline Inline the field, which must be a struct or a map,
|
||||
// causing all of its fields or keys to be processed as if
|
||||
// they were part of the outer struct. For maps, keys must
|
||||
// not conflict with the bson keys of other struct fields.
|
||||
//
|
||||
// Some examples:
|
||||
//
|
||||
// type T struct {
|
||||
// A bool
|
||||
// B int "myb"
|
||||
// C string "myc,omitempty"
|
||||
// D string `bson:",omitempty" json:"jsonkey"`
|
||||
// E int64 ",minsize"
|
||||
// F int64 "myf,omitempty,minsize"
|
||||
// }
|
||||
//
|
||||
func Marshal(in interface{}) (out []byte, err error) {
|
||||
defer handleErr(&err)
|
||||
e := &encoder{make([]byte, 0, initialBufferSize)}
|
||||
e.addDoc(reflect.ValueOf(in))
|
||||
return e.out, nil
|
||||
}
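A usage sketch tying the tag flags together (the struct and its values are made up):

```
package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

type Item struct {
	Name  string `bson:"n"`          // stored under key "n"
	Note  string `bson:",omitempty"` // dropped when empty
	Count int64  `bson:",minsize"`   // stored as an int32 when it fits
}

func main() {
	out, err := bson.Marshal(Item{Name: "tea", Count: 3})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out) // "note" is absent, "count" is an int32 element
}
```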
|
||||
|
||||
// Unmarshal deserializes data from in into the out value. The out value
|
||||
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
|
||||
// The lowercased field name is used as the key for each exported field,
|
||||
// but this behavior may be changed using the respective field tag.
|
||||
// The tag may also contain flags to tweak the marshalling behavior for
|
||||
// the field. The tag formats accepted are:
|
||||
//
|
||||
// "[<key>][,<flag1>[,<flag2>]]"
|
||||
//
|
||||
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||
//
|
||||
// The following flags are currently supported during unmarshal (see the
|
||||
// Marshal function for other flags):
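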
|
||||
//
|
||||
// inline Inline the field, which must be a struct or a map.
|
||||
// Inlined structs are handled as if its fields were part
|
||||
// of the outer struct. An inlined map causes keys that do
|
||||
// not match any other struct field to be inserted in the
|
||||
// map rather than being discarded as usual.
|
||||
//
|
||||
// The target field or element types of out may not necessarily match
|
||||
// the BSON values of the provided data. The following conversions are
|
||||
// made automatically:
|
||||
//
|
||||
// - Numeric types are converted if at least the integer part of the
|
||||
// value would be preserved correctly
|
||||
// - Bools are converted to numeric types as 1 or 0
|
||||
// - Numeric types are converted to bools as true if not 0 or false otherwise
|
||||
// - Binary and string BSON data is converted to a string, array or byte slice
|
||||
//
|
||||
// If the value would not fit the type and cannot be converted, it's
|
||||
// silently skipped.
|
||||
//
|
||||
// Pointer values are initialized when necessary.
|
||||
func Unmarshal(in []byte, out interface{}) (err error) {
|
||||
if raw, ok := out.(*Raw); ok {
|
||||
raw.Kind = 3
|
||||
raw.Data = in
|
||||
return nil
|
||||
}
|
||||
defer handleErr(&err)
|
||||
v := reflect.ValueOf(out)
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr:
|
||||
fallthrough
|
||||
case reflect.Map:
|
||||
d := newDecoder(in)
|
||||
d.readDocTo(v)
|
||||
case reflect.Struct:
|
||||
return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
|
||||
default:
|
||||
return errors.New("Unmarshal needs a map or a pointer to a struct.")
|
||||
}
|
||||
return nil
|
||||
}
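For instance, an ,inline map collects keys that match no struct field (struct and document here are illustrative):

```
package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

type Event struct {
	Kind  string                 `bson:"kind"`
	Extra map[string]interface{} `bson:",inline"` // catches unmatched keys
}

func main() {
	raw, err := bson.Marshal(bson.M{"kind": "put", "size": 42})
	if err != nil {
		panic(err)
	}
	var ev Event
	if err := bson.Unmarshal(raw, &ev); err != nil {
		panic(err)
	}
	fmt.Println(ev.Kind, ev.Extra["size"]) // put 42
}
```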
|
||||
|
||||
// Unmarshal deserializes raw into the out value. If the out value type
|
||||
// is not compatible with raw, a *bson.TypeError is returned.
|
||||
//
|
||||
// See the Unmarshal function documentation for more details on the
|
||||
// unmarshalling process.
|
||||
func (raw Raw) Unmarshal(out interface{}) (err error) {
|
||||
defer handleErr(&err)
|
||||
v := reflect.ValueOf(out)
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr:
|
||||
v = v.Elem()
|
||||
fallthrough
|
||||
case reflect.Map:
|
||||
d := newDecoder(raw.Data)
|
||||
good := d.readElemTo(v, raw.Kind)
|
||||
if !good {
|
||||
return &TypeError{v.Type(), raw.Kind}
|
||||
}
|
||||
case reflect.Struct:
|
||||
return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
|
||||
default:
|
||||
return errors.New("Raw Unmarshal needs a map or a valid pointer.")
|
||||
}
|
||||
return nil
|
||||
}
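Raw enables deferred decoding: capture a subdocument as Raw first, then unmarshal it into a concrete type once its shape is known. A hedged sketch:

```
package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

type Envelope struct {
	Kind    string   `bson:"kind"`
	Payload bson.Raw `bson:"payload"` // left undecoded until Kind is known
}

func main() {
	data, err := bson.Marshal(bson.M{"kind": "point", "payload": bson.M{"x": 1, "y": 2}})
	if err != nil {
		panic(err)
	}
	var env Envelope
	if err := bson.Unmarshal(data, &env); err != nil {
		panic(err)
	}
	var pt struct{ X, Y int }
	if err := env.Payload.Unmarshal(&pt); err != nil {
		panic(err)
	}
	fmt.Println(env.Kind, pt.X, pt.Y) // point 1 2
}
```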
|
||||
|
||||
type TypeError struct {
|
||||
Type reflect.Type
|
||||
Kind byte
|
||||
}
|
||||
|
||||
func (e *TypeError) Error() string {
|
||||
return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Maintain a mapping of keys to structure field indexes
|
||||
|
||||
type structInfo struct {
|
||||
FieldsMap map[string]fieldInfo
|
||||
FieldsList []fieldInfo
|
||||
InlineMap int
|
||||
Zero reflect.Value
|
||||
}
|
||||
|
||||
type fieldInfo struct {
|
||||
Key string
|
||||
Num int
|
||||
OmitEmpty bool
|
||||
MinSize bool
|
||||
Inline []int
|
||||
}
|
||||
|
||||
var structMap = make(map[reflect.Type]*structInfo)
|
||||
var structMapMutex sync.RWMutex
|
||||
|
||||
type externalPanic string
|
||||
|
||||
func (e externalPanic) String() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
func getStructInfo(st reflect.Type) (*structInfo, error) {
|
||||
structMapMutex.RLock()
|
||||
sinfo, found := structMap[st]
|
||||
structMapMutex.RUnlock()
|
||||
if found {
|
||||
return sinfo, nil
|
||||
}
|
||||
n := st.NumField()
|
||||
fieldsMap := make(map[string]fieldInfo)
|
||||
fieldsList := make([]fieldInfo, 0, n)
|
||||
inlineMap := -1
|
||||
for i := 0; i != n; i++ {
|
||||
field := st.Field(i)
|
||||
if field.PkgPath != "" {
|
||||
continue // Private field
|
||||
}
|
||||
|
||||
info := fieldInfo{Num: i}
|
||||
|
||||
tag := field.Tag.Get("bson")
|
||||
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
|
||||
tag = string(field.Tag)
|
||||
}
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
// XXX Drop this after a few releases.
|
||||
if s := strings.Index(tag, "/"); s >= 0 {
|
||||
recommend := tag[:s]
|
||||
for _, c := range tag[s+1:] {
|
||||
switch c {
|
||||
case 'c':
|
||||
recommend += ",omitempty"
|
||||
case 's':
|
||||
recommend += ",minsize"
|
||||
default:
|
||||
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", string([]byte{uint8(c)}), tag, st)
|
||||
panic(externalPanic(msg))
|
||||
}
|
||||
}
|
||||
msg := fmt.Sprintf("Replace tag %q in field %s of type %s by %q", tag, field.Name, st, recommend)
|
||||
panic(externalPanic(msg))
|
||||
}
|
||||
|
||||
inline := false
|
||||
fields := strings.Split(tag, ",")
|
||||
if len(fields) > 1 {
|
||||
for _, flag := range fields[1:] {
|
||||
switch flag {
|
||||
case "omitempty":
|
||||
info.OmitEmpty = true
|
||||
case "minsize":
|
||||
info.MinSize = true
|
||||
case "inline":
|
||||
inline = true
|
||||
default:
|
||||
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
|
||||
panic(externalPanic(msg))
|
||||
}
|
||||
}
|
||||
tag = fields[0]
|
||||
}
|
||||
|
||||
if inline {
|
||||
switch field.Type.Kind() {
|
||||
case reflect.Map:
|
||||
if inlineMap >= 0 {
|
||||
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
|
||||
}
|
||||
if field.Type.Key() != reflect.TypeOf("") {
|
||||
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
|
||||
}
|
||||
inlineMap = info.Num
|
||||
case reflect.Struct:
|
||||
sinfo, err := getStructInfo(field.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, finfo := range sinfo.FieldsList {
|
||||
if _, found := fieldsMap[finfo.Key]; found {
|
||||
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if finfo.Inline == nil {
|
||||
finfo.Inline = []int{i, finfo.Num}
|
||||
} else {
|
||||
finfo.Inline = append([]int{i}, finfo.Inline...)
|
||||
}
|
||||
fieldsMap[finfo.Key] = finfo
|
||||
fieldsList = append(fieldsList, finfo)
|
||||
}
|
||||
default:
|
||||
panic("Option ,inline needs a struct value or map field")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if tag != "" {
|
||||
info.Key = tag
|
||||
} else {
|
||||
info.Key = strings.ToLower(field.Name)
|
||||
}
|
||||
|
||||
if _, found = fieldsMap[info.Key]; found {
|
||||
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
|
||||
fieldsList = append(fieldsList, info)
|
||||
fieldsMap[info.Key] = info
|
||||
}
|
||||
sinfo = &structInfo{
|
||||
fieldsMap,
|
||||
fieldsList,
|
||||
inlineMap,
|
||||
reflect.New(st).Elem(),
|
||||
}
|
||||
structMapMutex.Lock()
|
||||
structMap[st] = sinfo
|
||||
structMapMutex.Unlock()
|
||||
return sinfo, nil
|
||||
}
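The surrounding cache is the standard double-checked RWMutex pattern: an optimistic read lock first, then a write lock only to publish. A generic standalone sketch of that pattern (names are illustrative):

```
package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.RWMutex
	cache = make(map[string]int)
)

// get computes v once per key and serves later calls from the cache,
// mirroring how getStructInfo publishes structInfo values.
func get(key string, compute func() int) int {
	mu.RLock()
	v, ok := cache[key]
	mu.RUnlock()
	if ok {
		return v
	}
	v = compute() // may run more than once under contention; last write wins
	mu.Lock()
	cache[key] = v
	mu.Unlock()
	return v
}

func main() {
	fmt.Println(get("a", func() int { return 42 }))
	fmt.Println(get("a", func() int { return 0 })) // cached: still 42
}
```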
|
1693 vendor/gopkg.in/mgo.v2/bson/bson_test.go (generated, vendored): file diff suppressed because it is too large
842 vendor/gopkg.in/mgo.v2/bson/decode.go (generated, vendored)
@@ -1,842 +0,0 @@
|
||||
// BSON library for Go
|
||||
//
|
||||
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||
//
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||
// list of conditions and the following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// gobson - BSON library for Go.
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
in []byte
|
||||
i int
|
||||
docType reflect.Type
|
||||
}
|
||||
|
||||
var typeM = reflect.TypeOf(M{})
|
||||
|
||||
func newDecoder(in []byte) *decoder {
|
||||
return &decoder{in, 0, typeM}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Some helper functions.
|
||||
|
||||
func corrupted() {
|
||||
panic("Document is corrupted")
|
||||
}
|
||||
|
||||
func settableValueOf(i interface{}) reflect.Value {
|
||||
v := reflect.ValueOf(i)
|
||||
sv := reflect.New(v.Type()).Elem()
|
||||
sv.Set(v)
|
||||
return sv
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Unmarshaling of documents.
|
||||
|
||||
const (
|
||||
setterUnknown = iota
|
||||
setterNone
|
||||
setterType
|
||||
setterAddr
|
||||
)
|
||||
|
||||
var setterStyles map[reflect.Type]int
|
||||
var setterIface reflect.Type
|
||||
var setterMutex sync.RWMutex
|
||||
|
||||
func init() {
|
||||
var iface Setter
|
||||
setterIface = reflect.TypeOf(&iface).Elem()
|
||||
setterStyles = make(map[reflect.Type]int)
|
||||
}
|
||||
|
||||
func setterStyle(outt reflect.Type) int {
|
||||
setterMutex.RLock()
|
||||
style := setterStyles[outt]
|
||||
setterMutex.RUnlock()
|
||||
if style == setterUnknown {
|
||||
setterMutex.Lock()
|
||||
defer setterMutex.Unlock()
|
||||
if outt.Implements(setterIface) {
|
||||
setterStyles[outt] = setterType
|
||||
} else if reflect.PtrTo(outt).Implements(setterIface) {
|
||||
setterStyles[outt] = setterAddr
|
||||
} else {
|
||||
setterStyles[outt] = setterNone
|
||||
}
|
||||
style = setterStyles[outt]
|
||||
}
|
||||
return style
|
||||
}
|
||||
|
||||
func getSetter(outt reflect.Type, out reflect.Value) Setter {
|
||||
style := setterStyle(outt)
|
||||
if style == setterNone {
|
||||
return nil
|
||||
}
|
||||
if style == setterAddr {
|
||||
if !out.CanAddr() {
|
||||
return nil
|
||||
}
|
||||
out = out.Addr()
|
||||
} else if outt.Kind() == reflect.Ptr && out.IsNil() {
|
||||
out.Set(reflect.New(outt.Elem()))
|
||||
}
|
||||
return out.Interface().(Setter)
|
||||
}
|
||||
|
||||
func clearMap(m reflect.Value) {
|
||||
var none reflect.Value
|
||||
for _, k := range m.MapKeys() {
|
||||
m.SetMapIndex(k, none)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readDocTo(out reflect.Value) {
|
||||
var elemType reflect.Type
|
||||
outt := out.Type()
|
||||
outk := outt.Kind()
|
||||
|
||||
for {
|
||||
if outk == reflect.Ptr && out.IsNil() {
|
||||
out.Set(reflect.New(outt.Elem()))
|
||||
}
|
||||
if setter := getSetter(outt, out); setter != nil {
|
||||
var raw Raw
|
||||
d.readDocTo(reflect.ValueOf(&raw))
|
||||
err := setter.SetBSON(raw)
|
||||
if _, ok := err.(*TypeError); err != nil && !ok {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if outk == reflect.Ptr {
|
||||
out = out.Elem()
|
||||
outt = out.Type()
|
||||
outk = out.Kind()
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
var fieldsMap map[string]fieldInfo
|
||||
var inlineMap reflect.Value
|
||||
start := d.i
|
||||
|
||||
origout := out
|
||||
if outk == reflect.Interface {
|
||||
if d.docType.Kind() == reflect.Map {
|
||||
mv := reflect.MakeMap(d.docType)
|
||||
out.Set(mv)
|
||||
out = mv
|
||||
} else {
|
||||
dv := reflect.New(d.docType).Elem()
|
||||
out.Set(dv)
|
||||
out = dv
|
||||
}
|
||||
outt = out.Type()
|
||||
outk = outt.Kind()
|
||||
}
|
||||
|
||||
docType := d.docType
|
||||
keyType := typeString
|
||||
convertKey := false
|
||||
switch outk {
|
||||
case reflect.Map:
|
||||
keyType = outt.Key()
|
||||
if keyType.Kind() != reflect.String {
|
||||
panic("BSON map must have string keys. Got: " + outt.String())
|
||||
}
|
||||
if keyType != typeString {
|
||||
convertKey = true
|
||||
}
|
||||
elemType = outt.Elem()
|
||||
if elemType == typeIface {
|
||||
d.docType = outt
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(out.Type()))
|
||||
} else if out.Len() > 0 {
|
||||
clearMap(out)
|
||||
}
|
||||
case reflect.Struct:
|
||||
if outt != typeRaw {
|
||||
sinfo, err := getStructInfo(out.Type())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fieldsMap = sinfo.FieldsMap
|
||||
out.Set(sinfo.Zero)
|
||||
if sinfo.InlineMap != -1 {
|
||||
inlineMap = out.Field(sinfo.InlineMap)
|
||||
if !inlineMap.IsNil() && inlineMap.Len() > 0 {
|
||||
clearMap(inlineMap)
|
||||
}
|
||||
elemType = inlineMap.Type().Elem()
|
||||
if elemType == typeIface {
|
||||
d.docType = inlineMap.Type()
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
switch outt.Elem() {
|
||||
case typeDocElem:
|
||||
origout.Set(d.readDocElems(outt))
|
||||
return
|
||||
case typeRawDocElem:
|
||||
origout.Set(d.readRawDocElems(outt))
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
panic("Unsupported document type for unmarshalling: " + out.Type().String())
|
||||
}
|
||||
|
||||
end := int(d.readInt32())
|
||||
end += d.i - 4
|
||||
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
|
||||
corrupted()
|
||||
}
|
||||
for d.in[d.i] != '\x00' {
|
||||
kind := d.readByte()
|
||||
name := d.readCStr()
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
|
||||
switch outk {
|
||||
case reflect.Map:
|
||||
e := reflect.New(elemType).Elem()
|
||||
if d.readElemTo(e, kind) {
|
||||
k := reflect.ValueOf(name)
|
||||
if convertKey {
|
||||
k = k.Convert(keyType)
|
||||
}
|
||||
out.SetMapIndex(k, e)
|
||||
}
|
||||
case reflect.Struct:
|
||||
if outt == typeRaw {
|
||||
d.dropElem(kind)
|
||||
} else {
|
||||
if info, ok := fieldsMap[name]; ok {
|
||||
if info.Inline == nil {
|
||||
d.readElemTo(out.Field(info.Num), kind)
|
||||
} else {
|
||||
d.readElemTo(out.FieldByIndex(info.Inline), kind)
|
||||
}
|
||||
} else if inlineMap.IsValid() {
|
||||
if inlineMap.IsNil() {
|
||||
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
|
||||
}
|
||||
e := reflect.New(elemType).Elem()
|
||||
if d.readElemTo(e, kind) {
|
||||
inlineMap.SetMapIndex(reflect.ValueOf(name), e)
|
||||
}
|
||||
} else {
|
||||
d.dropElem(kind)
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
}
|
||||
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
}
|
||||
d.i++ // '\x00'
|
||||
if d.i != end {
|
||||
corrupted()
|
||||
}
|
||||
d.docType = docType
|
||||
|
||||
if outt == typeRaw {
|
||||
out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readArrayDocTo(out reflect.Value) {
|
||||
end := int(d.readInt32())
|
||||
end += d.i - 4
|
||||
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
|
||||
corrupted()
|
||||
}
|
||||
i := 0
|
||||
l := out.Len()
|
||||
for d.in[d.i] != '\x00' {
|
||||
if i >= l {
|
||||
panic("Length mismatch on array field")
|
||||
}
|
||||
kind := d.readByte()
|
||||
for d.i < end && d.in[d.i] != '\x00' {
|
||||
d.i++
|
||||
}
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
d.i++
|
||||
d.readElemTo(out.Index(i), kind)
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i != l {
|
||||
panic("Length mismatch on array field")
|
||||
}
|
||||
d.i++ // '\x00'
|
||||
if d.i != end {
|
||||
corrupted()
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
|
||||
tmp := make([]reflect.Value, 0, 8)
|
||||
elemType := t.Elem()
|
||||
if elemType == typeRawDocElem {
|
||||
d.dropElem(0x04)
|
||||
return reflect.Zero(t).Interface()
|
||||
}
|
||||
|
||||
end := int(d.readInt32())
|
||||
end += d.i - 4
|
||||
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
|
||||
corrupted()
|
||||
}
|
||||
for d.in[d.i] != '\x00' {
|
||||
kind := d.readByte()
|
||||
for d.i < end && d.in[d.i] != '\x00' {
|
||||
d.i++
|
||||
}
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
d.i++
|
||||
e := reflect.New(elemType).Elem()
|
||||
if d.readElemTo(e, kind) {
|
||||
tmp = append(tmp, e)
|
||||
}
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
}
|
||||
d.i++ // '\x00'
|
||||
if d.i != end {
|
||||
corrupted()
|
||||
}
|
||||
|
||||
n := len(tmp)
|
||||
slice := reflect.MakeSlice(t, n, n)
|
||||
for i := 0; i != n; i++ {
|
||||
slice.Index(i).Set(tmp[i])
|
||||
}
|
||||
return slice.Interface()
|
||||
}
|
||||
|
||||
var typeSlice = reflect.TypeOf([]interface{}{})
|
||||
var typeIface = typeSlice.Elem()
|
||||
|
||||
func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
|
||||
docType := d.docType
|
||||
d.docType = typ
|
||||
slice := make([]DocElem, 0, 8)
|
||||
d.readDocWith(func(kind byte, name string) {
|
||||
e := DocElem{Name: name}
|
||||
v := reflect.ValueOf(&e.Value)
|
||||
if d.readElemTo(v.Elem(), kind) {
|
||||
slice = append(slice, e)
|
||||
}
|
||||
})
|
||||
slicev := reflect.New(typ).Elem()
|
||||
slicev.Set(reflect.ValueOf(slice))
|
||||
d.docType = docType
|
||||
return slicev
|
||||
}
|
||||
|
||||
func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
|
||||
docType := d.docType
|
||||
d.docType = typ
|
||||
slice := make([]RawDocElem, 0, 8)
|
||||
d.readDocWith(func(kind byte, name string) {
|
||||
e := RawDocElem{Name: name}
|
||||
v := reflect.ValueOf(&e.Value)
|
||||
if d.readElemTo(v.Elem(), kind) {
|
||||
slice = append(slice, e)
|
||||
}
|
||||
})
|
||||
slicev := reflect.New(typ).Elem()
|
||||
slicev.Set(reflect.ValueOf(slice))
|
||||
d.docType = docType
|
||||
return slicev
|
||||
}
|
||||
|
||||
func (d *decoder) readDocWith(f func(kind byte, name string)) {
|
||||
end := int(d.readInt32())
|
||||
end += d.i - 4
|
||||
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
|
||||
corrupted()
|
||||
}
|
||||
for d.in[d.i] != '\x00' {
|
||||
kind := d.readByte()
|
||||
name := d.readCStr()
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
f(kind, name)
|
||||
if d.i >= end {
|
||||
corrupted()
|
||||
}
|
||||
}
|
||||
d.i++ // '\x00'
|
||||
if d.i != end {
|
||||
corrupted()
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Unmarshaling of individual elements within a document.
|
||||
|
||||
var blackHole = settableValueOf(struct{}{})
|
||||
|
||||
func (d *decoder) dropElem(kind byte) {
|
||||
d.readElemTo(blackHole, kind)
|
||||
}
|
||||
|
||||
// Attempt to decode an element from the document and put it into out.
|
||||
// If the types are not compatible, the returned ok value will be
|
||||
// false and out will be unchanged.
|
||||
func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
|
||||
|
||||
start := d.i
|
||||
|
||||
if kind == 0x03 {
|
||||
// Delegate unmarshaling of documents.
|
||||
outt := out.Type()
|
||||
outk := out.Kind()
|
||||
switch outk {
|
||||
case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
|
||||
d.readDocTo(out)
|
||||
return true
|
||||
}
|
||||
if setterStyle(outt) != setterNone {
|
||||
d.readDocTo(out)
|
||||
return true
|
||||
}
|
||||
if outk == reflect.Slice {
|
||||
switch outt.Elem() {
|
||||
case typeDocElem:
|
||||
out.Set(d.readDocElems(outt))
|
||||
case typeRawDocElem:
|
||||
out.Set(d.readRawDocElems(outt))
|
||||
}
|
||||
return true
|
||||
}
|
||||
d.readDocTo(blackHole)
|
||||
return true
|
||||
}
|
||||
|
||||
var in interface{}
|
||||
|
||||
switch kind {
|
||||
case 0x01: // Float64
|
||||
in = d.readFloat64()
|
||||
case 0x02: // UTF-8 string
|
||||
in = d.readStr()
|
||||
case 0x03: // Document
|
||||
panic("Can't happen. Handled above.")
|
||||
case 0x04: // Array
|
||||
outt := out.Type()
|
||||
if setterStyle(outt) != setterNone {
|
||||
// Skip the value so its data is handed to the setter below.
|
||||
d.dropElem(kind)
|
||||
break
|
||||
}
|
||||
for outt.Kind() == reflect.Ptr {
|
||||
outt = outt.Elem()
|
||||
}
|
||||
switch outt.Kind() {
|
||||
case reflect.Array:
|
||||
d.readArrayDocTo(out)
|
||||
return true
|
||||
case reflect.Slice:
|
||||
in = d.readSliceDoc(outt)
|
||||
default:
|
||||
in = d.readSliceDoc(typeSlice)
|
||||
}
|
||||
case 0x05: // Binary
|
||||
b := d.readBinary()
|
||||
if b.Kind == 0x00 || b.Kind == 0x02 {
|
||||
in = b.Data
|
||||
} else {
|
||||
in = b
|
||||
}
|
||||
case 0x06: // Undefined (obsolete, but still seen in the wild)
|
||||
in = Undefined
|
||||
case 0x07: // ObjectId
|
||||
in = ObjectId(d.readBytes(12))
|
||||
case 0x08: // Bool
|
||||
in = d.readBool()
|
||||
case 0x09: // UTC datetime
|
||||
// MongoDB stores datetimes as milliseconds since the Unix epoch.
|
||||
i := d.readInt64()
|
||||
if i == -62135596800000 {
|
||||
in = time.Time{} // In UTC for convenience.
|
||||
} else {
|
||||
in = time.Unix(i/1e3, i%1e3*1e6)
|
||||
}
|
||||
case 0x0A: // Nil
|
||||
in = nil
|
||||
case 0x0B: // RegEx
|
||||
in = d.readRegEx()
|
||||
case 0x0C:
|
||||
in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
|
||||
case 0x0D: // JavaScript without scope
|
||||
in = JavaScript{Code: d.readStr()}
|
||||
case 0x0E: // Symbol
|
||||
in = Symbol(d.readStr())
|
||||
case 0x0F: // JavaScript with scope
|
||||
d.i += 4 // Skip length
|
||||
js := JavaScript{d.readStr(), make(M)}
|
||||
d.readDocTo(reflect.ValueOf(js.Scope))
|
||||
in = js
|
||||
case 0x10: // Int32
|
||||
in = int(d.readInt32())
|
||||
case 0x11: // Mongo-specific timestamp
|
||||
in = MongoTimestamp(d.readInt64())
|
||||
case 0x12: // Int64
|
||||
in = d.readInt64()
|
||||
case 0x7F: // Max key
|
||||
in = MaxKey
|
||||
case 0xFF: // Min key
|
||||
in = MinKey
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
|
||||
}
|
||||
|
||||
outt := out.Type()
|
||||
|
||||
if outt == typeRaw {
|
||||
out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
|
||||
return true
|
||||
}
|
||||
|
||||
if setter := getSetter(outt, out); setter != nil {
|
||||
err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
|
||||
if err == SetZero {
|
||||
out.Set(reflect.Zero(outt))
|
||||
return true
|
||||
}
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
if _, ok := err.(*TypeError); !ok {
|
||||
panic(err)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if in == nil {
|
||||
out.Set(reflect.Zero(outt))
|
||||
return true
|
||||
}
|
||||
|
||||
outk := outt.Kind()
|
||||
|
||||
// Dereference and initialize pointer if necessary.
|
||||
first := true
|
||||
for outk == reflect.Ptr {
|
||||
if !out.IsNil() {
|
||||
out = out.Elem()
|
||||
} else {
|
||||
elem := reflect.New(outt.Elem())
|
||||
if first {
|
||||
// Only set if value is compatible.
|
||||
first = false
|
||||
defer func(out, elem reflect.Value) {
|
||||
if good {
|
||||
out.Set(elem)
|
||||
}
|
||||
}(out, elem)
|
||||
} else {
|
||||
out.Set(elem)
|
||||
}
|
||||
out = elem
|
||||
}
|
||||
outt = out.Type()
|
||||
outk = outt.Kind()
|
||||
}
|
||||
|
||||
inv := reflect.ValueOf(in)
|
||||
if outt == inv.Type() {
|
||||
out.Set(inv)
|
||||
return true
|
||||
}
|
||||
|
||||
switch outk {
|
||||
case reflect.Interface:
|
||||
out.Set(inv)
|
||||
return true
|
||||
case reflect.String:
|
||||
switch inv.Kind() {
|
||||
case reflect.String:
|
||||
out.SetString(inv.String())
|
||||
return true
|
||||
case reflect.Slice:
|
||||
if b, ok := in.([]byte); ok {
|
||||
out.SetString(string(b))
|
||||
return true
|
||||
}
|
||||
case reflect.Int, reflect.Int64:
|
||||
if outt == typeJSONNumber {
|
||||
out.SetString(strconv.FormatInt(inv.Int(), 10))
|
||||
return true
|
||||
}
|
||||
case reflect.Float64:
|
||||
if outt == typeJSONNumber {
|
||||
out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
|
||||
return true
|
||||
}
|
||||
}
|
||||
case reflect.Slice, reflect.Array:
|
||||
// Remember, array (0x04) slices are built with the correct
|
||||
// element type. If we are here, it must be a cross-BSON-kind
|
||||
// conversion (e.g. a 0x05 binary unmarshaled into a string).
|
||||
if outt.Elem().Kind() != reflect.Uint8 {
|
||||
break
|
||||
}
|
||||
switch inv.Kind() {
|
||||
case reflect.String:
|
||||
slice := []byte(inv.String())
|
||||
out.Set(reflect.ValueOf(slice))
|
||||
return true
|
||||
case reflect.Slice:
|
||||
switch outt.Kind() {
|
||||
case reflect.Array:
|
||||
reflect.Copy(out, inv)
|
||||
case reflect.Slice:
|
||||
out.SetBytes(inv.Bytes())
|
||||
}
|
||||
return true
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
switch inv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
out.SetInt(inv.Int())
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
out.SetInt(int64(inv.Float()))
|
||||
return true
|
||||
case reflect.Bool:
|
||||
if inv.Bool() {
|
||||
out.SetInt(1)
|
||||
} else {
|
||||
out.SetInt(0)
|
||||
}
|
||||
return true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
panic("can't happen: no uint types in BSON (!?)")
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
switch inv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
out.SetUint(uint64(inv.Int()))
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
out.SetUint(uint64(inv.Float()))
|
||||
return true
|
||||
case reflect.Bool:
|
||||
if inv.Bool() {
|
||||
out.SetUint(1)
|
||||
} else {
|
||||
out.SetUint(0)
|
||||
}
|
||||
return true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
panic("Can't happen. No uint types in BSON.")
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch inv.Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
out.SetFloat(inv.Float())
|
||||
return true
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
out.SetFloat(float64(inv.Int()))
|
||||
return true
|
||||
case reflect.Bool:
|
||||
if inv.Bool() {
|
||||
out.SetFloat(1)
|
||||
} else {
|
||||
out.SetFloat(0)
|
||||
}
|
||||
return true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
panic("Can't happen. No uint types in BSON?")
|
||||
}
|
||||
case reflect.Bool:
|
||||
switch inv.Kind() {
|
||||
case reflect.Bool:
|
||||
out.SetBool(inv.Bool())
|
||||
return true
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
out.SetBool(inv.Int() != 0)
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
out.SetBool(inv.Float() != 0)
|
||||
return true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
panic("Can't happen. No uint types in BSON?")
|
||||
}
|
||||
case reflect.Struct:
|
||||
if outt == typeURL && inv.Kind() == reflect.String {
|
||||
u, err := url.Parse(inv.String())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
out.Set(reflect.ValueOf(u).Elem())
|
||||
return true
|
||||
}
|
||||
if outt == typeBinary {
|
||||
if b, ok := in.([]byte); ok {
|
||||
out.Set(reflect.ValueOf(Binary{Data: b}))
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Parsers of basic types.
|
||||
|
||||
func (d *decoder) readRegEx() RegEx {
|
||||
re := RegEx{}
|
||||
re.Pattern = d.readCStr()
|
||||
re.Options = d.readCStr()
|
||||
return re
|
||||
}
|
||||
|
||||
func (d *decoder) readBinary() Binary {
|
||||
l := d.readInt32()
|
||||
b := Binary{}
|
||||
b.Kind = d.readByte()
|
||||
b.Data = d.readBytes(l)
|
||||
if b.Kind == 0x02 && len(b.Data) >= 4 {
|
||||
// Weird obsolete format with redundant length.
|
||||
b.Data = b.Data[4:]
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (d *decoder) readStr() string {
|
||||
l := d.readInt32()
|
||||
b := d.readBytes(l - 1)
|
||||
if d.readByte() != '\x00' {
|
||||
corrupted()
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (d *decoder) readCStr() string {
|
||||
start := d.i
|
||||
end := start
|
||||
l := len(d.in)
|
||||
for ; end != l; end++ {
|
||||
if d.in[end] == '\x00' {
|
||||
break
|
||||
}
|
||||
}
|
||||
d.i = end + 1
|
||||
if d.i > l {
|
||||
corrupted()
|
||||
}
|
||||
return string(d.in[start:end])
|
||||
}
|
||||
|
||||
func (d *decoder) readBool() bool {
|
||||
b := d.readByte()
|
||||
if b == 0 {
|
||||
return false
|
||||
}
|
||||
if b == 1 {
|
||||
return true
|
||||
}
|
||||
panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
|
||||
}
|
||||
|
||||
func (d *decoder) readFloat64() float64 {
|
||||
return math.Float64frombits(uint64(d.readInt64()))
|
||||
}
|
||||
|
||||
func (d *decoder) readInt32() int32 {
|
||||
b := d.readBytes(4)
|
||||
return int32((uint32(b[0]) << 0) |
|
||||
(uint32(b[1]) << 8) |
|
||||
(uint32(b[2]) << 16) |
|
||||
(uint32(b[3]) << 24))
|
||||
}
|
||||
|
||||
func (d *decoder) readInt64() int64 {
|
||||
b := d.readBytes(8)
|
||||
return int64((uint64(b[0]) << 0) |
|
||||
(uint64(b[1]) << 8) |
|
||||
(uint64(b[2]) << 16) |
|
||||
(uint64(b[3]) << 24) |
|
||||
(uint64(b[4]) << 32) |
|
||||
(uint64(b[5]) << 40) |
|
||||
(uint64(b[6]) << 48) |
|
||||
(uint64(b[7]) << 56))
|
||||
}
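Both readers are plain little-endian reassembly, so encoding/binary produces identical results; a quick equivalence check using only the standard library:

```
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x2A, 0x00, 0x00, 0x00} // little-endian 42
	manual := int32(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
	std := int32(binary.LittleEndian.Uint32(b))
	fmt.Println(manual == std, manual) // true 42
}
```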
|
||||
|
||||
func (d *decoder) readByte() byte {
|
||||
i := d.i
|
||||
d.i++
|
||||
if d.i > len(d.in) {
|
||||
corrupted()
|
||||
}
|
||||
return d.in[i]
|
||||
}
|
||||
|
||||
func (d *decoder) readBytes(length int32) []byte {
|
||||
if length < 0 {
|
||||
corrupted()
|
||||
}
|
||||
start := d.i
|
||||
d.i += int(length)
|
||||
if d.i < start || d.i > len(d.in) {
|
||||
corrupted()
|
||||
}
|
||||
return d.in[start : start+int(length)]
|
||||
}
|
509 vendor/gopkg.in/mgo.v2/bson/encode.go (generated, vendored)
@@ -1,509 +0,0 @@
|
||||
// BSON library for Go
|
||||
//
|
||||
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||
//
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||
// list of conditions and the following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// gobson - BSON library for Go.
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Some internal infrastructure.
|
||||
|
||||
var (
|
||||
typeBinary = reflect.TypeOf(Binary{})
|
||||
typeObjectId = reflect.TypeOf(ObjectId(""))
|
||||
typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")})
|
||||
typeSymbol = reflect.TypeOf(Symbol(""))
|
||||
typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
|
||||
typeOrderKey = reflect.TypeOf(MinKey)
|
||||
typeDocElem = reflect.TypeOf(DocElem{})
|
||||
typeRawDocElem = reflect.TypeOf(RawDocElem{})
|
||||
typeRaw = reflect.TypeOf(Raw{})
|
||||
typeURL = reflect.TypeOf(url.URL{})
|
||||
typeTime = reflect.TypeOf(time.Time{})
|
||||
typeString = reflect.TypeOf("")
|
||||
typeJSONNumber = reflect.TypeOf(json.Number(""))
|
||||
)
|
||||
|
||||
const itoaCacheSize = 32
|
||||
|
||||
var itoaCache []string
|
||||
|
||||
func init() {
|
||||
itoaCache = make([]string, itoaCacheSize)
|
||||
for i := 0; i != itoaCacheSize; i++ {
|
||||
itoaCache[i] = strconv.Itoa(i)
|
||||
}
|
||||
}
|
||||
|
||||
func itoa(i int) string {
|
||||
if i < itoaCacheSize {
|
||||
return itoaCache[i]
|
||||
}
|
||||
return strconv.Itoa(i)
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Marshaling of the document value itself.
|
||||
|
||||
type encoder struct {
|
||||
out []byte
|
||||
}
|
||||
|
||||
func (e *encoder) addDoc(v reflect.Value) {
|
||||
for {
|
||||
if vi, ok := v.Interface().(Getter); ok {
|
||||
getv, err := vi.GetBSON()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
v = reflect.ValueOf(getv)
|
||||
continue
|
||||
}
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if v.Type() == typeRaw {
|
||||
raw := v.Interface().(Raw)
|
||||
if raw.Kind != 0x03 && raw.Kind != 0x00 {
|
||||
panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
|
||||
}
|
||||
if len(raw.Data) == 0 {
|
||||
panic("Attempted to marshal empty Raw document")
|
||||
}
|
||||
e.addBytes(raw.Data...)
|
||||
return
|
||||
}
|
||||
|
||||
start := e.reserveInt32()
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Map:
|
||||
e.addMap(v)
|
||||
case reflect.Struct:
|
||||
e.addStruct(v)
|
||||
case reflect.Array, reflect.Slice:
|
||||
e.addSlice(v)
|
||||
default:
|
||||
panic("Can't marshal " + v.Type().String() + " as a BSON document")
|
||||
}
|
||||
|
||||
e.addBytes(0)
|
||||
e.setInt32(start, int32(len(e.out)-start))
|
||||
}
|
||||
|
||||
func (e *encoder) addMap(v reflect.Value) {
|
||||
for _, k := range v.MapKeys() {
|
||||
e.addElem(k.String(), v.MapIndex(k), false)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) addStruct(v reflect.Value) {
|
||||
sinfo, err := getStructInfo(v.Type())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
var value reflect.Value
|
||||
if sinfo.InlineMap >= 0 {
|
||||
m := v.Field(sinfo.InlineMap)
|
||||
if m.Len() > 0 {
|
||||
for _, k := range m.MapKeys() {
|
||||
ks := k.String()
|
||||
if _, found := sinfo.FieldsMap[ks]; found {
|
||||
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
|
||||
}
|
||||
e.addElem(ks, m.MapIndex(k), false)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, info := range sinfo.FieldsList {
|
||||
if info.Inline == nil {
|
||||
value = v.Field(info.Num)
|
||||
} else {
|
||||
value = v.FieldByIndex(info.Inline)
|
||||
}
|
||||
if info.OmitEmpty && isZero(value) {
|
||||
continue
|
||||
}
|
||||
e.addElem(info.Key, value, info.MinSize)
|
||||
}
|
||||
}
|
||||
|
||||
func isZero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
return len(v.String()) == 0
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return v.IsNil()
|
||||
case reflect.Slice:
|
||||
return v.Len() == 0
|
||||
case reflect.Map:
|
||||
return v.Len() == 0
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Struct:
|
||||
vt := v.Type()
|
||||
if vt == typeTime {
|
||||
return v.Interface().(time.Time).IsZero()
|
||||
}
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
if vt.Field(i).PkgPath != "" {
|
||||
continue // Private field
|
||||
}
|
||||
if !isZero(v.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (e *encoder) addSlice(v reflect.Value) {
|
||||
vi := v.Interface()
|
||||
if d, ok := vi.(D); ok {
|
||||
for _, elem := range d {
|
||||
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
|
||||
}
|
||||
return
|
||||
}
|
||||
if d, ok := vi.(RawD); ok {
|
||||
for _, elem := range d {
|
||||
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
|
||||
}
|
||||
return
|
||||
}
|
||||
l := v.Len()
|
||||
et := v.Type().Elem()
|
||||
if et == typeDocElem {
|
||||
for i := 0; i < l; i++ {
|
||||
elem := v.Index(i).Interface().(DocElem)
|
||||
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
|
||||
}
|
||||
return
|
||||
}
|
||||
if et == typeRawDocElem {
|
||||
for i := 0; i < l; i++ {
|
||||
elem := v.Index(i).Interface().(RawDocElem)
|
||||
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
|
||||
}
|
||||
return
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
e.addElem(itoa(i), v.Index(i), false)
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Marshaling of elements in a document.
|
||||
|
||||
func (e *encoder) addElemName(kind byte, name string) {
|
||||
e.addBytes(kind)
|
||||
e.addBytes([]byte(name)...)
|
||||
e.addBytes(0)
|
||||
}
|
||||
|
||||
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
|
||||
|
||||
if !v.IsValid() {
|
||||
e.addElemName('\x0A', name)
|
||||
return
|
||||
}
|
||||
|
||||
if getter, ok := v.Interface().(Getter); ok {
|
||||
getv, err := getter.GetBSON()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
e.addElem(name, reflect.ValueOf(getv), minSize)
|
||||
return
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
|
||||
case reflect.Interface:
|
||||
e.addElem(name, v.Elem(), minSize)
|
||||
|
||||
case reflect.Ptr:
|
||||
e.addElem(name, v.Elem(), minSize)
|
||||
|
||||
case reflect.String:
|
||||
s := v.String()
|
||||
switch v.Type() {
|
||||
case typeObjectId:
|
||||
if len(s) != 12 {
|
||||
panic("ObjectIDs must be exactly 12 bytes long (got " +
|
||||
strconv.Itoa(len(s)) + ")")
|
||||
}
|
||||
e.addElemName('\x07', name)
|
||||
e.addBytes([]byte(s)...)
|
||||
case typeSymbol:
|
||||
e.addElemName('\x0E', name)
|
||||
e.addStr(s)
|
||||
case typeJSONNumber:
|
||||
n := v.Interface().(json.Number)
|
||||
if i, err := n.Int64(); err == nil {
|
||||
e.addElemName('\x12', name)
|
||||
e.addInt64(i)
|
||||
} else if f, err := n.Float64(); err == nil {
|
||||
e.addElemName('\x01', name)
|
||||
e.addFloat64(f)
|
||||
} else {
|
||||
panic("failed to convert json.Number to a number: " + s)
|
||||
}
|
||||
default:
|
||||
e.addElemName('\x02', name)
|
||||
e.addStr(s)
|
||||
}
|
||||
|
||||
case reflect.Float32, reflect.Float64:
|
||||
e.addElemName('\x01', name)
|
||||
e.addFloat64(v.Float())
|
||||
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
u := v.Uint()
|
||||
if int64(u) < 0 {
|
||||
panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
|
||||
} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
|
||||
e.addElemName('\x10', name)
|
||||
e.addInt32(int32(u))
|
||||
} else {
|
||||
e.addElemName('\x12', name)
|
||||
e.addInt64(int64(u))
|
||||
}
|
||||
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
switch v.Type() {
|
||||
case typeMongoTimestamp:
|
||||
e.addElemName('\x11', name)
|
||||
e.addInt64(v.Int())
|
||||
|
||||
case typeOrderKey:
|
||||
if v.Int() == int64(MaxKey) {
|
||||
e.addElemName('\x7F', name)
|
||||
} else {
|
||||
e.addElemName('\xFF', name)
|
||||
}
|
||||
|
||||
default:
|
||||
i := v.Int()
|
||||
if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
|
||||
// It fits into an int32, encode as such.
|
||||
e.addElemName('\x10', name)
|
||||
e.addInt32(int32(i))
|
||||
} else {
|
||||
e.addElemName('\x12', name)
|
||||
e.addInt64(i)
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Bool:
|
||||
e.addElemName('\x08', name)
|
||||
if v.Bool() {
|
||||
e.addBytes(1)
|
||||
} else {
|
||||
e.addBytes(0)
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
e.addElemName('\x03', name)
|
||||
e.addDoc(v)
|
||||
|
||||
case reflect.Slice:
|
||||
vt := v.Type()
|
||||
et := vt.Elem()
|
||||
if et.Kind() == reflect.Uint8 {
|
||||
e.addElemName('\x05', name)
|
||||
e.addBinary('\x00', v.Bytes())
|
||||
} else if et == typeDocElem || et == typeRawDocElem {
|
||||
e.addElemName('\x03', name)
|
||||
e.addDoc(v)
|
||||
} else {
|
||||
e.addElemName('\x04', name)
|
||||
e.addDoc(v)
|
||||
}
|
||||
|
||||
case reflect.Array:
|
||||
et := v.Type().Elem()
|
||||
if et.Kind() == reflect.Uint8 {
|
||||
e.addElemName('\x05', name)
|
||||
if v.CanAddr() {
|
||||
e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte))
|
||||
} else {
|
||||
n := v.Len()
|
||||
e.addInt32(int32(n))
|
||||
e.addBytes('\x00')
|
||||
for i := 0; i < n; i++ {
|
||||
el := v.Index(i)
|
||||
e.addBytes(byte(el.Uint()))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
e.addElemName('\x04', name)
|
||||
e.addDoc(v)
|
||||
}
|
||||
|
||||
case reflect.Struct:
|
||||
switch s := v.Interface().(type) {
|
||||
|
||||
case Raw:
|
||||
kind := s.Kind
|
||||
if kind == 0x00 {
|
||||
kind = 0x03
|
||||
}
|
||||
if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
|
||||
panic("Attempted to marshal empty Raw document")
|
||||
}
|
||||
e.addElemName(kind, name)
|
||||
e.addBytes(s.Data...)
|
||||
|
||||
case Binary:
|
||||
e.addElemName('\x05', name)
|
||||
e.addBinary(s.Kind, s.Data)
|
||||
|
||||
case DBPointer:
|
||||
e.addElemName('\x0C', name)
|
||||
e.addStr(s.Namespace)
|
||||
if len(s.Id) != 12 {
|
||||
panic("ObjectIDs must be exactly 12 bytes long (got " +
|
||||
strconv.Itoa(len(s.Id)) + ")")
|
||||
}
|
||||
e.addBytes([]byte(s.Id)...)
|
||||
|
||||
case RegEx:
|
||||
e.addElemName('\x0B', name)
|
||||
e.addCStr(s.Pattern)
|
||||
e.addCStr(s.Options)
|
||||
|
||||
case JavaScript:
|
||||
if s.Scope == nil {
|
||||
e.addElemName('\x0D', name)
|
||||
e.addStr(s.Code)
|
||||
} else {
|
||||
e.addElemName('\x0F', name)
|
||||
start := e.reserveInt32()
|
||||
e.addStr(s.Code)
|
||||
e.addDoc(reflect.ValueOf(s.Scope))
|
||||
e.setInt32(start, int32(len(e.out)-start))
|
||||
}
|
||||
|
||||
case time.Time:
|
||||
// MongoDB stores time.Time values as milliseconds since the Unix epoch.
|
||||
e.addElemName('\x09', name)
|
||||
e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
|
||||
|
||||
case url.URL:
|
||||
e.addElemName('\x02', name)
|
||||
e.addStr(s.String())
|
||||
|
||||
case undefined:
|
||||
e.addElemName('\x06', name)
|
||||
|
||||
default:
|
||||
e.addElemName('\x03', name)
|
||||
e.addDoc(v)
|
||||
}
|
||||
|
||||
default:
|
||||
panic("Can't marshal " + v.Type().String() + " in a BSON document")
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Marshaling of base types.
|
||||
|
||||
func (e *encoder) addBinary(subtype byte, v []byte) {
|
||||
if subtype == 0x02 {
|
||||
// Wonder how that brilliant idea came to life. Obsolete, luckily.
|
||||
e.addInt32(int32(len(v) + 4))
|
||||
e.addBytes(subtype)
|
||||
e.addInt32(int32(len(v)))
|
||||
} else {
|
||||
e.addInt32(int32(len(v)))
|
||||
e.addBytes(subtype)
|
||||
}
|
||||
e.addBytes(v...)
|
||||
}
|
||||
|
||||
func (e *encoder) addStr(v string) {
|
||||
e.addInt32(int32(len(v) + 1))
|
||||
e.addCStr(v)
|
||||
}
|
||||
|
||||
func (e *encoder) addCStr(v string) {
|
||||
e.addBytes([]byte(v)...)
|
||||
e.addBytes(0)
|
||||
}
|
||||
|
||||
func (e *encoder) reserveInt32() (pos int) {
|
||||
pos = len(e.out)
|
||||
e.addBytes(0, 0, 0, 0)
|
||||
return pos
|
||||
}
|
||||
|
||||
func (e *encoder) setInt32(pos int, v int32) {
|
||||
e.out[pos+0] = byte(v)
|
||||
e.out[pos+1] = byte(v >> 8)
|
||||
e.out[pos+2] = byte(v >> 16)
|
||||
e.out[pos+3] = byte(v >> 24)
|
||||
}
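reserveInt32 and setInt32 together implement BSON's length prefix: reserve four bytes, append a body of as-yet-unknown length, then patch the final size in place. A standalone sketch of the technique:

```
package main

import "fmt"

type enc struct{ out []byte }

func (e *enc) reserveInt32() int { // remember where the length goes
	pos := len(e.out)
	e.out = append(e.out, 0, 0, 0, 0)
	return pos
}

func (e *enc) setInt32(pos int, v int32) { // patch it after the fact
	e.out[pos] = byte(v)
	e.out[pos+1] = byte(v >> 8)
	e.out[pos+2] = byte(v >> 16)
	e.out[pos+3] = byte(v >> 24)
}

func main() {
	e := &enc{}
	start := e.reserveInt32()
	e.out = append(e.out, "hello"...) // body of unknown length at reserve time
	e.setInt32(start, int32(len(e.out)-start))
	fmt.Printf("% x\n", e.out) // 09 00 00 00 68 65 6c 6c 6f
}
```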
|
||||
|
||||
func (e *encoder) addInt32(v int32) {
|
||||
u := uint32(v)
|
||||
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
|
||||
}
|
||||
|
||||
func (e *encoder) addInt64(v int64) {
|
||||
u := uint64(v)
|
||||
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
|
||||
byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
|
||||
}
|
||||
|
||||
func (e *encoder) addFloat64(v float64) {
|
||||
e.addInt64(int64(math.Float64bits(v)))
|
||||
}
|
||||
|
||||
func (e *encoder) addBytes(v ...byte) {
|
||||
e.out = append(e.out, v...)
|
||||
}
|
241 vendor/gopkg.in/mgo.v2/bson/specdata_test.go (generated, vendored)
@@ -1,241 +0,0 @@
|
||||
package bson_test
|
||||
|
||||
var specTests = []string{
|
||||
`
|
||||
---
|
||||
description: "Array type"
|
||||
documents:
|
||||
-
|
||||
decoded:
|
||||
a : []
|
||||
encoded: 0D000000046100050000000000
|
||||
-
|
||||
decoded:
|
||||
a: [10]
|
||||
encoded: 140000000461000C0000001030000A0000000000
|
||||
-
|
||||
# Decode an array that uses an empty string as the key
|
||||
decodeOnly : true
|
||||
decoded:
|
||||
a: [10]
|
||||
encoded: 130000000461000B00000010000A0000000000
|
||||
-
|
||||
# Decode an array that uses a non-numeric string as the key
|
||||
decodeOnly : true
|
||||
decoded:
|
||||
a: [10]
|
||||
encoded: 150000000461000D000000106162000A0000000000
|
||||
|
||||
|
||||
`, `
|
||||
---
|
||||
description: "Boolean type"
|
||||
documents:
|
||||
-
|
||||
encoded: "090000000862000100"
|
||||
decoded: { "b" : true }
|
||||
-
|
||||
encoded: "090000000862000000"
|
||||
decoded: { "b" : false }
|
||||
|
||||
|
||||
`, `
|
||||
---
|
||||
description: "Corrupted BSON"
|
||||
documents:
|
||||
-
|
||||
encoded: "09000000016600"
|
||||
error: "truncated double"
|
||||
-
|
||||
encoded: "09000000026600"
|
||||
error: "truncated string"
|
||||
-
|
||||
encoded: "09000000036600"
|
||||
error: "truncated document"
|
||||
-
|
||||
encoded: "09000000046600"
|
||||
error: "truncated array"
|
||||
-
|
||||
encoded: "09000000056600"
|
||||
error: "truncated binary"
|
||||
-
|
||||
encoded: "09000000076600"
|
||||
error: "truncated objectid"
|
||||
-
|
||||
encoded: "09000000086600"
|
||||
error: "truncated boolean"
|
||||
-
|
||||
encoded: "09000000096600"
|
||||
error: "truncated date"
|
||||
-
|
||||
encoded: "090000000b6600"
|
||||
error: "truncated regex"
|
||||
-
|
||||
encoded: "090000000c6600"
|
||||
error: "truncated db pointer"
|
||||
-
|
||||
encoded: "0C0000000d6600"
|
||||
error: "truncated javascript"
|
||||
-
|
||||
encoded: "0C0000000e6600"
|
||||
error: "truncated symbol"
|
||||
-
|
||||
encoded: "0C0000000f6600"
|
||||
error: "truncated javascript with scope"
|
||||
-
|
||||
encoded: "0C000000106600"
|
||||
error: "truncated int32"
|
||||
-
|
||||
encoded: "0C000000116600"
|
||||
error: "truncated timestamp"
|
||||
-
|
||||
encoded: "0C000000126600"
|
||||
error: "truncated int64"
|
||||
-
|
||||
encoded: "0400000000"
|
||||
error: basic
|
||||
-
|
||||
encoded: "0500000001"
|
||||
error: basic
|
||||
-
|
||||
encoded: "05000000"
|
||||
error: basic
|
||||
-
|
||||
encoded: "0700000002610078563412"
|
||||
error: basic
|
||||
-
|
||||
encoded: "090000001061000500"
|
||||
error: basic
|
||||
-
|
||||
encoded: "00000000000000000000"
|
||||
error: basic
|
||||
-
|
||||
encoded: "1300000002666f6f00040000006261720000"
|
||||
error: "basic"
|
||||
-
|
||||
encoded: "1800000003666f6f000f0000001062617200ffffff7f0000"
|
||||
error: basic
|
||||
-
|
||||
encoded: "1500000003666f6f000c0000000862617200010000"
|
||||
error: basic
|
||||
-
|
||||
encoded: "1c00000003666f6f001200000002626172000500000062617a000000"
|
||||
error: basic
|
||||
-
|
||||
encoded: "1000000002610004000000616263ff00"
|
||||
error: string is not null-terminated
|
||||
-
|
||||
encoded: "0c0000000200000000000000"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "120000000200ffffffff666f6f6261720000"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "0c0000000e00000000000000"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "120000000e00ffffffff666f6f6261720000"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "180000000c00fa5bd841d6585d9900"
|
||||
error: ""
|
||||
-
|
||||
encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "0c0000000d00000000000000"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "0c0000000d00ffffffff0000"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "1c0000000f001500000000000000000c000000020001000000000000"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "1c0000000f001500000001000000000c000000020000000000000000"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000"
|
||||
error: bad_string_length
|
||||
-
|
||||
encoded: "0E00000008616263646566676869707172737475"
|
||||
error: "Run-on CString"
|
||||
-
|
||||
encoded: "0100000000"
|
||||
error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)"
|
||||
-
|
||||
encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000"
|
||||
error: "One object, but with object size listed smaller than it is in the data"
|
||||
-
|
||||
encoded: "05000000"
|
||||
error: "One object, missing the EOO at the end"
|
||||
-
|
||||
encoded: "0500000001"
|
||||
error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01"
|
||||
-
|
||||
encoded: "05000000ff"
|
||||
error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff"
|
||||
-
|
||||
encoded: "0500000070"
|
||||
error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70"
|
||||
-
|
||||
encoded: "07000000000000"
|
||||
error: "Invalid BSON type low range"
|
||||
-
|
||||
encoded: "07000000800000"
|
||||
error: "Invalid BSON type high range"
|
||||
-
|
||||
encoded: "090000000862000200"
|
||||
error: "Invalid boolean value of 2"
|
||||
-
|
||||
encoded: "09000000086200ff00"
|
||||
error: "Invalid boolean value of -1"
|
||||
`, `
|
||||
---
|
||||
description: "Int32 type"
|
||||
documents:
|
||||
-
|
||||
decoded:
|
||||
i: -2147483648
|
||||
encoded: 0C0000001069000000008000
|
||||
-
|
||||
decoded:
|
||||
i: 2147483647
|
||||
encoded: 0C000000106900FFFFFF7F00
|
||||
-
|
||||
decoded:
|
||||
i: -1
|
||||
encoded: 0C000000106900FFFFFFFF00
|
||||
-
|
||||
decoded:
|
||||
i: 0
|
||||
encoded: 0C0000001069000000000000
|
||||
-
|
||||
decoded:
|
||||
i: 1
|
||||
encoded: 0C0000001069000100000000
|
||||
|
||||
`, `
|
||||
---
|
||||
description: "String type"
|
||||
documents:
|
||||
-
|
||||
decoded:
|
||||
s : ""
|
||||
encoded: 0D000000027300010000000000
|
||||
-
|
||||
decoded:
|
||||
s: "a"
|
||||
encoded: 0E00000002730002000000610000
|
||||
-
|
||||
decoded:
|
||||
s: "This is a string"
|
||||
encoded: 1D0000000273001100000054686973206973206120737472696E670000
|
||||
-
|
||||
decoded:
|
||||
s: "κόσμε"
|
||||
encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000
|
||||
`}
|
258 vendor/gopkg.in/mgo.v2/bulk.go (generated, vendored)
@@ -1,258 +0,0 @@
package mgo

import (
	"bytes"

	"gopkg.in/mgo.v2/bson"
)

// Bulk represents an operation that can be prepared with several
// orthogonal changes before being delivered to the server.
//
// Relevant documentation:
//
//   http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
//
type Bulk struct {
	c       *Collection
	ordered bool
	actions []bulkAction
}

type bulkOp int

const (
	bulkInsert bulkOp = iota + 1
	bulkUpdate
	bulkUpdateAll
)

type bulkAction struct {
	op   bulkOp
	docs []interface{}
}

type bulkUpdateOp []interface{}

// BulkError holds an error returned from running a Bulk operation.
//
// TODO: This is private for the moment, until we understand exactly how
// to report these multi-errors in a useful and convenient way.
type bulkError struct {
	errs []error
}

// BulkResult holds the results for a bulk operation.
type BulkResult struct {
	Matched  int
	Modified int // Available only for MongoDB 2.6+

	// Be conservative while we understand exactly how to report these
	// results in a useful and convenient way, and also how to emulate
	// them with prior servers.
	private bool
}

func (e *bulkError) Error() string {
	if len(e.errs) == 0 {
		return "invalid bulkError instance: no errors"
	}
	if len(e.errs) == 1 {
		return e.errs[0].Error()
	}
	msgs := make(map[string]bool)
	for _, err := range e.errs {
		msgs[err.Error()] = true
	}
	if len(msgs) == 1 {
		for msg := range msgs {
			return msg
		}
	}
	var buf bytes.Buffer
	buf.WriteString("multiple errors in bulk operation:\n")
	for msg := range msgs {
		buf.WriteString(" - ")
		buf.WriteString(msg)
		buf.WriteByte('\n')
	}
	return buf.String()
}

// Bulk returns a value to prepare the execution of a bulk operation.
//
// WARNING: This API is still experimental.
//
func (c *Collection) Bulk() *Bulk {
	return &Bulk{c: c, ordered: true}
}

// Unordered puts the bulk operation in unordered mode.
//
// In unordered mode the individual operations may be sent
// out of order, which means later operations may proceed
// even if prior ones have failed.
func (b *Bulk) Unordered() {
	b.ordered = false
}

func (b *Bulk) action(op bulkOp) *bulkAction {
	if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op {
		return &b.actions[len(b.actions)-1]
	}
	if !b.ordered {
		for i := range b.actions {
			if b.actions[i].op == op {
				return &b.actions[i]
			}
		}
	}
	b.actions = append(b.actions, bulkAction{op: op})
	return &b.actions[len(b.actions)-1]
}

// Insert queues up the provided documents for insertion.
func (b *Bulk) Insert(docs ...interface{}) {
	action := b.action(bulkInsert)
	action.docs = append(action.docs, docs...)
}

// Update queues up the provided pairs of updating instructions.
// The first element of each pair selects which documents must be
// updated, and the second element defines how to update it.
// Each pair matches exactly one document for updating at most.
func (b *Bulk) Update(pairs ...interface{}) {
	if len(pairs)%2 != 0 {
		panic("Bulk.Update requires an even number of parameters")
	}
	action := b.action(bulkUpdate)
	for i := 0; i < len(pairs); i += 2 {
		selector := pairs[i]
		if selector == nil {
			selector = bson.D{}
		}
		action.docs = append(action.docs, &updateOp{
			Collection: b.c.FullName,
			Selector:   selector,
			Update:     pairs[i+1],
		})
	}
}

// UpdateAll queues up the provided pairs of updating instructions.
// The first element of each pair selects which documents must be
// updated, and the second element defines how to update it.
// Each pair updates all documents matching the selector.
func (b *Bulk) UpdateAll(pairs ...interface{}) {
	if len(pairs)%2 != 0 {
		panic("Bulk.UpdateAll requires an even number of parameters")
	}
	action := b.action(bulkUpdate)
	for i := 0; i < len(pairs); i += 2 {
		selector := pairs[i]
		if selector == nil {
			selector = bson.D{}
		}
		action.docs = append(action.docs, &updateOp{
			Collection: b.c.FullName,
			Selector:   selector,
			Update:     pairs[i+1],
			Flags:      2,
			Multi:      true,
		})
	}
}

// Upsert queues up the provided pairs of upserting instructions.
// The first element of each pair selects which documents must be
// updated, and the second element defines how to update it.
// Each pair matches exactly one document for updating at most.
func (b *Bulk) Upsert(pairs ...interface{}) {
	if len(pairs)%2 != 0 {
		panic("Bulk.Upsert requires an even number of parameters")
	}
	action := b.action(bulkUpdate)
	for i := 0; i < len(pairs); i += 2 {
		selector := pairs[i]
		if selector == nil {
			selector = bson.D{}
		}
		action.docs = append(action.docs, &updateOp{
			Collection: b.c.FullName,
			Selector:   selector,
			Update:     pairs[i+1],
			Flags:      1,
			Upsert:     true,
		})
	}
}

// Run runs all the operations queued up.
//
// If an error is reported on an unordered bulk operation, the error value may
// be an aggregation of all issues observed. As an exception to that, Insert
// operations running on MongoDB versions prior to 2.6 will report the last
// error only due to a limitation in the wire protocol.
func (b *Bulk) Run() (*BulkResult, error) {
	var result BulkResult
	var berr bulkError
	var failed bool
	for i := range b.actions {
		action := &b.actions[i]
		var ok bool
		switch action.op {
		case bulkInsert:
			ok = b.runInsert(action, &result, &berr)
		case bulkUpdate:
			ok = b.runUpdate(action, &result, &berr)
		default:
			panic("unknown bulk operation")
		}
		if !ok {
			failed = true
			if b.ordered {
				break
			}
		}
	}
	if failed {
		return nil, &berr
	}
	return &result, nil
}

func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError) bool {
	op := &insertOp{b.c.FullName, action.docs, 0}
	if !b.ordered {
		op.flags = 1 // ContinueOnError
	}
	lerr, err := b.c.writeOp(op, b.ordered)
	return b.checkSuccess(berr, lerr, err)
}

func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool {
	ok := true
	for _, op := range action.docs {
		lerr, err := b.c.writeOp(op, b.ordered)
		if !b.checkSuccess(berr, lerr, err) {
			ok = false
			if b.ordered {
				break
			}
		}
		result.Matched += lerr.N
		result.Modified += lerr.modified
	}
	return ok
}

func (b *Bulk) checkSuccess(berr *bulkError, lerr *LastError, err error) bool {
	if lerr != nil && len(lerr.errors) > 0 {
		berr.errs = append(berr.errs, lerr.errors...)
		return false
	} else if err != nil {
		berr.errs = append(berr.errs, err)
		return false
	}
	return true
}
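The Bulk type above queues operations per collection and flushes them in Run. A minimal usage sketch of that API (the server address, database, and collection names are illustrative assumptions, not part of this diff):

```go
package main

import (
	"fmt"

	mgo "gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	// Assumes a MongoDB instance is listening on localhost:27017.
	session, err := mgo.Dial("localhost:27017")
	if err != nil {
		panic(err)
	}
	defer session.Close()

	coll := session.DB("test").C("people")
	bulk := coll.Bulk()
	bulk.Unordered() // let later operations proceed even if earlier ones fail
	bulk.Insert(bson.M{"name": "a"}, bson.M{"name": "b"})
	bulk.Update(bson.M{"name": "a"}, bson.M{"$set": bson.M{"seen": true}})
	result, err := bulk.Run()
	if err != nil {
		panic(err)
	}
	fmt.Println("matched:", result.Matched)
}
```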
344
vendor/gopkg.in/mgo.v2/bulk_test.go
generated
vendored
@ -1,344 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2015 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo_test

import (
	. "gopkg.in/check.v1"
	"gopkg.in/mgo.v2"
)

func (s *S) TestBulkInsert(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Insert(M{"n": 1})
	bulk.Insert(M{"n": 2}, M{"n": 3})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}

func (s *S) TestBulkInsertError(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")
	c.Assert(mgo.IsDup(err), Equals, true)

	type doc struct {
		N int `_id`
	}
	var res []doc
	err = coll.Find(nil).Sort("_id").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {2}})
}

func (s *S) TestBulkInsertErrorUnordered(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")

	type doc struct {
		N int `_id`
	}
	var res []doc
	err = coll.Find(nil).Sort("_id").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}

func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) {
	// The server has a batch limit of 1000 documents when using write commands.
	// This artificial limit did not exist with the old wire protocol, so to
	// avoid compatibility issues the implementation internally splits batches
	// into the proper size and delivers them one by one. This test ensures that
	// the behavior of unordered (that is, continue on error) remains correct
	// when errors happen and there are batches left.
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Unordered()

	const total = 4096
	type doc struct {
		Id int `_id`
	}
	docs := make([]interface{}, total)
	for i := 0; i < total; i++ {
		docs[i] = doc{i}
	}
	docs[1] = doc{0}
	bulk.Insert(docs...)
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")

	n, err := coll.Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, total-1)

	var res doc
	err = coll.FindId(1500).One(&res)
	c.Assert(err, IsNil)
	c.Assert(res.Id, Equals, 1500)
}

func (s *S) TestBulkError(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	// If it's just the same string multiple times, join it into a single message.
	bulk := coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2})
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")
	c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key")
	c.Assert(mgo.IsDup(err), Equals, true)

	// With matching errors but different messages, present them all.
	bulk = coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"})
	_, err = bulk.Run()
	if s.versionAtLeast(2, 6) {
		c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n( - .*duplicate.*\n){2}$")
		c.Assert(err, ErrorMatches, "(?s).*dupone.*")
		c.Assert(err, ErrorMatches, "(?s).*duptwo.*")
	} else {
		// Wire protocol query doesn't return all errors.
		c.Assert(err, ErrorMatches, ".*duplicate.*")
	}
	c.Assert(mgo.IsDup(err), Equals, true)

	// With mixed errors, present them all.
	bulk = coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"_id": 1}, M{"_id": []int{2}})
	_, err = bulk.Run()
	if s.versionAtLeast(2, 6) {
		c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$")
	} else {
		// Wire protocol query doesn't return all errors.
		c.Assert(err, ErrorMatches, ".*array.*")
	}
	c.Assert(mgo.IsDup(err), Equals, false)
}

func (s *S) TestBulkUpdate(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}})
	bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}})
	bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
	bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r.Matched, Equals, 4)
	if s.versionAtLeast(2, 6) {
		c.Assert(r.Modified, Equals, 3)
	}

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}})
}

func (s *S) TestBulkUpdateError(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.Update(
		M{"n": 1}, M{"$set": M{"n": 10}},
		M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
		M{"n": 3}, M{"$set": M{"n": 30}},
	)
	r, err := bulk.Run()
	c.Assert(err, ErrorMatches, ".*_id.*")
	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}})
}

func (s *S) TestBulkUpdateErrorUnordered(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.Unordered()
	bulk.Update(
		M{"n": 1}, M{"$set": M{"n": 10}},
		M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
		M{"n": 3}, M{"$set": M{"n": 30}},
	)
	r, err := bulk.Run()
	c.Assert(err, ErrorMatches, ".*_id.*")
	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{2}, {10}, {30}})
}

func (s *S) TestBulkUpdateAll(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}})
	bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}})
	bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
	bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r.Matched, Equals, 6)
	if s.versionAtLeast(2, 6) {
		c.Assert(r.Modified, Equals, 5)
	}

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}})
}

func (s *S) TestBulkMixedUnordered(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	// Abuse undefined behavior to ensure the desired implementation is in place.
	bulk := coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"n": 1})
	bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}})
	bulk.Insert(M{"n": 2})
	bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}})
	bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}})
	bulk.Insert(M{"n": 3})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r.Matched, Equals, 3)
	if s.versionAtLeast(2, 6) {
		c.Assert(r.Modified, Equals, 3)
	}

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}})
}

func (s *S) TestBulkUpsert(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}})
	bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}})
}
679
vendor/gopkg.in/mgo.v2/cluster.go
generated
vendored
@ -1,679 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"errors"
	"fmt"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"gopkg.in/mgo.v2/bson"
)

// ---------------------------------------------------------------------------
// Mongo cluster encapsulation.
//
// A cluster enables the communication with one or more servers participating
// in a mongo cluster. This works with individual servers, a replica set,
// a replica pair, one or multiple mongos routers, etc.

type mongoCluster struct {
	sync.RWMutex
	serverSynced sync.Cond
	userSeeds    []string
	dynaSeeds    []string
	servers      mongoServers
	masters      mongoServers
	references   int
	syncing      bool
	direct       bool
	failFast     bool
	syncCount    uint
	setName      string
	cachedIndex  map[string]bool
	sync         chan bool
	dial         dialer
}

func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster {
	cluster := &mongoCluster{
		userSeeds:  userSeeds,
		references: 1,
		direct:     direct,
		failFast:   failFast,
		dial:       dial,
		setName:    setName,
	}
	cluster.serverSynced.L = cluster.RWMutex.RLocker()
	cluster.sync = make(chan bool, 1)
	stats.cluster(+1)
	go cluster.syncServersLoop()
	return cluster
}

// Acquire increases the reference count for the cluster.
func (cluster *mongoCluster) Acquire() {
	cluster.Lock()
	cluster.references++
	debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
	cluster.Unlock()
}

// Release decreases the reference count for the cluster. Once
// it reaches zero, all servers will be closed.
func (cluster *mongoCluster) Release() {
	cluster.Lock()
	if cluster.references == 0 {
		panic("cluster.Release() with references == 0")
	}
	cluster.references--
	debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
	if cluster.references == 0 {
		for _, server := range cluster.servers.Slice() {
			server.Close()
		}
		// Wake up the sync loop so it can die.
		cluster.syncServers()
		stats.cluster(-1)
	}
	cluster.Unlock()
}

func (cluster *mongoCluster) LiveServers() (servers []string) {
	cluster.RLock()
	for _, serv := range cluster.servers.Slice() {
		servers = append(servers, serv.Addr)
	}
	cluster.RUnlock()
	return servers
}

func (cluster *mongoCluster) removeServer(server *mongoServer) {
	cluster.Lock()
	cluster.masters.Remove(server)
	other := cluster.servers.Remove(server)
	cluster.Unlock()
	if other != nil {
		other.Close()
		log("Removed server ", server.Addr, " from cluster.")
	}
	server.Close()
}

type isMasterResult struct {
	IsMaster       bool
	Secondary      bool
	Primary        string
	Hosts          []string
	Passives       []string
	Tags           bson.D
	Msg            string
	SetName        string `bson:"setName"`
	MaxWireVersion int    `bson:"maxWireVersion"`
}

func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
	// Monotonic lets it talk to a slave and still hold the socket.
	session := newSession(Monotonic, cluster, 10*time.Second)
	session.setSocket(socket)
	err := session.Run("ismaster", result)
	session.Close()
	return err
}

type possibleTimeout interface {
	Timeout() bool
}

var syncSocketTimeout = 5 * time.Second

func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
	var syncTimeout time.Duration
	if raceDetector {
		// This variable is only ever touched by tests.
		globalMutex.Lock()
		syncTimeout = syncSocketTimeout
		globalMutex.Unlock()
	} else {
		syncTimeout = syncSocketTimeout
	}

	addr := server.Addr
	log("SYNC Processing ", addr, "...")

	// Retry a few times to avoid knocking a server down for a hiccup.
	var result isMasterResult
	var tryerr error
	for retry := 0; ; retry++ {
		if retry == 3 || retry == 1 && cluster.failFast {
			return nil, nil, tryerr
		}
		if retry > 0 {
			// Don't abuse the server needlessly if there's something actually wrong.
			if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
				// Give a chance for waiters to timeout as well.
				cluster.serverSynced.Broadcast()
			}
			time.Sleep(syncShortDelay)
		}

		// It's not clear what would be a good timeout here. Is it
		// better to wait longer or to retry?
		socket, _, err := server.AcquireSocket(0, syncTimeout)
		if err != nil {
			tryerr = err
			logf("SYNC Failed to get socket to %s: %v", addr, err)
			continue
		}
		err = cluster.isMaster(socket, &result)
		socket.Release()
		if err != nil {
			tryerr = err
			logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
			continue
		}
		debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
		break
	}

	if cluster.setName != "" && result.SetName != cluster.setName {
		logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName)
		return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName)
	}

	if result.IsMaster {
		debugf("SYNC %s is a master.", addr)
		if !server.info.Master {
			// Made an incorrect assumption above, so fix stats.
			stats.conn(-1, false)
			stats.conn(+1, true)
		}
	} else if result.Secondary {
		debugf("SYNC %s is a slave.", addr)
	} else if cluster.direct {
		logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr)
	} else {
		logf("SYNC %s is neither a master nor a slave.", addr)
		// Let stats track it as whatever was known before.
		return nil, nil, errors.New(addr + " is not a master nor slave")
	}

	info = &mongoServerInfo{
		Master:         result.IsMaster,
		Mongos:         result.Msg == "isdbgrid",
		Tags:           result.Tags,
		SetName:        result.SetName,
		MaxWireVersion: result.MaxWireVersion,
	}

	hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
	if result.Primary != "" {
		// First in the list to speed up master discovery.
		hosts = append(hosts, result.Primary)
	}
	hosts = append(hosts, result.Hosts...)
	hosts = append(hosts, result.Passives...)

	debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
	return info, hosts, nil
}

type syncKind bool

const (
	completeSync syncKind = true
	partialSync  syncKind = false
)

func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
	cluster.Lock()
	current := cluster.servers.Search(server.ResolvedAddr)
	if current == nil {
		if syncKind == partialSync {
			cluster.Unlock()
			server.Close()
			log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
			return
		}
		cluster.servers.Add(server)
		if info.Master {
			cluster.masters.Add(server)
			log("SYNC Adding ", server.Addr, " to cluster as a master.")
		} else {
			log("SYNC Adding ", server.Addr, " to cluster as a slave.")
		}
	} else {
		if server != current {
			panic("addServer attempting to add duplicated server")
		}
		if server.Info().Master != info.Master {
			if info.Master {
				log("SYNC Server ", server.Addr, " is now a master.")
				cluster.masters.Add(server)
			} else {
				log("SYNC Server ", server.Addr, " is now a slave.")
				cluster.masters.Remove(server)
			}
		}
	}
	server.SetInfo(info)
	debugf("SYNC Broadcasting availability of server %s", server.Addr)
	cluster.serverSynced.Broadcast()
	cluster.Unlock()
}

func (cluster *mongoCluster) getKnownAddrs() []string {
	cluster.RLock()
	max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
	seen := make(map[string]bool, max)
	known := make([]string, 0, max)

	add := func(addr string) {
		if _, found := seen[addr]; !found {
			seen[addr] = true
			known = append(known, addr)
		}
	}

	for _, addr := range cluster.userSeeds {
		add(addr)
	}
	for _, addr := range cluster.dynaSeeds {
		add(addr)
	}
	for _, serv := range cluster.servers.Slice() {
		add(serv.Addr)
	}
	cluster.RUnlock()

	return known
}

// syncServers injects a value into the cluster.sync channel to force
// an iteration of the syncServersLoop function.
func (cluster *mongoCluster) syncServers() {
	select {
	case cluster.sync <- true:
	default:
	}
}

// How long to wait for a checkup of the cluster topology if nothing
// else kicks a synchronization before that.
const syncServersDelay = 30 * time.Second
const syncShortDelay = 500 * time.Millisecond

// syncServersLoop loops while the cluster is alive to keep its idea of
// the server topology up-to-date. It must be called just once from
// newCluster. The loop iterates once syncServersDelay has passed, or
// if somebody injects a value into the cluster.sync channel to force a
// synchronization. A loop iteration will contact all servers in
// parallel, ask them about known peers and their own role within the
// cluster, and then attempt to do the same with all the peers
// retrieved.
func (cluster *mongoCluster) syncServersLoop() {
	for {
		debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)

		cluster.Lock()
		if cluster.references == 0 {
			cluster.Unlock()
			break
		}
		cluster.references++ // Keep alive while syncing.
		direct := cluster.direct
		cluster.Unlock()

		cluster.syncServersIteration(direct)

		// We just synchronized, so consume any outstanding requests.
		select {
		case <-cluster.sync:
		default:
		}

		cluster.Release()

		// Hold off before allowing another sync. No point in
		// burning CPU looking for down servers.
		if !cluster.failFast {
			time.Sleep(syncShortDelay)
		}

		cluster.Lock()
		if cluster.references == 0 {
			cluster.Unlock()
			break
		}
		cluster.syncCount++
		// Poke all waiters so they have a chance to timeout or
		// restart syncing if they wish to.
		cluster.serverSynced.Broadcast()
		// Check if we have to restart immediately either way.
		restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
		cluster.Unlock()

		if restart {
			log("SYNC No masters found. Will synchronize again.")
			time.Sleep(syncShortDelay)
			continue
		}

		debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)

		// Hold off until somebody explicitly requests a synchronization
		// or it's time to check for a cluster topology change again.
		select {
		case <-cluster.sync:
		case <-time.After(syncServersDelay):
		}
	}
	debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
}

func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
	cluster.RLock()
	server := cluster.servers.Search(tcpaddr.String())
	cluster.RUnlock()
	if server != nil {
		return server
	}
	return newServer(addr, tcpaddr, cluster.sync, cluster.dial)
}

func resolveAddr(addr string) (*net.TCPAddr, error) {
	// Simple cases that do not need actual resolution. Works with IPv4 and v6.
	if host, port, err := net.SplitHostPort(addr); err == nil {
		if port, _ := strconv.Atoi(port); port > 0 {
			zone := ""
			if i := strings.LastIndex(host, "%"); i >= 0 {
				zone = host[i+1:]
				host = host[:i]
			}
			ip := net.ParseIP(host)
			if ip != nil {
				return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil
			}
		}
	}

	// Attempt to resolve IPv4 and v6 concurrently.
	addrChan := make(chan *net.TCPAddr, 2)
	for _, network := range []string{"udp4", "udp6"} {
		network := network
		go func() {
			// The unfortunate UDP dialing hack allows having a timeout on address resolution.
			conn, err := net.DialTimeout(network, addr, 10*time.Second)
			if err != nil {
				addrChan <- nil
			} else {
				addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
				conn.Close()
			}
		}()
	}

	// Wait for the result of IPv4 and v6 resolution. Use IPv4 if available.
	tcpaddr := <-addrChan
	if tcpaddr == nil || len(tcpaddr.IP) != 4 {
		var timeout <-chan time.Time
		if tcpaddr != nil {
			// Don't wait too long if an IPv6 address is known.
			timeout = time.After(50 * time.Millisecond)
		}
		select {
		case <-timeout:
		case tcpaddr2 := <-addrChan:
			if tcpaddr == nil || tcpaddr2 != nil {
				// It's an IPv4 address or the only known address. Use it.
				tcpaddr = tcpaddr2
			}
		}
	}

	if tcpaddr == nil {
		log("SYNC Failed to resolve server address: ", addr)
		return nil, errors.New("failed to resolve server address: " + addr)
	}
	if tcpaddr.String() != addr {
		debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
	}
	return tcpaddr, nil
}

type pendingAdd struct {
	server *mongoServer
	info   *mongoServerInfo
}

func (cluster *mongoCluster) syncServersIteration(direct bool) {
	log("SYNC Starting full topology synchronization...")

	var wg sync.WaitGroup
	var m sync.Mutex
	notYetAdded := make(map[string]pendingAdd)
	addIfFound := make(map[string]bool)
	seen := make(map[string]bool)
	syncKind := partialSync

	var spawnSync func(addr string, byMaster bool)
	spawnSync = func(addr string, byMaster bool) {
		wg.Add(1)
		go func() {
			defer wg.Done()

			tcpaddr, err := resolveAddr(addr)
			if err != nil {
				log("SYNC Failed to start sync of ", addr, ": ", err.Error())
				return
			}
			resolvedAddr := tcpaddr.String()

			m.Lock()
			if byMaster {
				if pending, ok := notYetAdded[resolvedAddr]; ok {
					delete(notYetAdded, resolvedAddr)
					m.Unlock()
					cluster.addServer(pending.server, pending.info, completeSync)
					return
				}
				addIfFound[resolvedAddr] = true
			}
			if seen[resolvedAddr] {
				m.Unlock()
				return
			}
			seen[resolvedAddr] = true
			m.Unlock()

			server := cluster.server(addr, tcpaddr)
			info, hosts, err := cluster.syncServer(server)
			if err != nil {
				cluster.removeServer(server)
				return
			}

			m.Lock()
			add := direct || info.Master || addIfFound[resolvedAddr]
			if add {
				syncKind = completeSync
			} else {
				notYetAdded[resolvedAddr] = pendingAdd{server, info}
			}
			m.Unlock()
			if add {
				cluster.addServer(server, info, completeSync)
			}
			if !direct {
				for _, addr := range hosts {
					spawnSync(addr, info.Master)
				}
			}
		}()
	}

	knownAddrs := cluster.getKnownAddrs()
	for _, addr := range knownAddrs {
		spawnSync(addr, false)
	}
	wg.Wait()

	if syncKind == completeSync {
		logf("SYNC Synchronization was complete (got data from primary).")
		for _, pending := range notYetAdded {
			cluster.removeServer(pending.server)
		}
	} else {
		logf("SYNC Synchronization was partial (cannot talk to primary).")
		for _, pending := range notYetAdded {
			cluster.addServer(pending.server, pending.info, partialSync)
		}
	}

	cluster.Lock()
	mastersLen := cluster.masters.Len()
	logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen)

	// Update dynamic seeds, but only if we have any good servers. Otherwise,
	// leave them alone for better chances of a successful sync in the future.
	if syncKind == completeSync {
		dynaSeeds := make([]string, cluster.servers.Len())
		for i, server := range cluster.servers.Slice() {
			dynaSeeds[i] = server.Addr
		}
		cluster.dynaSeeds = dynaSeeds
		debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
	}
	cluster.Unlock()
}

// AcquireSocket returns a socket to a server in the cluster. If slaveOk is
// true, it will attempt to return a socket to a slave server. If it is
// false, the socket will necessarily be to a master server.
func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) {
	var started time.Time
	var syncCount uint
	warnedLimit := false
	for {
		cluster.RLock()
		for {
			mastersLen := cluster.masters.Len()
			slavesLen := cluster.servers.Len() - mastersLen
			debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen)
			if !(slaveOk && mode == Secondary) && mastersLen > 0 || slaveOk && slavesLen > 0 {
				break
			}
			if started.IsZero() {
				// Initialize after fast path above.
				started = time.Now()
				syncCount = cluster.syncCount
			} else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
				cluster.RUnlock()
				return nil, errors.New("no reachable servers")
			}
			log("Waiting for servers to synchronize...")
			cluster.syncServers()

			// Remember: this will release and reacquire the lock.
			cluster.serverSynced.Wait()
		}

		var server *mongoServer
		if slaveOk {
			server = cluster.servers.BestFit(mode, serverTags)
		} else {
			server = cluster.masters.BestFit(mode, nil)
		}
		cluster.RUnlock()

		if server == nil {
			// Must have failed the requested tags. Sleep to avoid spinning.
			time.Sleep(1e8)
			continue
		}

		s, abended, err := server.AcquireSocket(poolLimit, socketTimeout)
		if err == errPoolLimit {
			if !warnedLimit {
				warnedLimit = true
				log("WARNING: Per-server connection limit reached.")
			}
			time.Sleep(100 * time.Millisecond)
			continue
		}
		if err != nil {
			cluster.removeServer(server)
			cluster.syncServers()
			continue
		}
		if abended && !slaveOk {
			var result isMasterResult
			err := cluster.isMaster(s, &result)
			if err != nil || !result.IsMaster {
				logf("Cannot confirm server %s as master (%v)", server.Addr, err)
				s.Release()
				cluster.syncServers()
				time.Sleep(100 * time.Millisecond)
				continue
			}
		}
		return s, nil
	}
	panic("unreached")
}

func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
	cluster.Lock()
	if cluster.cachedIndex == nil {
		cluster.cachedIndex = make(map[string]bool)
	}
	if exists {
		cluster.cachedIndex[cacheKey] = true
	} else {
		delete(cluster.cachedIndex, cacheKey)
	}
	cluster.Unlock()
}

func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
	cluster.RLock()
	if cluster.cachedIndex != nil {
		result = cluster.cachedIndex[cacheKey]
	}
	cluster.RUnlock()
	return
}

func (cluster *mongoCluster) ResetIndexCache() {
	cluster.Lock()
	cluster.cachedIndex = make(map[string]bool)
	cluster.Unlock()
}
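The resolveAddr helper above relies on a non-obvious trick: it "dials" UDP purely to get DNS resolution with a timeout, since the stdlib of that era had no context-aware resolver, and then converts the resulting *net.UDPAddr to a *net.TCPAddr (legal because the two structs have identical fields). A standalone sketch of the same idea; the host name is purely illustrative:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// resolveWithTimeout returns the TCP address for host:port, bounding DNS
// lookup time by "dialing" UDP, which resolves the name but sends nothing.
func resolveWithTimeout(addr string, timeout time.Duration) (*net.TCPAddr, error) {
	conn, err := net.DialTimeout("udp", addr, timeout)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	// *net.UDPAddr and *net.TCPAddr share the same {IP, Port, Zone} layout.
	return (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)), nil
}

func main() {
	// "example.com" is just an illustrative host name.
	tcpaddr, err := resolveWithTimeout("example.com:27017", 3*time.Second)
	if err != nil {
		panic(err)
	}
	fmt.Println(tcpaddr.String())
}
```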
1915
vendor/gopkg.in/mgo.v2/cluster_test.go
generated
vendored
File diff suppressed because it is too large
31
vendor/gopkg.in/mgo.v2/doc.go
generated
vendored
@ -1,31 +0,0 @@
// Package mgo offers a rich MongoDB driver for Go.
//
// Details about the mgo project (pronounced as "mango") are found
// in its web page:
//
//     http://labix.org/mgo
//
// Usage of the driver revolves around the concept of sessions. To
// get started, obtain a session using the Dial function:
//
//     session, err := mgo.Dial(url)
//
// This will establish one or more connections with the cluster of
// servers defined by the url parameter. From then on, the cluster
// may be queried with multiple consistency rules (see SetMode) and
// documents retrieved with statements such as:
//
//     c := session.DB(database).C(collection)
//     err := c.Find(query).One(&result)
//
// New sessions are typically created by calling session.Copy on the
// initial session obtained at dial time. These new sessions will share
// the same cluster information and connection pool, and may be easily
// handed into other methods and functions for organizing logic.
// Every session created must have its Close method called at the end
// of its life time, so its resources may be put back in the pool or
// collected, depending on the case.
//
// For more details, see the documentation for the types and methods.
//
package mgo
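The package comment sketches the session workflow in fragments; stitched together into a runnable form it looks like the sketch below (the server address, database, collection, and query values are illustrative assumptions):

```go
package main

import (
	"fmt"

	mgo "gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	// Assumes a MongoDB server on localhost; the names below are illustrative.
	session, err := mgo.Dial("localhost")
	if err != nil {
		panic(err)
	}
	defer session.Close() // every session must eventually be closed

	c := session.DB("test").C("people")
	if err := c.Insert(bson.M{"name": "Ale"}); err != nil {
		panic(err)
	}

	var result struct{ Name string }
	if err := c.Find(bson.M{"name": "Ale"}).One(&result); err != nil {
		panic(err)
	}
	fmt.Println(result.Name)
}
```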
33
vendor/gopkg.in/mgo.v2/export_test.go
generated
vendored
@ -1,33 +0,0 @@
package mgo

import (
	"time"
)

func HackPingDelay(newDelay time.Duration) (restore func()) {
	globalMutex.Lock()
	defer globalMutex.Unlock()

	oldDelay := pingDelay
	restore = func() {
		globalMutex.Lock()
		pingDelay = oldDelay
		globalMutex.Unlock()
	}
	pingDelay = newDelay
	return
}

func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
	globalMutex.Lock()
	defer globalMutex.Unlock()

	oldTimeout := syncSocketTimeout
	restore = func() {
		globalMutex.Lock()
		syncSocketTimeout = oldTimeout
		globalMutex.Unlock()
	}
	syncSocketTimeout = newTimeout
	return
}
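HackPingDelay and HackSyncSocketTimeout follow a common Go testing idiom: swap a package-level knob under a lock and hand back a restore closure for defer. A generic sketch of the same pattern, with names that are illustrative rather than part of mgo:

```go
package knob

import (
	"sync"
	"time"
)

var (
	mu        sync.Mutex
	pollDelay = 30 * time.Second // package-level knob a test may want to shrink
)

// hackPollDelay swaps the knob and returns a closure that restores it, so a
// test can write: defer hackPollDelay(10 * time.Millisecond)()
func hackPollDelay(d time.Duration) (restore func()) {
	mu.Lock()
	defer mu.Unlock()

	old := pollDelay
	restore = func() {
		mu.Lock()
		pollDelay = old
		mu.Unlock()
	}
	pollDelay = d
	return restore
}
```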
755
vendor/gopkg.in/mgo.v2/gridfs.go
generated
vendored
@ -1,755 +0,0 @@
|
||||
// mgo - MongoDB driver for Go
|
||||
//
|
||||
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||
//
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||
// list of conditions and the following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package mgo
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"hash"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gopkg.in/mgo.v2/bson"
|
||||
)
|
||||
|
||||
type GridFS struct {
|
||||
Files *Collection
|
||||
Chunks *Collection
|
||||
}
|
||||
|
||||
type gfsFileMode int
|
||||
|
||||
const (
|
||||
gfsClosed gfsFileMode = 0
|
||||
gfsReading gfsFileMode = 1
|
||||
gfsWriting gfsFileMode = 2
|
||||
)
|
||||
|
||||
type GridFile struct {
|
||||
m sync.Mutex
|
||||
c sync.Cond
|
||||
gfs *GridFS
|
||||
mode gfsFileMode
|
||||
err error
|
||||
|
||||
chunk int
|
||||
offset int64
|
||||
|
||||
wpending int
|
||||
wbuf []byte
|
||||
wsum hash.Hash
|
||||
|
||||
rbuf []byte
|
||||
rcache *gfsCachedChunk
|
||||
|
||||
doc gfsFile
|
||||
}
|
||||
|
||||
type gfsFile struct {
|
||||
Id interface{} "_id"
|
||||
ChunkSize int "chunkSize"
|
||||
UploadDate time.Time "uploadDate"
|
||||
Length int64 ",minsize"
|
||||
MD5 string
|
||||
Filename string ",omitempty"
|
||||
ContentType string "contentType,omitempty"
|
||||
Metadata *bson.Raw ",omitempty"
|
||||
}
|
||||
|
||||
type gfsChunk struct {
|
||||
Id interface{} "_id"
|
||||
FilesId interface{} "files_id"
|
||||
N int
|
||||
Data []byte
|
||||
}
|
||||
|
||||
type gfsCachedChunk struct {
|
||||
wait sync.Mutex
|
||||
n int
|
||||
data []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func newGridFS(db *Database, prefix string) *GridFS {
|
||||
return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
|
||||
}
|
||||
|
||||
func (gfs *GridFS) newFile() *GridFile {
|
||||
file := &GridFile{gfs: gfs}
|
||||
file.c.L = &file.m
|
||||
//runtime.SetFinalizer(file, finalizeFile)
|
||||
return file
|
||||
}
|
||||
|
||||
func finalizeFile(file *GridFile) {
|
||||
file.Close()
|
||||
}
|
||||
|
||||
// Create creates a new file with the provided name in the GridFS. If the file
|
||||
// name already exists, a new version will be inserted with an up-to-date
|
||||
// uploadDate that will cause it to be atomically visible to the Open and
|
||||
// OpenId methods. If the file name is not important, an empty name may be
|
||||
// provided and the file Id used instead.
|
||||
//
|
||||
// It's important to Close files whether they are being written to
|
||||
// or read from, and to check the err result to ensure the operation
|
||||
// completed successfully.
|
||||
//
|
||||
// A simple example inserting a new file:
|
||||
//
|
||||
// func check(err error) {
|
||||
// if err != nil {
|
||||
// panic(err.String())
|
||||
// }
|
||||
// }
|
||||
// file, err := db.GridFS("fs").Create("myfile.txt")
|
||||
// check(err)
|
||||
// n, err := file.Write([]byte("Hello world!"))
|
||||
// check(err)
|
||||
// err = file.Close()
|
||||
// check(err)
|
||||
// fmt.Printf("%d bytes written\n", n)
|
||||
//
|
||||
// The io.Writer interface is implemented by *GridFile and may be used to
|
||||
// help on the file creation. For example:
|
||||
//
|
||||
// file, err := db.GridFS("fs").Create("myfile.txt")
|
||||
// check(err)
|
||||
// messages, err := os.Open("/var/log/messages")
|
||||
// check(err)
|
||||
// defer messages.Close()
|
||||
// err = io.Copy(file, messages)
|
||||
// check(err)
|
||||
// err = file.Close()
|
||||
// check(err)
|
||||
//
|
||||
func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
|
||||
file = gfs.newFile()
|
||||
file.mode = gfsWriting
|
||||
file.wsum = md5.New()
|
||||
file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
|
||||
return
|
||||
}
|
||||
|
||||
// OpenId returns the file with the provided id, for reading.
|
||||
// If the file isn't found, err will be set to mgo.ErrNotFound.
|
||||
//
|
||||
// It's important to Close files whether they are being written to
|
||||
// or read from, and to check the err result to ensure the operation
|
||||
// completed successfully.
|
||||
//
|
||||
// The following example will print the first 8192 bytes from the file:
|
||||
//
|
||||
// func check(err error) {
|
||||
// if err != nil {
|
||||
// panic(err.String())
|
||||
// }
|
||||
// }
|
||||
// file, err := db.GridFS("fs").OpenId(objid)
|
||||
// check(err)
|
||||
// b := make([]byte, 8192)
|
||||
// n, err := file.Read(b)
|
||||
// check(err)
|
||||
// fmt.Println(string(b))
|
||||
// check(err)
|
||||
// err = file.Close()
|
||||
// check(err)
|
||||
// fmt.Printf("%d bytes read\n", n)
|
||||
//
|
||||
// The io.Reader interface is implemented by *GridFile and may be used to
|
||||
// deal with it. As an example, the following snippet will dump the whole
|
||||
// file into the standard output:
|
||||
//
|
||||
// file, err := db.GridFS("fs").OpenId(objid)
|
||||
// check(err)
|
||||
// err = io.Copy(os.Stdout, file)
|
||||
// check(err)
|
||||
// err = file.Close()
|
||||
// check(err)
|
||||
//
|
||||
func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
|
||||
var doc gfsFile
|
||||
err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
file = gfs.newFile()
|
||||
file.mode = gfsReading
|
||||
file.doc = doc
|
||||
return
|
||||
}
|
||||
|
||||
// Open returns the most recently uploaded file with the provided
|
||||
// name, for reading. If the file isn't found, err will be set
|
||||
// to mgo.ErrNotFound.
|
||||
//
|
||||
// It's important to Close files whether they are being written to
|
||||
// or read from, and to check the err result to ensure the operation
|
||||
// completed successfully.
|
||||
//
|
||||
// The following example will print the first 8192 bytes from the file:
|
||||
//
|
||||
// file, err := db.GridFS("fs").Open("myfile.txt")
|
||||
// check(err)
|
||||
// b := make([]byte, 8192)
|
||||
// n, err := file.Read(b)
|
||||
// check(err)
|
||||
// fmt.Println(string(b))
|
||||
// check(err)
|
||||
// err = file.Close()
|
||||
// check(err)
|
||||
// fmt.Printf("%d bytes read\n", n)
|
||||
//
|
||||
// The io.Reader interface is implemented by *GridFile and may be used to
|
||||
// deal with it. As an example, the following snippet will dump the whole
|
||||
// file into the standard output:
|
||||
//
|
||||
// file, err := db.GridFS("fs").Open("myfile.txt")
|
||||
// check(err)
|
||||
// err = io.Copy(os.Stdout, file)
|
||||
// check(err)
|
||||
// err = file.Close()
|
||||
// check(err)
|
||||
//
|
||||
func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
|
||||
var doc gfsFile
|
||||
err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
file = gfs.newFile()
|
||||
file.mode = gfsReading
|
||||
file.doc = doc
|
||||
return
|
||||
}
|
||||
|
||||
// OpenNext opens the next file from iter for reading, sets *file to it,
|
||||
// and returns true on the success case. If no more documents are available
|
||||
// on iter or an error occurred, *file is set to nil and the result is false.
|
||||
// Errors will be available via iter.Err().
|
||||
//
|
||||
// The iter parameter must be an iterator on the GridFS files collection.
|
||||
// Using the GridFS.Find method is an easy way to obtain such an iterator,
|
||||
// but any iterator on the collection will work.
|
||||
//
|
||||
// If the provided *file is non-nil, OpenNext will close it before attempting
|
||||
// to iterate to the next element. This means that in a loop one only
|
||||
// has to worry about closing files when breaking out of the loop early
|
||||
// (break, return, or panic).
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// gfs := db.GridFS("fs")
|
||||
// query := gfs.Find(nil).Sort("filename")
|
||||
// iter := query.Iter()
|
||||
// var f *mgo.GridFile
|
||||
// for gfs.OpenNext(iter, &f) {
|
||||
// fmt.Printf("Filename: %s\n", f.Name())
|
||||
// }
|
||||
// if iter.Close() != nil {
|
||||
// panic(iter.Close())
|
||||
// }
|
||||
//
|
||||
func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
|
||||
if *file != nil {
|
||||
// Ignoring the error here shouldn't be a big deal
|
||||
// as we're reading the file and the loop iteration
|
||||
// for this file is finished.
|
||||
_ = (*file).Close()
|
||||
}
|
||||
var doc gfsFile
|
||||
if !iter.Next(&doc) {
|
||||
*file = nil
|
||||
return false
|
||||
}
|
||||
f := gfs.newFile()
|
||||
f.mode = gfsReading
|
||||
f.doc = doc
|
||||
*file = f
|
||||
return true
|
||||
}
|
||||
|
||||
// Find runs query on GridFS's files collection and returns
|
||||
// the resulting Query.
|
||||
//
|
||||
// This logic:
|
||||
//
|
||||
// gfs := db.GridFS("fs")
|
||||
// iter := gfs.Find(nil).Iter()
|
||||
//
|
||||
// Is equivalent to:
|
||||
//
|
||||
// files := db.C("fs" + ".files")
|
||||
// iter := files.Find(nil).Iter()
|
||||
//
|
||||
func (gfs *GridFS) Find(query interface{}) *Query {
|
||||
return gfs.Files.Find(query)
|
||||
}
|
||||
|
||||
// RemoveId deletes the file with the provided id from the GridFS.
|
||||
func (gfs *GridFS) RemoveId(id interface{}) error {
|
||||
err := gfs.Files.Remove(bson.M{"_id": id})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
|
||||
return err
|
||||
}
|
||||
|
||||
type gfsDocId struct {
|
||||
Id interface{} "_id"
|
||||
}
|
||||
|
||||
// Remove deletes all files with the provided name from the GridFS.
|
||||
func (gfs *GridFS) Remove(name string) (err error) {
|
||||
iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
|
||||
var doc gfsDocId
|
||||
for iter.Next(&doc) {
|
||||
if e := gfs.RemoveId(doc.Id); e != nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
err = iter.Close()
|
||||
}
|
||||
return err
|
||||
}

func (file *GridFile) assertMode(mode gfsFileMode) {
	switch file.mode {
	case mode:
		return
	case gfsWriting:
		panic("GridFile is open for writing")
	case gfsReading:
		panic("GridFile is open for reading")
	case gfsClosed:
		panic("GridFile is closed")
	default:
		panic("internal error: missing GridFile mode")
	}
}

// SetChunkSize sets size of saved chunks. Once the file is written to, it
// will be split in blocks of that size and each block saved into an
// independent chunk document. The default chunk size is 255kb.
//
// It is a runtime error to call this function once the file has started
// being written to.
func (file *GridFile) SetChunkSize(bytes int) {
	file.assertMode(gfsWriting)
	debugf("GridFile %p: setting chunk size to %d", file, bytes)
	file.m.Lock()
	file.doc.ChunkSize = bytes
	file.m.Unlock()
}
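
// Illustrative sketch, not part of the original file: SetChunkSize must be
// called after Create but before the first Write. The file name and payload
// below are assumptions for the example.
//
//     file, err := gfs.Create("big.bin")
//     if err != nil {
//         panic(err)
//     }
//     file.SetChunkSize(1024 * 1024) // 1MB chunks instead of the 255kb default
//     _, err = file.Write(payload)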

// Id returns the current file Id.
func (file *GridFile) Id() interface{} {
	return file.doc.Id
}

// SetId changes the current file Id.
//
// It is a runtime error to call this function once the file has started
// being written to, or when the file is not open for writing.
func (file *GridFile) SetId(id interface{}) {
	file.assertMode(gfsWriting)
	file.m.Lock()
	file.doc.Id = id
	file.m.Unlock()
}

// Name returns the optional file name. An empty string will be returned
// in case it is unset.
func (file *GridFile) Name() string {
	return file.doc.Filename
}

// SetName changes the optional file name. An empty string may be used to
// unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetName(name string) {
	file.assertMode(gfsWriting)
	file.m.Lock()
	file.doc.Filename = name
	file.m.Unlock()
}

// ContentType returns the optional file content type. An empty string will be
// returned in case it is unset.
func (file *GridFile) ContentType() string {
	return file.doc.ContentType
}

// SetContentType changes the optional file content type. An empty string may
// be used to unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetContentType(ctype string) {
	file.assertMode(gfsWriting)
	file.m.Lock()
	file.doc.ContentType = ctype
	file.m.Unlock()
}

// GetMeta unmarshals the optional "metadata" field associated with the
// file into the result parameter. The meaning of keys under that field
// is user-defined. For example:
//
//     result := struct{ INode int }{}
//     err = file.GetMeta(&result)
//     if err != nil {
//         panic(err.String())
//     }
//     fmt.Printf("inode: %d\n", result.INode)
//
func (file *GridFile) GetMeta(result interface{}) (err error) {
	file.m.Lock()
	if file.doc.Metadata != nil {
		err = bson.Unmarshal(file.doc.Metadata.Data, result)
	}
	file.m.Unlock()
	return
}

// SetMeta changes the optional "metadata" field associated with the
// file. The meaning of keys under that field is user-defined.
// For example:
//
//     file.SetMeta(bson.M{"inode": inode})
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetMeta(metadata interface{}) {
	file.assertMode(gfsWriting)
	data, err := bson.Marshal(metadata)
	file.m.Lock()
	if err != nil && file.err == nil {
		file.err = err
	} else {
		file.doc.Metadata = &bson.Raw{Data: data}
	}
	file.m.Unlock()
}

// Size returns the file size in bytes.
func (file *GridFile) Size() (bytes int64) {
	file.m.Lock()
	bytes = file.doc.Length
	file.m.Unlock()
	return
}

// MD5 returns the file MD5 as a hex-encoded string.
func (file *GridFile) MD5() (md5 string) {
	return file.doc.MD5
}

// UploadDate returns the file upload time.
func (file *GridFile) UploadDate() time.Time {
	return file.doc.UploadDate
}

// SetUploadDate changes the file upload time.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetUploadDate(t time.Time) {
	file.assertMode(gfsWriting)
	file.m.Lock()
	file.doc.UploadDate = t
	file.m.Unlock()
}

// Close flushes any pending changes in case the file is being written
// to, waits for any background operations to finish, and closes the file.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
func (file *GridFile) Close() (err error) {
	file.m.Lock()
	defer file.m.Unlock()
	if file.mode == gfsWriting {
		if len(file.wbuf) > 0 && file.err == nil {
			file.insertChunk(file.wbuf)
			file.wbuf = file.wbuf[0:0]
		}
		file.completeWrite()
	} else if file.mode == gfsReading && file.rcache != nil {
		file.rcache.wait.Lock()
		file.rcache = nil
	}
	file.mode = gfsClosed
	debugf("GridFile %p: closed", file)
	return file.err
}
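
// Illustrative sketch, not part of the original file: a full write cycle.
// Chunk inserts run in the background, so only the error returned by Close
// confirms the file was actually stored. Names below are assumptions.
//
//     file, err := gfs.Create("hello.txt")
//     if err != nil {
//         panic(err)
//     }
//     if _, err = file.Write([]byte("hello world")); err != nil {
//         panic(err)
//     }
//     if err = file.Close(); err != nil {
//         panic(err) // deferred chunk insert errors surface here
//     }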

func (file *GridFile) completeWrite() {
	for file.wpending > 0 {
		debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
		file.c.Wait()
	}
	if file.err == nil {
		hexsum := hex.EncodeToString(file.wsum.Sum(nil))
		if file.doc.UploadDate.IsZero() {
			file.doc.UploadDate = bson.Now()
		}
		file.doc.MD5 = hexsum
		file.err = file.gfs.Files.Insert(file.doc)
		file.gfs.Chunks.EnsureIndexKey("files_id", "n")
	}
	if file.err != nil {
		file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
	}
}

// Abort cancels an in-progress write, preventing the file from being
// automatically created and ensuring previously written chunks are
// removed when the file is closed.
//
// It is a runtime error to call Abort when the file was not opened
// for writing.
func (file *GridFile) Abort() {
	if file.mode != gfsWriting {
		panic("file.Abort must be called on file opened for writing")
	}
	file.err = errors.New("write aborted")
}
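
// Illustrative sketch, not part of the original file: aborting a partial
// upload when the data source fails mid-copy, so that Close removes the
// chunks written so far instead of creating a truncated file. The src
// reader is an assumption for the example.
//
//     if _, err := io.Copy(file, src); err != nil {
//         file.Abort()
//     }
//     closeErr := file.Close() // returns "write aborted" after Abort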

// Write writes the provided data to the file and returns the
// number of bytes written and an error if something went wrong.
//
// The file will internally cache the data so that all but the last
// chunk sent to the database have the size defined by SetChunkSize.
// This also means that errors may be deferred until a future call
// to Write or Close.
//
// The parameters and behavior of this function turn the file
// into an io.Writer.
func (file *GridFile) Write(data []byte) (n int, err error) {
	file.assertMode(gfsWriting)
	file.m.Lock()
	debugf("GridFile %p: writing %d bytes", file, len(data))
	defer file.m.Unlock()

	if file.err != nil {
		return 0, file.err
	}

	n = len(data)
	file.doc.Length += int64(n)
	chunkSize := file.doc.ChunkSize

	if len(file.wbuf)+len(data) < chunkSize {
		file.wbuf = append(file.wbuf, data...)
		return
	}

	// First, flush file.wbuf complementing with data.
	if len(file.wbuf) > 0 {
		missing := chunkSize - len(file.wbuf)
		if missing > len(data) {
			missing = len(data)
		}
		file.wbuf = append(file.wbuf, data[:missing]...)
		data = data[missing:]
		file.insertChunk(file.wbuf)
		file.wbuf = file.wbuf[0:0]
	}

	// Then, flush all chunks from data without copying.
	for len(data) > chunkSize {
		size := chunkSize
		if size > len(data) {
			size = len(data)
		}
		file.insertChunk(data[:size])
		data = data[size:]
	}

	// And append the rest for a future call.
	file.wbuf = append(file.wbuf, data...)

	return n, file.err
}
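
// Illustrative sketch, not part of the original file: since GridFile is an
// io.Writer, io.Copy can stream from any reader and chunking happens
// internally. The local path is an assumption for the example.
//
//     src, err := os.Open("/tmp/input.bin")
//     if err != nil {
//         panic(err)
//     }
//     defer src.Close()
//     if _, err := io.Copy(file, src); err != nil {
//         panic(err)
//     }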

func (file *GridFile) insertChunk(data []byte) {
	n := file.chunk
	file.chunk++
	debugf("GridFile %p: adding to checksum: %q", file, string(data))
	file.wsum.Write(data)

	for file.doc.ChunkSize*file.wpending >= 1024*1024 {
		// Hold on.. we got a MB pending.
		file.c.Wait()
		if file.err != nil {
			return
		}
	}

	file.wpending++

	debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))

	// We may not own the memory of data, so rather than
	// simply copying it, we'll marshal the document ahead of time.
	data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
	if err != nil {
		file.err = err
		return
	}

	go func() {
		err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
		file.m.Lock()
		file.wpending--
		if err != nil && file.err == nil {
			file.err = err
		}
		file.c.Broadcast()
		file.m.Unlock()
	}()
}

// Seek sets the offset for the next Read or Write on file to
// offset, interpreted according to whence: 0 means relative to
// the origin of the file, 1 means relative to the current offset,
// and 2 means relative to the end. It returns the new offset and
// an error, if any.
func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
	file.m.Lock()
	debugf("GridFile %p: seeking to %d (whence=%d)", file, offset, whence)
	defer file.m.Unlock()
	switch whence {
	case os.SEEK_SET:
	case os.SEEK_CUR:
		offset += file.offset
	case os.SEEK_END:
		offset += file.doc.Length
	default:
		panic("unsupported whence value")
	}
	if offset > file.doc.Length {
		return file.offset, errors.New("seek past end of file")
	}
	if offset == file.doc.Length {
		// If we're seeking to the end of the file,
		// no need to read anything. This enables
		// a client to find the size of the file using only the
		// io.ReadSeeker interface with low overhead.
		file.offset = offset
		return file.offset, nil
	}
	chunk := int(offset / int64(file.doc.ChunkSize))
	if chunk+1 == file.chunk && offset >= file.offset {
		file.rbuf = file.rbuf[int(offset-file.offset):]
		file.offset = offset
		return file.offset, nil
	}
	file.offset = offset
	file.chunk = chunk
	file.rbuf = nil
	file.rbuf, err = file.getChunk()
	if err == nil {
		file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
	}
	return file.offset, err
}
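
// Illustrative sketch, not part of the original file: because GridFile is an
// io.ReadSeeker, the size can be found without fetching chunks and a tail of
// the file read directly, assuming the file is at least 10 bytes long.
//
//     size, _ := file.Seek(0, os.SEEK_END)   // no chunk is fetched for this
//     _, _ = file.Seek(size-10, os.SEEK_SET) // position at the last 10 bytes
//     tail := make([]byte, 10)
//     _, err := io.ReadFull(file, tail)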

// Read reads into b the next available data from the file and
// returns the number of bytes read and an error if something
// went wrong. At the end of the file, n will be zero and err
// will be set to io.EOF.
//
// The parameters and behavior of this function turn the file
// into an io.Reader.
func (file *GridFile) Read(b []byte) (n int, err error) {
	file.assertMode(gfsReading)
	file.m.Lock()
	debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
	defer file.m.Unlock()
	if file.offset == file.doc.Length {
		return 0, io.EOF
	}
	for err == nil {
		i := copy(b, file.rbuf)
		n += i
		file.offset += int64(i)
		file.rbuf = file.rbuf[i:]
		if i == len(b) || file.offset == file.doc.Length {
			break
		}
		b = b[i:]
		file.rbuf, err = file.getChunk()
	}
	return n, err
}
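
// Illustrative sketch, not part of the original file: draining a file with
// ioutil.ReadAll, which stops cleanly at the io.EOF returned once the offset
// reaches the stored length. The file name is an assumption.
//
//     file, err := gfs.Open("hello.txt")
//     if err != nil {
//         panic(err)
//     }
//     data, err := ioutil.ReadAll(file)
//     if err != nil {
//         panic(err)
//     }
//     err = file.Close()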

func (file *GridFile) getChunk() (data []byte, err error) {
	cache := file.rcache
	file.rcache = nil
	if cache != nil && cache.n == file.chunk {
		debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
		cache.wait.Lock()
		data, err = cache.data, cache.err
	} else {
		debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
		var doc gfsChunk
		err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
		data = doc.Data
	}
	file.chunk++
	if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
		// Read the next one in background.
		cache = &gfsCachedChunk{n: file.chunk}
		cache.wait.Lock()
		debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
		// Clone the session to avoid having it closed in between.
		chunks := file.gfs.Chunks
		session := chunks.Database.Session.Clone()
		go func(id interface{}, n int) {
			defer session.Close()
			chunks = chunks.With(session)
			var doc gfsChunk
			cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
			cache.data = doc.Data
			cache.wait.Unlock()
		}(file.doc.Id, file.chunk)
		file.rcache = cache
	}
	debugf("Returning err: %#v", err)
	return
}
708 vendor/gopkg.in/mgo.v2/gridfs_test.go (generated, vendored)
@@ -1,708 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo_test

import (
	"io"
	"os"
	"time"

	. "gopkg.in/check.v1"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

func (s *S) TestGridFSCreate(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	before := bson.Now()

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	n, err := file.Write([]byte("some data"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 9)

	err = file.Close()
	c.Assert(err, IsNil)

	after := bson.Now()

	// Check the file information.
	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	fileId, ok := result["_id"].(bson.ObjectId)
	c.Assert(ok, Equals, true)
	c.Assert(fileId.Valid(), Equals, true)
	result["_id"] = "<id>"

	ud, ok := result["uploadDate"].(time.Time)
	c.Assert(ok, Equals, true)
	c.Assert(ud.After(before) && ud.Before(after), Equals, true)
	result["uploadDate"] = "<timestamp>"

	expected := M{
		"_id":        "<id>",
		"length":     9,
		"chunkSize":  255 * 1024,
		"uploadDate": "<timestamp>",
		"md5":        "1e50210a0202497fb79bc38b6ade6c34",
	}
	c.Assert(result, DeepEquals, expected)

	// Check the chunk.
	result = M{}
	err = db.C("fs.chunks").Find(nil).One(result)
	c.Assert(err, IsNil)

	chunkId, ok := result["_id"].(bson.ObjectId)
	c.Assert(ok, Equals, true)
	c.Assert(chunkId.Valid(), Equals, true)
	result["_id"] = "<id>"

	expected = M{
		"_id":      "<id>",
		"files_id": fileId,
		"n":        0,
		"data":     []byte("some data"),
	}
	c.Assert(result, DeepEquals, expected)

	// Check that an index was created.
	indexes, err := db.C("fs.chunks").Indexes()
	c.Assert(err, IsNil)
	c.Assert(len(indexes), Equals, 2)
	c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"})
}

func (s *S) TestGridFSFileDetails(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile1.txt")
	c.Assert(err, IsNil)

	n, err := file.Write([]byte("some"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 4)

	c.Assert(file.Size(), Equals, int64(4))

	n, err = file.Write([]byte(" data"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 5)

	c.Assert(file.Size(), Equals, int64(9))

	id, _ := file.Id().(bson.ObjectId)
	c.Assert(id.Valid(), Equals, true)
	c.Assert(file.Name(), Equals, "myfile1.txt")
	c.Assert(file.ContentType(), Equals, "")

	var info interface{}
	err = file.GetMeta(&info)
	c.Assert(err, IsNil)
	c.Assert(info, IsNil)

	file.SetId("myid")
	file.SetName("myfile2.txt")
	file.SetContentType("text/plain")
	file.SetMeta(M{"any": "thing"})

	c.Assert(file.Id(), Equals, "myid")
	c.Assert(file.Name(), Equals, "myfile2.txt")
	c.Assert(file.ContentType(), Equals, "text/plain")

	err = file.GetMeta(&info)
	c.Assert(err, IsNil)
	c.Assert(info, DeepEquals, bson.M{"any": "thing"})

	err = file.Close()
	c.Assert(err, IsNil)

	c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")

	ud := file.UploadDate()
	now := time.Now()
	c.Assert(ud.Before(now), Equals, true)
	c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true)

	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	result["uploadDate"] = "<timestamp>"

	expected := M{
		"_id":         "myid",
		"length":      9,
		"chunkSize":   255 * 1024,
		"uploadDate":  "<timestamp>",
		"md5":         "1e50210a0202497fb79bc38b6ade6c34",
		"filename":    "myfile2.txt",
		"contentType": "text/plain",
		"metadata":    M{"any": "thing"},
	}
	c.Assert(result, DeepEquals, expected)
}

func (s *S) TestGridFSSetUploadDate(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local)
	file.SetUploadDate(t)

	err = file.Close()
	c.Assert(err, IsNil)

	// Check the file information.
	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	ud := result["uploadDate"].(time.Time)
	if !ud.Equal(t) {
		c.Fatalf("want upload date %s, got %s", t, ud)
	}
}

func (s *S) TestGridFSCreateWithChunking(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	file.SetChunkSize(5)

	// Smaller than the chunk size.
	n, err := file.Write([]byte("abc"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)

	// Boundary in the middle.
	n, err = file.Write([]byte("defg"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 4)

	// Boundary at the end.
	n, err = file.Write([]byte("hij"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)

	// Larger than the chunk size, with 3 chunks.
	n, err = file.Write([]byte("klmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 12)

	err = file.Close()
	c.Assert(err, IsNil)

	// Check the file information.
	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	fileId, _ := result["_id"].(bson.ObjectId)
	c.Assert(fileId.Valid(), Equals, true)
	result["_id"] = "<id>"
	result["uploadDate"] = "<timestamp>"

	expected := M{
		"_id":        "<id>",
		"length":     22,
		"chunkSize":  5,
		"uploadDate": "<timestamp>",
		"md5":        "44a66044834cbe55040089cabfc102d5",
	}
	c.Assert(result, DeepEquals, expected)

	// Check the chunks.
	iter := db.C("fs.chunks").Find(nil).Sort("n").Iter()
	dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}
	for i := 0; ; i++ {
		result = M{}
		if !iter.Next(result) {
			if i != 5 {
				c.Fatalf("Expected 5 chunks, got %d", i)
			}
			break
		}
		c.Assert(iter.Close(), IsNil)

		result["_id"] = "<id>"

		expected = M{
			"_id":      "<id>",
			"files_id": fileId,
			"n":        i,
			"data":     []byte(dataChunks[i]),
		}
		c.Assert(result, DeepEquals, expected)
	}
}

func (s *S) TestGridFSAbort(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	file.SetChunkSize(5)

	n, err := file.Write([]byte("some data"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 9)

	var count int
	for i := 0; i < 10; i++ {
		count, err = db.C("fs.chunks").Count()
		if count > 0 || err != nil {
			break
		}
	}
	c.Assert(err, IsNil)
	c.Assert(count, Equals, 1)

	file.Abort()

	err = file.Close()
	c.Assert(err, ErrorMatches, "write aborted")

	count, err = db.C("fs.chunks").Count()
	c.Assert(err, IsNil)
	c.Assert(count, Equals, 0)
}

func (s *S) TestGridFSCloseConflict(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true})

	// For a closing-time conflict
	err = db.C("fs.files").Insert(M{"filename": "foo.txt"})
	c.Assert(err, IsNil)

	gfs := db.GridFS("fs")
	file, err := gfs.Create("foo.txt")
	c.Assert(err, IsNil)

	_, err = file.Write([]byte("some data"))
	c.Assert(err, IsNil)

	err = file.Close()
	c.Assert(mgo.IsDup(err), Equals, true)

	count, err := db.C("fs.chunks").Count()
	c.Assert(err, IsNil)
	c.Assert(count, Equals, 0)
}

func (s *S) TestGridFSOpenNotFound(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.OpenId("non-existent")
	c.Assert(err == mgo.ErrNotFound, Equals, true)
	c.Assert(file, IsNil)

	file, err = gfs.Open("non-existent")
	c.Assert(err == mgo.ErrNotFound, Equals, true)
	c.Assert(file, IsNil)
}

func (s *S) TestGridFSReadAll(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)
	id := file.Id()

	file.SetChunkSize(5)

	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)

	err = file.Close()
	c.Assert(err, IsNil)

	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)

	b := make([]byte, 30)
	n, err = file.Read(b)
	c.Assert(n, Equals, 22)
	c.Assert(err, IsNil)

	n, err = file.Read(b)
	c.Assert(n, Equals, 0)
	c.Assert(err == io.EOF, Equals, true)

	err = file.Close()
	c.Assert(err, IsNil)
}

func (s *S) TestGridFSReadChunking(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	id := file.Id()

	file.SetChunkSize(5)

	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)

	err = file.Close()
	c.Assert(err, IsNil)

	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)

	b := make([]byte, 30)

	// Smaller than the chunk size.
	n, err = file.Read(b[:3])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)
	c.Assert(b[:3], DeepEquals, []byte("abc"))

	// Boundary in the middle.
	n, err = file.Read(b[:4])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 4)
	c.Assert(b[:4], DeepEquals, []byte("defg"))

	// Boundary at the end.
	n, err = file.Read(b[:3])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)
	c.Assert(b[:3], DeepEquals, []byte("hij"))

	// Larger than the chunk size, with 3 chunks.
	n, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 12)
	c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv"))

	n, err = file.Read(b)
	c.Assert(n, Equals, 0)
	c.Assert(err == io.EOF, Equals, true)

	err = file.Close()
	c.Assert(err, IsNil)
}

func (s *S) TestGridFSOpen(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()

	file, err = gfs.Open("myfile.txt")
	c.Assert(err, IsNil)
	defer file.Close()

	var b [1]byte

	_, err = file.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "2")
}

func (s *S) TestGridFSSeek(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)
	id := file.Id()

	file.SetChunkSize(5)

	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)

	err = file.Close()
	c.Assert(err, IsNil)

	b := make([]byte, 5)

	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)

	o, err := file.Seek(3, os.SEEK_SET)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(3))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("defgh"))

	o, err = file.Seek(5, os.SEEK_CUR)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(13))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("nopqr"))

	o, err = file.Seek(0, os.SEEK_END)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(22))
	n, err = file.Read(b)
	c.Assert(err, Equals, io.EOF)
	c.Assert(n, Equals, 0)

	o, err = file.Seek(-10, os.SEEK_END)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(12))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("mnopq"))

	o, err = file.Seek(8, os.SEEK_SET)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(8))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("ijklm"))

	// Trivial seek forward within same chunk. Already
	// got the data, shouldn't touch the database.
	sent := mgo.GetStats().SentOps
	o, err = file.Seek(1, os.SEEK_CUR)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(14))
	c.Assert(mgo.GetStats().SentOps, Equals, sent)
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("opqrs"))

	// Try seeking past end of file.
	file.Seek(3, os.SEEK_SET)
	o, err = file.Seek(23, os.SEEK_SET)
	c.Assert(err, ErrorMatches, "seek past end of file")
	c.Assert(o, Equals, int64(3))
}

func (s *S) TestGridFSRemoveId(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	id := file.Id()
	file.Close()

	err = gfs.RemoveId(id)
	c.Assert(err, IsNil)

	file, err = gfs.Open("myfile.txt")
	c.Assert(err, IsNil)
	defer file.Close()

	var b [1]byte

	_, err = file.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "1")

	n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}

func (s *S) TestGridFSRemove(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()

	err = gfs.Remove("myfile.txt")
	c.Assert(err, IsNil)

	_, err = gfs.Open("myfile.txt")
	c.Assert(err == mgo.ErrNotFound, Equals, true)

	n, err := db.C("fs.chunks").Find(nil).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}

func (s *S) TestGridFSOpenNext(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile1.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile2.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()

	var f *mgo.GridFile
	var b [1]byte

	iter := gfs.Find(nil).Sort("-filename").Iter()

	ok := gfs.OpenNext(iter, &f)
	c.Assert(ok, Equals, true)
	c.Check(f.Name(), Equals, "myfile2.txt")

	_, err = f.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "2")

	ok = gfs.OpenNext(iter, &f)
	c.Assert(ok, Equals, true)
	c.Check(f.Name(), Equals, "myfile1.txt")

	_, err = f.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "1")

	ok = gfs.OpenNext(iter, &f)
	c.Assert(ok, Equals, false)
	c.Assert(iter.Close(), IsNil)
	c.Assert(f, IsNil)

	// Do it again with a more restrictive query to make sure
	// it's actually taken into account.
	iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter()

	ok = gfs.OpenNext(iter, &f)
	c.Assert(ok, Equals, true)
	c.Check(f.Name(), Equals, "myfile1.txt")

	ok = gfs.OpenNext(iter, &f)
	c.Assert(ok, Equals, false)
	c.Assert(iter.Close(), IsNil)
	c.Assert(f, IsNil)
}
77 vendor/gopkg.in/mgo.v2/internal/sasl/sasl.c (generated, vendored)
@@ -1,77 +0,0 @@
// +build !windows

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sasl/sasl.h>

static int mgo_sasl_simple(void *context, int id, const char **result, unsigned int *len)
{
	if (!result) {
		return SASL_BADPARAM;
	}
	switch (id) {
	case SASL_CB_USER:
		*result = (char *)context;
		break;
	case SASL_CB_AUTHNAME:
		*result = (char *)context;
		break;
	case SASL_CB_LANGUAGE:
		*result = NULL;
		break;
	default:
		return SASL_BADPARAM;
	}
	if (len) {
		*len = *result ? strlen(*result) : 0;
	}
	return SASL_OK;
}

typedef int (*callback)(void);

static int mgo_sasl_secret(sasl_conn_t *conn, void *context, int id, sasl_secret_t **result)
{
	if (!conn || !result || id != SASL_CB_PASS) {
		return SASL_BADPARAM;
	}
	*result = (sasl_secret_t *)context;
	return SASL_OK;
}

sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password)
{
	sasl_callback_t *cb = malloc(4 * sizeof(sasl_callback_t));
	int n = 0;

	size_t len = strlen(password);
	sasl_secret_t *secret = (sasl_secret_t*)malloc(sizeof(sasl_secret_t) + len);
	if (!secret) {
		free(cb);
		return NULL;
	}
	strcpy((char *)secret->data, password);
	secret->len = len;

	cb[n].id = SASL_CB_PASS;
	cb[n].proc = (callback)&mgo_sasl_secret;
	cb[n].context = secret;
	n++;

	cb[n].id = SASL_CB_USER;
	cb[n].proc = (callback)&mgo_sasl_simple;
	cb[n].context = (char*)username;
	n++;

	cb[n].id = SASL_CB_AUTHNAME;
	cb[n].proc = (callback)&mgo_sasl_simple;
	cb[n].context = (char*)username;
	n++;

	cb[n].id = SASL_CB_LIST_END;
	cb[n].proc = NULL;
	cb[n].context = NULL;

	return cb;
}
138 vendor/gopkg.in/mgo.v2/internal/sasl/sasl.go (generated, vendored)
@@ -1,138 +0,0 @@
// Package sasl is an implementation detail of the mgo package.
//
// This package is not meant to be used by itself.
//

// +build !windows

package sasl

// #cgo LDFLAGS: -lsasl2
//
// struct sasl_conn {};
//
// #include <stdlib.h>
// #include <sasl/sasl.h>
//
// sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password);
//
import "C"

import (
	"fmt"
	"strings"
	"sync"
	"unsafe"
)

type saslStepper interface {
	Step(serverData []byte) (clientData []byte, done bool, err error)
	Close()
}

type saslSession struct {
	conn *C.sasl_conn_t
	step int
	mech string

	cstrings  []*C.char
	callbacks *C.sasl_callback_t
}

var initError error
var initOnce sync.Once

func initSASL() {
	rc := C.sasl_client_init(nil)
	if rc != C.SASL_OK {
		initError = saslError(rc, nil, "cannot initialize SASL library")
	}
}

func New(username, password, mechanism, service, host string) (saslStepper, error) {
	initOnce.Do(initSASL)
	if initError != nil {
		return nil, initError
	}

	ss := &saslSession{mech: mechanism}
	if service == "" {
		service = "mongodb"
	}
	if i := strings.Index(host, ":"); i >= 0 {
		host = host[:i]
	}
	ss.callbacks = C.mgo_sasl_callbacks(ss.cstr(username), ss.cstr(password))
	rc := C.sasl_client_new(ss.cstr(service), ss.cstr(host), nil, nil, ss.callbacks, 0, &ss.conn)
	if rc != C.SASL_OK {
		ss.Close()
		return nil, saslError(rc, nil, "cannot create new SASL client")
	}
	return ss, nil
}

func (ss *saslSession) cstr(s string) *C.char {
	cstr := C.CString(s)
	ss.cstrings = append(ss.cstrings, cstr)
	return cstr
}

func (ss *saslSession) Close() {
	for _, cstr := range ss.cstrings {
		C.free(unsafe.Pointer(cstr))
	}
	ss.cstrings = nil

	if ss.callbacks != nil {
		C.free(unsafe.Pointer(ss.callbacks))
	}

	// The documentation of SASL dispose makes it clear that this should only
	// be done when the connection is done, not when the authentication phase
	// is done, because an encryption layer may have been negotiated.
	// Even then, we'll do this for now, because it's simpler and prevents
	// keeping track of this state for every socket. If it breaks, we'll fix it.
	C.sasl_dispose(&ss.conn)
}

func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
	ss.step++
	if ss.step > 10 {
		return nil, false, fmt.Errorf("too many SASL steps without authentication")
	}
	var cclientData *C.char
	var cclientDataLen C.uint
	var rc C.int
	if ss.step == 1 {
		var mechanism *C.char // ignored - must match cred
		rc = C.sasl_client_start(ss.conn, ss.cstr(ss.mech), nil, &cclientData, &cclientDataLen, &mechanism)
	} else {
		var cserverData *C.char
		var cserverDataLen C.uint
		if len(serverData) > 0 {
			cserverData = (*C.char)(unsafe.Pointer(&serverData[0]))
			cserverDataLen = C.uint(len(serverData))
		}
		rc = C.sasl_client_step(ss.conn, cserverData, cserverDataLen, nil, &cclientData, &cclientDataLen)
	}
	if cclientData != nil && cclientDataLen > 0 {
		clientData = C.GoBytes(unsafe.Pointer(cclientData), C.int(cclientDataLen))
	}
	if rc == C.SASL_OK {
		return clientData, true, nil
	}
	if rc == C.SASL_CONTINUE {
		return clientData, false, nil
	}
	return nil, false, saslError(rc, ss.conn, "cannot establish SASL session")
}
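
// Illustrative sketch, not part of the original file: how a caller drives a
// saslStepper until the mechanism reports completion. The sendToServer
// round-trip is a placeholder for the real wire exchange, not an API of
// this package.
//
//     ss, err := New(user, pass, "GSSAPI", "mongodb", host)
//     if err != nil {
//         return err
//     }
//     defer ss.Close()
//     var serverData []byte
//     for {
//         clientData, done, err := ss.Step(serverData)
//         if err != nil {
//             return err
//         }
//         if done {
//             break
//         }
//         serverData = sendToServer(clientData) // placeholder round-trip
//     }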

func saslError(rc C.int, conn *C.sasl_conn_t, msg string) error {
	var detail string
	if conn == nil {
		detail = C.GoString(C.sasl_errstring(rc, nil, nil))
	} else {
		detail = C.GoString(C.sasl_errdetail(conn))
	}
	return fmt.Errorf("%s: %s", msg, detail)
}
118 vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c (generated, vendored)
@@ -1,118 +0,0 @@
#include "sasl_windows.h"

static const LPSTR SSPI_PACKAGE_NAME = "kerberos";

SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle *cred_handle, char *username, char *password, char *domain)
{
	SEC_WINNT_AUTH_IDENTITY auth_identity;
	SECURITY_INTEGER ignored;

	auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
	auth_identity.User = (LPSTR) username;
	auth_identity.UserLength = strlen(username);
	auth_identity.Password = (LPSTR) password;
	auth_identity.PasswordLength = strlen(password);
	auth_identity.Domain = (LPSTR) domain;
	auth_identity.DomainLength = strlen(domain);
	return call_sspi_acquire_credentials_handle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, cred_handle, &ignored);
}

int sspi_step(CredHandle *cred_handle, int has_context, CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *target)
{
	SecBufferDesc inbuf;
	SecBuffer in_bufs[1];
	SecBufferDesc outbuf;
	SecBuffer out_bufs[1];

	if (has_context > 0) {
		// If we already have a context, we now have data to send.
		// Put this data in an inbuf.
		inbuf.ulVersion = SECBUFFER_VERSION;
		inbuf.cBuffers = 1;
		inbuf.pBuffers = in_bufs;
		in_bufs[0].pvBuffer = *buffer;
		in_bufs[0].cbBuffer = *buffer_length;
		in_bufs[0].BufferType = SECBUFFER_TOKEN;
	}

	outbuf.ulVersion = SECBUFFER_VERSION;
	outbuf.cBuffers = 1;
	outbuf.pBuffers = out_bufs;
	out_bufs[0].pvBuffer = NULL;
	out_bufs[0].cbBuffer = 0;
	out_bufs[0].BufferType = SECBUFFER_TOKEN;

	ULONG context_attr = 0;

	int ret = call_sspi_initialize_security_context(cred_handle,
	        has_context > 0 ? context : NULL,
	        (LPSTR) target,
	        ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH,
	        0,
	        SECURITY_NETWORK_DREP,
	        has_context > 0 ? &inbuf : NULL,
	        0,
	        context,
	        &outbuf,
	        &context_attr,
	        NULL);

	*buffer = malloc(out_bufs[0].cbBuffer);
	*buffer_length = out_bufs[0].cbBuffer;
	memcpy(*buffer, out_bufs[0].pvBuffer, *buffer_length);

	return ret;
}

int sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *user_plus_realm)
{
	SecPkgContext_Sizes sizes;
	SECURITY_STATUS status = call_sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes);

	if (status != SEC_E_OK) {
		return status;
	}

	size_t user_plus_realm_length = strlen(user_plus_realm);
	int msgSize = 4 + user_plus_realm_length;
	char *msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char));
	msg[sizes.cbSecurityTrailer + 0] = 1;
	msg[sizes.cbSecurityTrailer + 1] = 0;
	msg[sizes.cbSecurityTrailer + 2] = 0;
	msg[sizes.cbSecurityTrailer + 3] = 0;
	memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, user_plus_realm_length);

	SecBuffer wrapBufs[3];
	SecBufferDesc wrapBufDesc;
	wrapBufDesc.cBuffers = 3;
	wrapBufDesc.pBuffers = wrapBufs;
	wrapBufDesc.ulVersion = SECBUFFER_VERSION;

	wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer;
	wrapBufs[0].BufferType = SECBUFFER_TOKEN;
	wrapBufs[0].pvBuffer = msg;

	wrapBufs[1].cbBuffer = msgSize;
	wrapBufs[1].BufferType = SECBUFFER_DATA;
	wrapBufs[1].pvBuffer = msg + sizes.cbSecurityTrailer;

	wrapBufs[2].cbBuffer = sizes.cbBlockSize;
	wrapBufs[2].BufferType = SECBUFFER_PADDING;
	wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize;

	status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0);
	if (status != SEC_E_OK) {
		free(msg);
		return status;
	}

	*buffer_length = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer;
	*buffer = malloc(*buffer_length);

	memcpy(*buffer, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer);
	memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer);
	memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer);

	free(msg);
	return SEC_E_OK;
}
140 vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go (generated, vendored)
@@ -1,140 +0,0 @@
package sasl

// #include "sasl_windows.h"
import "C"

import (
	"fmt"
	"strings"
	"sync"
	"unsafe"
)

type saslStepper interface {
	Step(serverData []byte) (clientData []byte, done bool, err error)
	Close()
}

type saslSession struct {
	// Credentials
	mech          string
	service       string
	host          string
	userPlusRealm string
	target        string
	domain        string

	// Internal state
	authComplete bool
	errored      bool
	step         int

	// C internal state
	credHandle C.CredHandle
	context    C.CtxtHandle
	hasContext C.int

	// Keep track of pointers we need to explicitly free
	stringsToFree []*C.char
}

var initError error
var initOnce sync.Once

func initSSPI() {
	rc := C.load_secur32_dll()
	if rc != 0 {
		initError = fmt.Errorf("Error loading libraries: %v", rc)
	}
}

func New(username, password, mechanism, service, host string) (saslStepper, error) {
	initOnce.Do(initSSPI)
	ss := &saslSession{mech: mechanism, hasContext: 0, userPlusRealm: username}
	if service == "" {
		service = "mongodb"
	}
	if i := strings.Index(host, ":"); i >= 0 {
		host = host[:i]
	}
	ss.service = service
	ss.host = host

	usernameComponents := strings.Split(username, "@")
	if len(usernameComponents) < 2 {
		return nil, fmt.Errorf("Username '%v' doesn't contain a realm!", username)
	}
	user := usernameComponents[0]
	ss.domain = usernameComponents[1]
	ss.target = fmt.Sprintf("%s/%s", ss.service, ss.host)

	var status C.SECURITY_STATUS
	// Step 0: call AcquireCredentialsHandle to get a nice SSPI CredHandle
	if len(password) > 0 {
		status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(ss.domain))
	} else {
		status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(ss.domain))
	}
	if status != C.SEC_E_OK {
		ss.errored = true
		return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status)
	}
	return ss, nil
}

func (ss *saslSession) cstr(s string) *C.char {
	cstr := C.CString(s)
	ss.stringsToFree = append(ss.stringsToFree, cstr)
	return cstr
}

func (ss *saslSession) Close() {
	for _, cstr := range ss.stringsToFree {
		C.free(unsafe.Pointer(cstr))
	}
}

func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
	ss.step++
	if ss.step > 10 {
		return nil, false, fmt.Errorf("too many SSPI steps without authentication")
	}
	var buffer C.PVOID
	var bufferLength C.ULONG
	if len(serverData) > 0 {
		buffer = (C.PVOID)(unsafe.Pointer(&serverData[0]))
		bufferLength = C.ULONG(len(serverData))
	}
	var status C.int
	if ss.authComplete {
		// Step 3: last bit of magic to use the correct server credentials
		status = C.sspi_send_client_authz_id(&ss.context, &buffer, &bufferLength, ss.cstr(ss.userPlusRealm))
	} else {
		// Step 1 + Step 2: set up security context with the server and TGT
		status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, &buffer, &bufferLength, ss.cstr(ss.target))
	}
	if buffer != C.PVOID(nil) {
		defer C.free(unsafe.Pointer(buffer))
	}
	if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED {
		ss.errored = true
		return nil, false, ss.handleSSPIErrorCode(status)
	}

	clientData = C.GoBytes(unsafe.Pointer(buffer), C.int(bufferLength))
	if status == C.SEC_E_OK {
		ss.authComplete = true
		return clientData, true, nil
	} else {
		ss.hasContext = 1
		return clientData, false, nil
	}
}

func (ss *saslSession) handleSSPIErrorCode(code C.int) error {
	switch {
	case code == C.SEC_E_TARGET_UNKNOWN:
		return fmt.Errorf("Target %v@%v not found", ss.target, ss.domain)
	}
	return fmt.Errorf("Unknown error doing step %v, error code %v", ss.step, code)
}
7 vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h (generated, vendored)
@@ -1,7 +0,0 @@
#include <windows.h>

#include "sspi_windows.h"

SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain);
int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* target);
int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm);
96 vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c (generated, vendored)
@@ -1,96 +0,0 @@
// Code adapted from the NodeJS kerberos library:
//
//   https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.c
//
// Under the terms of the Apache License, Version 2.0:
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
#include <stdlib.h>

#include "sspi_windows.h"

static HINSTANCE sspi_secur32_dll = NULL;

int load_secur32_dll()
{
	sspi_secur32_dll = LoadLibrary("secur32.dll");
	if (sspi_secur32_dll == NULL) {
		return GetLastError();
	}
	return 0;
}

SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo)
{
	if (sspi_secur32_dll == NULL) {
		return -1;
	}
	encryptMessage_fn pfn_encryptMessage = (encryptMessage_fn) GetProcAddress(sspi_secur32_dll, "EncryptMessage");
	if (!pfn_encryptMessage) {
		return -2;
	}
	return (*pfn_encryptMessage)(phContext, fQOP, pMessage, MessageSeqNo);
}

SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
	LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
	void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
	PCredHandle phCredential, PTimeStamp ptsExpiry)
{
	if (sspi_secur32_dll == NULL) {
		return -1;
	}
	acquireCredentialsHandle_fn pfn_acquireCredentialsHandle;
#ifdef _UNICODE
	pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleW");
#else
	pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleA");
#endif
	if (!pfn_acquireCredentialsHandle) {
		return -2;
	}
	return (*pfn_acquireCredentialsHandle)(
		pszPrincipal, pszPackage, fCredentialUse, pvLogonId, pAuthData,
		pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry);
}

SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
	PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName,
	unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep,
	PSecBufferDesc pInput, unsigned long Reserved2, PCtxtHandle phNewContext,
	PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry)
{
	if (sspi_secur32_dll == NULL) {
		return -1;
	}
	initializeSecurityContext_fn pfn_initializeSecurityContext;
#ifdef _UNICODE
	pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextW");
#else
	pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextA");
#endif
	if (!pfn_initializeSecurityContext) {
		return -2;
	}
	return (*pfn_initializeSecurityContext)(
		phCredential, phContext, pszTargetName, fContextReq, Reserved1, TargetDataRep,
		pInput, Reserved2, phNewContext, pOutput, pfContextAttr, ptsExpiry);
}

SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer)
{
	if (sspi_secur32_dll == NULL) {
		return -1;
	}
	queryContextAttributes_fn pfn_queryContextAttributes;
#ifdef _UNICODE
	pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesW");
#else
	pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesA");
#endif
	if (!pfn_queryContextAttributes) {
		return -2;
	}
	return (*pfn_queryContextAttributes)(phContext, ulAttribute, pBuffer);
}
70 vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h (generated, vendored)
@@ -1,70 +0,0 @@
// Code adapted from the NodeJS kerberos library:
//
//   https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.h
//
// Under the terms of the Apache License, Version 2.0:
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
#ifndef SSPI_WINDOWS_H
#define SSPI_WINDOWS_H

#define SECURITY_WIN32 1

#include <windows.h>
#include <sspi.h>

int load_secur32_dll();

SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo);

typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo);

SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
	LPSTR pszPrincipal,           // Name of principal
	LPSTR pszPackage,             // Name of package
	unsigned long fCredentialUse, // Flags indicating use
	void *pvLogonId,              // Pointer to logon ID
	void *pAuthData,              // Package specific data
	SEC_GET_KEY_FN pGetKeyFn,     // Pointer to GetKey() func
	void *pvGetKeyArgument,       // Value to pass to GetKey()
	PCredHandle phCredential,     // (out) Cred Handle
	PTimeStamp ptsExpiry          // (out) Lifetime (optional)
);

typedef DWORD (WINAPI *acquireCredentialsHandle_fn)(
	LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
	void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
	PCredHandle phCredential, PTimeStamp ptsExpiry
);

SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
	PCredHandle phCredential,     // Cred to base context
	PCtxtHandle phContext,        // Existing context (OPT)
	LPSTR pszTargetName,          // Name of target
	unsigned long fContextReq,    // Context Requirements
	unsigned long Reserved1,      // Reserved, MBZ
	unsigned long TargetDataRep,  // Data rep of target
	PSecBufferDesc pInput,        // Input Buffers
	unsigned long Reserved2,      // Reserved, MBZ
	PCtxtHandle phNewContext,     // (out) New Context handle
	PSecBufferDesc pOutput,       // (inout) Output Buffers
	unsigned long *pfContextAttr, // (out) Context attrs
	PTimeStamp ptsExpiry          // (out) Life span (OPT)
);

typedef DWORD (WINAPI *initializeSecurityContext_fn)(
	PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq,
	unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2,
	PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry);

SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(
	PCtxtHandle phContext,        // Context to query
	unsigned long ulAttribute,    // Attribute to query
	void *pBuffer                 // Buffer for attributes
);

typedef DWORD (WINAPI *queryContextAttributes_fn)(
	PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer);

#endif // SSPI_WINDOWS_H
266  vendor/gopkg.in/mgo.v2/internal/scram/scram.go  (generated, vendored)
@@ -1,266 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
//
// http://tools.ietf.org/html/rfc5802
//
package scram

import (
    "bytes"
    "crypto/hmac"
    "crypto/rand"
    "encoding/base64"
    "fmt"
    "hash"
    "strconv"
    "strings"
)

// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
//
// A Client may be used within a SASL conversation with logic resembling:
//
//    var in []byte
//    var client = scram.NewClient(sha1.New, user, pass)
//    for client.Step(in) {
//        out := client.Out()
//        // send out to server
//        in := serverOut
//    }
//    if client.Err() != nil {
//        // auth failed
//    }
//
type Client struct {
    newHash func() hash.Hash

    user string
    pass string
    step int
    out  bytes.Buffer
    err  error

    clientNonce []byte
    serverNonce []byte
    saltedPass  []byte
    authMsg     bytes.Buffer
}

// NewClient returns a new SCRAM-* client with the provided hash algorithm.
//
// For SCRAM-SHA-1, for example, use:
//
//    client := scram.NewClient(sha1.New, user, pass)
//
func NewClient(newHash func() hash.Hash, user, pass string) *Client {
    c := &Client{
        newHash: newHash,
        user:    user,
        pass:    pass,
    }
    c.out.Grow(256)
    c.authMsg.Grow(256)
    return c
}

// Out returns the data to be sent to the server in the current step.
func (c *Client) Out() []byte {
    if c.out.Len() == 0 {
        return nil
    }
    return c.out.Bytes()
}

// Err returns the error that occurred, or nil if there were no errors.
func (c *Client) Err() error {
    return c.err
}

// SetNonce sets the client nonce to the provided value.
// If not set, the nonce is generated automatically out of crypto/rand on the first step.
func (c *Client) SetNonce(nonce []byte) {
    c.clientNonce = nonce
}

var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")

// Step processes the incoming data from the server and makes the
// next round of data for the server available via Client.Out.
// Step returns false if there are no errors and more data is
// still expected.
func (c *Client) Step(in []byte) bool {
    c.out.Reset()
    if c.step > 2 || c.err != nil {
        return false
    }
    c.step++
    switch c.step {
    case 1:
        c.err = c.step1(in)
    case 2:
        c.err = c.step2(in)
    case 3:
        c.err = c.step3(in)
    }
    return c.step > 2 || c.err != nil
}

func (c *Client) step1(in []byte) error {
    if len(c.clientNonce) == 0 {
        const nonceLen = 6
        buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
        if _, err := rand.Read(buf[:nonceLen]); err != nil {
            return fmt.Errorf("cannot read random SCRAM-SHA-1 nonce from operating system: %v", err)
        }
        c.clientNonce = buf[nonceLen:]
        b64.Encode(c.clientNonce, buf[:nonceLen])
    }
    c.authMsg.WriteString("n=")
    escaper.WriteString(&c.authMsg, c.user)
    c.authMsg.WriteString(",r=")
    c.authMsg.Write(c.clientNonce)

    c.out.WriteString("n,,")
    c.out.Write(c.authMsg.Bytes())
    return nil
}

var b64 = base64.StdEncoding

func (c *Client) step2(in []byte) error {
    c.authMsg.WriteByte(',')
    c.authMsg.Write(in)

    fields := bytes.Split(in, []byte(","))
    if len(fields) != 3 {
        return fmt.Errorf("expected 3 fields in first SCRAM-SHA-1 server message, got %d: %q", len(fields), in)
    }
    if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
        return fmt.Errorf("server sent an invalid SCRAM-SHA-1 nonce: %q", fields[0])
    }
    if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
        return fmt.Errorf("server sent an invalid SCRAM-SHA-1 salt: %q", fields[1])
    }
    if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
        return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2])
    }

    c.serverNonce = fields[0][2:]
    if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
        return fmt.Errorf("server SCRAM-SHA-1 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
    }

    salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
    n, err := b64.Decode(salt, fields[1][2:])
    if err != nil {
        return fmt.Errorf("cannot decode SCRAM-SHA-1 salt sent by server: %q", fields[1])
    }
    salt = salt[:n]
    iterCount, err := strconv.Atoi(string(fields[2][2:]))
    if err != nil {
        return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2])
    }
    c.saltPassword(salt, iterCount)

    c.authMsg.WriteString(",c=biws,r=")
    c.authMsg.Write(c.serverNonce)

    c.out.WriteString("c=biws,r=")
    c.out.Write(c.serverNonce)
    c.out.WriteString(",p=")
    c.out.Write(c.clientProof())
    return nil
}

func (c *Client) step3(in []byte) error {
    var isv, ise bool
    var fields = bytes.Split(in, []byte(","))
    if len(fields) == 1 {
        isv = bytes.HasPrefix(fields[0], []byte("v="))
        ise = bytes.HasPrefix(fields[0], []byte("e="))
    }
    if ise {
        return fmt.Errorf("SCRAM-SHA-1 authentication error: %s", fields[0][2:])
    } else if !isv {
        return fmt.Errorf("unsupported SCRAM-SHA-1 final message from server: %q", in)
    }
    if !bytes.Equal(c.serverSignature(), fields[0][2:]) {
        return fmt.Errorf("cannot authenticate SCRAM-SHA-1 server signature: %q", fields[0][2:])
    }
    return nil
}

func (c *Client) saltPassword(salt []byte, iterCount int) {
    mac := hmac.New(c.newHash, []byte(c.pass))
    mac.Write(salt)
    mac.Write([]byte{0, 0, 0, 1})
    ui := mac.Sum(nil)
    hi := make([]byte, len(ui))
    copy(hi, ui)
    for i := 1; i < iterCount; i++ {
        mac.Reset()
        mac.Write(ui)
        mac.Sum(ui[:0])
        for j, b := range ui {
            hi[j] ^= b
        }
    }
    c.saltedPass = hi
}

func (c *Client) clientProof() []byte {
    mac := hmac.New(c.newHash, c.saltedPass)
    mac.Write([]byte("Client Key"))
    clientKey := mac.Sum(nil)
    hash := c.newHash()
    hash.Write(clientKey)
    storedKey := hash.Sum(nil)
    mac = hmac.New(c.newHash, storedKey)
    mac.Write(c.authMsg.Bytes())
    clientProof := mac.Sum(nil)
    for i, b := range clientKey {
        clientProof[i] ^= b
    }
    clientProof64 := make([]byte, b64.EncodedLen(len(clientProof)))
    b64.Encode(clientProof64, clientProof)
    return clientProof64
}

func (c *Client) serverSignature() []byte {
    mac := hmac.New(c.newHash, c.saltedPass)
    mac.Write([]byte("Server Key"))
    serverKey := mac.Sum(nil)

    mac = hmac.New(c.newHash, serverKey)
    mac.Write(c.authMsg.Bytes())
    serverSignature := mac.Sum(nil)

    encoded := make([]byte, b64.EncodedLen(len(serverSignature)))
    b64.Encode(encoded, serverSignature)
    return encoded
}
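The conversation above is easiest to follow end to end with the RFC 5802 example exchange, which is also the first test vector in scram_test.go below. Here is a minimal sketch of driving the client against those canned server messages; the fixed nonce is set via SetNonce purely so the output is reproducible, and note that, the package being internal, this only compiles from within the mgo source tree:

```go
package main

import (
    "crypto/sha1"
    "fmt"

    "gopkg.in/mgo.v2/internal/scram"
)

func main() {
    // Deterministic client nonce, as in the package tests.
    client := scram.NewClient(sha1.New, "user", "pencil")
    client.SetNonce([]byte("fyko+d2lbbFgONRv9qkxdawL"))

    // Server replies from the RFC 5802 example conversation.
    serverMsgs := [][]byte{
        nil, // the first step consumes no server input
        []byte("r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096"),
        []byte("v=rmF9pqV8S7suAoZWja4dJRkFsKQ="),
    }
    for _, in := range serverMsgs {
        if done := client.Step(in); done {
            break // step 3 only verifies the server signature
        }
        fmt.Printf("C: %s\n", client.Out())
    }
    if client.Err() != nil {
        fmt.Println("auth failed:", client.Err())
    }
}
```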
67  vendor/gopkg.in/mgo.v2/internal/scram/scram_test.go  (generated, vendored)
@@ -1,67 +0,0 @@
package scram_test

import (
    "crypto/sha1"
    "testing"

    . "gopkg.in/check.v1"
    "gopkg.in/mgo.v2/internal/scram"
    "strings"
)

var _ = Suite(&S{})

func Test(t *testing.T) { TestingT(t) }

type S struct{}

var tests = [][]string{{
    "U: user pencil",
    "N: fyko+d2lbbFgONRv9qkxdawL",
    "C: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL",
    "S: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096",
    "C: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=",
    "S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ=",
}, {
    "U: root fe8c89e308ec08763df36333cbf5d3a2",
    "N: OTcxNDk5NjM2MzE5",
    "C: n,,n=root,r=OTcxNDk5NjM2MzE5",
    "S: r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,s=XRDkVrFC9JuL7/F4tG0acQ==,i=10000",
    "C: c=biws,r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,p=6y1jp9R7ETyouTXS9fW9k5UHdBc=",
    "S: v=LBnd9dUJRxdqZiEq91NKP3z/bHA=",
}}

func (s *S) TestExamples(c *C) {
    for _, steps := range tests {
        if len(steps) < 2 || len(steps[0]) < 3 || !strings.HasPrefix(steps[0], "U: ") {
            c.Fatalf("Invalid test: %#v", steps)
        }
        auth := strings.Fields(steps[0][3:])
        client := scram.NewClient(sha1.New, auth[0], auth[1])
        first, done := true, false
        c.Logf("-----")
        c.Logf("%s", steps[0])
        for _, step := range steps[1:] {
            c.Logf("%s", step)
            switch step[:3] {
            case "N: ":
                client.SetNonce([]byte(step[3:]))
            case "C: ":
                if first {
                    first = false
                    done = client.Step(nil)
                }
                c.Assert(done, Equals, false)
                c.Assert(client.Err(), IsNil)
                c.Assert(string(client.Out()), Equals, step[3:])
            case "S: ":
                first = false
                done = client.Step([]byte(step[3:]))
            default:
                panic("invalid test line: " + step)
            }
        }
        c.Assert(done, Equals, true)
        c.Assert(client.Err(), IsNil)
    }
}
133  vendor/gopkg.in/mgo.v2/log.go  (generated, vendored)
@@ -1,133 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
    "fmt"
    "sync"
)

// ---------------------------------------------------------------------------
// Logging integration.

// Avoid importing the log type information unnecessarily. There's a small cost
// associated with using an interface rather than the type. Depending on how
// often the logger is plugged in, it would be worth using the type instead.
type log_Logger interface {
    Output(calldepth int, s string) error
}

var (
    globalLogger log_Logger
    globalDebug  bool
    globalMutex  sync.Mutex
)

// RACE WARNING: There are known data races when logging, which are manually
// silenced when the race detector is in use. These data races won't be
// observed in typical use, because logging is supposed to be set up once when
// the application starts. Having raceDetector as a constant, the compiler
// should elide the locks altogether in actual use.

// Specify the *log.Logger object where log messages should be sent to.
func SetLogger(logger log_Logger) {
    if raceDetector {
        globalMutex.Lock()
        defer globalMutex.Unlock()
    }
    globalLogger = logger
}

// Enable the delivery of debug messages to the logger. Only meaningful
// if a logger is also set.
func SetDebug(debug bool) {
    if raceDetector {
        globalMutex.Lock()
        defer globalMutex.Unlock()
    }
    globalDebug = debug
}

func log(v ...interface{}) {
    if raceDetector {
        globalMutex.Lock()
        defer globalMutex.Unlock()
    }
    if globalLogger != nil {
        globalLogger.Output(2, fmt.Sprint(v...))
    }
}

func logln(v ...interface{}) {
    if raceDetector {
        globalMutex.Lock()
        defer globalMutex.Unlock()
    }
    if globalLogger != nil {
        globalLogger.Output(2, fmt.Sprintln(v...))
    }
}

func logf(format string, v ...interface{}) {
    if raceDetector {
        globalMutex.Lock()
        defer globalMutex.Unlock()
    }
    if globalLogger != nil {
        globalLogger.Output(2, fmt.Sprintf(format, v...))
    }
}

func debug(v ...interface{}) {
    if raceDetector {
        globalMutex.Lock()
        defer globalMutex.Unlock()
    }
    if globalDebug && globalLogger != nil {
        globalLogger.Output(2, fmt.Sprint(v...))
    }
}

func debugln(v ...interface{}) {
    if raceDetector {
        globalMutex.Lock()
        defer globalMutex.Unlock()
    }
    if globalDebug && globalLogger != nil {
        globalLogger.Output(2, fmt.Sprintln(v...))
    }
}

func debugf(format string, v ...interface{}) {
    if raceDetector {
        globalMutex.Lock()
        defer globalMutex.Unlock()
    }
    if globalDebug && globalLogger != nil {
        globalLogger.Output(2, fmt.Sprintf(format, v...))
    }
}
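Anything with the standard *log.Logger's Output method satisfies the log_Logger interface, so the standard library logger plugs in directly. A minimal sketch of wiring it up (the prefix string is an arbitrary choice for illustration):

```go
package main

import (
    "log"
    "os"

    mgo "gopkg.in/mgo.v2"
)

func main() {
    // Route mgo's internal messages through a standard *log.Logger.
    mgo.SetLogger(log.New(os.Stderr, "mgo: ", log.LstdFlags))
    // Also emit the (much noisier) debug-level messages.
    mgo.SetDebug(true)
}
```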
91  vendor/gopkg.in/mgo.v2/queue.go  (generated, vendored)
@@ -1,91 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

type queue struct {
    elems               []interface{}
    nelems, popi, pushi int
}

func (q *queue) Len() int {
    return q.nelems
}

func (q *queue) Push(elem interface{}) {
    //debugf("Pushing(pushi=%d popi=%d cap=%d): %#v\n",
    //       q.pushi, q.popi, len(q.elems), elem)
    if q.nelems == len(q.elems) {
        q.expand()
    }
    q.elems[q.pushi] = elem
    q.nelems++
    q.pushi = (q.pushi + 1) % len(q.elems)
    //debugf(" Pushed(pushi=%d popi=%d cap=%d): %#v\n",
    //       q.pushi, q.popi, len(q.elems), elem)
}

func (q *queue) Pop() (elem interface{}) {
    //debugf("Popping(pushi=%d popi=%d cap=%d)\n",
    //       q.pushi, q.popi, len(q.elems))
    if q.nelems == 0 {
        return nil
    }
    elem = q.elems[q.popi]
    q.elems[q.popi] = nil // Help GC.
    q.nelems--
    q.popi = (q.popi + 1) % len(q.elems)
    //debugf(" Popped(pushi=%d popi=%d cap=%d): %#v\n",
    //       q.pushi, q.popi, len(q.elems), elem)
    return elem
}

func (q *queue) expand() {
    curcap := len(q.elems)
    var newcap int
    if curcap == 0 {
        newcap = 8
    } else if curcap < 1024 {
        newcap = curcap * 2
    } else {
        newcap = curcap + (curcap / 4)
    }
    elems := make([]interface{}, newcap)

    if q.popi == 0 {
        copy(elems, q.elems)
        q.pushi = curcap
    } else {
        newpopi := newcap - (curcap - q.popi)
        copy(elems, q.elems[:q.popi])
        copy(elems[newpopi:], q.elems[q.popi:])
        q.popi = newpopi
    }
    for i := range q.elems {
        q.elems[i] = nil // Help GC.
    }
    q.elems = elems
}
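The queue is a classic ring buffer: popi and pushi chase each other modulo the slice length, and expand re-linearizes the wrapped region into a larger slice when capacity runs out. The index arithmetic in isolation, as a runnable sketch (growth omitted, fixed capacity assumed):

```go
package main

import "fmt"

func main() {
    // Ring-buffer indexing as used by queue.Push/Pop: both indices wrap
    // around modulo the capacity, so FIFO order survives the wrap.
    const capacity = 4
    elems := make([]int, capacity)
    popi, pushi, nelems := 0, 0, 0

    push := func(v int) {
        elems[pushi] = v
        pushi = (pushi + 1) % capacity
        nelems++
    }
    pop := func() int {
        v := elems[popi]
        popi = (popi + 1) % capacity
        nelems--
        return v
    }

    for i := 0; i < 4; i++ {
        push(i)
    }
    fmt.Println(pop(), pop()) // 0 1
    push(4)
    push(5) // pushi has wrapped past the start of the slice
    for nelems > 0 {
        fmt.Print(pop(), " ") // 2 3 4 5
    }
    fmt.Println()
}
```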
101  vendor/gopkg.in/mgo.v2/queue_test.go  (generated, vendored)
@@ -1,101 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
    . "gopkg.in/check.v1"
)

type QS struct{}

var _ = Suite(&QS{})

func (s *QS) TestSequentialGrowth(c *C) {
    q := queue{}
    n := 2048
    for i := 0; i != n; i++ {
        q.Push(i)
    }
    for i := 0; i != n; i++ {
        c.Assert(q.Pop(), Equals, i)
    }
}

var queueTestLists = [][]int{
    // {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
    {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},

    // {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7}
    {0, 1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11},

    // {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7}
    {0, 1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11},

    // {0, 1, 2, 3, 4, 5, 6, 7, 8}
    {0, 1, 2, 3, 4, 5, 6, 7, 8,
        -1, -1, -1, -1, -1, -1, -1, -1, -1,
        0, 1, 2, 3, 4, 5, 6, 7, 8},
}

func (s *QS) TestQueueTestLists(c *C) {
    test := []int{}
    testi := 0
    reset := func() {
        test = test[0:0]
        testi = 0
    }
    push := func(i int) {
        test = append(test, i)
    }
    pop := func() (i int) {
        if testi == len(test) {
            return -1
        }
        i = test[testi]
        testi++
        return
    }

    for _, list := range queueTestLists {
        reset()
        q := queue{}
        for _, n := range list {
            if n == -1 {
                c.Assert(q.Pop(), Equals, pop(), Commentf("With list %#v", list))
            } else {
                q.Push(n)
                push(n)
            }
        }

        for n := pop(); n != -1; n = pop() {
            c.Assert(q.Pop(), Equals, n, Commentf("With list %#v", list))
        }

        c.Assert(q.Pop(), Equals, nil, Commentf("With list %#v", list))
    }
}
5  vendor/gopkg.in/mgo.v2/raceoff.go  (generated, vendored)
@@ -1,5 +0,0 @@
// +build !race

package mgo

const raceDetector = false
5  vendor/gopkg.in/mgo.v2/raceon.go  (generated, vendored)
@@ -1,5 +0,0 @@
// +build race

package mgo

const raceDetector = true
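raceoff.go and raceon.go are the standard build-tag idiom for a compile-time constant: two files carry opposite constraints, so exactly one definition is compiled in, and `go build -race` flips the constant with zero runtime cost. The same pattern works for any toggle; a hypothetical sketch with a made-up `verbose` tag (the package and file names are illustrative only):

```go
// File verbose_on.go, compiled only with: go build -tags verbose
// +build verbose

package mypkg

const verbose = true
```

```go
// File verbose_off.go, compiled in every other build
// +build !verbose

package mypkg

const verbose = false
```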
11  vendor/gopkg.in/mgo.v2/saslimpl.go  (generated, vendored)
@@ -1,11 +0,0 @@
//+build sasl

package mgo

import (
    "gopkg.in/mgo.v2/internal/sasl"
)

func saslNew(cred Credential, host string) (saslStepper, error) {
    return sasl.New(cred.Username, cred.Password, cred.Mechanism, cred.Service, host)
}
11  vendor/gopkg.in/mgo.v2/saslstub.go  (generated, vendored)
@@ -1,11 +0,0 @@
//+build !sasl

package mgo

import (
    "fmt"
)

func saslNew(cred Credential, host string) (saslStepper, error) {
    return nil, fmt.Errorf("SASL support not enabled during build (-tags sasl)")
}
447  vendor/gopkg.in/mgo.v2/server.go  (generated, vendored)
@@ -1,447 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
    "errors"
    "net"
    "sort"
    "sync"
    "time"

    "gopkg.in/mgo.v2/bson"
)

// ---------------------------------------------------------------------------
// Mongo server encapsulation.

type mongoServer struct {
    sync.RWMutex
    Addr          string
    ResolvedAddr  string
    tcpaddr       *net.TCPAddr
    unusedSockets []*mongoSocket
    liveSockets   []*mongoSocket
    closed        bool
    abended       bool
    sync          chan bool
    dial          dialer
    pingValue     time.Duration
    pingIndex     int
    pingCount     uint32
    pingWindow    [6]time.Duration
    info          *mongoServerInfo
}

type dialer struct {
    old func(addr net.Addr) (net.Conn, error)
    new func(addr *ServerAddr) (net.Conn, error)
}

func (dial dialer) isSet() bool {
    return dial.old != nil || dial.new != nil
}

type mongoServerInfo struct {
    Master         bool
    Mongos         bool
    Tags           bson.D
    MaxWireVersion int
    SetName        string
}

var defaultServerInfo mongoServerInfo

func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) *mongoServer {
    server := &mongoServer{
        Addr:         addr,
        ResolvedAddr: tcpaddr.String(),
        tcpaddr:      tcpaddr,
        sync:         sync,
        dial:         dial,
        info:         &defaultServerInfo,
        pingValue:    time.Hour, // Push it back before an actual ping.
    }
    go server.pinger(true)
    return server
}

var errPoolLimit = errors.New("per-server connection limit reached")
var errServerClosed = errors.New("server was closed")

// AcquireSocket returns a socket for communicating with the server.
// This will attempt to reuse an old connection, if one is available. Otherwise,
// it will establish a new one. The returned socket is owned by the call site,
// and will return to the cache when the socket has its Release method called
// the same number of times as AcquireSocket + Acquire were called for it.
// If the poolLimit argument is greater than zero and the number of sockets in
// use in this server is greater than the provided limit, errPoolLimit is
// returned.
func (server *mongoServer) AcquireSocket(poolLimit int, timeout time.Duration) (socket *mongoSocket, abended bool, err error) {
    for {
        server.Lock()
        abended = server.abended
        if server.closed {
            server.Unlock()
            return nil, abended, errServerClosed
        }
        n := len(server.unusedSockets)
        if poolLimit > 0 && len(server.liveSockets)-n >= poolLimit {
            server.Unlock()
            return nil, false, errPoolLimit
        }
        if n > 0 {
            socket = server.unusedSockets[n-1]
            server.unusedSockets[n-1] = nil // Help GC.
            server.unusedSockets = server.unusedSockets[:n-1]
            info := server.info
            server.Unlock()
            err = socket.InitialAcquire(info, timeout)
            if err != nil {
                continue
            }
        } else {
            server.Unlock()
            socket, err = server.Connect(timeout)
            if err == nil {
                server.Lock()
                // We've waited for the Connect, see if we got
                // closed in the meantime
                if server.closed {
                    server.Unlock()
                    socket.Release()
                    socket.Close()
                    return nil, abended, errServerClosed
                }
                server.liveSockets = append(server.liveSockets, socket)
                server.Unlock()
            }
        }
        return
    }
    panic("unreachable")
}

// Connect establishes a new connection to the server. This should
// generally be done through server.AcquireSocket().
func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) {
    server.RLock()
    master := server.info.Master
    dial := server.dial
    server.RUnlock()

    logf("Establishing new connection to %s (timeout=%s)...", server.Addr, timeout)
    var conn net.Conn
    var err error
    switch {
    case !dial.isSet():
        // Cannot do this because it lacks timeout support. :-(
        //conn, err = net.DialTCP("tcp", nil, server.tcpaddr)
        conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout)
    case dial.old != nil:
        conn, err = dial.old(server.tcpaddr)
    case dial.new != nil:
        conn, err = dial.new(&ServerAddr{server.Addr, server.tcpaddr})
    default:
        panic("dialer is set, but both dial.old and dial.new are nil")
    }
    if err != nil {
        logf("Connection to %s failed: %v", server.Addr, err.Error())
        return nil, err
    }
    logf("Connection to %s established.", server.Addr)

    stats.conn(+1, master)
    return newSocket(server, conn, timeout), nil
}

// Close forces closing all sockets that are alive, whether
// they're currently in use or not.
func (server *mongoServer) Close() {
    server.Lock()
    server.closed = true
    liveSockets := server.liveSockets
    unusedSockets := server.unusedSockets
    server.liveSockets = nil
    server.unusedSockets = nil
    server.Unlock()
    logf("Connections to %s closing (%d live sockets).", server.Addr, len(liveSockets))
    for i, s := range liveSockets {
        s.Close()
        liveSockets[i] = nil
    }
    for i := range unusedSockets {
        unusedSockets[i] = nil
    }
}

// RecycleSocket puts socket back into the unused cache.
func (server *mongoServer) RecycleSocket(socket *mongoSocket) {
    server.Lock()
    if !server.closed {
        server.unusedSockets = append(server.unusedSockets, socket)
    }
    server.Unlock()
}

func removeSocket(sockets []*mongoSocket, socket *mongoSocket) []*mongoSocket {
    for i, s := range sockets {
        if s == socket {
            copy(sockets[i:], sockets[i+1:])
            n := len(sockets) - 1
            sockets[n] = nil
            sockets = sockets[:n]
            break
        }
    }
    return sockets
}

// AbendSocket notifies the server that the given socket has terminated
// abnormally, and thus should be discarded rather than cached.
func (server *mongoServer) AbendSocket(socket *mongoSocket) {
    server.Lock()
    server.abended = true
    if server.closed {
        server.Unlock()
        return
    }
    server.liveSockets = removeSocket(server.liveSockets, socket)
    server.unusedSockets = removeSocket(server.unusedSockets, socket)
    server.Unlock()
    // Maybe just a timeout, but suggest a cluster sync up just in case.
    select {
    case server.sync <- true:
    default:
    }
}

func (server *mongoServer) SetInfo(info *mongoServerInfo) {
    server.Lock()
    server.info = info
    server.Unlock()
}

func (server *mongoServer) Info() *mongoServerInfo {
    server.Lock()
    info := server.info
    server.Unlock()
    return info
}

func (server *mongoServer) hasTags(serverTags []bson.D) bool {
NextTagSet:
    for _, tags := range serverTags {
    NextReqTag:
        for _, req := range tags {
            for _, has := range server.info.Tags {
                if req.Name == has.Name {
                    if req.Value == has.Value {
                        continue NextReqTag
                    }
                    continue NextTagSet
                }
            }
            continue NextTagSet
        }
        return true
    }
    return false
}

var pingDelay = 15 * time.Second

func (server *mongoServer) pinger(loop bool) {
    var delay time.Duration
    if raceDetector {
        // This variable is only ever touched by tests.
        globalMutex.Lock()
        delay = pingDelay
        globalMutex.Unlock()
    } else {
        delay = pingDelay
    }
    op := queryOp{
        collection: "admin.$cmd",
        query:      bson.D{{"ping", 1}},
        flags:      flagSlaveOk,
        limit:      -1,
    }
    for {
        if loop {
            time.Sleep(delay)
        }
        op := op
        socket, _, err := server.AcquireSocket(0, delay)
        if err == nil {
            start := time.Now()
            _, _ = socket.SimpleQuery(&op)
            delay := time.Now().Sub(start)

            server.pingWindow[server.pingIndex] = delay
            server.pingIndex = (server.pingIndex + 1) % len(server.pingWindow)
            server.pingCount++
            var max time.Duration
            for i := 0; i < len(server.pingWindow) && uint32(i) < server.pingCount; i++ {
                if server.pingWindow[i] > max {
                    max = server.pingWindow[i]
                }
            }
            socket.Release()
            server.Lock()
            if server.closed {
                loop = false
            }
            server.pingValue = max
            server.Unlock()
            logf("Ping for %s is %d ms", server.Addr, max/time.Millisecond)
        } else if err == errServerClosed {
            return
        }
        if !loop {
            return
        }
    }
}

type mongoServerSlice []*mongoServer

func (s mongoServerSlice) Len() int {
    return len(s)
}

func (s mongoServerSlice) Less(i, j int) bool {
    return s[i].ResolvedAddr < s[j].ResolvedAddr
}

func (s mongoServerSlice) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

func (s mongoServerSlice) Sort() {
    sort.Sort(s)
}

func (s mongoServerSlice) Search(resolvedAddr string) (i int, ok bool) {
    n := len(s)
    i = sort.Search(n, func(i int) bool {
        return s[i].ResolvedAddr >= resolvedAddr
    })
    return i, i != n && s[i].ResolvedAddr == resolvedAddr
}

type mongoServers struct {
    slice mongoServerSlice
}

func (servers *mongoServers) Search(resolvedAddr string) (server *mongoServer) {
    if i, ok := servers.slice.Search(resolvedAddr); ok {
        return servers.slice[i]
    }
    return nil
}

func (servers *mongoServers) Add(server *mongoServer) {
    servers.slice = append(servers.slice, server)
    servers.slice.Sort()
}

func (servers *mongoServers) Remove(other *mongoServer) (server *mongoServer) {
    if i, found := servers.slice.Search(other.ResolvedAddr); found {
        server = servers.slice[i]
        copy(servers.slice[i:], servers.slice[i+1:])
        n := len(servers.slice) - 1
        servers.slice[n] = nil // Help GC.
        servers.slice = servers.slice[:n]
    }
    return
}

func (servers *mongoServers) Slice() []*mongoServer {
    return ([]*mongoServer)(servers.slice)
}

func (servers *mongoServers) Get(i int) *mongoServer {
    return servers.slice[i]
}

func (servers *mongoServers) Len() int {
    return len(servers.slice)
}

func (servers *mongoServers) Empty() bool {
    return len(servers.slice) == 0
}

// BestFit returns the best guess of what would be the most interesting
// server to perform operations on at this point in time.
func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServer {
    var best *mongoServer
    for _, next := range servers.slice {
        if best == nil {
            best = next
            best.RLock()
            if serverTags != nil && !next.info.Mongos && !best.hasTags(serverTags) {
                best.RUnlock()
                best = nil
            }
            continue
        }
        next.RLock()
        swap := false
        switch {
        case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags):
            // Must have requested tags.
        case next.info.Master != best.info.Master && mode != Nearest:
            // Prefer slaves, unless the mode is PrimaryPreferred.
            swap = (mode == PrimaryPreferred) != best.info.Master
        case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond:
            // Prefer nearest server.
            swap = next.pingValue < best.pingValue
        case len(next.liveSockets)-len(next.unusedSockets) < len(best.liveSockets)-len(best.unusedSockets):
            // Prefer servers with fewer connections.
            swap = true
        }
        if swap {
            best.RUnlock()
            best = next
        } else {
            next.RUnlock()
        }
    }
    if best != nil {
        best.RUnlock()
    }
    return best
}

func absDuration(d time.Duration) time.Duration {
    if d < 0 {
        return -d
    }
    return d
}
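pinger deliberately tracks the worst ping over a six-sample sliding window rather than the latest sample, so a single fast reply cannot mask a server that is intermittently slow; BestFit then compares those worst-case values across servers. The bookkeeping in isolation, as a runnable sketch:

```go
package main

import (
    "fmt"
    "time"
)

func main() {
    // Rolling worst-case latency, as in mongoServer.pinger: keep the last N
    // samples in a fixed array and take the maximum over the filled portion.
    var (
        window [6]time.Duration
        index  int
        count  uint32
    )
    record := func(sample time.Duration) time.Duration {
        window[index] = sample
        index = (index + 1) % len(window)
        count++
        var max time.Duration
        for i := 0; i < len(window) && uint32(i) < count; i++ {
            if window[i] > max {
                max = window[i]
            }
        }
        return max
    }
    for _, ms := range []int{12, 40, 9, 15} {
        fmt.Println(record(time.Duration(ms) * time.Millisecond)) // 12ms, then 40ms stays the worst
    }
}
```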
4411  vendor/gopkg.in/mgo.v2/session.go  (generated, vendored)
File diff suppressed because it is too large

3876  vendor/gopkg.in/mgo.v2/session_test.go  (generated, vendored)
File diff suppressed because it is too large
706  vendor/gopkg.in/mgo.v2/socket.go  (generated, vendored)
@@ -1,706 +0,0 @@
// mgo - MongoDB driver for Go
|
||||
//
|
||||
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||
//
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||
// list of conditions and the following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package mgo
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gopkg.in/mgo.v2/bson"
|
||||
)
|
||||
|
||||
type replyFunc func(err error, reply *replyOp, docNum int, docData []byte)
|
||||
|
||||
type mongoSocket struct {
|
||||
sync.Mutex
|
||||
server *mongoServer // nil when cached
|
||||
conn net.Conn
|
||||
timeout time.Duration
|
||||
addr string // For debugging only.
|
||||
nextRequestId uint32
|
||||
replyFuncs map[uint32]replyFunc
|
||||
references int
|
||||
creds []Credential
|
||||
logout []Credential
|
||||
cachedNonce string
|
||||
gotNonce sync.Cond
|
||||
dead error
|
||||
serverInfo *mongoServerInfo
|
||||
}
|
||||
|
||||
type queryOpFlags uint32
|
||||
|
||||
const (
|
||||
_ queryOpFlags = 1 << iota
|
||||
flagTailable
|
||||
flagSlaveOk
|
||||
flagLogReplay
|
||||
flagNoCursorTimeout
|
||||
flagAwaitData
|
||||
)
|
||||
|
||||
type queryOp struct {
|
||||
collection string
|
||||
query interface{}
|
||||
skip int32
|
||||
limit int32
|
||||
selector interface{}
|
||||
flags queryOpFlags
|
||||
replyFunc replyFunc
|
||||
|
||||
mode Mode
|
||||
options queryWrapper
|
||||
hasOptions bool
|
||||
serverTags []bson.D
|
||||
}
|
||||
|
||||
type queryWrapper struct {
|
||||
Query interface{} "$query"
|
||||
OrderBy interface{} "$orderby,omitempty"
|
||||
Hint interface{} "$hint,omitempty"
|
||||
Explain bool "$explain,omitempty"
|
||||
Snapshot bool "$snapshot,omitempty"
|
||||
ReadPreference bson.D "$readPreference,omitempty"
|
||||
MaxScan int "$maxScan,omitempty"
|
||||
MaxTimeMS int "$maxTimeMS,omitempty"
|
||||
Comment string "$comment,omitempty"
|
||||
}
|
||||
|
||||
func (op *queryOp) finalQuery(socket *mongoSocket) interface{} {
|
||||
if socket.ServerInfo().Mongos {
|
||||
var modeName string
|
||||
if op.flags&flagSlaveOk == 0 {
|
||||
modeName = "primary"
|
||||
} else {
|
||||
switch op.mode {
|
||||
case Strong:
|
||||
modeName = "primary"
|
||||
case Monotonic, Eventual:
|
||||
modeName = "secondaryPreferred"
|
||||
case PrimaryPreferred:
|
||||
modeName = "primaryPreferred"
|
||||
case Secondary:
|
||||
modeName = "secondary"
|
||||
case SecondaryPreferred:
|
||||
modeName = "secondaryPreferred"
|
||||
case Nearest:
|
||||
modeName = "nearest"
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported read mode: %d", op.mode))
|
||||
}
|
||||
}
|
||||
op.hasOptions = true
|
||||
op.options.ReadPreference = make(bson.D, 0, 2)
|
||||
op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"mode", modeName})
|
||||
if len(op.serverTags) > 0 {
|
||||
op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"tags", op.serverTags})
|
||||
}
|
||||
}
|
||||
if op.hasOptions {
|
||||
if op.query == nil {
|
||||
var empty bson.D
|
||||
op.options.Query = empty
|
||||
} else {
|
||||
op.options.Query = op.query
|
||||
}
|
||||
debugf("final query is %#v\n", &op.options)
|
||||
return &op.options
|
||||
}
|
||||
return op.query
|
||||
}
|
||||
|
||||
type getMoreOp struct {
|
||||
collection string
|
||||
limit int32
|
||||
cursorId int64
|
||||
replyFunc replyFunc
|
||||
}
|
||||
|
||||
type replyOp struct {
|
||||
flags uint32
|
||||
cursorId int64
|
||||
firstDoc int32
|
||||
replyDocs int32
|
||||
}
|
||||
|
||||
type insertOp struct {
|
||||
collection string // "database.collection"
|
||||
documents []interface{} // One or more documents to insert
|
||||
flags uint32
|
||||
}
|
||||
|
||||
type updateOp struct {
|
||||
Collection string `bson:"-"` // "database.collection"
|
||||
Selector interface{} `bson:"q"`
|
||||
Update interface{} `bson:"u"`
|
||||
Flags uint32 `bson:"-"`
|
||||
Multi bool `bson:"multi,omitempty"`
|
||||
Upsert bool `bson:"upsert,omitempty"`
|
||||
}
|
||||
|
||||
type deleteOp struct {
|
||||
collection string // "database.collection"
|
||||
selector interface{}
|
||||
flags uint32
|
||||
}
|
||||
|
||||
type killCursorsOp struct {
|
||||
cursorIds []int64
|
||||
}
|
||||
|
||||
type requestInfo struct {
|
||||
bufferPos int
|
||||
replyFunc replyFunc
|
||||
}
|
||||
|
||||
func newSocket(server *mongoServer, conn net.Conn, timeout time.Duration) *mongoSocket {
|
||||
socket := &mongoSocket{
|
||||
conn: conn,
|
||||
addr: server.Addr,
|
||||
server: server,
|
||||
replyFuncs: make(map[uint32]replyFunc),
|
||||
}
|
||||
socket.gotNonce.L = &socket.Mutex
|
||||
if err := socket.InitialAcquire(server.Info(), timeout); err != nil {
|
||||
panic("newSocket: InitialAcquire returned error: " + err.Error())
|
||||
}
|
||||
stats.socketsAlive(+1)
|
||||
debugf("Socket %p to %s: initialized", socket, socket.addr)
|
||||
socket.resetNonce()
|
||||
go socket.readLoop()
|
||||
return socket
|
||||
}
|
||||
|
||||
// Server returns the server that the socket is associated with.
|
||||
// It returns nil while the socket is cached in its respective server.
|
||||
func (socket *mongoSocket) Server() *mongoServer {
|
||||
socket.Lock()
|
||||
server := socket.server
|
||||
socket.Unlock()
|
||||
return server
|
||||
}
|
||||
|
||||
// ServerInfo returns details for the server at the time the socket
|
||||
// was initially acquired.
|
||||
func (socket *mongoSocket) ServerInfo() *mongoServerInfo {
|
||||
socket.Lock()
|
||||
serverInfo := socket.serverInfo
|
||||
socket.Unlock()
|
||||
return serverInfo
|
||||
}
|
||||
|
||||
// InitialAcquire obtains the first reference to the socket, either
|
||||
// right after the connection is made or once a recycled socket is
|
||||
// being put back in use.
|
||||
func (socket *mongoSocket) InitialAcquire(serverInfo *mongoServerInfo, timeout time.Duration) error {
|
||||
socket.Lock()
|
||||
if socket.references > 0 {
|
||||
panic("Socket acquired out of cache with references")
|
||||
}
|
||||
if socket.dead != nil {
|
||||
dead := socket.dead
|
||||
socket.Unlock()
|
||||
return dead
|
||||
}
|
||||
socket.references++
|
||||
socket.serverInfo = serverInfo
|
||||
socket.timeout = timeout
|
||||
stats.socketsInUse(+1)
|
||||
stats.socketRefs(+1)
|
||||
socket.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Acquire obtains an additional reference to the socket.
|
||||
// The socket will only be recycled when it's released as many
|
||||
// times as it's been acquired.
|
||||
func (socket *mongoSocket) Acquire() (info *mongoServerInfo) {
|
||||
socket.Lock()
|
||||
if socket.references == 0 {
|
||||
panic("Socket got non-initial acquire with references == 0")
|
||||
}
|
||||
// We'll track references to dead sockets as well.
|
||||
// Caller is still supposed to release the socket.
|
||||
socket.references++
|
||||
stats.socketRefs(+1)
|
||||
serverInfo := socket.serverInfo
|
||||
socket.Unlock()
|
||||
return serverInfo
|
||||
}
|
||||
|
||||
// Release decrements a socket reference. The socket will be
|
||||
// recycled once its released as many times as it's been acquired.
|
||||
func (socket *mongoSocket) Release() {
|
||||
socket.Lock()
|
||||
if socket.references == 0 {
|
||||
panic("socket.Release() with references == 0")
|
||||
}
|
||||
socket.references--
|
||||
stats.socketRefs(-1)
|
||||
if socket.references == 0 {
|
||||
stats.socketsInUse(-1)
|
||||
server := socket.server
|
||||
socket.Unlock()
|
||||
socket.LogoutAll()
|
||||
// If the socket is dead server is nil.
|
||||
if server != nil {
|
||||
server.RecycleSocket(socket)
|
||||
}
|
||||
} else {
|
||||
socket.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// SetTimeout changes the timeout used on socket operations.
|
||||
func (socket *mongoSocket) SetTimeout(d time.Duration) {
|
||||
socket.Lock()
|
||||
socket.timeout = d
|
||||
socket.Unlock()
|
||||
}
|
||||
|
||||
type deadlineType int
|
||||
|
||||
const (
|
||||
readDeadline deadlineType = 1
|
||||
writeDeadline deadlineType = 2
|
||||
)
|
||||
|
||||
func (socket *mongoSocket) updateDeadline(which deadlineType) {
|
||||
var when time.Time
|
||||
if socket.timeout > 0 {
|
||||
when = time.Now().Add(socket.timeout)
|
||||
}
|
||||
whichstr := ""
|
||||
switch which {
|
||||
case readDeadline | writeDeadline:
|
||||
whichstr = "read/write"
|
||||
socket.conn.SetDeadline(when)
|
||||
case readDeadline:
|
||||
whichstr = "read"
|
||||
socket.conn.SetReadDeadline(when)
|
||||
case writeDeadline:
|
||||
whichstr = "write"
|
||||
socket.conn.SetWriteDeadline(when)
|
||||
default:
|
||||
panic("invalid parameter to updateDeadline")
|
||||
}
|
||||
debugf("Socket %p to %s: updated %s deadline to %s ahead (%s)", socket, socket.addr, whichstr, socket.timeout, when)
|
||||
}
|
||||
|
||||
// Close terminates the socket use.
|
||||
func (socket *mongoSocket) Close() {
|
||||
socket.kill(errors.New("Closed explicitly"), false)
|
||||
}
|
||||
|
||||
func (socket *mongoSocket) kill(err error, abend bool) {
|
||||
socket.Lock()
|
||||
if socket.dead != nil {
|
||||
debugf("Socket %p to %s: killed again: %s (previously: %s)", socket, socket.addr, err.Error(), socket.dead.Error())
|
||||
socket.Unlock()
|
||||
return
|
||||
}
|
||||
logf("Socket %p to %s: closing: %s (abend=%v)", socket, socket.addr, err.Error(), abend)
|
||||
socket.dead = err
|
||||
socket.conn.Close()
|
||||
stats.socketsAlive(-1)
|
||||
replyFuncs := socket.replyFuncs
|
||||
socket.replyFuncs = make(map[uint32]replyFunc)
|
||||
server := socket.server
|
||||
socket.server = nil
|
||||
socket.gotNonce.Broadcast()
|
||||
socket.Unlock()
|
||||
for _, replyFunc := range replyFuncs {
|
||||
logf("Socket %p to %s: notifying replyFunc of closed socket: %s", socket, socket.addr, err.Error())
|
||||
replyFunc(err, nil, -1, nil)
|
||||
}
|
||||
if abend {
|
||||
server.AbendSocket(socket)
|
||||
}
|
||||
}
|
||||
|
||||
func (socket *mongoSocket) SimpleQuery(op *queryOp) (data []byte, err error) {
|
||||
var wait, change sync.Mutex
|
||||
var replyDone bool
|
||||
var replyData []byte
|
||||
var replyErr error
|
||||
wait.Lock()
|
||||
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
|
||||
change.Lock()
|
||||
if !replyDone {
|
||||
replyDone = true
|
||||
replyErr = err
|
||||
if err == nil {
|
||||
replyData = docData
|
||||
}
|
||||
}
|
||||
change.Unlock()
|
||||
wait.Unlock()
|
||||
}
|
||||
err = socket.Query(op)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wait.Lock()
|
||||
change.Lock()
|
||||
data = replyData
|
||||
err = replyErr
|
||||
change.Unlock()
|
||||
return data, err
|
||||
}
|
||||
|
||||
func (socket *mongoSocket) Query(ops ...interface{}) (err error) {
|
||||
|
||||
if lops := socket.flushLogout(); len(lops) > 0 {
|
||||
ops = append(lops, ops...)
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 256)
|
||||
|
||||
// Serialize operations synchronously to avoid interrupting
|
||||
// other goroutines while we can't really be sending data.
|
||||
// Also, record id positions so that we can compute request
|
||||
// ids at once later with the lock already held.
|
||||
requests := make([]requestInfo, len(ops))
|
||||
requestCount := 0
|
||||
|
||||
for _, op := range ops {
|
||||
debugf("Socket %p to %s: serializing op: %#v", socket, socket.addr, op)
|
||||
start := len(buf)
|
||||
var replyFunc replyFunc
|
||||
switch op := op.(type) {
|
||||
|
||||
case *updateOp:
|
||||
buf = addHeader(buf, 2001)
|
||||
buf = addInt32(buf, 0) // Reserved
|
||||
buf = addCString(buf, op.Collection)
|
||||
buf = addInt32(buf, int32(op.Flags))
|
||||
debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector)
|
||||
buf, err = addBSON(buf, op.Selector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.Update)
|
||||
buf, err = addBSON(buf, op.Update)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *insertOp:
|
||||
buf = addHeader(buf, 2002)
|
||||
buf = addInt32(buf, int32(op.flags))
|
||||
buf = addCString(buf, op.collection)
|
||||
for _, doc := range op.documents {
|
||||
debugf("Socket %p to %s: serializing document for insertion: %#v", socket, socket.addr, doc)
|
||||
buf, err = addBSON(buf, doc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case *queryOp:
|
||||
buf = addHeader(buf, 2004)
|
||||
buf = addInt32(buf, int32(op.flags))
|
||||
buf = addCString(buf, op.collection)
|
||||
buf = addInt32(buf, op.skip)
|
||||
buf = addInt32(buf, op.limit)
|
||||
buf, err = addBSON(buf, op.finalQuery(socket))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if op.selector != nil {
|
||||
buf, err = addBSON(buf, op.selector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
replyFunc = op.replyFunc
|
||||
|
||||
case *getMoreOp:
|
||||
buf = addHeader(buf, 2005)
|
||||
buf = addInt32(buf, 0) // Reserved
|
||||
buf = addCString(buf, op.collection)
|
||||
buf = addInt32(buf, op.limit)
|
||||
buf = addInt64(buf, op.cursorId)
|
||||
replyFunc = op.replyFunc
|
||||
|
||||
case *deleteOp:
|
||||
buf = addHeader(buf, 2006)
|
||||
buf = addInt32(buf, 0) // Reserved
|
||||
buf = addCString(buf, op.collection)
|
||||
buf = addInt32(buf, int32(op.flags))
|
||||
debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector)
|
||||
buf, err = addBSON(buf, op.selector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *killCursorsOp:
|
||||
buf = addHeader(buf, 2007)
|
||||
buf = addInt32(buf, 0) // Reserved
|
||||
buf = addInt32(buf, int32(len(op.cursorIds)))
|
||||
for _, cursorId := range op.cursorIds {
|
||||
buf = addInt64(buf, cursorId)
|
||||
}
|
||||
|
||||
default:
|
||||
panic("internal error: unknown operation type")
|
||||
}
|
||||
|
||||
setInt32(buf, start, int32(len(buf)-start))
|
||||
|
||||
if replyFunc != nil {
|
||||
request := &requests[requestCount]
|
||||
request.replyFunc = replyFunc
|
||||
request.bufferPos = start
|
||||
requestCount++
|
||||
}
|
||||
}
|
||||
|
||||
// Buffer is ready for the pipe. Lock, allocate ids, and enqueue.
|
||||
|
||||
socket.Lock()
|
||||
if socket.dead != nil {
|
||||
dead := socket.dead
|
||||
socket.Unlock()
|
||||
debugf("Socket %p to %s: failing query, already closed: %s", socket, socket.addr, socket.dead.Error())
|
||||
// XXX This seems necessary in case the session is closed concurrently
|
||||
// with a query being performed, but it's not yet tested:
|
||||
for i := 0; i != requestCount; i++ {
|
||||
request := &requests[i]
|
||||
if request.replyFunc != nil {
|
||||
request.replyFunc(dead, nil, -1, nil)
|
||||
}
|
||||
}
|
||||
return dead
|
||||
}
|
||||
|
||||
wasWaiting := len(socket.replyFuncs) > 0
|
||||
|
||||
// Reserve id 0 for requests which should have no responses.
|
||||
requestId := socket.nextRequestId + 1
|
||||
if requestId == 0 {
|
||||
requestId++
|
||||
}
|
||||
socket.nextRequestId = requestId + uint32(requestCount)
|
||||
for i := 0; i != requestCount; i++ {
|
||||
request := &requests[i]
|
||||
setInt32(buf, request.bufferPos+4, int32(requestId))
|
||||
socket.replyFuncs[requestId] = request.replyFunc
|
||||
requestId++
|
||||
}
|
||||
|
||||
debugf("Socket %p to %s: sending %d op(s) (%d bytes)", socket, socket.addr, len(ops), len(buf))
|
||||
stats.sentOps(len(ops))
|
||||
|
||||
socket.updateDeadline(writeDeadline)
|
||||
_, err = socket.conn.Write(buf)
|
||||
if !wasWaiting && requestCount > 0 {
|
||||
socket.updateDeadline(readDeadline)
|
||||
}
|
||||
socket.Unlock()
|
||||
return err
|
||||
}
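The setInt32 calls above rely on the fixed MongoDB wire-protocol header: every message begins with four little-endian int32s, so the total length lands at offset 0 and the request id at offset 4 of each serialized op. A minimal sketch of that layout for reference (the struct is illustrative, not part of the vendored code):

```
// Wire-protocol message header, as implied by the offsets patched above.
type msgHeader struct {
	MessageLength int32 // offset 0: total size in bytes, header included
	RequestID     int32 // offset 4: client-chosen id, echoed back in replies
	ResponseTo    int32 // offset 8: zero for client-originated messages
	OpCode        int32 // offset 12: e.g. 2004 (OP_QUERY), 2001 (OP_UPDATE)
}
```

Ops that expect a response register their replyFunc under the id just written, which is how readLoop below routes each incoming OP_REPLY back to its caller.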
func fill(r net.Conn, b []byte) error {
	l := len(b)
	n, err := r.Read(b)
	for n != l && err == nil {
		var ni int
		ni, err = r.Read(b[n:])
		n += ni
	}
	return err
}
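fill is, in effect, io.ReadFull: it loops until the buffer is full or the connection fails. A roughly equivalent sketch using the standard library (behavior differs only in that io.ReadFull reports a short read ending in EOF as io.ErrUnexpectedEOF, while the loop above returns the raw read error):

```
package wire // illustrative

import (
	"io"
	"net"
)

// fillAlt reads exactly len(b) bytes, like fill above.
func fillAlt(r net.Conn, b []byte) error {
	_, err := io.ReadFull(r, b)
	return err
}
```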
// Estimated minimum cost per socket: 1 goroutine + memory for the largest
// document ever seen.
func (socket *mongoSocket) readLoop() {
	p := make([]byte, 36) // 16 from header + 20 from OP_REPLY fixed fields
	s := make([]byte, 4)
	conn := socket.conn // No locking, conn never changes.
	for {
		// XXX Handle timeouts, etc.
		err := fill(conn, p)
		if err != nil {
			socket.kill(err, true)
			return
		}

		totalLen := getInt32(p, 0)
		responseTo := getInt32(p, 8)
		opCode := getInt32(p, 12)

		// Don't use socket.server.Addr here. socket is not
		// locked and socket.server may go away.
		debugf("Socket %p to %s: got reply (%d bytes)", socket, socket.addr, totalLen)

		_ = totalLen

		if opCode != 1 {
			socket.kill(errors.New("opcode != 1, corrupted data?"), true)
			return
		}

		reply := replyOp{
			flags:     uint32(getInt32(p, 16)),
			cursorId:  getInt64(p, 20),
			firstDoc:  getInt32(p, 28),
			replyDocs: getInt32(p, 32),
		}

		stats.receivedOps(+1)
		stats.receivedDocs(int(reply.replyDocs))

		socket.Lock()
		replyFunc, ok := socket.replyFuncs[uint32(responseTo)]
		if ok {
			delete(socket.replyFuncs, uint32(responseTo))
		}
		socket.Unlock()

		if replyFunc != nil && reply.replyDocs == 0 {
			replyFunc(nil, &reply, -1, nil)
		} else {
			for i := 0; i != int(reply.replyDocs); i++ {
				err := fill(conn, s)
				if err != nil {
					if replyFunc != nil {
						replyFunc(err, nil, -1, nil)
					}
					socket.kill(err, true)
					return
				}

				b := make([]byte, int(getInt32(s, 0)))

				// copy(b, s) in an efficient way.
				b[0] = s[0]
				b[1] = s[1]
				b[2] = s[2]
				b[3] = s[3]

				err = fill(conn, b[4:])
				if err != nil {
					if replyFunc != nil {
						replyFunc(err, nil, -1, nil)
					}
					socket.kill(err, true)
					return
				}

				if globalDebug && globalLogger != nil {
					m := bson.M{}
					if err := bson.Unmarshal(b, m); err == nil {
						debugf("Socket %p to %s: received document: %#v", socket, socket.addr, m)
					}
				}

				if replyFunc != nil {
					replyFunc(nil, &reply, i, b)
				}

				// XXX Do bound checking against totalLen.
			}
		}

		socket.Lock()
		if len(socket.replyFuncs) == 0 {
			// Nothing else to read for now. Disable deadline.
			socket.conn.SetReadDeadline(time.Time{})
		} else {
			socket.updateDeadline(readDeadline)
		}
		socket.Unlock()

		// XXX Do bound checking against totalLen.
	}
}
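The 20 bytes past the header that get decoded into replyOp are OP_REPLY's fixed fields; the offsets 16/20/28/32 used above match this layout (the struct is illustrative):

```
// Fixed OP_REPLY fields, immediately following the 16-byte header.
type opReplyFixed struct {
	ResponseFlags  int32 // offset 16: cursorNotFound, queryFailure, ...
	CursorID       int64 // offset 20: nonzero while more batches remain
	StartingFrom   int32 // offset 28: cursor position of the first document
	NumberReturned int32 // offset 32: count of length-prefixed BSON docs that follow
}
```

Each returned document is then read in two steps: its four length bytes first, then the remainder, which is why the loop copies s into the head of b before filling b[4:].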
var emptyHeader = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

func addHeader(b []byte, opcode int) []byte {
	i := len(b)
	b = append(b, emptyHeader...)
	// Enough for current opcodes.
	b[i+12] = byte(opcode)
	b[i+13] = byte(opcode >> 8)
	return b
}

func addInt32(b []byte, i int32) []byte {
	return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24))
}

func addInt64(b []byte, i int64) []byte {
	return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24),
		byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56))
}

func addCString(b []byte, s string) []byte {
	b = append(b, []byte(s)...)
	b = append(b, 0)
	return b
}

func addBSON(b []byte, doc interface{}) ([]byte, error) {
	if doc == nil {
		return append(b, 5, 0, 0, 0, 0), nil
	}
	data, err := bson.Marshal(doc)
	if err != nil {
		return b, err
	}
	return append(b, data...), nil
}

func setInt32(b []byte, pos int, i int32) {
	b[pos] = byte(i)
	b[pos+1] = byte(i >> 8)
	b[pos+2] = byte(i >> 16)
	b[pos+3] = byte(i >> 24)
}

func getInt32(b []byte, pos int) int32 {
	return (int32(b[pos+0])) |
		(int32(b[pos+1]) << 8) |
		(int32(b[pos+2]) << 16) |
		(int32(b[pos+3]) << 24)
}

func getInt64(b []byte, pos int) int64 {
	return (int64(b[pos+0])) |
		(int64(b[pos+1]) << 8) |
		(int64(b[pos+2]) << 16) |
		(int64(b[pos+3]) << 24) |
		(int64(b[pos+4]) << 32) |
		(int64(b[pos+5]) << 40) |
		(int64(b[pos+6]) << 48) |
		(int64(b[pos+7]) << 56)
}
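All of these helpers hand-roll little-endian byte order, which both BSON and the MongoDB wire protocol mandate. A quick sketch showing they agree with the standard library's encoding/binary (illustrative, not part of the vendored file):

```
package wire // illustrative

import "encoding/binary"

// addInt32Alt appends i least-significant byte first, like addInt32 above.
func addInt32Alt(b []byte, i int32) []byte {
	var tmp [4]byte
	binary.LittleEndian.PutUint32(tmp[:], uint32(i))
	return append(b, tmp[:]...)
}

// getInt32Alt decodes what addInt32Alt encodes, like getInt32 above.
func getInt32Alt(b []byte, pos int) int32 {
	return int32(binary.LittleEndian.Uint32(b[pos : pos+4]))
}
```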
147
vendor/gopkg.in/mgo.v2/stats.go
generated
vendored
@ -1,147 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"sync"
)

var stats *Stats
var statsMutex sync.Mutex

func SetStats(enabled bool) {
	statsMutex.Lock()
	if enabled {
		if stats == nil {
			stats = &Stats{}
		}
	} else {
		stats = nil
	}
	statsMutex.Unlock()
}

func GetStats() (snapshot Stats) {
	statsMutex.Lock()
	snapshot = *stats
	statsMutex.Unlock()
	return
}

func ResetStats() {
	statsMutex.Lock()
	debug("Resetting stats")
	old := stats
	stats = &Stats{}
	// These are absolute values:
	stats.Clusters = old.Clusters
	stats.SocketsInUse = old.SocketsInUse
	stats.SocketsAlive = old.SocketsAlive
	stats.SocketRefs = old.SocketRefs
	statsMutex.Unlock()
	return
}

type Stats struct {
	Clusters     int
	MasterConns  int
	SlaveConns   int
	SentOps      int
	ReceivedOps  int
	ReceivedDocs int
	SocketsAlive int
	SocketsInUse int
	SocketRefs   int
}

func (stats *Stats) cluster(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.Clusters += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) conn(delta int, master bool) {
	if stats != nil {
		statsMutex.Lock()
		if master {
			stats.MasterConns += delta
		} else {
			stats.SlaveConns += delta
		}
		statsMutex.Unlock()
	}
}

func (stats *Stats) sentOps(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.SentOps += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) receivedOps(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.ReceivedOps += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) receivedDocs(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.ReceivedDocs += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) socketsInUse(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.SocketsInUse += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) socketsAlive(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.SocketsAlive += delta
		statsMutex.Unlock()
	}
}

func (stats *Stats) socketRefs(delta int) {
	if stats != nil {
		statsMutex.Lock()
		stats.SocketRefs += delta
		statsMutex.Unlock()
	}
}
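These counters back mgo's public SetStats/GetStats/ResetStats API, which the test suite below uses to assert that no sockets or clusters leak between tests. A minimal usage sketch (the address is illustrative and assumes a reachable mongod):

```
package main

import (
	"fmt"
	"log"

	"gopkg.in/mgo.v2"
)

func main() {
	mgo.SetStats(true) // counters stay nil and cost nothing until enabled
	session, err := mgo.Dial("localhost:27017")
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	if err := session.Ping(); err != nil {
		log.Fatal(err)
	}
	s := mgo.GetStats()
	fmt.Printf("sent=%d received=%d sockets alive=%d\n", s.SentOps, s.ReceivedOps, s.SocketsAlive)
}
```

Note that GetStats dereferences the shared pointer unconditionally, so it is only safe to call after SetStats(true), as the suite's SetUpSuite below does.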
259
vendor/gopkg.in/mgo.v2/suite_test.go
generated
vendored
@ -1,259 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo_test

import (
	"errors"
	"flag"
	"fmt"
	"net"
	"os/exec"
	"runtime"
	"strconv"
	"testing"
	"time"

	. "gopkg.in/check.v1"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

var fast = flag.Bool("fast", false, "Skip slow tests")

type M bson.M

type cLogger C

func (c *cLogger) Output(calldepth int, s string) error {
	ns := time.Now().UnixNano()
	t := float64(ns%100e9) / 1e9
	((*C)(c)).Logf("[LOG] %.05f %s", t, s)
	return nil
}

func TestAll(t *testing.T) {
	TestingT(t)
}

type S struct {
	session *mgo.Session
	stopped bool
	build   mgo.BuildInfo
	frozen  []string
}

func (s *S) versionAtLeast(v ...int) (result bool) {
	for i := range v {
		if i == len(s.build.VersionArray) {
			return false
		}
		if s.build.VersionArray[i] != v[i] {
			return s.build.VersionArray[i] >= v[i]
		}
	}
	return true
}

var _ = Suite(&S{})

func (s *S) SetUpSuite(c *C) {
	mgo.SetDebug(true)
	mgo.SetStats(true)
	s.StartAll()

	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	s.build, err = session.BuildInfo()
	c.Check(err, IsNil)
	session.Close()
}

func (s *S) SetUpTest(c *C) {
	err := run("mongo --nodb testdb/dropall.js")
	if err != nil {
		panic(err.Error())
	}
	mgo.SetLogger((*cLogger)(c))
	mgo.ResetStats()
}

func (s *S) TearDownTest(c *C) {
	if s.stopped {
		s.Stop(":40201")
		s.Stop(":40202")
		s.Stop(":40203")
		s.StartAll()
	}
	for _, host := range s.frozen {
		if host != "" {
			s.Thaw(host)
		}
	}
	var stats mgo.Stats
	for i := 0; ; i++ {
		stats = mgo.GetStats()
		if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
			break
		}
		if i == 20 {
			c.Fatal("Test left sockets in a dirty state")
		}
		c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
		time.Sleep(500 * time.Millisecond)
	}
	for i := 0; ; i++ {
		stats = mgo.GetStats()
		if stats.Clusters == 0 {
			break
		}
		if i == 60 {
			c.Fatal("Test left clusters alive")
		}
		c.Logf("Waiting for clusters to die: %d alive", stats.Clusters)
		time.Sleep(1 * time.Second)
	}
}

func (s *S) Stop(host string) {
	// Give a moment for slaves to sync and avoid getting rollback issues.
	panicOnWindows()
	time.Sleep(2 * time.Second)
	err := run("cd _testdb && supervisorctl stop " + supvName(host))
	if err != nil {
		panic(err)
	}
	s.stopped = true
}

func (s *S) pid(host string) int {
	output, err := exec.Command("lsof", "-iTCP:"+hostPort(host), "-sTCP:LISTEN", "-Fp").CombinedOutput()
	if err != nil {
		panic(err)
	}
	pidstr := string(output[1 : len(output)-1])
	pid, err := strconv.Atoi(pidstr)
	if err != nil {
		panic("cannot convert pid to int: " + pidstr)
	}
	return pid
}

func (s *S) Freeze(host string) {
	err := stop(s.pid(host))
	if err != nil {
		panic(err)
	}
	s.frozen = append(s.frozen, host)
}

func (s *S) Thaw(host string) {
	err := cont(s.pid(host))
	if err != nil {
		panic(err)
	}
	for i, frozen := range s.frozen {
		if frozen == host {
			s.frozen[i] = ""
		}
	}
}

func (s *S) StartAll() {
	if s.stopped {
		// Restart any stopped nodes.
		run("cd _testdb && supervisorctl start all")
		err := run("cd testdb && mongo --nodb wait.js")
		if err != nil {
			panic(err)
		}
		s.stopped = false
	}
}

func run(command string) error {
	var output []byte
	var err error
	if runtime.GOOS == "windows" {
		output, err = exec.Command("cmd", "/C", command).CombinedOutput()
	} else {
		output, err = exec.Command("/bin/sh", "-c", command).CombinedOutput()
	}

	if err != nil {
		msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output))
		return errors.New(msg)
	}
	return nil
}

var supvNames = map[string]string{
	"40001": "db1",
	"40002": "db2",
	"40011": "rs1a",
	"40012": "rs1b",
	"40013": "rs1c",
	"40021": "rs2a",
	"40022": "rs2b",
	"40023": "rs2c",
	"40031": "rs3a",
	"40032": "rs3b",
	"40033": "rs3c",
	"40041": "rs4a",
	"40101": "cfg1",
	"40102": "cfg2",
	"40103": "cfg3",
	"40201": "s1",
	"40202": "s2",
	"40203": "s3",
}

// supvName returns the supervisord name for the given host address.
func supvName(host string) string {
	host, port, err := net.SplitHostPort(host)
	if err != nil {
		panic(err)
	}
	name, ok := supvNames[port]
	if !ok {
		panic("Unknown host: " + host)
	}
	return name
}

func hostPort(host string) string {
	_, port, err := net.SplitHostPort(host)
	if err != nil {
		panic(err)
	}
	return port
}

func panicOnWindows() {
	if runtime.GOOS == "windows" {
		panic("the test suite is not yet fully supported on Windows")
	}
}
15
vendor/gopkg.in/mgo.v2/syscall_test.go
generated
vendored
@ -1,15 +0,0 @@
// +build !windows

package mgo_test

import (
	"syscall"
)

func stop(pid int) (err error) {
	return syscall.Kill(pid, syscall.SIGSTOP)
}

func cont(pid int) (err error) {
	return syscall.Kill(pid, syscall.SIGCONT)
}
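Freezing a server with SIGSTOP rather than killing it is deliberate: a stopped process keeps its TCP listeners and established connections open but never answers, which is precisely the unresponsive-peer condition the driver's read and write deadlines exist to detect. Killing the process would instead produce an immediate connection reset and exercise a different failure path, which is why the suite's Freeze/Thaw helpers above pair SIGSTOP with SIGCONT.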
11
vendor/gopkg.in/mgo.v2/syscall_windows_test.go
generated
vendored
@ -1,11 +0,0 @@
package mgo_test

func stop(pid int) (err error) {
	panicOnWindows() // Always does.
	return nil
}

func cont(pid int) (err error) {
	panicOnWindows() // Always does.
	return nil
}
24
vendor/vendor.json
vendored
@ -89,8 +89,8 @@
 		},
 		{
 			"path": "github.com/minio/minio-go",
-			"revision": "a4c6c439feb53e1aad083f0a3f0083a047092c17",
-			"revisionTime": "2016-03-10T10:12:11-08:00"
+			"revision": "ec610a695d37f1f04b9516a91857bfca007e3740",
+			"revisionTime": "2016-03-19T15:53:30-07:00"
 		},
 		{
 			"path": "github.com/minio/miniobrowser",
@ -126,26 +126,6 @@
 			"path": "gopkg.in/check.v1",
 			"revision": "11d3bc7aa68e238947792f30573146a3231fc0f1",
 			"revisionTime": "2015-07-29T10:04:31+02:00"
-		},
-		{
-			"path": "gopkg.in/mgo.v2",
-			"revision": "f402e3a216db333ae6b3ba68b9152a34a0bc6984",
-			"revisionTime": "2015-10-05T19:07:38-07:00"
-		},
-		{
-			"path": "gopkg.in/mgo.v2/bson",
-			"revision": "f402e3a216db333ae6b3ba68b9152a34a0bc6984",
-			"revisionTime": "2015-10-05T19:07:38-07:00"
-		},
-		{
-			"path": "gopkg.in/mgo.v2/internal/sasl",
-			"revision": "f402e3a216db333ae6b3ba68b9152a34a0bc6984",
-			"revisionTime": "2015-10-05T19:07:38-07:00"
-		},
-		{
-			"path": "gopkg.in/mgo.v2/internal/scram",
-			"revision": "f402e3a216db333ae6b3ba68b9152a34a0bc6984",
-			"revisionTime": "2015-10-05T19:07:38-07:00"
-		}
 	]
 }
@ -44,7 +44,7 @@ func isJWTReqAuthenticated(req *http.Request) bool {
 		if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
 			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
 		}
-		return jwt.secretAccessKey, nil
+		return []byte(jwt.SecretAccessKey), nil
 	})
 	if e != nil {
 		return false
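The one-line fix above matters because jwt-go's HMAC methods type-assert the key returned by the keyfunc to []byte; returning any other type (such as a string field) makes every token fail validation. A minimal sketch of the corrected pattern (function and field names are illustrative):

```
// verify parses tok and checks its HMAC signature against secret.
func verify(tok, secret string) bool {
	t, err := jwtgo.Parse(tok, func(token *jwtgo.Token) (interface{}, error) {
		if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
		}
		return []byte(secret), nil // must be []byte for HMAC verification
	})
	return err == nil && t.Valid
}
```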
@ -133,7 +133,7 @@ func (web *webAPI) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *Gene
 		return &json2.Error{Message: "Unauthorized request"}
 	}
 	reply.UIVersion = miniobrowser.UIVersion
-	e := web.Client.MakeBucket(args.BucketName, "", "")
+	e := web.Client.MakeBucket(args.BucketName, "")
 	if e != nil {
 		return &json2.Error{Message: e.Error()}
 	}
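The second hunk tracks the vendored minio-go bump recorded in vendor.json above: MakeBucket loses its middle argument, leaving just the bucket name and one string argument, which appears to be the bucket location/region ("" selecting the server default). A short usage fragment in the handler's style (the bucket name is illustrative):

```
// Assumes the updated vendored minio-go API: MakeBucket(name, location).
if e := web.Client.MakeBucket("photos", ""); e != nil {
	return &json2.Error{Message: e.Error()}
}
```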