api: Implement bucket notification. (#2271)

* Implement basic S3 notifications through queues

Supports multiple queues and three basic queue types:

1. NilQueue -- messages don't get sent anywhere
2. LogQueue -- messages get logged
3. AmqpQueue -- messages are sent to an AMQP queue

* api: Implement bucket notification.

Supports two different queue types:

- AMQP
- ElasticSearch

* Add support for redis
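For reference, a minimal sketch of the configuration XML the new `?notification` endpoints accept, produced by marshalling stripped-down mirrors of the (unexported) types this change adds; the region and ARN values are illustrative assumptions:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Stripped-down mirrors of the types in bucket-notification-datatypes.go.
type queueConfig struct {
	Events   []string `xml:"Event"`
	QueueArn string   `xml:"Queue"`
}

type notificationConfig struct {
	XMLName             xml.Name      `xml:"NotificationConfiguration"`
	QueueConfigurations []queueConfig `xml:"QueueConfiguration"`
}

func main() {
	// Route all object-created events to the AMQP queue ARN in the
	// default region; values here are illustrative.
	cfg := notificationConfig{
		QueueConfigurations: []queueConfig{{
			Events:   []string{"s3:ObjectCreated:*"},
			QueueArn: "arn:minio:sqs:us-east-1:1:amqp",
		}},
	}
	out, err := xml.MarshalIndent(cfg, "", "  ")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(out))
}
```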
Harshavardhana 2016-07-23 22:51:12 -07:00 committed by Anand Babu (AB) Periasamy
parent f85d94288d
commit f248089523
234 changed files with 45415 additions and 550 deletions


@ -105,6 +105,12 @@ const (
ErrBucketAlreadyOwnedByYou
// Add new error codes here.
// Bucket notification related errors.
ErrEventNotification
ErrARNNotification
ErrRegionNotification
ErrOverlappingFilterNotification
// S3 extended errors.
ErrContentSHA256Mismatch
@ -411,6 +417,28 @@ var errorCodeResponse = map[APIErrorCode]APIError{
HTTPStatusCode: http.StatusConflict,
},
/// Bucket notification related errors.
ErrEventNotification: {
Code: "InvalidArgument",
Description: "A specified event is not supported for notifications.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrARNNotification: {
Code: "InvalidArgument",
Description: "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrRegionNotification: {
Code: "InvalidArgument",
Description: "A specified destination is in a different region than the bucket. You must use a destination that resides in the same region as the bucket.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrOverlappingFilterNotification: {
Code: "InvalidArgument",
Description: "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.",
HTTPStatusCode: http.StatusBadRequest,
},
/// S3 extensions.
ErrContentSHA256Mismatch: {
Code: "XAmzContentSHA256Mismatch",


@ -60,6 +60,8 @@ func registerAPIRouter(mux *router.Router, api objectAPIHandlers) {
bucket.Methods("GET").HandlerFunc(api.GetBucketLocationHandler).Queries("location", "")
// GetBucketPolicy
bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "")
// GetBucketNotification
bucket.Methods("GET").HandlerFunc(api.GetBucketNotificationHandler).Queries("notification", "")
// ListMultipartUploads
bucket.Methods("GET").HandlerFunc(api.ListMultipartUploadsHandler).Queries("uploads", "")
// ListObjectsV2
@ -68,6 +70,8 @@ func registerAPIRouter(mux *router.Router, api objectAPIHandlers) {
bucket.Methods("GET").HandlerFunc(api.ListObjectsV1Handler)
// PutBucketPolicy
bucket.Methods("PUT").HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "")
// PutBucketNotification
bucket.Methods("PUT").HandlerFunc(api.PutBucketNotificationHandler).Queries("notification", "")
// PutBucket
bucket.Methods("PUT").HandlerFunc(api.PutBucketHandler)
// HeadBucket


@ -23,7 +23,6 @@ import (
"encoding/xml"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"strings"
@ -319,9 +318,9 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
// requests which do not follow valid region requirements.
if s3Error := isValidLocationConstraint(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
-// Make bucket.
+// Proceed to create the bucket.
err := api.ObjectAPI.MakeBucket(bucket)
if err != nil {
errorIf(err, "Unable to create a bucket.")
@ -333,32 +332,6 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
writeSuccessResponse(w, nil)
}
func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, error) {
/// HTML Form values
formValues := make(map[string]string)
filePart := new(bytes.Buffer)
var err error
for err == nil {
var part *multipart.Part
part, err = reader.NextPart()
if part != nil {
if part.FileName() == "" {
var buffer []byte
buffer, err = ioutil.ReadAll(part)
if err != nil {
return nil, nil, err
}
formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer)
} else {
if _, err = io.Copy(filePart, part); err != nil {
return nil, nil, err
}
}
}
}
return filePart, formValues, nil
}
// PostPolicyBucketHandler - POST policy
// ----------
// This implementation of the POST operation handles object creation with a specified
@ -415,6 +388,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
})
setCommonHeaders(w)
writeSuccessResponse(w, encodedSuccessResponse)
// Load notification config if any.
nConfig, err := api.loadNotificationConfig(bucket)
if err != nil {
errorIf(err, "Unable to load notification config for bucket: \"%s\"", bucket)
return
}
size := int64(0) // FIXME: support notify size.
// Notify event.
notifyObjectCreatedEvent(nConfig, ObjectCreatedPost, bucket, object, md5Sum, size)
}
// HeadBucketHandler - HEAD Bucket
@ -464,6 +448,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
vars := mux.Vars(r)
bucket := vars["bucket"]
// Attempt to delete bucket.
if err := api.ObjectAPI.DeleteBucket(bucket); err != nil {
errorIf(err, "Unable to delete a bucket.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)


@ -0,0 +1,155 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import "encoding/xml"
type filterRule struct {
Name string `xml:"FilterRuleName"`
Value string
}
type keyFilter struct {
FilterRules []filterRule `xml:"FilterRule"`
}
type notificationConfigFilter struct {
Key keyFilter `xml:"S3Key"`
}
// Queue SQS configuration.
type queueConfig struct {
Events []string `xml:"Event"`
Filter notificationConfigFilter
ID string `xml:"Id"`
QueueArn string `xml:"Queue"`
}
// Topic SNS configuration; this is a compliance field,
// not used by minio yet.
type topicConfig struct {
Events []string `xml:"Event"`
Filter notificationConfigFilter
ID string `xml:"Id"`
TopicArn string `xml:"Topic"`
}
// Lambda function configuration; this is a compliance field,
// not used by minio yet.
type lambdaFuncConfig struct {
Events []string `xml:"Event"`
Filter notificationConfigFilter
ID string `xml:"Id"`
LambdaFunctionArn string `xml:"CloudFunction"`
}
// Notification configuration structure represents the XML format of
// notification configuration of buckets.
type notificationConfig struct {
XMLName xml.Name `xml:"NotificationConfiguration"`
QueueConfigurations []queueConfig `xml:"QueueConfiguration"`
TopicConfigurations []topicConfig `xml:"TopicConfiguration"`
LambdaConfigurations []lambdaFuncConfig `xml:"CloudFunctionConfiguration"`
}
// EventName is an AWS S3 event type:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
type EventName int
const (
// ObjectCreatedPut is s3:ObjectCreated:Put
ObjectCreatedPut EventName = iota
// ObjectCreatedPost is s3:ObjectCreated:Post
ObjectCreatedPost
// ObjectCreatedCopy is s3:ObjectCreated:Copy
ObjectCreatedCopy
// ObjectCreatedCompleteMultipartUpload is s3:ObjectCreated:CompleteMultipartUpload
ObjectCreatedCompleteMultipartUpload
// ObjectRemovedDelete is s3:ObjectRemoved:Delete
ObjectRemovedDelete
)
// Stringer interface for event name.
func (eventName EventName) String() string {
switch eventName {
case ObjectCreatedPut:
return "s3:ObjectCreated:Put"
case ObjectCreatedPost:
return "s3:ObjectCreated:Post"
case ObjectCreatedCopy:
return "s3:ObjectCreated:Copy"
case ObjectCreatedCompleteMultipartUpload:
return "s3:ObjectCreated:CompleteMultipartUpload"
case ObjectRemovedDelete:
return "s3:ObjectRemoved:Delete"
default:
return "s3:Unknown"
}
}
// identity represents the user ID; this is a compliance field.
type identity struct {
PrincipalID string `json:"principalId"`
}
func defaultIdentity() identity {
return identity{"minio"}
}
type s3BucketReference struct {
Name string `json:"name"`
OwnerIdentity identity `json:"ownerIdentity"`
ARN string `json:"arn"`
}
type s3ObjectReference struct {
Key string `json:"key"`
Size int64 `json:"size,omitempty"`
ETag string `json:"eTag,omitempty"`
VersionID string `json:"versionId,omitempty"`
Sequencer string `json:"sequencer"`
}
type s3Reference struct {
SchemaVersion string `json:"s3SchemaVersion"`
ConfigurationID string `json:"configurationId"`
Bucket s3BucketReference `json:"bucket"`
Object s3ObjectReference `json:"object"`
}
// NotificationEvent represents an Amazon S3 bucket notification event.
type NotificationEvent struct {
EventVersion string `json:"eventVersion"`
EventSource string `json:"eventSource"`
AwsRegion string `json:"awsRegion"`
EventTime string `json:"eventTime"`
EventName string `json:"eventName"`
UserIdentity identity `json:"userIdentity"`
RequestParameters map[string]string `json:"requestParameters"`
ResponseElements map[string]string `json:"responseElements"`
S3 s3Reference `json:"s3"`
}
// Represents the minio sqs type and inputs.
type arnMinioSqs struct {
sqsType string
}
// Stringer for constructing AWS ARN compatible string.
func (m arnMinioSqs) String() string {
return minioSqs + serverConfig.GetRegion() + ":" + m.sqsType
}
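To show the wire shape of a single record, a hedged in-package sketch that fills in a NotificationEvent using the types above; it assumes "encoding/json" is imported, and all field values are illustrative:

```go
// exampleEventJSON is a hypothetical helper; real events are built in
// queues.go. Values below are illustrative only.
func exampleEventJSON() ([]byte, error) {
	ev := NotificationEvent{
		EventVersion: "2.0",
		EventSource:  "aws:s3",
		AwsRegion:    "us-east-1",
		EventTime:    "2016-07-24T05:51:12.000Z",
		EventName:    ObjectCreatedPut.String(),
		UserIdentity: defaultIdentity(),
		S3: s3Reference{
			SchemaVersion:   "1.0",
			ConfigurationID: "Config",
			Bucket: s3BucketReference{
				Name:          "mybucket",
				OwnerIdentity: defaultIdentity(),
				ARN:           "arn:aws:s3:::mybucket",
			},
			Object: s3ObjectReference{
				Key:  "myobject",
				ETag: "d41d8cd98f00b204e9800998ecf8427e",
			},
		},
	}
	return json.MarshalIndent(ev, "", "  ")
}
```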


@ -0,0 +1,211 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"bytes"
"encoding/xml"
"io"
"net/http"
"path"
"github.com/gorilla/mux"
)
const (
bucketConfigPrefix = "buckets"
bucketNotificationConfig = "notification.xml"
)
// loadNotificationConfig - loads notification config for a given bucket if present, and returns a structured notification config.
func (api objectAPIHandlers) loadNotificationConfig(bucket string) (nConfig notificationConfig, err error) {
notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
var objInfo ObjectInfo
objInfo, err = api.ObjectAPI.GetObjectInfo(minioMetaBucket, notificationConfigPath)
if err != nil {
switch err.(type) {
case ObjectNotFound:
return notificationConfig{}, nil
}
return notificationConfig{}, err
}
var buffer bytes.Buffer
err = api.ObjectAPI.GetObject(minioMetaBucket, notificationConfigPath, 0, objInfo.Size, &buffer)
if err != nil {
switch err.(type) {
case ObjectNotFound:
return notificationConfig{}, nil
}
return notificationConfig{}, err
}
// Unmarshal notification bytes.
notificationConfigBytes := buffer.Bytes()
if err = xml.Unmarshal(notificationConfigBytes, &nConfig); err != nil {
return notificationConfig{}, err
} // Successfully unmarshalled notification configuration.
return nConfig, nil
}
// GetBucketNotificationHandler - This implementation of the GET
// operation uses the notification subresource to return the
// notification configuration of a bucket. If notifications are
// not enabled on the bucket, the operation returns an empty
// NotificationConfiguration element.
func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
// Validate request authorization.
if s3Error := checkAuth(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
objInfo, err := api.ObjectAPI.GetObjectInfo(minioMetaBucket, notificationConfigPath)
if err != nil {
switch err.(type) {
case ObjectNotFound:
writeSuccessResponse(w, nil)
return
}
errorIf(err, "Unable to read notification configuration.", notificationConfigPath)
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}
// Indicates if any data was written to the http.ResponseWriter
dataWritten := false
// io.Writer type which keeps track if any data was written.
writer := funcToWriter(func(p []byte) (int, error) {
if !dataWritten {
// Set headers on the first write.
// Set standard object headers.
setObjectHeaders(w, objInfo, nil)
// Set any additional requested response headers.
setGetRespHeaders(w, r.URL.Query())
dataWritten = true
}
return w.Write(p)
})
// Reads the notification configuration object and writes it to the tracking writer.
err = api.ObjectAPI.GetObject(minioMetaBucket, notificationConfigPath, 0, objInfo.Size, writer)
if err != nil {
if !dataWritten {
switch err.(type) {
case ObjectNotFound:
writeSuccessResponse(w, nil)
return
}
// An error response can be sent only if no data has been
// written to the client yet, i.e. if partial data has already
// been written there is no point in setting a status code and
// sending error XML.
apiErr := toAPIErrorCode(err)
writeErrorResponse(w, r, apiErr, r.URL.Path)
}
errorIf(err, "Unable to write to client.")
return
}
if !dataWritten {
// If ObjectAPI.GetObject did not return an error and no data
// has been written, it is a zero-byte object. Call
// writer.Write(nil) to set the appropriate headers.
writer.Write(nil)
}
}
// PutBucketNotificationHandler - Minio notification feature enables
// you to receive notifications when certain events happen in your bucket.
// Using this API, you can replace an existing notification configuration.
// The configuration is an XML file that defines the event types that you
// want Minio to publish and the destination where you want Minio to publish
// an event notification when it detects an event of the specified type.
// By default, your bucket has no event notifications configured. That is,
// the notification configuration will be an empty NotificationConfiguration.
func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
// Validate request authorization.
if s3Error := checkAuth(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
_, err := api.ObjectAPI.GetBucketInfo(bucket)
if err != nil {
errorIf(err, "Unable to bucket info.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}
// If Content-Length is unknown or zero, deny the request. PutBucketNotification
// always needs a Content-Length if incoming request is not chunked.
if !contains(r.TransferEncoding, "chunked") {
if r.ContentLength == -1 {
writeErrorResponse(w, r, ErrMissingContentLength, r.URL.Path)
return
}
}
// Reads the incoming notification configuration.
var buffer bytes.Buffer
if r.ContentLength >= 0 {
_, err = io.CopyN(&buffer, r.Body, r.ContentLength)
} else {
_, err = io.Copy(&buffer, r.Body)
}
if err != nil {
errorIf(err, "Unable to read incoming body.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}
var notificationCfg notificationConfig
// Unmarshal notification bytes.
notificationConfigBytes := buffer.Bytes()
if err = xml.Unmarshal(notificationConfigBytes, &notificationCfg); err != nil {
errorIf(err, "Unable to parse notification configuration XML.")
writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
return
} // Successfully unmarshalled notification configuration.
// Validate unmarshalled bucket notification configuration.
if s3Error := validateNotificationConfig(notificationCfg); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
// Proceed to save notification configuration.
size := int64(len(notificationConfigBytes))
data := bytes.NewReader(notificationConfigBytes)
notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
_, err = api.ObjectAPI.PutObject(minioMetaBucket, notificationConfigPath, size, data, nil)
if err != nil {
errorIf(err, "Unable to write bucket notification configuration.", notificationConfigPath)
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}
// Success.
writeSuccessResponse(w, nil)
}


@ -0,0 +1,128 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import "strings"
// List of valid event types.
var supportedEventTypes = map[string]struct{}{
// Object created event types.
"s3:ObjectCreated:*": {},
"s3:ObjectCreated:Put": {},
"s3:ObjectCreated:Post": {},
"s3:ObjectCreated:Copy": {},
"s3:ObjectCreated:CompleteMultipartUpload": {},
// Object removed event types.
"s3:ObjectRemoved:*": {},
"s3:ObjectRemoved:Delete": {},
}
// checkEvent - checks if an event is supported.
func checkEvent(event string) APIErrorCode {
_, ok := supportedEventTypes[event]
if !ok {
return ErrEventNotification
}
return ErrNone
}
// checkEvents - checks if all the events in the given list are valid;
// if any one of them is invalid, this function returns an error code.
func checkEvents(events []string) APIErrorCode {
for _, event := range events {
if s3Error := checkEvent(event); s3Error != ErrNone {
return s3Error
}
}
return ErrNone
}
// checkQueueArn - check if the queue arn is valid.
func checkQueueArn(queueArn string) APIErrorCode {
if !strings.HasPrefix(queueArn, minioSqs) {
return ErrARNNotification
}
if !strings.HasPrefix(queueArn, minioSqs+serverConfig.GetRegion()+":") {
return ErrRegionNotification
}
return ErrNone
}
// checkQueueConfig - validates queue configuration and returns error if any.
func checkQueueConfig(qConfig queueConfig) APIErrorCode {
// Check queue arn is valid.
if s3Error := checkQueueArn(qConfig.QueueArn); s3Error != ErrNone {
return s3Error
}
// Unmarshals QueueArn into structured object.
sqsArn := unmarshalSqsArn(qConfig.QueueArn)
// Validate if sqsArn requested any of the known supported queues.
if !isAMQPQueue(sqsArn) || !isElasticQueue(sqsArn) || !isRedisQueue(sqsArn) {
return ErrARNNotification
}
// Check if valid events are set in queue config.
if s3Error := checkEvents(qConfig.Events); s3Error != ErrNone {
return s3Error
}
// Success.
return ErrNone
}
// Validates all incoming queue configs. checkQueueConfig validates that
// the input fields for each queue are well-formed and carry valid
// configuration information. If validation fails, bucket notifications
// are not enabled.
func validateQueueConfigs(queueConfigs []queueConfig) APIErrorCode {
for _, qConfig := range queueConfigs {
if s3Error := checkQueueConfig(qConfig); s3Error != ErrNone {
return s3Error
}
}
// Success.
return ErrNone
}
// Validates the whole bucket notification configuration; if any one
// config is malformed or has invalid data, the entire configuration is
// rejected. Configuration is never applied partially.
func validateNotificationConfig(nConfig notificationConfig) APIErrorCode {
if s3Error := validateQueueConfigs(nConfig.QueueConfigurations); s3Error != ErrNone {
return s3Error
}
// Add validation for other configurations.
return ErrNone
}
// Unmarshals input value in AWS ARN format into an arnMinioSqs object.
// Returned value represents minio sqs types, currently supported are
// - amqp
// - elasticsearch
// - redis
func unmarshalSqsArn(queueArn string) (mSqs arnMinioSqs) {
sqsType := strings.TrimPrefix(queueArn, minioSqs+serverConfig.GetRegion()+":")
mSqs = arnMinioSqs{}
switch sqsType {
case queueTypeAMQP:
mSqs.sqsType = queueTypeAMQP
case queueTypeElastic:
mSqs.sqsType = queueTypeElastic
case queueTypeRedis:
mSqs.sqsType = queueTypeRedis
} // Add more cases here.
return mSqs
}
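To make the ARN layout concrete, a small self-contained sketch of the composition and parsing performed by arnMinioSqs.String and unmarshalSqsArn; the constants are copied from queues.go, and "us-east-1" stands in for serverConfig.GetRegion():

```go
package main

import (
	"fmt"
	"strings"
)

const (
	minioSqs      = "arn:minio:sqs:" // copied from queues.go
	queueTypeAMQP = "1:amqp"         // copied from queues.go
)

func main() {
	region := "us-east-1" // stand-in for serverConfig.GetRegion()

	// Composition, as in arnMinioSqs.String().
	arn := minioSqs + region + ":" + queueTypeAMQP
	fmt.Println(arn) // arn:minio:sqs:us-east-1:1:amqp

	// Parsing, as in unmarshalSqsArn().
	sqsType := strings.TrimPrefix(arn, minioSqs+region+":")
	fmt.Println(sqsType == queueTypeAMQP) // true
}
```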


@ -32,6 +32,8 @@ func migrateConfig() {
migrateV2ToV3()
// Migrate version '3' to '4'.
migrateV3ToV4()
// Migrate version '4' to '5'.
migrateV4ToV5()
}
// Version '1' is not supported anymore and deprecated, safe to delete.
@ -124,8 +126,8 @@ func migrateV3ToV4() {
}
// Save only the new fields, ignore the rest.
-srvConfig := &serverConfigV4{}
-srvConfig.Version = globalMinioConfigVersion
+srvConfig := &configV4{}
+srvConfig.Version = "4"
srvConfig.Credential = cv3.Credential
srvConfig.Region = cv3.Region
if srvConfig.Region == "" {
@ -146,3 +148,49 @@ func migrateV3ToV4() {
console.Println("Migration from version " + cv3.Version + " to " + srvConfig.Version + " completed successfully.")
}
// Version '4' to '5' migrates config, removes previous fields related
// to backend types and server address. This change further simplifies
// the config for future additions.
func migrateV4ToV5() {
cv4, err := loadConfigV4()
if err != nil && os.IsNotExist(err) {
return
}
fatalIf(err, "Unable to load config version 4.")
if cv4.Version != "4" {
return
}
// Save only the new fields, ignore the rest.
srvConfig := &serverConfigV5{}
srvConfig.Version = globalMinioConfigVersion
srvConfig.Credential = cv4.Credential
srvConfig.Region = cv4.Region
if srvConfig.Region == "" {
// Region needs to be set for AWS Signature Version 4.
srvConfig.Region = "us-east-1"
}
srvConfig.Logger.Console = cv4.Logger.Console
srvConfig.Logger.File = cv4.Logger.File
srvConfig.Logger.Syslog = cv4.Logger.Syslog
srvConfig.Logger.AMQP = amqpLogger{
Enable: false,
}
srvConfig.Logger.ElasticSearch = elasticSearchLogger{
Enable: false,
}
srvConfig.Logger.Redis = redisLogger{
Enable: false,
}
qc, err := quick.New(srvConfig)
fatalIf(err, "Unable to initialize the quick config.")
configFile, err := getConfigFile()
fatalIf(err, "Unable to get config file.")
err = qc.Save(configFile)
fatalIf(err, "Failed to migrate config from "+cv4.Version+" to "+srvConfig.Version+" failed.")
console.Println("Migration from version " + cv4.Version + " to " + srvConfig.Version + " completed successfully.")
}


@ -145,3 +145,53 @@ func loadConfigV3() (*configV3, error) {
}
return c, nil
}
type loggerV4 struct {
Console struct {
Enable bool `json:"enable"`
Level string `json:"level"`
} `json:"console"`
File struct {
Enable bool `json:"enable"`
Filename string `json:"fileName"`
Level string `json:"level"`
} `json:"file"`
Syslog struct {
Enable bool `json:"enable"`
Addr string `json:"address"`
Level string `json:"level"`
} `json:"syslog"`
}
// configV4 server configuration version '4'.
type configV4 struct {
Version string `json:"version"`
// S3 API configuration.
Credential credential `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV4 `json:"logger"`
}
// loadConfigV4 load config version '4'.
func loadConfigV4() (*configV4, error) {
configFile, err := getConfigFile()
if err != nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
c := &configV4{}
c.Version = "4"
qc, err := quick.New(c)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return c, nil
}


@ -23,8 +23,8 @@ import (
"github.com/minio/minio/pkg/quick"
)
-// serverConfigV4 server configuration version '4'.
-type serverConfigV4 struct {
+// serverConfigV5 server configuration version '5'.
+type serverConfigV5 struct {
Version string `json:"version"`
// S3 API configuration.
@ -41,7 +41,7 @@ type serverConfigV4 struct {
// initConfig - initialize server config. config version (called only once).
func initConfig() error {
if !isConfigFileExists() {
-srvCfg := &serverConfigV4{}
+srvCfg := &serverConfigV5{}
srvCfg.Version = globalMinioConfigVersion
srvCfg.Region = "us-east-1"
srvCfg.Credential = mustGenAccessKeys()
@ -76,7 +76,7 @@ func initConfig() error {
if _, err = os.Stat(configFile); err != nil {
return err
}
-srvCfg := &serverConfigV4{}
+srvCfg := &serverConfigV5{}
srvCfg.Version = globalMinioConfigVersion
srvCfg.rwMutex = &sync.RWMutex{}
qc, err := quick.New(srvCfg)
@ -90,14 +90,15 @@ func initConfig() error {
serverConfig = srvCfg
// Set the version properly after the unmarshalled json is loaded.
serverConfig.Version = globalMinioConfigVersion
return nil
}
// serverConfig server config.
-var serverConfig *serverConfigV4
+var serverConfig *serverConfigV5
// GetVersion get current config version.
-func (s serverConfigV4) GetVersion() string {
+func (s serverConfigV5) GetVersion() string {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
return s.Version
@ -105,78 +106,117 @@ func (s serverConfigV4) GetVersion() string {
/// Logger related.
func (s *serverConfigV5) SetAMQPLogger(amqpl amqpLogger) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
s.Logger.AMQP = amqpl
}
// GetAMQPLogger get current AMQP logger.
func (s serverConfigV5) GetAMQPLogger() amqpLogger {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
return s.Logger.AMQP
}
func (s *serverConfigV5) SetElasticSearchLogger(esLogger elasticSearchLogger) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
s.Logger.ElasticSearch = esLogger
}
// GetElasticSearchLogger get current ElasticSearch logger.
func (s serverConfigV5) GetElasticSearchLogger() elasticSearchLogger {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
return s.Logger.ElasticSearch
}
func (s *serverConfigV5) SetRedisLogger(rLogger redisLogger) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
s.Logger.Redis = rLogger
}
// GetRedisLogger get current Redis logger.
func (s serverConfigV5) GetRedisLogger() redisLogger {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
return s.Logger.Redis
}
// SetFileLogger set new file logger.
-func (s *serverConfigV4) SetFileLogger(flogger fileLogger) {
+func (s *serverConfigV5) SetFileLogger(flogger fileLogger) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
s.Logger.File = flogger
}
// GetFileLogger get current file logger.
-func (s serverConfigV4) GetFileLogger() fileLogger {
+func (s serverConfigV5) GetFileLogger() fileLogger {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
return s.Logger.File
}
// SetConsoleLogger set new console logger.
-func (s *serverConfigV4) SetConsoleLogger(clogger consoleLogger) {
+func (s *serverConfigV5) SetConsoleLogger(clogger consoleLogger) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
s.Logger.Console = clogger
}
// GetConsoleLogger get current console logger.
-func (s serverConfigV4) GetConsoleLogger() consoleLogger {
+func (s serverConfigV5) GetConsoleLogger() consoleLogger {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
return s.Logger.Console
}
// SetSyslogLogger set new syslog logger.
-func (s *serverConfigV4) SetSyslogLogger(slogger syslogLogger) {
+func (s *serverConfigV5) SetSyslogLogger(slogger syslogLogger) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
s.Logger.Syslog = slogger
}
// GetSyslogLogger get current syslog logger.
-func (s *serverConfigV4) GetSyslogLogger() syslogLogger {
+func (s *serverConfigV5) GetSyslogLogger() syslogLogger {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
return s.Logger.Syslog
}
// SetRegion set new region.
-func (s *serverConfigV4) SetRegion(region string) {
+func (s *serverConfigV5) SetRegion(region string) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
s.Region = region
}
// GetRegion get current region.
-func (s serverConfigV4) GetRegion() string {
+func (s serverConfigV5) GetRegion() string {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
return s.Region
}
// SetCredentials set new credentials.
-func (s *serverConfigV4) SetCredential(creds credential) {
+func (s *serverConfigV5) SetCredential(creds credential) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
s.Credential = creds
}
// GetCredentials get current credentials.
-func (s serverConfigV4) GetCredential() credential {
+func (s serverConfigV5) GetCredential() credential {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
return s.Credential
}
// Save config.
-func (s serverConfigV4) Save() error {
+func (s serverConfigV5) Save() error {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()


@ -64,17 +64,29 @@ func loadFormatFS(storageDisk StorageAPI) (format formatConfigV1, err error) {
// Should be called when process shuts down.
func shutdownFS(storage StorageAPI) {
// List if there are any multipart entries.
_, err := storage.ListDir(minioMetaBucket, mpartMetaPrefix)
if err != errFileNotFound {
-// Multipart directory is not empty hence do not remove .minio volume.
+// Multipart directory is not empty hence do not remove '.minio.sys' volume.
os.Exit(0)
}
// List if there are any bucket configuration entries.
_, err = storage.ListDir(minioMetaBucket, bucketConfigPrefix)
if err != errFileNotFound {
// Bucket config directory is not empty hence do not remove '.minio.sys' volume.
os.Exit(0)
}
// Cleanup everything else.
prefix := ""
-if err := cleanupDir(storage, minioMetaBucket, prefix); err != nil {
-os.Exit(0)
-return
+if err = cleanupDir(storage, minioMetaBucket, prefix); err != nil {
+errorIf(err, "Unable to cleanup minio meta bucket")
+os.Exit(1)
}
-storage.DeleteVol(minioMetaBucket)
+if err = storage.DeleteVol(minioMetaBucket); err != nil {
+errorIf(err, "Unable to delete minio meta bucket", minioMetaBucket)
+os.Exit(1)
+}
// Successful exit.
os.Exit(0)
}
@ -184,6 +196,10 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
if !IsValidBucketName(vol.Name) {
continue
}
// Ignore the volume special bucket.
if vol.Name == minioMetaBucket {
continue
}
bucketInfos = append(bucketInfos, BucketInfo{
Name: vol.Name,
Created: vol.Created,


@ -275,7 +275,6 @@ var notimplementedBucketResourceNames = map[string]bool{
"cors": true,
"lifecycle": true,
"logging": true,
"notification": true,
"replication": true,
"tagging": true,
"versions": true,


@ -28,7 +28,7 @@ const (
// minio configuration related constants.
const (
globalMinioConfigVersion = "4"
globalMinioConfigVersion = "5"
globalMinioConfigDir = ".minio"
globalMinioCertsDir = "certs"
globalMinioCertFile = "public.crt"


@ -17,7 +17,10 @@
package main
import (
"bytes"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"strings"
)
@ -95,3 +98,30 @@ func extractMetadataFromHeader(header http.Header) map[string]string {
// Return.
return metadata
}
func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, error) {
/// HTML Form values
formValues := make(map[string]string)
filePart := new(bytes.Buffer)
var err error
for err == nil {
var part *multipart.Part
part, err = reader.NextPart()
if part != nil {
if part.FileName() == "" {
var buffer []byte
buffer, err = ioutil.ReadAll(part)
if err != nil {
return nil, nil, err
}
formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer)
} else {
if _, err = io.Copy(filePart, part); err != nil {
return nil, nil, err
}
}
}
}
return filePart, formValues, nil
}

logger-amqp.go (new file, 151 lines)

@ -0,0 +1,151 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"errors"
"github.com/Sirupsen/logrus"
"github.com/streadway/amqp"
)
// amqpLogger - represents logrus compatible AMQP hook.
// All fields represent AMQP configuration details.
type amqpLogger struct {
Enable bool `json:"enable"`
Level string `json:"level"`
URL string `json:"url"`
Exchange string `json:"exchange"`
RoutingKey string `json:"routingKey"`
ExchangeType string `json:"exchangeType"`
Mandatory bool `json:"mandatory"`
Immediate bool `json:"immediate"`
Durable bool `json:"durable"`
Internal bool `json:"internal"`
NoWait bool `json:"noWait"`
AutoDeleted bool `json:"autoDeleted"`
}
type amqpConn struct {
params amqpLogger
*amqp.Connection
}
func dialAMQP(amqpL amqpLogger) (amqpConn, error) {
conn, err := amqp.Dial(amqpL.URL)
if err != nil {
return amqpConn{}, err
}
return amqpConn{Connection: conn, params: amqpL}, nil
}
var errLoggerNotEnabled = errors.New("logger type not enabled")
func enableAMQPLogger() error {
amqpL := serverConfig.GetAMQPLogger()
if !amqpL.Enable {
return errLoggerNotEnabled
}
// Connect to amqp server.
amqpC, err := dialAMQP(amqpL)
if err != nil {
return err
}
lvl, err := logrus.ParseLevel(amqpL.Level)
fatalIf(err, "Unknown log level found in the config file.")
// Add an amqp hook.
log.Hooks.Add(amqpC)
// Set default JSON formatter.
log.Formatter = new(logrus.JSONFormatter)
// Set default log level to info.
log.Level = lvl
// Successfully enabled.
return nil
}
// Fire is called when an event should be sent to the message broker.
func (q amqpConn) Fire(entry *logrus.Entry) error {
ch, err := q.Connection.Channel()
if err != nil {
// For any error other than connection closed, return.
if err != amqp.ErrClosed {
return err
}
// Attempt to connect again.
var conn *amqp.Connection
conn, err = amqp.Dial(q.params.URL)
if err != nil {
return err
}
ch, err = conn.Channel()
if err != nil {
return err
}
}
defer ch.Close()
err = ch.ExchangeDeclare(
q.params.Exchange,
q.params.ExchangeType,
q.params.Durable,
q.params.AutoDeleted,
q.params.Internal,
q.params.NoWait,
nil,
)
if err != nil {
return err
}
body, err := entry.String()
if err != nil {
return err
}
err = ch.Publish(
q.params.Exchange,
q.params.RoutingKey,
q.params.Mandatory,
q.params.Immediate,
amqp.Publishing{
ContentType: "application/json",
Body: []byte(body),
})
if err != nil {
return err
}
return nil
}
// Levels is available logging levels.
func (q amqpConn) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}
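To wire the hook up, a hedged in-package sketch that stores an AMQP logger in the running config so enableAMQPLogger picks it up; the connection values are illustrative, and serverConfig/fatalIf are the helpers already defined in this change:

```go
// configureAMQP is a hypothetical helper, assumed to run inside this
// package after initConfig(); connection values are illustrative.
func configureAMQP() {
	serverConfig.SetAMQPLogger(amqpLogger{
		Enable:       true,
		Level:        "info",
		URL:          "amqp://guest:guest@localhost:5672/",
		Exchange:     "minio-events",
		ExchangeType: "fanout",
		RoutingKey:   "bucketlogs",
	})
	// Persist so enableLoggers() attaches the hook on the next start.
	fatalIf(serverConfig.Save(), "Unable to save config.")
}
```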

logger-elasticsearch.go (new file, 116 lines)

@ -0,0 +1,116 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"errors"
"github.com/Sirupsen/logrus"
"gopkg.in/olivere/elastic.v3"
)
// elasticSearchLogger - represents logrus compatible Elasticsearch hook.
// All fields represent Elasticsearch configuration details.
type elasticSearchLogger struct {
Enable bool `json:"enable"`
Level string `json:"level"`
URL string `json:"url"`
Index string `json:"index"`
}
type elasticClient struct {
*elastic.Client
params elasticSearchLogger
}
// Connects to elastic search instance at URL.
func dialElastic(url string) (*elastic.Client, error) {
client, err := elastic.NewClient(elastic.SetURL(url), elastic.SetSniff(false))
if err != nil {
return nil, err
}
return client, nil
}
// Enables elasticsearch logger.
func enableElasticLogger() error {
esLogger := serverConfig.GetElasticSearchLogger()
if !esLogger.Enable {
return errLoggerNotEnabled
}
client, err := dialElastic(esLogger.URL)
if err != nil {
return err
}
// Use the IndexExists service to check if a specified index exists.
exists, err := client.IndexExists(esLogger.Index).Do()
if err != nil {
return err
}
// Index does not exist, attempt to create it.
if !exists {
var createIndex *elastic.IndicesCreateResult
createIndex, err = client.CreateIndex(esLogger.Index).Do()
if err != nil {
return err
}
if !createIndex.Acknowledged {
return errors.New("index not created")
}
}
elasticCl := elasticClient{
Client: client,
params: esLogger,
}
lvl, err := logrus.ParseLevel(esLogger.Level)
fatalIf(err, "Unknown log level found in the config file.")
// Add an elasticsearch hook.
log.Hooks.Add(elasticCl)
// Set default JSON formatter.
log.Formatter = new(logrus.JSONFormatter)
// Set default log level to info.
log.Level = lvl
return nil
}
// Fire is required to implement logrus hook
func (q elasticClient) Fire(entry *logrus.Entry) error {
_, err := q.Client.Index().Index(q.params.Index).
Type("event").
BodyJson(entry.Data).
Do()
return err
}
// Required for logrus hook implementation
func (q elasticClient) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}
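Since Fire indexes each entry as a document of type "event", queued notifications can be read back with a plain search; a rough sketch against the same elastic.v3 client, where the URL and index name are assumed to match the "elasticsearch" logger config:

```go
package main

import (
	"fmt"

	"gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(
		elastic.SetURL("http://localhost:9200"), // assumed config URL
		elastic.SetSniff(false),
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Fetch documents indexed by the hook's Fire() method.
	res, err := client.Search().Index("bucketevents").Type("event").Do()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, hit := range res.Hits.Hits {
		fmt.Println(string(*hit.Source))
	}
}
```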

logger-redis.go (new file, 136 lines)

@ -0,0 +1,136 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"time"
"github.com/Sirupsen/logrus"
"github.com/minio/redigo/redis"
)
// redisLogger to send logs to a Redis server
type redisLogger struct {
Enable bool `json:"enable"`
Level string `json:"level"`
Addr string `json:"address"`
Password string `json:"password"`
Key string `json:"key"`
}
type redisConn struct {
*redis.Pool
params redisLogger
}
// Dials a new connection to the redis instance at addr, optionally with a password if any.
func dialRedis(addr, password string) (*redis.Pool, error) {
rPool := &redis.Pool{
MaxIdle: 3,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
c, err := redis.Dial("tcp", addr)
if err != nil {
return nil, err
}
if password != "" {
if _, err := c.Do("AUTH", password); err != nil {
c.Close()
return nil, err
}
}
return c, err
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
}
// Test if a connection with Redis can be established.
rConn := rPool.Get()
defer rConn.Close()
// Check connection.
_, err := rConn.Do("PING")
if err != nil {
return nil, err
}
// Return pool.
return rPool, nil
}
func enableRedisLogger() error {
rLogger := serverConfig.GetRedisLogger()
if !rLogger.Enable {
return errLoggerNotEnabled
}
// Dial redis.
rPool, err := dialRedis(rLogger.Addr, rLogger.Password)
if err != nil {
return err
}
rrConn := redisConn{
Pool: rPool,
params: rLogger,
}
lvl, err := logrus.ParseLevel(rLogger.Level)
fatalIf(err, "Unknown log level found in the config file.")
// Add a redis hook.
log.Hooks.Add(rrConn)
// Set default JSON formatter.
log.Formatter = new(logrus.JSONFormatter)
// Set default log level to info.
log.Level = lvl
return nil
}
func (r redisConn) Fire(entry *logrus.Entry) error {
rConn := r.Pool.Get()
defer rConn.Close()
data, err := entry.String()
if err != nil {
return err
}
_, err = rConn.Do("RPUSH", r.params.Key, data)
if err != nil {
return err
}
return nil
}
// Required for logrus hook implementation
func (r redisConn) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}
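Since Fire appends each JSON entry to the configured key with RPUSH, a consumer can drain events in FIFO order with LPOP; a minimal sketch using the same redigo package, with the address and key assumed to match the "redis" logger config:

```go
package main

import (
	"fmt"

	"github.com/minio/redigo/redis"
)

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379") // assumed address
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()

	// Pop queued event records until the list is drained.
	for {
		data, err := redis.String(conn.Do("LPOP", "bucketevents"))
		if err != nil {
			break // redis.ErrNil once the list is empty
		}
		fmt.Println(data)
	}
}
```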


@ -20,7 +20,6 @@ import (
"bufio"
"bytes"
"os"
"reflect"
"runtime"
"runtime/debug"
"strconv"
@ -40,11 +39,16 @@ var log = logrus.New() // Default console logger.
// - console [default]
// - file
// - syslog
// - amqp
// - elasticsearch
// - redis
//
type logger struct {
Console consoleLogger `json:"console"`
File fileLogger `json:"file"`
Syslog syslogLogger `json:"syslog"`
+AMQP amqpLogger `json:"amqp"`
+ElasticSearch elasticSearchLogger `json:"elasticsearch"`
+Redis redisLogger `json:"redis"`
// Add new loggers here.
}
@ -91,11 +95,8 @@ func errorIf(err error, msg string, data ...interface{}) {
if err == nil {
return
}
-sysInfo := sysInfo()
fields := logrus.Fields{
-"cause": err.Error(),
-"type": reflect.TypeOf(err),
-"sysInfo": sysInfo,
+"cause": err.Error(),
}
if globalTrace {
fields["stack"] = "\n" + stackInfo()
@ -108,11 +109,8 @@ func fatalIf(err error, msg string, data ...interface{}) {
if err == nil {
return
}
-sysInfo := sysInfo()
fields := logrus.Fields{
-"cause": err.Error(),
-"type": reflect.TypeOf(err),
-"sysInfo": sysInfo,
+"cause": err.Error(),
}
if globalTrace {
fields["stack"] = "\n" + stackInfo()


@ -77,6 +77,10 @@ func enableLoggers() {
enableConsoleLogger()
enableFileLogger()
// Adding new bucket notification related loggers.
enableAMQPLogger()
enableElasticLogger()
enableRedisLogger()
// Add your logger here.
}


@ -157,6 +157,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
}
return w.Write(p)
})
// Reads the object at startOffset and writes to the tracking writer.
if err := api.ObjectAPI.GetObject(bucket, object, startOffset, length, writer); err != nil {
errorIf(err, "Unable to write to client.")
@ -353,6 +354,16 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
writeSuccessResponse(w, encodedSuccessResponse)
// Explicitly close the reader, to avoid fd leaks.
pipeReader.Close()
// Load notification config if any.
nConfig, err := api.loadNotificationConfig(bucket)
if err != nil {
errorIf(err, "Unable to load notification config for bucket: \"%s\"", bucket)
return
}
// Notify object created event.
notifyObjectCreatedEvent(nConfig, ObjectCreatedCopy, bucket, object, objInfo.MD5Sum, objInfo.Size)
}
// PutObjectHandler - PUT Object
@ -422,6 +433,16 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
w.Header().Set("ETag", "\""+md5Sum+"\"")
}
writeSuccessResponse(w, nil)
// Load notification config if any.
nConfig, err := api.loadNotificationConfig(bucket)
if err != nil {
errorIf(err, "Unable to load notification config for bucket: \"%s\"", bucket)
return
}
// Notify object created event.
notifyObjectCreatedEvent(nConfig, ObjectCreatedPut, bucket, object, md5Sum, size)
}
/// Multipart objectAPIHandlers
@ -645,7 +666,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
uploadID, _, _, _ := getObjectResources(r.URL.Query())
var md5Sum string
var err error
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
@ -711,7 +731,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
}(doneCh)
sendWhiteSpaceChars(w, doneCh)
if err != nil {
errorIf(err, "Unable to complete multipart upload.")
switch oErr := err.(type) {
@ -735,9 +754,21 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path)
return
}
-// write success response.
+// Write success response.
w.Write(encodedSuccessResponse)
w.(http.Flusher).Flush()
// Load notification config if any.
nConfig, err := api.loadNotificationConfig(bucket)
if err != nil {
errorIf(err, "Unable to load notification config for bucket: \"%s\"", bucket)
return
}
// Notify object created event.
size := int64(0) // FIXME: support event size.
notifyObjectCreatedEvent(nConfig, ObjectCreatedCompleteMultipartUpload, bucket, object, md5Sum, size)
}
/// Delete objectAPIHandlers
@ -768,6 +799,19 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
/// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
/// Ignore delete object errors, since we are supposed to reply
/// only 204.
-api.ObjectAPI.DeleteObject(bucket, object)
+if err := api.ObjectAPI.DeleteObject(bucket, object); err != nil {
+writeSuccessNoContent(w)
+return
+}
writeSuccessNoContent(w)
// Load notification config if any.
nConfig, err := api.loadNotificationConfig(bucket)
if err != nil {
errorIf(err, "Unable to load notification config for bucket: \"%s\"", bucket)
return
}
// Notify object deleted event.
notifyObjectDeletedEvent(nConfig, bucket, object)
}


@ -46,6 +46,10 @@ var isIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
// and periods, but must begin and end with a lowercase letter or a number.
// See: http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
func IsValidBucketName(bucket string) bool {
// Special case when bucket is equal to 'minioMetaBucket'.
if bucket == minioMetaBucket {
return true
}
if len(bucket) < 3 || len(bucket) > 63 {
return false
}

queues.go (new file, 193 lines)

@ -0,0 +1,193 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"net/url"
"time"
"github.com/Sirupsen/logrus"
)
const (
minioSqs = "arn:minio:sqs:"
// Static string indicating queue type 'amqp'.
queueTypeAMQP = "1:amqp"
// Static string indicating queue type 'elasticsearch'.
queueTypeElastic = "1:elasticsearch"
// Static string indicating queue type 'redis'.
queueTypeRedis = "1:redis"
)
// Returns true if queueArn is for an AMQP queue.
func isAMQPQueue(sqsArn arnMinioSqs) bool {
if sqsArn.sqsType == queueTypeAMQP {
amqpL := serverConfig.GetAMQPLogger()
if !amqpL.Enable {
return false
}
// Connect to amqp server to validate.
amqpC, err := dialAMQP(amqpL)
if err != nil {
errorIf(err, "Unable to connect to amqp service.", amqpL)
return false
}
defer amqpC.Close()
}
return true
}
// Returns true if queueArn is for a Redis queue.
func isRedisQueue(sqsArn arnMinioSqs) bool {
if sqsArn.sqsType == queueTypeRedis {
rLogger := serverConfig.GetRedisLogger()
if !rLogger.Enable {
return false
}
// Connect to redis server to validate.
rPool, err := dialRedis(rLogger.Addr, rLogger.Password)
if err != nil {
errorIf(err, "Unable to connect to redis service.", rLogger)
return false
}
defer rPool.Close()
}
return true
}
// Returns true if queueArn is for an ElasticSearch queue.
func isElasticQueue(sqsArn arnMinioSqs) bool {
if sqsArn.sqsType == queueTypeElastic {
esLogger := serverConfig.GetElasticSearchLogger()
if !esLogger.Enable {
return false
}
elasticC, err := dialElastic(esLogger.URL)
if err != nil {
errorIf(err, "Unable to connect to elasticsearch service.", esLogger.URL)
return false
}
defer elasticC.Stop()
}
return true
}
// eventMatch - matches an event type against the configured event patterns, honoring wildcards.
func eventMatch(eventType EventName, events []string) (ok bool) {
for _, event := range events {
ok = wildCardMatch(event, eventType.String())
if ok {
break
}
}
return ok
}
// notifyObjectCreatedEvent - notifies a new 's3:ObjectCreated' event.
// List of events reported through this function are
// - s3:ObjectCreated:Put
// - s3:ObjectCreated:Post
// - s3:ObjectCreated:Copy
// - s3:ObjectCreated:CompleteMultipartUpload
func notifyObjectCreatedEvent(nConfig notificationConfig, eventType EventName, bucket string, object string, etag string, size int64) {
/// Construct a new object created event.
region := serverConfig.GetRegion()
tnow := time.Now().UTC()
sequencer := fmt.Sprintf("%X", tnow.UnixNano())
// The following block fills in all the necessary details of the s3 event message structure.
// http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
events := []*NotificationEvent{
&NotificationEvent{
EventVersion: "2.0",
EventSource: "aws:s3",
AwsRegion: region,
EventTime: tnow.Format(iso8601Format),
EventName: eventType.String(),
UserIdentity: defaultIdentity(),
RequestParameters: map[string]string{},
ResponseElements: map[string]string{},
S3: s3Reference{
SchemaVersion: "1.0",
ConfigurationID: "Config",
Bucket: s3BucketReference{
Name: bucket,
OwnerIdentity: defaultIdentity(),
ARN: "arn:aws:s3:::" + bucket,
},
Object: s3ObjectReference{
Key: url.QueryEscape(object),
ETag: etag,
Size: size,
Sequencer: sequencer,
},
},
},
}
// Notify to all the configured queues.
for _, qConfig := range nConfig.QueueConfigurations {
if eventMatch(eventType, qConfig.Events) {
log.WithFields(logrus.Fields{
"Records": events,
}).Info()
}
}
}
// notifyObjectDeletedEvent - notifies a new 's3:ObjectRemoved' event.
// List of events reported through this function are
// - s3:ObjectRemoved:Delete
func notifyObjectDeletedEvent(nConfig notificationConfig, bucket string, object string) {
region := serverConfig.GetRegion()
tnow := time.Now().UTC()
sequencer := fmt.Sprintf("%X", tnow.UnixNano())
// The following block fills in all the necessary details of the s3 event message structure.
// http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
events := []*NotificationEvent{
&NotificationEvent{
EventVersion: "2.0",
EventSource: "aws:s3",
AwsRegion: region,
EventTime: tnow.Format(iso8601Format),
EventName: ObjectRemovedDelete.String(),
UserIdentity: defaultIdentity(),
RequestParameters: map[string]string{},
ResponseElements: map[string]string{},
S3: s3Reference{
SchemaVersion: "1.0",
ConfigurationID: "Config",
Bucket: s3BucketReference{
Name: bucket,
OwnerIdentity: defaultIdentity(),
ARN: "arn:aws:s3:::" + bucket,
},
Object: s3ObjectReference{
Key: url.QueryEscape(object),
Sequencer: sequencer,
},
},
},
}
// Notify to all the configured queues.
for _, qConfig := range nConfig.QueueConfigurations {
if eventMatch(ObjectRemovedDelete, qConfig.Events) {
log.WithFields(logrus.Fields{
"Records": events,
}).Info()
}
}
}
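eventMatch defers to wildCardMatch, which lives elsewhere in this tree; a rough self-contained approximation of the pattern semantics used here, where a trailing '*' matches any suffix (the real helper may be more general):

```go
package main

import (
	"fmt"
	"strings"
)

// matchEvent approximates wildCardMatch for bucket notification
// patterns: a trailing '*' matches any suffix, otherwise exact match.
func matchEvent(pattern, event string) bool {
	if strings.HasSuffix(pattern, "*") {
		return strings.HasPrefix(event, strings.TrimSuffix(pattern, "*"))
	}
	return pattern == event
}

func main() {
	fmt.Println(matchEvent("s3:ObjectCreated:*", "s3:ObjectCreated:Put")) // true
	fmt.Println(matchEvent("s3:ObjectRemoved:*", "s3:ObjectCreated:Put")) // false
}
```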


@ -758,6 +758,7 @@ func (s *TestSuiteCommon) TestPutObject(c *C) {
c.Assert(n, Equals, int64(len([]byte("hello world"))))
// asserted the contents of the fetched object with the expected result.
c.Assert(true, Equals, bytes.Equal(buffer2.Bytes(), []byte("hello world")))
}
// TestListBuckets - Make request for listing of all buckets.
@ -2141,4 +2142,5 @@ func (s *TestSuiteCommon) TestObjectMultipart(c *C) {
c.Assert(err, IsNil)
// verify whether complete multipart was successfull.
c.Assert(response.StatusCode, Equals, http.StatusOK)
}


@ -1,104 +0,0 @@
clock [![Build Status](https://drone.io/github.com/benbjohnson/clock/status.png)](https://drone.io/github.com/benbjohnson/clock/latest) [![Coverage Status](https://coveralls.io/repos/benbjohnson/clock/badge.png?branch=master)](https://coveralls.io/r/benbjohnson/clock?branch=master) [![GoDoc](https://godoc.org/github.com/benbjohnson/clock?status.png)](https://godoc.org/github.com/benbjohnson/clock) ![Project status](http://img.shields.io/status/experimental.png?color=red)
=====
Clock is a small library for mocking time in Go. It provides an interface
around the standard library's [`time`][time] package so that the application
can use the realtime clock while tests can use the mock clock.
[time]: http://golang.org/pkg/time/
## Usage
### Realtime Clock
Your application can maintain a `Clock` variable that will allow realtime and
mock clocks to be interchangeable. For example, if you had an `Application` type:
```go
import "github.com/benbjohnson/clock"
type Application struct {
Clock clock.Clock
}
```
You could initialize it to use the realtime clock like this:
```go
var app Application
app.Clock = clock.New()
...
```
Then all timers and time-related functionality should be performed from the
`Clock` variable.
### Mocking time
In your tests, you will want to use a `Mock` clock:
```go
import (
"testing"
"github.com/benbjohnson/clock"
)
func TestApplication_DoSomething(t *testing.T) {
mock := clock.NewMock()
app := Application{Clock: mock}
...
}
```
Now that you've initialized your application to use the mock clock, you can
adjust the time programmatically. The mock clock always starts from the Unix
epoch (midnight, Jan 1, 1970 UTC).
### Controlling time
The mock clock provides the same functions that the standard library's `time`
package provides. For example, to find the current time, you use the `Now()`
function:
```go
mock := clock.NewMock()
// Find the current time.
mock.Now().UTC() // 1970-01-01 00:00:00 +0000 UTC
// Move the clock forward.
mock.Add(2 * time.Hour)
// Check the time again. It's 2 hours later!
mock.Now().UTC() // 1970-01-01 02:00:00 +0000 UTC
```
Timers and Tickers are also controlled by this same mock clock. They will only
execute when the clock is moved forward:
```
mock := clock.NewMock()
count := 0
// Kick off a timer to increment every 1 mock second.
go func() {
ticker := clock.Ticker(1 * time.Second)
for {
<-ticker.C
count++
}
}()
runtime.Gosched()
// Move the clock forward 10 seconds.
mock.Add(10 * time.Second)
// This prints 10.
fmt.Println(count)
```


@ -1,363 +0,0 @@
package clock
import (
"runtime"
"sort"
"sync"
"time"
)
// Clock represents an interface to the functions in the standard library time
// package. Two implementations are available in the clock package. The first
// is a real-time clock which simply wraps the time package's functions. The
// second is a mock clock which will only make forward progress when
// programmatically adjusted.
type Clock interface {
After(d time.Duration) <-chan time.Time
AfterFunc(d time.Duration, f func()) *Timer
Now() time.Time
Sleep(d time.Duration)
Tick(d time.Duration) <-chan time.Time
Ticker(d time.Duration) *Ticker
Timer(d time.Duration) *Timer
}
// New returns an instance of a real-time clock.
func New() Clock {
return &clock{}
}
// clock implements a real-time clock by simply wrapping the time package functions.
type clock struct{}
func (c *clock) After(d time.Duration) <-chan time.Time { return time.After(d) }
func (c *clock) AfterFunc(d time.Duration, f func()) *Timer {
return &Timer{timer: time.AfterFunc(d, f)}
}
func (c *clock) Now() time.Time { return time.Now() }
func (c *clock) Sleep(d time.Duration) { time.Sleep(d) }
func (c *clock) Tick(d time.Duration) <-chan time.Time { return time.Tick(d) }
func (c *clock) Ticker(d time.Duration) *Ticker {
t := time.NewTicker(d)
return &Ticker{C: t.C, ticker: t}
}
func (c *clock) Timer(d time.Duration) *Timer {
t := time.NewTimer(d)
return &Timer{C: t.C, timer: t}
}
// Mock represents a mock clock that only moves forward programmatically.
// It can be preferable to a real-time clock when testing time-based functionality.
type Mock struct {
mu sync.Mutex
now time.Time // current time
timers clockTimers // tickers & timers
calls Calls
waiting []waiting
callsMutex sync.Mutex
}
// NewMock returns an instance of a mock clock.
// The current time of the mock clock on initialization is the Unix epoch.
func NewMock() *Mock {
return &Mock{now: time.Unix(0, 0)}
}
// Add moves the current time of the mock clock forward by the duration.
// This should only be called from a single goroutine at a time.
func (m *Mock) Add(d time.Duration) {
// Calculate the final current time.
t := m.now.Add(d)
// Continue to execute timers until there are no more before the new time.
for {
if !m.runNextTimer(t) {
break
}
}
// Ensure that we end with the new time.
m.mu.Lock()
m.now = t
m.mu.Unlock()
// Give a small buffer to make sure the other goroutines get handled.
gosched()
}
// runNextTimer executes the next timer in chronological order and moves the
// current time to the timer's next tick time. The next time is not executed if
// it's next time if after the max time. Returns true if a timer is executed.
func (m *Mock) runNextTimer(max time.Time) bool {
m.mu.Lock()
// Sort timers by time.
sort.Sort(m.timers)
// If we have no more timers then exit.
if len(m.timers) == 0 {
m.mu.Unlock()
return false
}
// Retrieve next timer. Exit if next tick is after new time.
t := m.timers[0]
if t.Next().After(max) {
m.mu.Unlock()
return false
}
// Move "now" forward and unlock clock.
m.now = t.Next()
m.mu.Unlock()
// Execute timer.
t.Tick(m.now)
return true
}
// After waits for the duration to elapse and then sends the current time on the returned channel.
func (m *Mock) After(d time.Duration) <-chan time.Time {
defer m.inc(&m.calls.After)
return m.Timer(d).C
}
// AfterFunc waits for the duration to elapse and then executes a function.
// A Timer is returned that can be stopped.
func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer {
defer m.inc(&m.calls.AfterFunc)
t := m.Timer(d)
t.C = nil
t.fn = f
return t
}
// Now returns the current wall time on the mock clock.
func (m *Mock) Now() time.Time {
defer m.inc(&m.calls.Now)
m.mu.Lock()
defer m.mu.Unlock()
return m.now
}
// Sleep pauses the goroutine for the given duration on the mock clock.
// The clock must be moved forward in a separate goroutine.
func (m *Mock) Sleep(d time.Duration) {
defer m.inc(&m.calls.Sleep)
<-m.After(d)
}
// Tick is a convenience function for Ticker().
// It will return a ticker channel that cannot be stopped.
func (m *Mock) Tick(d time.Duration) <-chan time.Time {
defer m.inc(&m.calls.Tick)
return m.Ticker(d).C
}
// Ticker creates a new instance of Ticker.
func (m *Mock) Ticker(d time.Duration) *Ticker {
defer m.inc(&m.calls.Ticker)
m.mu.Lock()
defer m.mu.Unlock()
ch := make(chan time.Time)
t := &Ticker{
C: ch,
c: ch,
mock: m,
d: d,
next: m.now.Add(d),
}
m.timers = append(m.timers, (*internalTicker)(t))
return t
}
// Timer creates a new instance of Timer.
func (m *Mock) Timer(d time.Duration) *Timer {
defer m.inc(&m.calls.Timer)
m.mu.Lock()
defer m.mu.Unlock()
ch := make(chan time.Time)
t := &Timer{
C: ch,
c: ch,
mock: m,
next: m.now.Add(d),
}
m.timers = append(m.timers, (*internalTimer)(t))
return t
}
func (m *Mock) removeClockTimer(t clockTimer) {
m.mu.Lock()
defer m.mu.Unlock()
for i, timer := range m.timers {
if timer == t {
copy(m.timers[i:], m.timers[i+1:])
m.timers[len(m.timers)-1] = nil
m.timers = m.timers[:len(m.timers)-1]
break
}
}
sort.Sort(m.timers)
}
func (m *Mock) inc(addr *uint32) {
m.callsMutex.Lock()
defer m.callsMutex.Unlock()
*addr++
var newWaiting []waiting
for _, w := range m.waiting {
if m.calls.atLeast(w.expected) {
close(w.done)
continue
}
newWaiting = append(newWaiting, w)
}
m.waiting = newWaiting
}
// Wait waits for at least the relevant calls before returning. The expected
// Calls are always over the lifetime of the Mock. Values in the Calls struct
// are used as the minimum number of calls, this allows you to wait for only
// the calls you care about.
func (m *Mock) Wait(s Calls) {
m.callsMutex.Lock()
if m.calls.atLeast(s) {
m.callsMutex.Unlock()
return
}
done := make(chan struct{})
m.waiting = append(m.waiting, waiting{expected: s, done: done})
m.callsMutex.Unlock()
<-done
}
// clockTimer represents an object with an associated start time.
type clockTimer interface {
Next() time.Time
Tick(time.Time)
}
// clockTimers represents a list of sortable timers.
type clockTimers []clockTimer
func (a clockTimers) Len() int { return len(a) }
func (a clockTimers) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a clockTimers) Less(i, j int) bool { return a[i].Next().Before(a[j].Next()) }
// Timer represents a single event.
// The current time will be sent on C, unless the timer was created by AfterFunc.
type Timer struct {
C <-chan time.Time
c chan time.Time
timer *time.Timer // realtime impl, if set
next time.Time // next tick time
mock *Mock // mock clock, if set
fn func() // AfterFunc function, if set
}
// Stop turns off the timer.
func (t *Timer) Stop() {
if t.timer != nil {
t.timer.Stop()
} else {
t.mock.removeClockTimer((*internalTimer)(t))
}
}
type internalTimer Timer
func (t *internalTimer) Next() time.Time { return t.next }
func (t *internalTimer) Tick(now time.Time) {
if t.fn != nil {
t.fn()
} else {
t.c <- now
}
t.mock.removeClockTimer((*internalTimer)(t))
gosched()
}
// Ticker holds a channel that receives "ticks" at regular intervals.
type Ticker struct {
C <-chan time.Time
c chan time.Time
ticker *time.Ticker // realtime impl, if set
next time.Time // next tick time
mock *Mock // mock clock, if set
d time.Duration // time between ticks
}
// Stop turns off the ticker.
func (t *Ticker) Stop() {
if t.ticker != nil {
t.ticker.Stop()
} else {
t.mock.removeClockTimer((*internalTicker)(t))
}
}
type internalTicker Ticker
func (t *internalTicker) Next() time.Time { return t.next }
func (t *internalTicker) Tick(now time.Time) {
select {
case t.c <- now:
case <-time.After(1 * time.Millisecond):
}
t.next = now.Add(t.d)
gosched()
}
// Sleep momentarily so that other goroutines can process.
func gosched() { runtime.Gosched() }
// Calls keeps track of the count of calls for each of the methods on the Clock
// interface.
type Calls struct {
After uint32
AfterFunc uint32
Now uint32
Sleep uint32
Tick uint32
Ticker uint32
Timer uint32
}
// atLeast returns true if at least the number of calls in o have been made.
func (c Calls) atLeast(o Calls) bool {
if c.After < o.After {
return false
}
if c.AfterFunc < o.AfterFunc {
return false
}
if c.Now < o.Now {
return false
}
if c.Sleep < o.Sleep {
return false
}
if c.Tick < o.Tick {
return false
}
if c.Ticker < o.Ticker {
return false
}
if c.Timer < o.Timer {
return false
}
return true
}
type waiting struct {
expected Calls
done chan struct{}
}
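The `Wait`/`Calls` pair is the least obvious part of the API above: it lets a test block until the code under test has registered its timers before the clock is advanced. A minimal sketch under assumed usage (the worker goroutine and the one-second duration are illustrative, not part of the package):

```go
mock := clock.NewMock()

// Worker that the test cannot otherwise synchronize with.
go func() {
	t := mock.Timer(time.Second) // counts as one Timer() call
	<-t.C
	fmt.Println("fired at", mock.Now())
}()

// Block until at least one Timer() call has been made, then advance
// the mock clock so the timer fires deterministically.
mock.Wait(clock.Calls{Timer: 1})
mock.Add(time.Second)
```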

vendor/github.com/minio/redigo/LICENSE generated vendored Normal file

@ -0,0 +1,175 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

vendor/github.com/minio/redigo/redis/commandinfo.go generated vendored Normal file

@ -0,0 +1,54 @@
// Copyright 2014 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
import (
"strings"
)
const (
WatchState = 1 << iota
MultiState
SubscribeState
MonitorState
)
type CommandInfo struct {
Set, Clear int
}
var commandInfos = map[string]CommandInfo{
"WATCH": {Set: WatchState},
"UNWATCH": {Clear: WatchState},
"MULTI": {Set: MultiState},
"EXEC": {Clear: WatchState | MultiState},
"DISCARD": {Clear: WatchState | MultiState},
"PSUBSCRIBE": {Set: SubscribeState},
"SUBSCRIBE": {Set: SubscribeState},
"MONITOR": {Set: MonitorState},
}
func init() {
for n, ci := range commandInfos {
commandInfos[strings.ToLower(n)] = ci
}
}
func LookupCommandInfo(commandName string) CommandInfo {
if ci, ok := commandInfos[commandName]; ok {
return ci
}
return commandInfos[strings.ToUpper(commandName)]
}
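For context, this table is exactly how the pool (pool.go below) tracks per-connection state: each command's `Set`/`Clear` masks are folded into a state bitfield. A short sketch of that update rule:

```go
state := 0
ci := LookupCommandInfo("MULTI")
state = (state | ci.Set) &^ ci.Clear // MultiState is now set
ci = LookupCommandInfo("EXEC")
state = (state | ci.Set) &^ ci.Clear // MultiState and WatchState cleared
```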

vendor/github.com/minio/redigo/redis/conn.go generated vendored Normal file

@ -0,0 +1,570 @@
// Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"net"
"net/url"
"regexp"
"strconv"
"sync"
"time"
)
// conn is the low-level implementation of Conn
type conn struct {
// Shared
mu sync.Mutex
pending int
err error
conn net.Conn
// Read
readTimeout time.Duration
br *bufio.Reader
// Write
writeTimeout time.Duration
bw *bufio.Writer
// Scratch space for formatting argument length.
// '*' or '$', length, "\r\n"
lenScratch [32]byte
// Scratch space for formatting integers and floats.
numScratch [40]byte
}
// DialTimeout acts like Dial but takes timeouts for establishing the
// connection to the server, writing a command and reading a reply.
//
// Deprecated: Use Dial with options instead.
func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) {
return Dial(network, address,
DialConnectTimeout(connectTimeout),
DialReadTimeout(readTimeout),
DialWriteTimeout(writeTimeout))
}
// DialOption specifies an option for dialing a Redis server.
type DialOption struct {
f func(*dialOptions)
}
type dialOptions struct {
readTimeout time.Duration
writeTimeout time.Duration
dial func(network, addr string) (net.Conn, error)
db int
password string
}
// DialReadTimeout specifies the timeout for reading a single command reply.
func DialReadTimeout(d time.Duration) DialOption {
return DialOption{func(do *dialOptions) {
do.readTimeout = d
}}
}
// DialWriteTimeout specifies the timeout for writing a single command.
func DialWriteTimeout(d time.Duration) DialOption {
return DialOption{func(do *dialOptions) {
do.writeTimeout = d
}}
}
// DialConnectTimeout specifies the timeout for connecting to the Redis server.
func DialConnectTimeout(d time.Duration) DialOption {
return DialOption{func(do *dialOptions) {
dialer := net.Dialer{Timeout: d}
do.dial = dialer.Dial
}}
}
// DialNetDial specifies a custom dial function for creating TCP
// connections. If this option is left out, then net.Dial is
// used. DialNetDial overrides DialConnectTimeout.
func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption {
return DialOption{func(do *dialOptions) {
do.dial = dial
}}
}
// DialDatabase specifies the database to select when dialing a connection.
func DialDatabase(db int) DialOption {
return DialOption{func(do *dialOptions) {
do.db = db
}}
}
// DialPassword specifies the password to use when connecting to
// the Redis server.
func DialPassword(password string) DialOption {
return DialOption{func(do *dialOptions) {
do.password = password
}}
}
// Dial connects to the Redis server at the given network and
// address using the specified options.
func Dial(network, address string, options ...DialOption) (Conn, error) {
do := dialOptions{
dial: net.Dial,
}
for _, option := range options {
option.f(&do)
}
netConn, err := do.dial(network, address)
if err != nil {
return nil, err
}
c := &conn{
conn: netConn,
bw: bufio.NewWriter(netConn),
br: bufio.NewReader(netConn),
readTimeout: do.readTimeout,
writeTimeout: do.writeTimeout,
}
if do.password != "" {
if _, err := c.Do("AUTH", do.password); err != nil {
netConn.Close()
return nil, err
}
}
if do.db != 0 {
if _, err := c.Do("SELECT", do.db); err != nil {
netConn.Close()
return nil, err
}
}
return c, nil
}
var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`)
// DialURL connects to a Redis server at the given URL using the Redis
// URI scheme. URLs should follow the draft IANA specification for the
// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis).
func DialURL(rawurl string, options ...DialOption) (Conn, error) {
u, err := url.Parse(rawurl)
if err != nil {
return nil, err
}
if u.Scheme != "redis" {
return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme)
}
// As per the IANA draft spec, the host defaults to localhost and
// the port defaults to 6379.
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
// assume port is missing
host = u.Host
port = "6379"
}
if host == "" {
host = "localhost"
}
address := net.JoinHostPort(host, port)
if u.User != nil {
password, isSet := u.User.Password()
if isSet {
options = append(options, DialPassword(password))
}
}
match := pathDBRegexp.FindStringSubmatch(u.Path)
if len(match) == 2 {
db := 0
if len(match[1]) > 0 {
db, err = strconv.Atoi(match[1])
if err != nil {
return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
}
}
if db != 0 {
options = append(options, DialDatabase(db))
}
} else if u.Path != "" {
return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
}
return Dial("tcp", address, options...)
}
// NewConn returns a new Redigo connection for the given net connection.
func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn {
return &conn{
conn: netConn,
bw: bufio.NewWriter(netConn),
br: bufio.NewReader(netConn),
readTimeout: readTimeout,
writeTimeout: writeTimeout,
}
}
func (c *conn) Close() error {
c.mu.Lock()
err := c.err
if c.err == nil {
c.err = errors.New("redigo: closed")
err = c.conn.Close()
}
c.mu.Unlock()
return err
}
func (c *conn) fatal(err error) error {
c.mu.Lock()
if c.err == nil {
c.err = err
// Close connection to force errors on subsequent calls and to unblock
// other reader or writer.
c.conn.Close()
}
c.mu.Unlock()
return err
}
func (c *conn) Err() error {
c.mu.Lock()
err := c.err
c.mu.Unlock()
return err
}
func (c *conn) writeLen(prefix byte, n int) error {
c.lenScratch[len(c.lenScratch)-1] = '\n'
c.lenScratch[len(c.lenScratch)-2] = '\r'
i := len(c.lenScratch) - 3
for {
c.lenScratch[i] = byte('0' + n%10)
i -= 1
n = n / 10
if n == 0 {
break
}
}
c.lenScratch[i] = prefix
_, err := c.bw.Write(c.lenScratch[i:])
return err
}
func (c *conn) writeString(s string) error {
c.writeLen('$', len(s))
c.bw.WriteString(s)
_, err := c.bw.WriteString("\r\n")
return err
}
func (c *conn) writeBytes(p []byte) error {
c.writeLen('$', len(p))
c.bw.Write(p)
_, err := c.bw.WriteString("\r\n")
return err
}
func (c *conn) writeInt64(n int64) error {
return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10))
}
func (c *conn) writeFloat64(n float64) error {
return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64))
}
func (c *conn) writeCommand(cmd string, args []interface{}) (err error) {
c.writeLen('*', 1+len(args))
err = c.writeString(cmd)
for _, arg := range args {
if err != nil {
break
}
switch arg := arg.(type) {
case string:
err = c.writeString(arg)
case []byte:
err = c.writeBytes(arg)
case int:
err = c.writeInt64(int64(arg))
case int64:
err = c.writeInt64(arg)
case float64:
err = c.writeFloat64(arg)
case bool:
if arg {
err = c.writeString("1")
} else {
err = c.writeString("0")
}
case nil:
err = c.writeString("")
default:
var buf bytes.Buffer
fmt.Fprint(&buf, arg)
err = c.writeBytes(buf.Bytes())
}
}
return err
}
type protocolError string
func (pe protocolError) Error() string {
return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe))
}
func (c *conn) readLine() ([]byte, error) {
p, err := c.br.ReadSlice('\n')
if err == bufio.ErrBufferFull {
return nil, protocolError("long response line")
}
if err != nil {
return nil, err
}
i := len(p) - 2
if i < 0 || p[i] != '\r' {
return nil, protocolError("bad response line terminator")
}
return p[:i], nil
}
// parseLen parses bulk string and array lengths.
func parseLen(p []byte) (int, error) {
if len(p) == 0 {
return -1, protocolError("malformed length")
}
if p[0] == '-' && len(p) == 2 && p[1] == '1' {
// handle $-1 and *-1 null replies.
return -1, nil
}
var n int
for _, b := range p {
n *= 10
if b < '0' || b > '9' {
return -1, protocolError("illegal bytes in length")
}
n += int(b - '0')
}
return n, nil
}
// parseInt parses an integer reply.
func parseInt(p []byte) (interface{}, error) {
if len(p) == 0 {
return 0, protocolError("malformed integer")
}
var negate bool
if p[0] == '-' {
negate = true
p = p[1:]
if len(p) == 0 {
return 0, protocolError("malformed integer")
}
}
var n int64
for _, b := range p {
n *= 10
if b < '0' || b > '9' {
return 0, protocolError("illegal bytes in length")
}
n += int64(b - '0')
}
if negate {
n = -n
}
return n, nil
}
var (
okReply interface{} = "OK"
pongReply interface{} = "PONG"
)
func (c *conn) readReply() (interface{}, error) {
line, err := c.readLine()
if err != nil {
return nil, err
}
if len(line) == 0 {
return nil, protocolError("short response line")
}
switch line[0] {
case '+':
switch {
case len(line) == 3 && line[1] == 'O' && line[2] == 'K':
// Avoid allocation for frequent "+OK" response.
return okReply, nil
case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G':
// Avoid allocation in PING command benchmarks :)
return pongReply, nil
default:
return string(line[1:]), nil
}
case '-':
return Error(string(line[1:])), nil
case ':':
return parseInt(line[1:])
case '$':
n, err := parseLen(line[1:])
if n < 0 || err != nil {
return nil, err
}
p := make([]byte, n)
_, err = io.ReadFull(c.br, p)
if err != nil {
return nil, err
}
if line, err := c.readLine(); err != nil {
return nil, err
} else if len(line) != 0 {
return nil, protocolError("bad bulk string format")
}
return p, nil
case '*':
n, err := parseLen(line[1:])
if n < 0 || err != nil {
return nil, err
}
r := make([]interface{}, n)
for i := range r {
r[i], err = c.readReply()
if err != nil {
return nil, err
}
}
return r, nil
}
return nil, protocolError("unexpected response line")
}
func (c *conn) Send(cmd string, args ...interface{}) error {
c.mu.Lock()
c.pending += 1
c.mu.Unlock()
if c.writeTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
}
if err := c.writeCommand(cmd, args); err != nil {
return c.fatal(err)
}
return nil
}
func (c *conn) Flush() error {
if c.writeTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
}
if err := c.bw.Flush(); err != nil {
return c.fatal(err)
}
return nil
}
func (c *conn) Receive() (reply interface{}, err error) {
if c.readTimeout != 0 {
c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
}
if reply, err = c.readReply(); err != nil {
return nil, c.fatal(err)
}
// When using pub/sub, the number of receives can be greater than the
// number of sends. To enable normal use of the connection after
// unsubscribing from all channels, we do not decrement pending to a
// negative value.
//
// The pending field is decremented after the reply is read to handle the
// case where Receive is called before Send.
c.mu.Lock()
if c.pending > 0 {
c.pending -= 1
}
c.mu.Unlock()
if err, ok := reply.(Error); ok {
return nil, err
}
return
}
func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) {
c.mu.Lock()
pending := c.pending
c.pending = 0
c.mu.Unlock()
if cmd == "" && pending == 0 {
return nil, nil
}
if c.writeTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
}
if cmd != "" {
if err := c.writeCommand(cmd, args); err != nil {
return nil, c.fatal(err)
}
}
if err := c.bw.Flush(); err != nil {
return nil, c.fatal(err)
}
if c.readTimeout != 0 {
c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
}
if cmd == "" {
reply := make([]interface{}, pending)
for i := range reply {
r, e := c.readReply()
if e != nil {
return nil, c.fatal(e)
}
reply[i] = r
}
return reply, nil
}
var err error
var reply interface{}
for i := 0; i <= pending; i++ {
var e error
if reply, e = c.readReply(); e != nil {
return nil, c.fatal(e)
}
if e, ok := reply.(Error); ok && err == nil {
err = e
}
}
return reply, err
}
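As a quick illustration of the dial path above, the option-based and URL-based constructors produce equivalent connections; `DialURL` parses the password and database index out of the URI. A sketch with placeholder address and credentials:

```go
// Authenticated connection to database 2, two equivalent ways.
c1, err := redis.Dial("tcp", "localhost:6379",
	redis.DialPassword("secret"),
	redis.DialDatabase(2))

c2, err := redis.DialURL("redis://:secret@localhost:6379/2")
```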

vendor/github.com/minio/redigo/redis/doc.go generated vendored Normal file

@ -0,0 +1,168 @@
// Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Package redis is a client for the Redis database.
//
// The Redigo FAQ (https://github.com/minio/redigo/wiki/FAQ) contains more
// documentation about this package.
//
// Connections
//
// The Conn interface is the primary interface for working with Redis.
// Applications create connections by calling the Dial, DialWithTimeout or
// NewConn functions. In the future, functions will be added for creating
// sharded and other types of connections.
//
// The application must call the connection Close method when the application
// is done with the connection.
//
// Executing Commands
//
// The Conn interface has a generic method for executing Redis commands:
//
// Do(commandName string, args ...interface{}) (reply interface{}, err error)
//
// The Redis command reference (http://redis.io/commands) lists the available
// commands. An example of using the Redis APPEND command is:
//
// n, err := conn.Do("APPEND", "key", "value")
//
// The Do method converts command arguments to binary strings for transmission
// to the server as follows:
//
// Go Type Conversion
// []byte Sent as is
// string Sent as is
// int, int64 strconv.FormatInt(v)
// float64 strconv.FormatFloat(v, 'g', -1, 64)
// bool true -> "1", false -> "0"
// nil ""
// all other types fmt.Print(v)
//
// Redis command reply types are represented using the following Go types:
//
// Redis type Go type
// error redis.Error
// integer int64
// simple string string
// bulk string []byte or nil if value not present.
// array []interface{} or nil if value not present.
//
// Use type assertions or the reply helper functions to convert from
// interface{} to the specific Go type for the command result.
//
// Pipelining
//
// Connections support pipelining using the Send, Flush and Receive methods.
//
// Send(commandName string, args ...interface{}) error
// Flush() error
// Receive() (reply interface{}, err error)
//
// Send writes the command to the connection's output buffer. Flush flushes the
// connection's output buffer to the server. Receive reads a single reply from
// the server. The following example shows a simple pipeline.
//
// c.Send("SET", "foo", "bar")
// c.Send("GET", "foo")
// c.Flush()
// c.Receive() // reply from SET
// v, err = c.Receive() // reply from GET
//
// The Do method combines the functionality of the Send, Flush and Receive
// methods. The Do method starts by writing the command and flushing the output
// buffer. Next, the Do method receives all pending replies including the reply
// for the command just sent by Do. If any of the received replies is an error,
// then Do returns the error. If there are no errors, then Do returns the last
// reply. If the command argument to the Do method is "", then the Do method
// will flush the output buffer and receive pending replies without sending a
// command.
//
// Use the Send and Do methods to implement pipelined transactions.
//
// c.Send("MULTI")
// c.Send("INCR", "foo")
// c.Send("INCR", "bar")
// r, err := c.Do("EXEC")
// fmt.Println(r) // prints [1, 1]
//
// Concurrency
//
// Connections support one concurrent caller to the Receive method and one
// concurrent caller to the Send and Flush methods. No other concurrency is
// supported including concurrent calls to the Do method.
//
// For full concurrent access to Redis, use the thread-safe Pool to get, use
// and release a connection from within a goroutine. Connections returned from
// a Pool have the concurrency restrictions described in the previous
// paragraph.
//
// Publish and Subscribe
//
// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
//
// c.Send("SUBSCRIBE", "example")
// c.Flush()
// for {
// reply, err := c.Receive()
// if err != nil {
// return err
// }
// // process pushed message
// }
//
// The PubSubConn type wraps a Conn with convenience methods for implementing
// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
// send and flush a subscription management command. The Receive method
// converts a pushed message to convenient types for use in a type switch.
//
// psc := redis.PubSubConn{c}
// psc.Subscribe("example")
// for {
// switch v := psc.Receive().(type) {
// case redis.Message:
// fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
// case redis.Subscription:
// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
// case error:
// return v
// }
// }
//
// Reply Helpers
//
// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
// to a value of a specific type. To allow convenient wrapping of calls to the
// connection Do and Receive methods, the functions take a second argument of
// type error. If the error is non-nil, then the helper function returns the
// error. If the error is nil, the function converts the reply to the specified
// type:
//
// exists, err := redis.Bool(c.Do("EXISTS", "foo"))
// if err != nil {
// // handle error return from c.Do or type conversion error.
// }
//
// The Scan function converts elements of an array reply to Go types:
//
// var value1 int
// var value2 string
// reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
// if err != nil {
// // handle error
// }
// if _, err := redis.Scan(reply, &value1, &value2); err != nil {
// // handle error
// }
package redis // import "github.com/minio/redigo/redis"
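One helper family the package comment only touches on is the slice converters in reply.go below, which combine `Values` with `ScanSlice`. A sketch with a placeholder key:

```go
ids, err := redis.Ints(c.Do("LRANGE", "ids", 0, -1))
if err != nil {
	// handle error
}
fmt.Println(ids) // e.g. [1 2 3]
```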

vendor/github.com/minio/redigo/redis/log.go generated vendored Normal file

@ -0,0 +1,117 @@
// Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
import (
"bytes"
"fmt"
"log"
)
// NewLoggingConn returns a logging wrapper around a connection.
func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn {
if prefix != "" {
prefix = prefix + "."
}
return &loggingConn{conn, logger, prefix}
}
type loggingConn struct {
Conn
logger *log.Logger
prefix string
}
func (c *loggingConn) Close() error {
err := c.Conn.Close()
var buf bytes.Buffer
fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err)
c.logger.Output(2, buf.String())
return err
}
func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) {
const chop = 32
switch v := v.(type) {
case []byte:
if len(v) > chop {
fmt.Fprintf(buf, "%q...", v[:chop])
} else {
fmt.Fprintf(buf, "%q", v)
}
case string:
if len(v) > chop {
fmt.Fprintf(buf, "%q...", v[:chop])
} else {
fmt.Fprintf(buf, "%q", v)
}
case []interface{}:
if len(v) == 0 {
buf.WriteString("[]")
} else {
sep := "["
fin := "]"
if len(v) > chop {
v = v[:chop]
fin = "...]"
}
for _, vv := range v {
buf.WriteString(sep)
c.printValue(buf, vv)
sep = ", "
}
buf.WriteString(fin)
}
default:
fmt.Fprint(buf, v)
}
}
func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%s%s(", c.prefix, method)
if method != "Receive" {
buf.WriteString(commandName)
for _, arg := range args {
buf.WriteString(", ")
c.printValue(&buf, arg)
}
}
buf.WriteString(") -> (")
if method != "Send" {
c.printValue(&buf, reply)
buf.WriteString(", ")
}
fmt.Fprintf(&buf, "%v)", err)
c.logger.Output(3, buf.String())
}
func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) {
reply, err := c.Conn.Do(commandName, args...)
c.print("Do", commandName, args, reply, err)
return reply, err
}
func (c *loggingConn) Send(commandName string, args ...interface{}) error {
err := c.Conn.Send(commandName, args...)
c.print("Send", commandName, args, nil, err)
return err
}
func (c *loggingConn) Receive() (interface{}, error) {
reply, err := c.Conn.Receive()
c.print("Receive", "", nil, reply, err)
return reply, err
}
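A sketch of wiring this wrapper in front of an existing connection (the logger destination and prefix are arbitrary):

```go
c, err := redis.Dial("tcp", "localhost:6379")
if err != nil {
	// handle error
}
logged := redis.NewLoggingConn(c, log.New(os.Stderr, "", log.LstdFlags), "redis")

// Logs a line of the form: redis.Do(SET, "foo", "bar") -> ("OK", <nil>)
logged.Do("SET", "foo", "bar")
```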

vendor/github.com/minio/redigo/redis/pool.go generated vendored Normal file

@ -0,0 +1,391 @@
// Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
import (
"bytes"
"container/list"
"crypto/rand"
"crypto/sha1"
"errors"
"io"
"strconv"
"sync"
"time"
)
var nowFunc = time.Now // for testing
// ErrPoolExhausted is returned from a pool connection method (Do, Send,
// Receive, Flush, Err) when the maximum number of database connections in the
// pool has been reached.
var ErrPoolExhausted = errors.New("redigo: connection pool exhausted")
var (
errPoolClosed = errors.New("redigo: connection pool closed")
errConnClosed = errors.New("redigo: connection closed")
)
// Pool maintains a pool of connections. The application calls the Get method
// to get a connection from the pool and the connection's Close method to
// return the connection's resources to the pool.
//
// The following example shows how to use a pool in a web application. The
// application creates a pool at application startup and makes it available to
// request handlers using a global variable.
//
// func newPool(server, password string) *redis.Pool {
// return &redis.Pool{
// MaxIdle: 3,
// IdleTimeout: 240 * time.Second,
// Dial: func () (redis.Conn, error) {
// c, err := redis.Dial("tcp", server)
// if err != nil {
// return nil, err
// }
// if _, err := c.Do("AUTH", password); err != nil {
// c.Close()
// return nil, err
// }
// return c, err
// },
// TestOnBorrow: func(c redis.Conn, t time.Time) error {
// _, err := c.Do("PING")
// return err
// },
// }
// }
//
// var (
// pool *redis.Pool
// redisServer = flag.String("redisServer", ":6379", "")
// redisPassword = flag.String("redisPassword", "", "")
// )
//
// func main() {
// flag.Parse()
// pool = newPool(*redisServer, *redisPassword)
// ...
// }
//
// A request handler gets a connection from the pool and closes the connection
// when the handler is done:
//
// func serveHome(w http.ResponseWriter, r *http.Request) {
// conn := pool.Get()
// defer conn.Close()
// ....
// }
//
type Pool struct {
// Dial is an application supplied function for creating and configuring a
// connection.
//
// The connection returned from Dial must not be in a special state
// (subscribed to pubsub channel, transaction started, ...).
Dial func() (Conn, error)
// TestOnBorrow is an optional application supplied function for checking
// the health of an idle connection before the connection is used again by
// the application. Argument t is the time that the connection was returned
// to the pool. If the function returns an error, then the connection is
// closed.
TestOnBorrow func(c Conn, t time.Time) error
// Maximum number of idle connections in the pool.
MaxIdle int
// Maximum number of connections allocated by the pool at a given time.
// When zero, there is no limit on the number of connections in the pool.
MaxActive int
// Close connections after remaining idle for this duration. If the value
// is zero, then idle connections are not closed. Applications should set
// the timeout to a value less than the server's timeout.
IdleTimeout time.Duration
// If Wait is true and the pool is at the MaxActive limit, then Get() waits
// for a connection to be returned to the pool before returning.
Wait bool
// mu protects fields defined below.
mu sync.Mutex
cond *sync.Cond
closed bool
active int
// Stack of idleConn with most recently used at the front.
idle list.List
}
type idleConn struct {
c Conn
t time.Time
}
// NewPool creates a new pool.
//
// Deprecated: Initialize the Pool directly as shown in the example.
func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
return &Pool{Dial: newFn, MaxIdle: maxIdle}
}
// Get gets a connection. The application must close the returned connection.
// This method always returns a valid connection so that applications can defer
// error handling to the first use of the connection. If there is an error
// getting an underlying connection, then the connection Err, Do, Send, Flush
// and Receive methods return that error.
func (p *Pool) Get() Conn {
c, err := p.get()
if err != nil {
return errorConnection{err}
}
return &pooledConnection{p: p, c: c}
}
// ActiveCount returns the number of active connections in the pool.
func (p *Pool) ActiveCount() int {
p.mu.Lock()
active := p.active
p.mu.Unlock()
return active
}
// Close releases the resources used by the pool.
func (p *Pool) Close() error {
p.mu.Lock()
idle := p.idle
p.idle.Init()
p.closed = true
p.active -= idle.Len()
if p.cond != nil {
p.cond.Broadcast()
}
p.mu.Unlock()
for e := idle.Front(); e != nil; e = e.Next() {
e.Value.(idleConn).c.Close()
}
return nil
}
// release decrements the active count and signals waiters. The caller must
// hold p.mu during the call.
func (p *Pool) release() {
p.active -= 1
if p.cond != nil {
p.cond.Signal()
}
}
// get prunes stale connections and returns a connection from the idle list or
// creates a new connection.
func (p *Pool) get() (Conn, error) {
p.mu.Lock()
// Prune stale connections.
if timeout := p.IdleTimeout; timeout > 0 {
for i, n := 0, p.idle.Len(); i < n; i++ {
e := p.idle.Back()
if e == nil {
break
}
ic := e.Value.(idleConn)
if ic.t.Add(timeout).After(nowFunc()) {
break
}
p.idle.Remove(e)
p.release()
p.mu.Unlock()
ic.c.Close()
p.mu.Lock()
}
}
for {
// Get idle connection.
for i, n := 0, p.idle.Len(); i < n; i++ {
e := p.idle.Front()
if e == nil {
break
}
ic := e.Value.(idleConn)
p.idle.Remove(e)
test := p.TestOnBorrow
p.mu.Unlock()
if test == nil || test(ic.c, ic.t) == nil {
return ic.c, nil
}
ic.c.Close()
p.mu.Lock()
p.release()
}
// Check for pool closed before dialing a new connection.
if p.closed {
p.mu.Unlock()
return nil, errors.New("redigo: get on closed pool")
}
// Dial new connection if under limit.
if p.MaxActive == 0 || p.active < p.MaxActive {
dial := p.Dial
p.active += 1
p.mu.Unlock()
c, err := dial()
if err != nil {
p.mu.Lock()
p.release()
p.mu.Unlock()
c = nil
}
return c, err
}
if !p.Wait {
p.mu.Unlock()
return nil, ErrPoolExhausted
}
if p.cond == nil {
p.cond = sync.NewCond(&p.mu)
}
p.cond.Wait()
}
}
func (p *Pool) put(c Conn, forceClose bool) error {
err := c.Err()
p.mu.Lock()
if !p.closed && err == nil && !forceClose {
p.idle.PushFront(idleConn{t: nowFunc(), c: c})
if p.idle.Len() > p.MaxIdle {
c = p.idle.Remove(p.idle.Back()).(idleConn).c
} else {
c = nil
}
}
if c == nil {
if p.cond != nil {
p.cond.Signal()
}
p.mu.Unlock()
return nil
}
p.release()
p.mu.Unlock()
return c.Close()
}
type pooledConnection struct {
p *Pool
c Conn
state int
}
var (
sentinel []byte
sentinelOnce sync.Once
)
func initSentinel() {
p := make([]byte, 64)
if _, err := rand.Read(p); err == nil {
sentinel = p
} else {
h := sha1.New()
io.WriteString(h, "Oops, rand failed. Use time instead.")
io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10))
sentinel = h.Sum(nil)
}
}
func (pc *pooledConnection) Close() error {
c := pc.c
if _, ok := c.(errorConnection); ok {
return nil
}
pc.c = errorConnection{errConnClosed}
if pc.state&MultiState != 0 {
c.Send("DISCARD")
pc.state &^= (MultiState | WatchState)
} else if pc.state&WatchState != 0 {
c.Send("UNWATCH")
pc.state &^= WatchState
}
if pc.state&SubscribeState != 0 {
c.Send("UNSUBSCRIBE")
c.Send("PUNSUBSCRIBE")
// To detect the end of the message stream, ask the server to echo
// a sentinel value and read until we see that value.
sentinelOnce.Do(initSentinel)
c.Send("ECHO", sentinel)
c.Flush()
for {
p, err := c.Receive()
if err != nil {
break
}
if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) {
pc.state &^= SubscribeState
break
}
}
}
c.Do("")
pc.p.put(c, pc.state != 0)
return nil
}
func (pc *pooledConnection) Err() error {
return pc.c.Err()
}
func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
ci := LookupCommandInfo(commandName)
pc.state = (pc.state | ci.Set) &^ ci.Clear
return pc.c.Do(commandName, args...)
}
func (pc *pooledConnection) Send(commandName string, args ...interface{}) error {
ci := LookupCommandInfo(commandName)
pc.state = (pc.state | ci.Set) &^ ci.Clear
return pc.c.Send(commandName, args...)
}
func (pc *pooledConnection) Flush() error {
return pc.c.Flush()
}
func (pc *pooledConnection) Receive() (reply interface{}, err error) {
return pc.c.Receive()
}
type errorConnection struct{ err error }
func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err }
func (ec errorConnection) Send(string, ...interface{}) error { return ec.err }
func (ec errorConnection) Err() error { return ec.err }
func (ec errorConnection) Close() error { return ec.err }
func (ec errorConnection) Flush() error { return ec.err }
func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err }
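The exhaustion behavior is worth calling out: with `Wait` unset, `Get` on a full pool hands back an `errorConnection` carrying `ErrPoolExhausted`, while with `Wait` set it blocks on the condition variable until a connection is returned. A sketch of a bounded, blocking pool (address and sizes are placeholders):

```go
pool := &redis.Pool{
	MaxIdle:     3,
	MaxActive:   10,   // at most 10 connections open at once
	Wait:        true, // block Get() instead of returning ErrPoolExhausted
	IdleTimeout: 240 * time.Second,
	Dial: func() (redis.Conn, error) {
		return redis.Dial("tcp", "localhost:6379")
	},
}

conn := pool.Get()
defer conn.Close() // returns the connection to the pool
```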

vendor/github.com/minio/redigo/redis/pubsub.go generated vendored Normal file

@ -0,0 +1,144 @@
// Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
import "errors"
// Subscription represents a subscribe or unsubscribe notification.
type Subscription struct {
// Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe"
Kind string
// The channel that was changed.
Channel string
// The current number of subscriptions for connection.
Count int
}
// Message represents a message notification.
type Message struct {
// The originating channel.
Channel string
// The message data.
Data []byte
}
// PMessage represents a pmessage notification.
type PMessage struct {
// The matched pattern.
Pattern string
// The originating channel.
Channel string
// The message data.
Data []byte
}
// Pong represents a pubsub pong notification.
type Pong struct {
Data string
}
// PubSubConn wraps a Conn with convenience methods for subscribers.
type PubSubConn struct {
Conn Conn
}
// Close closes the connection.
func (c PubSubConn) Close() error {
return c.Conn.Close()
}
// Subscribe subscribes the connection to the specified channels.
func (c PubSubConn) Subscribe(channel ...interface{}) error {
c.Conn.Send("SUBSCRIBE", channel...)
return c.Conn.Flush()
}
// PSubscribe subscribes the connection to the given patterns.
func (c PubSubConn) PSubscribe(channel ...interface{}) error {
c.Conn.Send("PSUBSCRIBE", channel...)
return c.Conn.Flush()
}
// Unsubscribe unsubscribes the connection from the given channels, or from all
// of them if none is given.
func (c PubSubConn) Unsubscribe(channel ...interface{}) error {
c.Conn.Send("UNSUBSCRIBE", channel...)
return c.Conn.Flush()
}
// PUnsubscribe unsubscribes the connection from the given patterns, or from all
// of them if none is given.
func (c PubSubConn) PUnsubscribe(channel ...interface{}) error {
c.Conn.Send("PUNSUBSCRIBE", channel...)
return c.Conn.Flush()
}
// Ping sends a PING to the server with the specified data.
func (c PubSubConn) Ping(data string) error {
c.Conn.Send("PING", data)
return c.Conn.Flush()
}
// Receive returns a pushed message as a Subscription, Message, PMessage, Pong
// or error. The return value is intended to be used directly in a type switch
// as illustrated in the PubSubConn example.
func (c PubSubConn) Receive() interface{} {
reply, err := Values(c.Conn.Receive())
if err != nil {
return err
}
var kind string
reply, err = Scan(reply, &kind)
if err != nil {
return err
}
switch kind {
case "message":
var m Message
if _, err := Scan(reply, &m.Channel, &m.Data); err != nil {
return err
}
return m
case "pmessage":
var pm PMessage
if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil {
return err
}
return pm
case "subscribe", "psubscribe", "unsubscribe", "punsubscribe":
s := Subscription{Kind: kind}
if _, err := Scan(reply, &s.Channel, &s.Count); err != nil {
return err
}
return s
case "pong":
var p Pong
if _, err := Scan(reply, &p.Data); err != nil {
return err
}
return p
}
return errors.New("redigo: unknown pubsub notification")
}
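One detail the doc.go example above does not show: `Ping` works on a subscribed connection and surfaces as a `Pong` case in the same type switch, which can serve as a liveness probe. A sketch (channel name and payload are placeholders):

```go
psc := redis.PubSubConn{Conn: c}
psc.Subscribe("example")
psc.Ping("keepalive")
for {
	switch v := psc.Receive().(type) {
	case redis.Message:
		fmt.Printf("%s: %s\n", v.Channel, v.Data)
	case redis.Pong:
		fmt.Println("pong:", v.Data)
	case error:
		return v
	}
}
```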

vendor/github.com/minio/redigo/redis/redis.go generated vendored Normal file

@ -0,0 +1,44 @@
// Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
// Error represents an error returned in a command reply.
type Error string
func (err Error) Error() string { return string(err) }
// Conn represents a connection to a Redis server.
type Conn interface {
// Close closes the connection.
Close() error
// Err returns a non-nil value if the connection is broken. The returned
// value is either the first non-nil value returned from the underlying
// network connection or a protocol parsing error. Applications should
// close broken connections.
Err() error
// Do sends a command to the server and returns the received reply.
Do(commandName string, args ...interface{}) (reply interface{}, err error)
// Send writes the command to the client's output buffer.
Send(commandName string, args ...interface{}) error
// Flush flushes the output buffer to the Redis server.
Flush() error
// Receive receives a single reply from the Redis server
Receive() (reply interface{}, err error)
}

vendor/github.com/minio/redigo/redis/reply.go generated vendored Normal file

@ -0,0 +1,393 @@
// Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
import (
"errors"
"fmt"
"strconv"
)
// ErrNil indicates that a reply value is nil.
var ErrNil = errors.New("redigo: nil returned")
// Int is a helper that converts a command reply to an integer. If err is not
// equal to nil, then Int returns 0, err. Otherwise, Int converts the
// reply to an int as follows:
//
// Reply type Result
// integer int(reply), nil
// bulk string parsed reply, nil
// nil 0, ErrNil
// other 0, error
func Int(reply interface{}, err error) (int, error) {
if err != nil {
return 0, err
}
switch reply := reply.(type) {
case int64:
x := int(reply)
if int64(x) != reply {
return 0, strconv.ErrRange
}
return x, nil
case []byte:
n, err := strconv.ParseInt(string(reply), 10, 0)
return int(n), err
case nil:
return 0, ErrNil
case Error:
return 0, reply
}
return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
}
// Int64 is a helper that converts a command reply to 64 bit integer. If err is
// not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts the
// reply to an int64 as follows:
//
// Reply type Result
// integer reply, nil
// bulk string parsed reply, nil
// nil 0, ErrNil
// other 0, error
func Int64(reply interface{}, err error) (int64, error) {
if err != nil {
return 0, err
}
switch reply := reply.(type) {
case int64:
return reply, nil
case []byte:
n, err := strconv.ParseInt(string(reply), 10, 64)
return n, err
case nil:
return 0, ErrNil
case Error:
return 0, reply
}
return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
}
var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
// Uint64 is a helper that converts a command reply to 64 bit integer. If err is
// not equal to nil, then Uint64 returns 0, err. Otherwise, Uint64 converts the
// reply to a uint64 as follows:
//
// Reply type Result
// integer reply, nil
// bulk string parsed reply, nil
// nil 0, ErrNil
// other 0, error
func Uint64(reply interface{}, err error) (uint64, error) {
if err != nil {
return 0, err
}
switch reply := reply.(type) {
case int64:
if reply < 0 {
return 0, errNegativeInt
}
return uint64(reply), nil
case []byte:
n, err := strconv.ParseUint(string(reply), 10, 64)
return n, err
case nil:
return 0, ErrNil
case Error:
return 0, reply
}
return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
}
// Float64 is a helper that converts a command reply to 64 bit float. If err is
// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
// the reply to a float64 as follows:
//
// Reply type Result
// bulk string parsed reply, nil
// nil 0, ErrNil
// other 0, error
func Float64(reply interface{}, err error) (float64, error) {
if err != nil {
return 0, err
}
switch reply := reply.(type) {
case []byte:
n, err := strconv.ParseFloat(string(reply), 64)
return n, err
case nil:
return 0, ErrNil
case Error:
return 0, reply
}
return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
}
// String is a helper that converts a command reply to a string. If err is not
// equal to nil, then String returns "", err. Otherwise String converts the
// reply to a string as follows:
//
// Reply type Result
// bulk string string(reply), nil
// simple string reply, nil
// nil "", ErrNil
// other "", error
func String(reply interface{}, err error) (string, error) {
if err != nil {
return "", err
}
switch reply := reply.(type) {
case []byte:
return string(reply), nil
case string:
return reply, nil
case nil:
return "", ErrNil
case Error:
return "", reply
}
return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
}
// Bytes is a helper that converts a command reply to a slice of bytes. If err
// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
// the reply to a slice of bytes as follows:
//
// Reply type Result
// bulk string reply, nil
// simple string []byte(reply), nil
// nil nil, ErrNil
// other nil, error
func Bytes(reply interface{}, err error) ([]byte, error) {
if err != nil {
return nil, err
}
switch reply := reply.(type) {
case []byte:
return reply, nil
case string:
return []byte(reply), nil
case nil:
return nil, ErrNil
case Error:
return nil, reply
}
return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
}
// Bool is a helper that converts a command reply to a boolean. If err is not
// equal to nil, then Bool returns false, err. Otherwise Bool converts the
// reply to boolean as follows:
//
// Reply type Result
// integer value != 0, nil
// bulk string strconv.ParseBool(reply)
// nil false, ErrNil
// other false, error
func Bool(reply interface{}, err error) (bool, error) {
if err != nil {
return false, err
}
switch reply := reply.(type) {
case int64:
return reply != 0, nil
case []byte:
return strconv.ParseBool(string(reply))
case nil:
return false, ErrNil
case Error:
return false, reply
}
return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
}
// MultiBulk is a helper that converts an array command reply to a []interface{}.
//
// Deprecated: Use Values instead.
func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
// Values is a helper that converts an array command reply to a []interface{}.
// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
// converts the reply as follows:
//
// Reply type Result
// array reply, nil
// nil nil, ErrNil
// other nil, error
func Values(reply interface{}, err error) ([]interface{}, error) {
if err != nil {
return nil, err
}
switch reply := reply.(type) {
case []interface{}:
return reply, nil
case nil:
return nil, ErrNil
case Error:
return nil, reply
}
return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
}
// Strings is a helper that converts an array command reply to a []string. If
// err is not equal to nil, then Strings returns nil, err. Nil array items are
// converted to "" in the output slice. Strings returns an error if an array
// item is not a bulk string or nil.
func Strings(reply interface{}, err error) ([]string, error) {
if err != nil {
return nil, err
}
switch reply := reply.(type) {
case []interface{}:
result := make([]string, len(reply))
for i := range reply {
if reply[i] == nil {
continue
}
p, ok := reply[i].([]byte)
if !ok {
return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i])
}
result[i] = string(p)
}
return result, nil
case nil:
return nil, ErrNil
case Error:
return nil, reply
}
return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply)
}
// ByteSlices is a helper that converts an array command reply to a [][]byte.
// If err is not equal to nil, then ByteSlices returns nil, err. Nil array
// items remain nil. ByteSlices returns an error if an array item is not a
// bulk string or nil.
func ByteSlices(reply interface{}, err error) ([][]byte, error) {
if err != nil {
return nil, err
}
switch reply := reply.(type) {
case []interface{}:
result := make([][]byte, len(reply))
for i := range reply {
if reply[i] == nil {
continue
}
p, ok := reply[i].([]byte)
if !ok {
return nil, fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", reply[i])
}
result[i] = p
}
return result, nil
case nil:
return nil, ErrNil
case Error:
return nil, reply
}
return nil, fmt.Errorf("redigo: unexpected type for ByteSlices, got type %T", reply)
}
// Ints is a helper that converts an array command reply to a []int. If
// err is not equal to nil, then Ints returns nil, err.
func Ints(reply interface{}, err error) ([]int, error) {
var ints []int
values, err := Values(reply, err)
if err != nil {
return ints, err
}
if err := ScanSlice(values, &ints); err != nil {
return ints, err
}
return ints, nil
}
// StringMap is a helper that converts an array of strings (alternating key, value)
// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
// Requires an even number of values in result.
func StringMap(result interface{}, err error) (map[string]string, error) {
values, err := Values(result, err)
if err != nil {
return nil, err
}
if len(values)%2 != 0 {
return nil, errors.New("redigo: StringMap expects even number of values result")
}
m := make(map[string]string, len(values)/2)
for i := 0; i < len(values); i += 2 {
key, okKey := values[i].([]byte)
value, okValue := values[i+1].([]byte)
if !okKey || !okValue {
return nil, errors.New("redigo: ScanMap key not a bulk string value")
}
m[string(key)] = string(value)
}
return m, nil
}
// IntMap is a helper that converts an array of strings (alternating key, value)
// into a map[string]int. The HGETALL command returns replies in this format.
// Requires an even number of values in result.
func IntMap(result interface{}, err error) (map[string]int, error) {
values, err := Values(result, err)
if err != nil {
return nil, err
}
if len(values)%2 != 0 {
return nil, errors.New("redigo: IntMap expects even number of values result")
}
m := make(map[string]int, len(values)/2)
for i := 0; i < len(values); i += 2 {
key, ok := values[i].([]byte)
if !ok {
return nil, errors.New("redigo: ScanMap key not a bulk string value")
}
value, err := Int(values[i+1], nil)
if err != nil {
return nil, err
}
m[string(key)] = value
}
return m, nil
}
// Int64Map is a helper that converts an array of strings (alternating key, value)
// into a map[string]int64. The HGETALL command returns replies in this format.
// Requires an even number of values in result.
func Int64Map(result interface{}, err error) (map[string]int64, error) {
values, err := Values(result, err)
if err != nil {
return nil, err
}
if len(values)%2 != 0 {
return nil, errors.New("redigo: Int64Map expects even number of values result")
}
m := make(map[string]int64, len(values)/2)
for i := 0; i < len(values); i += 2 {
key, ok := values[i].([]byte)
if !ok {
return nil, errors.New("redigo: ScanMap key not a bulk string value")
}
value, err := Int64(values[i+1], nil)
if err != nil {
return nil, err
}
m[string(key)] = value
}
return m, nil
}
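A minimal usage sketch for the reply helpers above (illustrative only, not part of the vendored file; it assumes a reachable Redis server and the package's Dial function from conn.go):

package main

import (
	"log"

	"github.com/minio/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Each helper wraps the (reply, err) pair returned by Do.
	if _, err := c.Do("SET", "greeting", "hello"); err != nil {
		log.Fatal(err)
	}
	s, err := redis.String(c.Do("GET", "greeting")) // bulk string -> string
	if err != nil {
		log.Fatal(err)
	}
	n, err := redis.Uint64(c.Do("INCR", "counter")) // integer reply -> uint64
	if err != nil {
		log.Fatal(err)
	}
	m, err := redis.StringMap(c.Do("HGETALL", "myhash")) // alternating k/v -> map
	if err != nil {
		log.Fatal(err)
	}
	log.Println(s, n, m)
}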

vendor/github.com/minio/redigo/redis/scan.go generated vendored Normal file
@@ -0,0 +1,555 @@
// Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
)
func ensureLen(d reflect.Value, n int) {
if n > d.Cap() {
d.Set(reflect.MakeSlice(d.Type(), n, n))
} else {
d.SetLen(n)
}
}
func cannotConvert(d reflect.Value, s interface{}) error {
var sname string
switch s.(type) {
case string:
sname = "Redis simple string"
case Error:
sname = "Redis error"
case int64:
sname = "Redis integer"
case []byte:
sname = "Redis bulk string"
case []interface{}:
sname = "Redis array"
default:
sname = reflect.TypeOf(s).String()
}
return fmt.Errorf("cannot convert from %s to %s", sname, d.Type())
}
func convertAssignBulkString(d reflect.Value, s []byte) (err error) {
switch d.Type().Kind() {
case reflect.Float32, reflect.Float64:
var x float64
x, err = strconv.ParseFloat(string(s), d.Type().Bits())
d.SetFloat(x)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var x int64
x, err = strconv.ParseInt(string(s), 10, d.Type().Bits())
d.SetInt(x)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
var x uint64
x, err = strconv.ParseUint(string(s), 10, d.Type().Bits())
d.SetUint(x)
case reflect.Bool:
var x bool
x, err = strconv.ParseBool(string(s))
d.SetBool(x)
case reflect.String:
d.SetString(string(s))
case reflect.Slice:
if d.Type().Elem().Kind() != reflect.Uint8 {
err = cannotConvert(d, s)
} else {
d.SetBytes(s)
}
default:
err = cannotConvert(d, s)
}
return
}
func convertAssignInt(d reflect.Value, s int64) (err error) {
switch d.Type().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
d.SetInt(s)
if d.Int() != s {
err = strconv.ErrRange
d.SetInt(0)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if s < 0 {
err = strconv.ErrRange
} else {
x := uint64(s)
d.SetUint(x)
if d.Uint() != x {
err = strconv.ErrRange
d.SetUint(0)
}
}
case reflect.Bool:
d.SetBool(s != 0)
default:
err = cannotConvert(d, s)
}
return
}
func convertAssignValue(d reflect.Value, s interface{}) (err error) {
switch s := s.(type) {
case []byte:
err = convertAssignBulkString(d, s)
case int64:
err = convertAssignInt(d, s)
default:
err = cannotConvert(d, s)
}
return err
}
func convertAssignArray(d reflect.Value, s []interface{}) error {
if d.Type().Kind() != reflect.Slice {
return cannotConvert(d, s)
}
ensureLen(d, len(s))
for i := 0; i < len(s); i++ {
if err := convertAssignValue(d.Index(i), s[i]); err != nil {
return err
}
}
return nil
}
func convertAssign(d interface{}, s interface{}) (err error) {
// Handle the most common destination types using type switches and
// fall back to reflection for all other types.
switch s := s.(type) {
case nil:
// ignore
case []byte:
switch d := d.(type) {
case *string:
*d = string(s)
case *int:
*d, err = strconv.Atoi(string(s))
case *bool:
*d, err = strconv.ParseBool(string(s))
case *[]byte:
*d = s
case *interface{}:
*d = s
case nil:
// skip value
default:
if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
err = cannotConvert(d, s)
} else {
err = convertAssignBulkString(d.Elem(), s)
}
}
case int64:
switch d := d.(type) {
case *int:
x := int(s)
if int64(x) != s {
err = strconv.ErrRange
x = 0
}
*d = x
case *bool:
*d = s != 0
case *interface{}:
*d = s
case nil:
// skip value
default:
if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
err = cannotConvert(d, s)
} else {
err = convertAssignInt(d.Elem(), s)
}
}
case string:
switch d := d.(type) {
case *string:
*d = string(s)
default:
err = cannotConvert(reflect.ValueOf(d), s)
}
case []interface{}:
switch d := d.(type) {
case *[]interface{}:
*d = s
case *interface{}:
*d = s
case nil:
// skip value
default:
if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
err = cannotConvert(d, s)
} else {
err = convertAssignArray(d.Elem(), s)
}
}
case Error:
err = s
default:
err = cannotConvert(reflect.ValueOf(d), s)
}
return
}
// Scan copies from src to the values pointed at by dest.
//
// The values pointed at by dest must be an integer, float, boolean, string,
// []byte, interface{} or slices of these types. Scan uses the standard strconv
// package to convert bulk strings to numeric and boolean types.
//
// If a dest value is nil, then the corresponding src value is skipped.
//
// If a src element is nil, then the corresponding dest value is not modified.
//
// To enable easy use of Scan in a loop, Scan returns the slice of src
// following the copied values.
func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
if len(src) < len(dest) {
return nil, errors.New("redigo.Scan: array short")
}
var err error
for i, d := range dest {
err = convertAssign(d, src[i])
if err != nil {
err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err)
break
}
}
return src[len(dest):], err
}
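// An illustrative sketch (not part of the upstream source) of Scan with a
// flat HMGET reply, assuming an established Conn c:
//
//	values, err := redis.Values(c.Do("HMGET", "album:1", "title", "rating"))
//	if err == nil {
//		var title string
//		var rating int
//		_, err = redis.Scan(values, &title, &rating)
//	}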
type fieldSpec struct {
name string
index []int
omitEmpty bool
}
type structSpec struct {
m map[string]*fieldSpec
l []*fieldSpec
}
func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
return ss.m[string(name)]
}
func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
switch {
case f.PkgPath != "" && !f.Anonymous:
// Ignore unexported fields.
case f.Anonymous:
// TODO: Handle pointers. Requires change to decoder and
// protection against infinite recursion.
if f.Type.Kind() == reflect.Struct {
compileStructSpec(f.Type, depth, append(index, i), ss)
}
default:
fs := &fieldSpec{name: f.Name}
tag := f.Tag.Get("redis")
p := strings.Split(tag, ",")
if len(p) > 0 {
if p[0] == "-" {
continue
}
if len(p[0]) > 0 {
fs.name = p[0]
}
for _, s := range p[1:] {
switch s {
case "omitempty":
fs.omitEmpty = true
default:
panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name()))
}
}
}
d, found := depth[fs.name]
if !found {
d = 1 << 30
}
switch {
case len(index) == d:
// At same depth, remove from result.
delete(ss.m, fs.name)
j := 0
for i := 0; i < len(ss.l); i++ {
if fs.name != ss.l[i].name {
ss.l[j] = ss.l[i]
j++
}
}
ss.l = ss.l[:j]
case len(index) < d:
fs.index = make([]int, len(index)+1)
copy(fs.index, index)
fs.index[len(index)] = i
depth[fs.name] = len(index)
ss.m[fs.name] = fs
ss.l = append(ss.l, fs)
}
}
}
}
var (
structSpecMutex sync.RWMutex
structSpecCache = make(map[reflect.Type]*structSpec)
defaultFieldSpec = &fieldSpec{}
)
func structSpecForType(t reflect.Type) *structSpec {
structSpecMutex.RLock()
ss, found := structSpecCache[t]
structSpecMutex.RUnlock()
if found {
return ss
}
structSpecMutex.Lock()
defer structSpecMutex.Unlock()
ss, found = structSpecCache[t]
if found {
return ss
}
ss = &structSpec{m: make(map[string]*fieldSpec)}
compileStructSpec(t, make(map[string]int), nil, ss)
structSpecCache[t] = ss
return ss
}
var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct")
// ScanStruct scans alternating names and values from src to a struct. The
// HGETALL and CONFIG GET commands return replies in this format.
//
// ScanStruct uses exported field names to match values in the response. Use
// 'redis' field tag to override the name:
//
// Field int `redis:"myName"`
//
// Fields with the tag redis:"-" are ignored.
//
// Integer, float, boolean, string and []byte fields are supported. Scan uses the
// standard strconv package to convert bulk string values to numeric and
// boolean types.
//
// If a src element is nil, then the corresponding field is not modified.
func ScanStruct(src []interface{}, dest interface{}) error {
d := reflect.ValueOf(dest)
if d.Kind() != reflect.Ptr || d.IsNil() {
return errScanStructValue
}
d = d.Elem()
if d.Kind() != reflect.Struct {
return errScanStructValue
}
ss := structSpecForType(d.Type())
if len(src)%2 != 0 {
return errors.New("redigo.ScanStruct: number of values not a multiple of 2")
}
for i := 0; i < len(src); i += 2 {
s := src[i+1]
if s == nil {
continue
}
name, ok := src[i].([]byte)
if !ok {
return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i)
}
fs := ss.fieldSpec(name)
if fs == nil {
continue
}
if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err)
}
}
return nil
}
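// An illustrative sketch (not part of the upstream source) of ScanStruct
// with a tagged struct, assuming an established Conn c:
//
//	type Album struct {
//		Title  string `redis:"title"`
//		Rating int    `redis:"rating"`
//	}
//
//	values, err := redis.Values(c.Do("HGETALL", "album:1"))
//	if err == nil {
//		var a Album
//		err = redis.ScanStruct(values, &a)
//	}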
var (
errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a slice")
)
// ScanSlice scans src to the slice pointed to by dest. The elements of the
// dest slice must be integer, float, boolean, string, struct or pointer to
// struct values.
//
// Struct fields must be integer, float, boolean or string values. All struct
// fields are used unless a subset is specified using fieldNames.
func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error {
d := reflect.ValueOf(dest)
if d.Kind() != reflect.Ptr || d.IsNil() {
return errScanSliceValue
}
d = d.Elem()
if d.Kind() != reflect.Slice {
return errScanSliceValue
}
isPtr := false
t := d.Type().Elem()
if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
isPtr = true
t = t.Elem()
}
if t.Kind() != reflect.Struct {
ensureLen(d, len(src))
for i, s := range src {
if s == nil {
continue
}
if err := convertAssignValue(d.Index(i), s); err != nil {
return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err)
}
}
return nil
}
ss := structSpecForType(t)
fss := ss.l
if len(fieldNames) > 0 {
fss = make([]*fieldSpec, len(fieldNames))
for i, name := range fieldNames {
fss[i] = ss.m[name]
if fss[i] == nil {
return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name)
}
}
}
if len(fss) == 0 {
return errors.New("redigo.ScanSlice: no struct fields")
}
n := len(src) / len(fss)
if n*len(fss) != len(src) {
return errors.New("redigo.ScanSlice: length not a multiple of struct field count")
}
ensureLen(d, n)
for i := 0; i < n; i++ {
d := d.Index(i)
if isPtr {
if d.IsNil() {
d.Set(reflect.New(t))
}
d = d.Elem()
}
for j, fs := range fss {
s := src[i*len(fss)+j]
if s == nil {
continue
}
if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err)
}
}
}
return nil
}
// Args is a helper for constructing command arguments from structured values.
type Args []interface{}
// Add returns the result of appending value to args.
func (args Args) Add(value ...interface{}) Args {
return append(args, value...)
}
// AddFlat returns the result of appending the flattened value of v to args.
//
// Maps are flattened by appending the alternating keys and map values to args.
//
// Slices are flattened by appending the slice elements to args.
//
// Structs are flattened by appending the alternating names and values of
// exported fields to args. If v is a nil struct pointer, then nothing is
// appended. The 'redis' field tag overrides struct field names. See ScanStruct
// for more information on the use of the 'redis' field tag.
//
// Other types are appended to args as is.
func (args Args) AddFlat(v interface{}) Args {
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.Struct:
args = flattenStruct(args, rv)
case reflect.Slice:
for i := 0; i < rv.Len(); i++ {
args = append(args, rv.Index(i).Interface())
}
case reflect.Map:
for _, k := range rv.MapKeys() {
args = append(args, k.Interface(), rv.MapIndex(k).Interface())
}
case reflect.Ptr:
if rv.Type().Elem().Kind() == reflect.Struct {
if !rv.IsNil() {
args = flattenStruct(args, rv.Elem())
}
} else {
args = append(args, v)
}
default:
args = append(args, v)
}
return args
}
func flattenStruct(args Args, v reflect.Value) Args {
ss := structSpecForType(v.Type())
for _, fs := range ss.l {
fv := v.FieldByIndex(fs.index)
if fs.omitEmpty {
var empty = false
switch fv.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
empty = fv.Len() == 0
case reflect.Bool:
empty = !fv.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
empty = fv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
empty = fv.Uint() == 0
case reflect.Float32, reflect.Float64:
empty = fv.Float() == 0
case reflect.Interface, reflect.Ptr:
empty = fv.IsNil()
}
if empty {
continue
}
}
args = append(args, fs.name, fv.Interface())
}
return args
}
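Args.AddFlat and ScanStruct are natural inverses for storing structs in Redis hashes. A hedged round-trip sketch (illustrative, not part of the vendored file; it assumes a reachable server):

package main

import (
	"log"

	"github.com/minio/redigo/redis"
)

type Album struct {
	Title  string `redis:"title"`
	Rating int    `redis:"rating"`
}

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Write: AddFlat appends alternating field names and values.
	in := Album{Title: "Loveless", Rating: 5}
	if _, err := c.Do("HMSET", redis.Args{}.Add("album:1").AddFlat(&in)...); err != nil {
		log.Fatal(err)
	}

	// Read back: ScanStruct consumes the alternating names and values.
	values, err := redis.Values(c.Do("HGETALL", "album:1"))
	if err != nil {
		log.Fatal(err)
	}
	var out Album
	if err := redis.ScanStruct(values, &out); err != nil {
		log.Fatal(err)
	}
	log.Printf("%+v", out)
}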

vendor/github.com/minio/redigo/redis/script.go generated vendored Normal file
@@ -0,0 +1,86 @@
// Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
import (
"crypto/sha1"
"encoding/hex"
"io"
"strings"
)
// Script encapsulates the source, hash and key count for a Lua script. See
// http://redis.io/commands/eval for information on scripts in Redis.
type Script struct {
keyCount int
src string
hash string
}
// NewScript returns a new script object. If keyCount is greater than or equal
// to zero, then the count is automatically inserted in the EVAL command
// argument list. If keyCount is less than zero, then the application supplies
// the count as the first value in the keysAndArgs argument to the Do, Send and
// SendHash methods.
func NewScript(keyCount int, src string) *Script {
h := sha1.New()
io.WriteString(h, src)
return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))}
}
func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} {
var args []interface{}
if s.keyCount < 0 {
args = make([]interface{}, 1+len(keysAndArgs))
args[0] = spec
copy(args[1:], keysAndArgs)
} else {
args = make([]interface{}, 2+len(keysAndArgs))
args[0] = spec
args[1] = s.keyCount
copy(args[2:], keysAndArgs)
}
return args
}
// Do evaluates the script. Under the covers, Do optimistically evaluates the
// script using the EVALSHA command. If the command fails because the script is
// not loaded, then Do evaluates the script using the EVAL command (thus
// causing the script to load).
func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) {
v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...)
if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") {
v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...)
}
return v, err
}
// SendHash evaluates the script without waiting for the reply. The script is
// evaluated with the EVALSHA command. The application must ensure that the
// script is loaded by a previous call to Send, Do or Load methods.
func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error {
return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...)
}
// Send evaluates the script without waiting for the reply.
func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error {
return c.Send("EVAL", s.args(s.src, keysAndArgs)...)
}
// Load loads the script without evaluating it.
func (s *Script) Load(c Conn) error {
_, err := c.Do("SCRIPT", "LOAD", s.src)
return err
}
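A hedged usage sketch for Script (illustrative, not part of the vendored file): the first Do call optimistically tries EVALSHA and transparently falls back to EVAL, which loads the script as a side effect.

package main

import (
	"log"

	"github.com/minio/redigo/redis"
)

// A one-key Lua script; the key count 1 is inserted into the EVAL argument list.
var incrByScript = redis.NewScript(1, `return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Do tries EVALSHA first and falls back to EVAL on NOSCRIPT.
	n, err := redis.Int64(incrByScript.Do(c, "counter", 10))
	if err != nil {
		log.Fatal(err)
	}
	log.Println("counter is now", n)
}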

vendor/github.com/streadway/amqp/LICENSE generated vendored Normal file
@@ -0,0 +1,23 @@
Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/streadway/amqp/README.md generated vendored Normal file
@@ -0,0 +1,81 @@
# AMQP
AMQP 0.9.1 client with RabbitMQ extensions in Go.
# Status
*Beta*
[![Build Status](https://secure.travis-ci.org/streadway/amqp.png)](http://travis-ci.org/streadway/amqp)
API changes unlikely and will be discussed on [Github
issues](https://github.com/streadway/amqp/issues) along with any bugs or
enhancements.
# Goals
Provide a functional interface that closely represents the AMQP 0.9.1 model
targeted to RabbitMQ as a server. This includes the minimum necessary to
interact with the semantics of the protocol.
# Non-goals
Things not intended to be supported.
* Auto reconnect and re-synchronization of client and server topologies.
* Reconnection would require understanding the error paths when the
topology cannot be declared on reconnect. This would require a new set
of types and code paths that are best suited at the call-site of this
package. AMQP has a dynamic topology that needs all peers to agree. If
this doesn't happen, the behavior is undefined. Instead of producing a
possible interface with undefined behavior, this package is designed to
be simple for the caller to implement the necessary connection-time
topology declaration so that reconnection is trivial and encapsulated in
the caller's application code.
* AMQP Protocol negotiation for forward or backward compatibility.
* 0.9.1 is stable and widely deployed. Versions 0.10 and 1.0 are divergent
specifications that change the semantics and wire format of the protocol.
We will accept patches for other protocol support but have no plans for
implementation ourselves.
* Anything other than PLAIN and EXTERNAL authentication mechanisms.
* Keeping the mechanisms interface modular makes it possible to extend
outside of this package. If other mechanisms prove to be popular, then
we would accept patches to include them in this package.
# Usage
See the 'examples' subdirectory for simple producers and consumers executables.
If you have a use-case in mind which isn't well-represented by the examples,
please file an issue.
# Documentation
Use [Godoc documentation](http://godoc.org/github.com/streadway/amqp) for
reference and usage.
[RabbitMQ tutorials in
Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) are also
available.
# Contributing
Pull requests are very much welcomed. Create your pull request on a non-master
branch, make sure a test or example is included that covers your change, and
that your commits represent coherent changes that include a reason for the change.
To run the integration tests, make sure you have RabbitMQ running on any host,
export the environment variable `AMQP_URL=amqp://host/` and run `go test -tags
integration`. TravisCI will also run the integration tests.
Thanks to the [community of contributors](https://github.com/streadway/amqp/graphs/contributors).
# External packages
* Google App Engine Dialer support: [https://github.com/soundtrackyourbrand/gaeamqp](https://github.com/soundtrackyourbrand/gaeamqp)
* RabbitMQ examples in Go: [https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go)
# License
BSD 2 clause - see LICENSE for more details.

vendor/github.com/streadway/amqp/allocator.go generated vendored Normal file
@@ -0,0 +1,106 @@
package amqp
import (
"bytes"
"fmt"
"math/big"
)
const (
free = 0
allocated = 1
)
// allocator maintains a bitset of allocated numbers.
type allocator struct {
pool *big.Int
last int
low int
high int
}
// newAllocator reserves and frees integers out of a range between low and
// high.
//
// O(N) worst case space used, where N is maximum allocated, divided by
// sizeof(big.Word)
func newAllocator(low, high int) *allocator {
return &allocator{
pool: big.NewInt(0),
last: low,
low: low,
high: high,
}
}
// String returns a string describing the contents of the allocator like
// "allocator[low..high] reserved..until"
//
// O(N) where N is high-low
func (a allocator) String() string {
b := &bytes.Buffer{}
fmt.Fprintf(b, "allocator[%d..%d]", a.low, a.high)
for low := a.low; low <= a.high; low++ {
high := low
for a.reserved(high) && high <= a.high {
high++
}
if high > low+1 {
fmt.Fprintf(b, " %d..%d", low, high-1)
} else if high > low {
fmt.Fprintf(b, " %d", high-1)
}
low = high
}
return b.String()
}
// next reserves and returns the next available number out of the range between
// low and high. If no number is available, false is returned.
//
// O(N) worst case runtime where N is allocated, but usually O(1) due to a
// rolling index into the oldest allocation.
func (a *allocator) next() (int, bool) {
wrapped := a.last
// Find trailing bit
for ; a.last <= a.high; a.last++ {
if a.reserve(a.last) {
return a.last, true
}
}
// Find preceding freed pool
a.last = a.low
for ; a.last < wrapped; a.last++ {
if a.reserve(a.last) {
return a.last, true
}
}
return 0, false
}
// reserve claims the bit if it is not already claimed, returning true if
// successfully claimed.
func (a *allocator) reserve(n int) bool {
if a.reserved(n) {
return false
}
a.pool.SetBit(a.pool, n-a.low, allocated)
return true
}
// reserved returns true if the integer has been allocated
func (a *allocator) reserved(n int) bool {
return a.pool.Bit(n-a.low) == allocated
}
// release frees the use of the number for another allocation
func (a *allocator) release(n int) {
a.pool.SetBit(a.pool, n-a.low, free)
}
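// An illustrative in-package sketch (not part of the upstream file) of the
// allocator lifecycle, as used for channel id assignment:
//
//	a := newAllocator(1, 3)
//	id, ok := a.next() // 1, true
//	id, ok = a.next()  // 2, true
//	a.release(1)       // 1 becomes reusable
//	id, ok = a.next()  // 3, true; the rolling index wraps back to 1 next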

vendor/github.com/streadway/amqp/auth.go generated vendored Normal file
@@ -0,0 +1,44 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp
package amqp
import (
"fmt"
)
// Authentication interface provides a means for different SASL authentication
// mechanisms to be used during connection tuning.
type Authentication interface {
Mechanism() string
Response() string
}
// PlainAuth is similar to Basic Auth in HTTP.
type PlainAuth struct {
Username string
Password string
}
func (me *PlainAuth) Mechanism() string {
return "PLAIN"
}
func (me *PlainAuth) Response() string {
return fmt.Sprintf("\000%s\000%s", me.Username, me.Password)
}
// Finds the first mechanism preferred by the client that the server supports.
func pickSASLMechanism(client []Authentication, serverMechanisms []string) (auth Authentication, ok bool) {
for _, auth = range client {
for _, mech := range serverMechanisms {
if auth.Mechanism() == mech {
return auth, true
}
}
}
return
}
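A hedged sketch of supplying an explicit PLAIN mechanism instead of credentials parsed from the URL (illustrative, not part of the vendored file; DialConfig is defined in connection.go below):

package main

import (
	"log"

	"github.com/streadway/amqp"
)

func main() {
	// Prefer an explicit PLAIN mechanism over credentials parsed from the URL.
	auth := &amqp.PlainAuth{Username: "guest", Password: "guest"}
	conn, err := amqp.DialConfig("amqp://localhost:5672/", amqp.Config{
		SASL: []amqp.Authentication{auth},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}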

vendor/github.com/streadway/amqp/certs.sh generated vendored Executable file
@@ -0,0 +1,159 @@
#!/bin/sh
#
# Creates the CA, server and client certs to be used by tls_test.go
# http://www.rabbitmq.com/ssl.html
#
# Copy stdout into the const section of tls_test.go or use for RabbitMQ
#
root=$PWD/certs
if [ -f $root/ca/serial ]; then
echo >&2 "Previous installation found"
echo >&2 "Remove $root/ca and rerun to overwrite"
exit 1
fi
mkdir -p $root/ca/private
mkdir -p $root/ca/certs
mkdir -p $root/server
mkdir -p $root/client
cd $root/ca
chmod 700 private
touch index.txt
echo 'unique_subject = no' > index.txt.attr
echo '01' > serial
echo >openssl.cnf '
[ ca ]
default_ca = testca
[ testca ]
dir = .
certificate = $dir/cacert.pem
database = $dir/index.txt
new_certs_dir = $dir/certs
private_key = $dir/private/cakey.pem
serial = $dir/serial
default_crl_days = 7
default_days = 3650
default_md = sha1
policy = testca_policy
x509_extensions = certificate_extensions
[ testca_policy ]
commonName = supplied
stateOrProvinceName = optional
countryName = optional
emailAddress = optional
organizationName = optional
organizationalUnitName = optional
[ certificate_extensions ]
basicConstraints = CA:false
[ req ]
default_bits = 2048
default_keyfile = ./private/cakey.pem
default_md = sha1
prompt = yes
distinguished_name = root_ca_distinguished_name
x509_extensions = root_ca_extensions
[ root_ca_distinguished_name ]
commonName = hostname
[ root_ca_extensions ]
basicConstraints = CA:true
keyUsage = keyCertSign, cRLSign
[ client_ca_extensions ]
basicConstraints = CA:false
keyUsage = digitalSignature
extendedKeyUsage = 1.3.6.1.5.5.7.3.2
[ server_ca_extensions ]
basicConstraints = CA:false
keyUsage = keyEncipherment
extendedKeyUsage = 1.3.6.1.5.5.7.3.1
subjectAltName = @alt_names
[ alt_names ]
IP.1 = 127.0.0.1
'
openssl req \
-x509 \
-nodes \
-config openssl.cnf \
-newkey rsa:2048 \
-days 3650 \
-subj "/CN=MyTestCA/" \
-out cacert.pem \
-outform PEM
openssl x509 \
-in cacert.pem \
-out cacert.cer \
-outform DER
openssl genrsa -out $root/server/key.pem 2048
openssl genrsa -out $root/client/key.pem 2048
openssl req \
-new \
-nodes \
-config openssl.cnf \
-subj "/CN=127.0.0.1/O=server/" \
-key $root/server/key.pem \
-out $root/server/req.pem \
-outform PEM
openssl req \
-new \
-nodes \
-config openssl.cnf \
-subj "/CN=127.0.0.1/O=client/" \
-key $root/client/key.pem \
-out $root/client/req.pem \
-outform PEM
openssl ca \
-config openssl.cnf \
-in $root/server/req.pem \
-out $root/server/cert.pem \
-notext \
-batch \
-extensions server_ca_extensions
openssl ca \
-config openssl.cnf \
-in $root/client/req.pem \
-out $root/client/cert.pem \
-notext \
-batch \
-extensions client_ca_extensions
cat <<-END
const caCert = \`
`cat $root/ca/cacert.pem`
\`
const serverCert = \`
`cat $root/server/cert.pem`
\`
const serverKey = \`
`cat $root/server/key.pem`
\`
const clientCert = \`
`cat $root/client/cert.pem`
\`
const clientKey = \`
`cat $root/client/key.pem`
\`
END

vendor/github.com/streadway/amqp/channel.go generated vendored Normal file
(File diff suppressed because it is too large.)

vendor/github.com/streadway/amqp/confirms.go generated vendored Normal file
@@ -0,0 +1,93 @@
package amqp
import "sync"
// confirms resequences and notifies one or multiple publisher confirmation listeners
type confirms struct {
m sync.Mutex
listeners []chan Confirmation
sequencer map[uint64]Confirmation
published uint64
expecting uint64
}
// newConfirms allocates a confirms
func newConfirms() *confirms {
return &confirms{
sequencer: map[uint64]Confirmation{},
published: 0,
expecting: 1,
}
}
func (c *confirms) Listen(l chan Confirmation) {
c.m.Lock()
defer c.m.Unlock()
c.listeners = append(c.listeners, l)
}
// Publish increments the publishing counter
func (c *confirms) Publish() uint64 {
c.m.Lock()
defer c.m.Unlock()
c.published++
return c.published
}
// confirm confirms one publishing, increments the expecting delivery tag, and
// removes bookkeeping for that delivery tag.
func (c *confirms) confirm(confirmation Confirmation) {
delete(c.sequencer, c.expecting)
c.expecting++
for _, l := range c.listeners {
l <- confirmation
}
}
// resequence confirms any out of order delivered confirmations
func (c *confirms) resequence() {
for c.expecting <= c.published {
sequenced, found := c.sequencer[c.expecting]
if !found {
return
}
c.confirm(sequenced)
}
}
// One confirms one publishing and all following in the publishing sequence
func (c *confirms) One(confirmed Confirmation) {
c.m.Lock()
defer c.m.Unlock()
if c.expecting == confirmed.DeliveryTag {
c.confirm(confirmed)
} else {
c.sequencer[confirmed.DeliveryTag] = confirmed
}
c.resequence()
}
// Multiple confirms all publishings up to and including the delivery tag
func (c *confirms) Multiple(confirmed Confirmation) {
c.m.Lock()
defer c.m.Unlock()
for c.expecting <= confirmed.DeliveryTag {
c.confirm(Confirmation{c.expecting, confirmed.Ack})
}
}
// Close closes all listeners, discarding any out of sequence confirmations
func (c *confirms) Close() error {
c.m.Lock()
defer c.m.Unlock()
for _, l := range c.listeners {
close(l)
}
c.listeners = nil
return nil
}
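The confirms type is internal; it backs the publisher-confirm methods on Channel (defined in channel.go, whose diff is suppressed above). A hedged sketch of that public surface, assuming a reachable broker and a routable destination queue:

package main

import (
	"log"

	"github.com/streadway/amqp"
)

func main() {
	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}
	// Put the channel into confirm mode; confirmations arrive in order on
	// the registered listener (resequenced by the confirms type above).
	if err := ch.Confirm(false); err != nil {
		log.Fatal(err)
	}
	acks := ch.NotifyPublish(make(chan amqp.Confirmation, 1))

	if err := ch.Publish("", "work", false, false, amqp.Publishing{
		Body: []byte("hello"),
	}); err != nil {
		log.Fatal(err)
	}
	if c := <-acks; !c.Ack {
		log.Println("broker nacked delivery tag", c.DeliveryTag)
	}
}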

vendor/github.com/streadway/amqp/connection.go generated vendored Normal file
@@ -0,0 +1,769 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp
package amqp
import (
"bufio"
"crypto/tls"
"io"
"net"
"reflect"
"strconv"
"strings"
"sync"
"time"
)
const (
maxChannelMax = (2 << 15) - 1
defaultHeartbeat = 10 * time.Second
defaultConnectionTimeout = 30 * time.Second
defaultProduct = "https://github.com/streadway/amqp"
defaultVersion = "β"
defaultChannelMax = maxChannelMax
)
// Config is used in DialConfig and Open to specify the desired tuning
// parameters used during a connection open handshake. The negotiated tuning
// will be stored in the returned connection's Config field.
type Config struct {
// The SASL mechanisms to try in the client request, and the successful
// mechanism used on the Connection object.
// If SASL is nil, PlainAuth from the URL is used.
SASL []Authentication
// Vhost specifies the namespace of permissions, exchanges, queues and
// bindings on the server. Dial sets this to the path parsed from the URL.
Vhost string
ChannelMax int // 0 max channels means 2^16 - 1
FrameSize int // 0 max bytes means unlimited
Heartbeat time.Duration // less than 1s uses the server's interval
// TLSClientConfig specifies the client configuration of the TLS connection
// when establishing a tls transport.
// If the URL uses an amqps scheme, then an empty tls.Config with the
// ServerName from the URL is used.
TLSClientConfig *tls.Config
// Properties is table of properties that the client advertises to the server.
// This is an optional setting - if the application does not set this,
// the underlying library will use a generic set of client properties.
Properties Table
// Dial returns a net.Conn prepared for a TLS handshake with TLSClientConfig,
// then an AMQP connection handshake.
// If Dial is nil, net.DialTimeout with a 30s connection and 30s read
// deadline is used.
Dial func(network, addr string) (net.Conn, error)
}
// Connection manages the serialization and deserialization of frames from IO
// and dispatches the frames to the appropriate channel. All RPC methods and
// asynchronous Publishing, Delivery, Ack, Nack and Return messages are
// multiplexed on this channel. There must always be active receivers for
// every asynchronous message on this connection.
type Connection struct {
destructor sync.Once // shutdown once
sendM sync.Mutex // conn writer mutex
m sync.Mutex // struct field mutex
conn io.ReadWriteCloser
rpc chan message
writer *writer
sends chan time.Time // timestamps of each frame sent
deadlines chan readDeadliner // heartbeater updates read deadlines
allocator *allocator // id generator valid after openTune
channels map[uint16]*Channel
noNotify bool // true when we will never notify again
closes []chan *Error
blocks []chan Blocking
errors chan *Error
Config Config // The negotiated Config after connection.open
Major int // Server's major version
Minor int // Server's minor version
Properties Table // Server properties
}
type readDeadliner interface {
SetReadDeadline(time.Time) error
}
type localNetAddr interface {
LocalAddr() net.Addr
}
// defaultDial establishes a connection when config.Dial is not provided
func defaultDial(network, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(network, addr, defaultConnectionTimeout)
if err != nil {
return nil, err
}
// Heartbeating hasn't started yet, don't stall forever on a dead server.
if err := conn.SetReadDeadline(time.Now().Add(defaultConnectionTimeout)); err != nil {
return nil, err
}
return conn, nil
}
// Dial accepts a string in the AMQP URI format and returns a new Connection
// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10
// seconds and sets the initial read deadline to 30 seconds.
//
// Dial uses the zero value of tls.Config when it encounters an amqps://
// scheme. It is equivalent to calling DialTLS(url, nil).
func Dial(url string) (*Connection, error) {
return DialConfig(url, Config{
Heartbeat: defaultHeartbeat,
})
}
// DialTLS accepts a string in the AMQP URI format and returns a new Connection
// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10
// seconds and sets the initial read deadline to 30 seconds.
//
// DialTLS uses the provided tls.Config when encountering an amqps:// scheme.
func DialTLS(url string, amqps *tls.Config) (*Connection, error) {
return DialConfig(url, Config{
Heartbeat: defaultHeartbeat,
TLSClientConfig: amqps,
})
}
// DialConfig accepts a string in the AMQP URI format and a configuration for
// the transport and connection setup, returning a new Connection. Defaults to
// a server heartbeat interval of 10 seconds and sets the initial read deadline
// to 30 seconds.
func DialConfig(url string, config Config) (*Connection, error) {
var err error
var conn net.Conn
uri, err := ParseURI(url)
if err != nil {
return nil, err
}
if config.SASL == nil {
config.SASL = []Authentication{uri.PlainAuth()}
}
if config.Vhost == "" {
config.Vhost = uri.Vhost
}
if uri.Scheme == "amqps" && config.TLSClientConfig == nil {
config.TLSClientConfig = new(tls.Config)
}
addr := net.JoinHostPort(uri.Host, strconv.FormatInt(int64(uri.Port), 10))
dialer := config.Dial
if dialer == nil {
dialer = defaultDial
}
conn, err = dialer("tcp", addr)
if err != nil {
return nil, err
}
if config.TLSClientConfig != nil {
// Use the URI's host for hostname validation unless otherwise set. Make a
// copy so not to modify the caller's reference when the caller reuses a
// tls.Config for a different URL.
if config.TLSClientConfig.ServerName == "" {
c := *config.TLSClientConfig
c.ServerName = uri.Host
config.TLSClientConfig = &c
}
client := tls.Client(conn, config.TLSClientConfig)
if err := client.Handshake(); err != nil {
conn.Close()
return nil, err
}
conn = client
}
return Open(conn, config)
}
/*
Open accepts an already established connection, or other io.ReadWriteCloser as
a transport. Use this method if you have established a TLS connection or wish
to use your own custom transport.
*/
func Open(conn io.ReadWriteCloser, config Config) (*Connection, error) {
me := &Connection{
conn: conn,
writer: &writer{bufio.NewWriter(conn)},
channels: make(map[uint16]*Channel),
rpc: make(chan message),
sends: make(chan time.Time),
errors: make(chan *Error, 1),
deadlines: make(chan readDeadliner, 1),
}
go me.reader(conn)
return me, me.open(config)
}
/*
LocalAddr returns the local TCP peer address, or ":0" (the zero value of net.TCPAddr)
as a fallback default value if the underlying transport does not support LocalAddr().
*/
func (me *Connection) LocalAddr() net.Addr {
if c, ok := me.conn.(localNetAddr); ok {
return c.LocalAddr()
}
return &net.TCPAddr{}
}
/*
NotifyClose registers a listener for close events either initiated by an error
accompanying a connection.close method or by a normal shutdown.
On normal shutdowns, the chan will be closed.
To reconnect after a transport or protocol error, register a listener here and
re-run your setup process.
*/
func (me *Connection) NotifyClose(c chan *Error) chan *Error {
me.m.Lock()
defer me.m.Unlock()
if me.noNotify {
close(c)
} else {
me.closes = append(me.closes, c)
}
return c
}
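// An illustrative sketch (not part of the upstream file) of the reconnect
// pattern described above:
//
//	for {
//		conn, err := amqp.Dial(url)
//		if err != nil {
//			time.Sleep(time.Second)
//			continue
//		}
//		closed := conn.NotifyClose(make(chan *amqp.Error, 1))
//		setupTopology(conn) // hypothetical application helper
//		if err := <-closed; err != nil {
//			log.Println("connection lost:", err)
//		}
//		// loop to redial and re-run the topology setup
//	}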
/*
NotifyBlocked registers a listener for RabbitMQ specific TCP flow control method
extensions connection.blocked and connection.unblocked. Flow control is active
with a reason when Blocking.Blocked is true. When a Connection is blocked, all
methods will block across all connections until server resources become free
again.
This optional extension is supported by the server when the
"connection.blocked" server capability key is true.
*/
func (me *Connection) NotifyBlocked(c chan Blocking) chan Blocking {
me.m.Lock()
defer me.m.Unlock()
if me.noNotify {
close(c)
} else {
me.blocks = append(me.blocks, c)
}
return c
}
/*
Close requests and waits for the response to close the AMQP connection.
It's advisable to call this method when publishing to ensure all kernel buffers
have been flushed on the server and client before exiting.
An error indicates that the server may not have received this request to close, but
the connection should be treated as closed regardless.
After returning from this call, all resources associated with this connection,
including the underlying io, Channels, Notify listeners and Channel consumers
will also be closed.
*/
func (me *Connection) Close() error {
defer me.shutdown(nil)
return me.call(
&connectionClose{
ReplyCode: replySuccess,
ReplyText: "kthxbai",
},
&connectionCloseOk{},
)
}
func (me *Connection) closeWith(err *Error) error {
defer me.shutdown(err)
return me.call(
&connectionClose{
ReplyCode: uint16(err.Code),
ReplyText: err.Reason,
},
&connectionCloseOk{},
)
}
func (me *Connection) send(f frame) error {
me.sendM.Lock()
err := me.writer.WriteFrame(f)
me.sendM.Unlock()
if err != nil {
// shutdown could be re-entrant from signaling notify chans
go me.shutdown(&Error{
Code: FrameError,
Reason: err.Error(),
})
} else {
// Broadcast we sent a frame, reducing heartbeats, only
// if there is something that can receive - like a non-reentrant
// call or if the heartbeater isn't running
select {
case me.sends <- time.Now():
default:
}
}
return err
}
func (me *Connection) shutdown(err *Error) {
me.destructor.Do(func() {
if err != nil {
for _, c := range me.closes {
c <- err
}
}
for _, ch := range me.channels {
me.closeChannel(ch, err)
}
if err != nil {
me.errors <- err
}
me.conn.Close()
for _, c := range me.closes {
close(c)
}
for _, c := range me.blocks {
close(c)
}
me.m.Lock()
me.noNotify = true
me.m.Unlock()
})
}
// All methods sent to the connection channel should be synchronous so we
// can handle them directly without a framing component
func (me *Connection) demux(f frame) {
if f.channel() == 0 {
me.dispatch0(f)
} else {
me.dispatchN(f)
}
}
func (me *Connection) dispatch0(f frame) {
switch mf := f.(type) {
case *methodFrame:
switch m := mf.Method.(type) {
case *connectionClose:
// Send immediately as shutdown will close our side of the writer.
me.send(&methodFrame{
ChannelId: 0,
Method: &connectionCloseOk{},
})
me.shutdown(newError(m.ReplyCode, m.ReplyText))
case *connectionBlocked:
for _, c := range me.blocks {
c <- Blocking{Active: true, Reason: m.Reason}
}
case *connectionUnblocked:
for _, c := range me.blocks {
c <- Blocking{Active: false}
}
default:
me.rpc <- m
}
case *heartbeatFrame:
// kthx - all reads reset our deadline. so we can drop this
default:
// lolwat - channel0 only responds to methods and heartbeats
me.closeWith(ErrUnexpectedFrame)
}
}
func (me *Connection) dispatchN(f frame) {
me.m.Lock()
channel := me.channels[f.channel()]
me.m.Unlock()
if channel != nil {
channel.recv(channel, f)
} else {
me.dispatchClosed(f)
}
}
// section 2.3.7: "When a peer decides to close a channel or connection, it
// sends a Close method. The receiving peer MUST respond to a Close with a
// Close-Ok, and then both parties can close their channel or connection. Note
// that if peers ignore Close, deadlock can happen when both peers send Close
// at the same time."
//
// When we don't have a channel, we must respond with close-ok on a close
// method. This can happen between a channel exception on an asynchronous
// method like basic.publish and a synchronous close with channel.close.
// In that case, we'll get both a channel.close and channel.close-ok in any
// order.
func (me *Connection) dispatchClosed(f frame) {
// Only consider method frames, drop content/header frames
if mf, ok := f.(*methodFrame); ok {
switch mf.Method.(type) {
case *channelClose:
me.send(&methodFrame{
ChannelId: f.channel(),
Method: &channelCloseOk{},
})
case *channelCloseOk:
// we are already closed, so do nothing
default:
// unexpected method on closed channel
me.closeWith(ErrClosed)
}
}
}
// reader reads each frame off the IO and hands it off to the connection
// object, which will demux the streams and dispatch to one of the opened
// channels or handle it on channel 0 (the connection channel).
func (me *Connection) reader(r io.Reader) {
buf := bufio.NewReader(r)
frames := &reader{buf}
conn, haveDeadliner := r.(readDeadliner)
for {
frame, err := frames.ReadFrame()
if err != nil {
me.shutdown(&Error{Code: FrameError, Reason: err.Error()})
return
}
me.demux(frame)
if haveDeadliner {
me.deadlines <- conn
}
}
}
// heartbeater ensures that at least one frame is sent at the tuned interval,
// with a jitter tolerance of 1s.
func (me *Connection) heartbeater(interval time.Duration, done chan *Error) {
const maxServerHeartbeatsInFlight = 3
var sendTicks <-chan time.Time
if interval > 0 {
ticker := time.NewTicker(interval)
defer ticker.Stop()
sendTicks = ticker.C
}
lastSent := time.Now()
for {
select {
case at, stillSending := <-me.sends:
// When actively sending, depend on sent frames to reset server timer
if stillSending {
lastSent = at
} else {
return
}
case at := <-sendTicks:
// When idle, fill the space with a heartbeat frame
if at.Sub(lastSent) > interval-time.Second {
if err := me.send(&heartbeatFrame{}); err != nil {
// send heartbeats even after close/closeOk so we
// tick until the connection starts erroring
return
}
}
case conn := <-me.deadlines:
// When reading, reset our side of the deadline, if we've negotiated one with
// a deadline that covers at least 2 server heartbeats
if interval > 0 {
conn.SetReadDeadline(time.Now().Add(maxServerHeartbeatsInFlight * interval))
}
case <-done:
return
}
}
}
// Convenience method to inspect the Connection.Properties["capabilities"]
// Table for server identified capabilities like "basic.ack" or
// "confirm.select".
func (me *Connection) isCapable(featureName string) bool {
capabilities, _ := me.Properties["capabilities"].(Table)
hasFeature, _ := capabilities[featureName].(bool)
return hasFeature
}
// allocateChannel records but does not open a new channel with a unique id.
// This method is the initial part of the channel lifecycle and paired with
// releaseChannel
func (me *Connection) allocateChannel() (*Channel, error) {
me.m.Lock()
defer me.m.Unlock()
id, ok := me.allocator.next()
if !ok {
return nil, ErrChannelMax
}
ch := newChannel(me, uint16(id))
me.channels[uint16(id)] = ch
return ch, nil
}
// releaseChannel removes a channel from the registry as the final part of the
// channel lifecycle
func (me *Connection) releaseChannel(id uint16) {
me.m.Lock()
defer me.m.Unlock()
delete(me.channels, id)
me.allocator.release(int(id))
}
// openChannel allocates and opens a channel, must be paired with closeChannel
func (me *Connection) openChannel() (*Channel, error) {
ch, err := me.allocateChannel()
if err != nil {
return nil, err
}
if err := ch.open(); err != nil {
return nil, err
}
return ch, nil
}
// closeChannel releases and initiates a shutdown of the channel. All channel
// closures should be initiated here for proper channel lifecycle management on
// this connection.
func (me *Connection) closeChannel(ch *Channel, e *Error) {
ch.shutdown(e)
me.releaseChannel(ch.id)
}
/*
Channel opens a unique, concurrent server channel to process the bulk of AMQP
messages. Any error from methods on this receiver will render the receiver
invalid and a new Channel should be opened.
*/
func (me *Connection) Channel() (*Channel, error) {
return me.openChannel()
}
func (me *Connection) call(req message, res ...message) error {
// Special case for when the protocol header frame is sent instead of a
// request method
if req != nil {
if err := me.send(&methodFrame{ChannelId: 0, Method: req}); err != nil {
return err
}
}
select {
case err := <-me.errors:
return err
case msg := <-me.rpc:
// Try to match one of the result types
for _, try := range res {
if reflect.TypeOf(msg) == reflect.TypeOf(try) {
// *res = *msg
vres := reflect.ValueOf(try).Elem()
vmsg := reflect.ValueOf(msg).Elem()
vres.Set(vmsg)
return nil
}
}
return ErrCommandInvalid
}
panic("unreachable")
}
// Connection = open-Connection *use-Connection close-Connection
// open-Connection = C:protocol-header
// S:START C:START-OK
// *challenge
// S:TUNE C:TUNE-OK
// C:OPEN S:OPEN-OK
// challenge = S:SECURE C:SECURE-OK
// use-Connection = *channel
// close-Connection = C:CLOSE S:CLOSE-OK
// / S:CLOSE C:CLOSE-OK
func (me *Connection) open(config Config) error {
if err := me.send(&protocolHeader{}); err != nil {
return err
}
return me.openStart(config)
}
func (me *Connection) openStart(config Config) error {
start := &connectionStart{}
if err := me.call(nil, start); err != nil {
return err
}
me.Major = int(start.VersionMajor)
me.Minor = int(start.VersionMinor)
me.Properties = Table(start.ServerProperties)
// eventually support challenge/response here by also responding to
// connectionSecure.
auth, ok := pickSASLMechanism(config.SASL, strings.Split(start.Mechanisms, " "))
if !ok {
return ErrSASL
}
// Save this mechanism off as the one we chose
me.Config.SASL = []Authentication{auth}
return me.openTune(config, auth)
}
func (me *Connection) openTune(config Config, auth Authentication) error {
if len(config.Properties) == 0 {
config.Properties = Table{
"product": defaultProduct,
"version": defaultVersion,
}
}
config.Properties["capabilities"] = Table{
"connection.blocked": true,
"consumer_cancel_notify": true,
}
ok := &connectionStartOk{
Mechanism: auth.Mechanism(),
Response: auth.Response(),
ClientProperties: config.Properties,
}
tune := &connectionTune{}
if err := me.call(ok, tune); err != nil {
// per spec, a connection can only be closed when it has been opened
// so at this point, we know it's an auth error, but the socket
// was closed instead. Return a meaningful error.
return ErrCredentials
}
// When the server and client both use default 0, then the max channel is
// only limited by uint16.
me.Config.ChannelMax = pick(config.ChannelMax, int(tune.ChannelMax))
if me.Config.ChannelMax == 0 {
me.Config.ChannelMax = defaultChannelMax
}
me.Config.ChannelMax = min(me.Config.ChannelMax, maxChannelMax)
// Frame size includes headers and end byte (len(payload)+8), even if
// this is less than FrameMinSize, use what the server sends because the
// alternative is to stop the handshake here.
me.Config.FrameSize = pick(config.FrameSize, int(tune.FrameMax))
// Save this off for resetDeadline()
me.Config.Heartbeat = time.Second * time.Duration(pick(
int(config.Heartbeat/time.Second),
int(tune.Heartbeat)))
// "The client should start sending heartbeats after receiving a
// Connection.Tune method"
go me.heartbeater(me.Config.Heartbeat, me.NotifyClose(make(chan *Error, 1)))
if err := me.send(&methodFrame{
ChannelId: 0,
Method: &connectionTuneOk{
ChannelMax: uint16(me.Config.ChannelMax),
FrameMax: uint32(me.Config.FrameSize),
Heartbeat: uint16(me.Config.Heartbeat / time.Second),
},
}); err != nil {
return err
}
return me.openVhost(config)
}
func (me *Connection) openVhost(config Config) error {
req := &connectionOpen{VirtualHost: config.Vhost}
res := &connectionOpenOk{}
if err := me.call(req, res); err != nil {
// Cannot be closed yet, but we know it's a vhost problem
return ErrVhost
}
me.Config.Vhost = config.Vhost
return me.openComplete()
}
// openComplete performs any final Connection initialization dependent on the
// connection handshake.
func (me *Connection) openComplete() error {
me.allocator = newAllocator(1, me.Config.ChannelMax)
return nil
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func pick(client, server int) int {
if client == 0 || server == 0 {
return max(client, server)
}
return min(client, server)
}
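A hedged end-to-end sketch of the connection lifecycle implemented above (illustrative, not part of the vendored file):

package main

import (
	"log"

	"github.com/streadway/amqp"
)

func main() {
	// Dial negotiates PLAIN auth from the URL and a 10s heartbeat.
	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Channel allocates an id via the allocator and opens it on the wire.
	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("connected to %d.%d server via %s", conn.Major, conn.Minor, conn.LocalAddr())
	_ = ch
}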

vendor/github.com/streadway/amqp/consumers.go generated vendored Normal file
@@ -0,0 +1,118 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp
package amqp
import (
"fmt"
"os"
"sync"
"sync/atomic"
)
var consumerSeq uint64
func uniqueConsumerTag() string {
return fmt.Sprintf("ctag-%s-%d", os.Args[0], atomic.AddUint64(&consumerSeq, 1))
}
type consumerBuffers map[string]chan *Delivery
// consumers is a concurrent type that manages the consumerTag ->
// ingress consumerBuffer mapping
type consumers struct {
sync.Mutex
chans consumerBuffers
}
func makeConsumers() *consumers {
return &consumers{chans: make(consumerBuffers)}
}
func bufferDeliveries(in chan *Delivery, out chan Delivery) {
var queue []*Delivery
var queueIn = in
for delivery := range in {
select {
case out <- *delivery:
// delivered immediately while the consumer chan can receive
default:
queue = append(queue, delivery)
}
for len(queue) > 0 {
select {
case out <- *queue[0]:
queue = queue[1:]
case delivery, open := <-queueIn:
if open {
queue = append(queue, delivery)
} else {
// stop receiving to drain the queue
queueIn = nil
}
}
}
}
close(out)
}
// On key conflict, close the previous channel.
func (me *consumers) add(tag string, consumer chan Delivery) {
me.Lock()
defer me.Unlock()
if prev, found := me.chans[tag]; found {
close(prev)
}
in := make(chan *Delivery)
go bufferDeliveries(in, consumer)
me.chans[tag] = in
}
func (me *consumers) close(tag string) (found bool) {
me.Lock()
defer me.Unlock()
ch, found := me.chans[tag]
if found {
delete(me.chans, tag)
close(ch)
}
return found
}
func (me *consumers) closeAll() {
me.Lock()
defer me.Unlock()
for _, ch := range me.chans {
close(ch)
}
me.chans = make(consumerBuffers)
}
// send delivers msg to the consumer identified by `tag`.
// If unbuffered channels are used for Consume this method
// could block all deliveries until the consumer
// receives on the other end of the channel.
func (me *consumers) send(tag string, msg *Delivery) bool {
me.Lock()
defer me.Unlock()
buffer, found := me.chans[tag]
if found {
buffer <- msg
}
return found
}
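The consumers registry above backs Channel.Consume (defined in channel.go, whose diff is suppressed). A hedged consumer-loop sketch using the public API, assuming a pre-declared queue named "work"; the Ack call is defined in delivery.go below:

package main

import (
	"log"

	"github.com/streadway/amqp"
)

func main() {
	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}

	// autoAck=false, so every delivery must be acknowledged explicitly.
	deliveries, err := ch.Consume("work", "", false, false, false, false, nil)
	if err != nil {
		log.Fatal(err)
	}
	for d := range deliveries {
		log.Printf("got %q via %s", d.Body, d.RoutingKey)
		if err := d.Ack(false); err != nil {
			log.Println("ack failed:", err)
		}
	}
}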

vendor/github.com/streadway/amqp/delivery.go generated vendored Normal file
@@ -0,0 +1,173 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp
package amqp
import (
"errors"
"time"
)
var errDeliveryNotInitialized = errors.New("delivery not initialized")
// Acknowledger notifies the server of successful or failed consumption of
// deliveries via the identifier found in the Delivery.DeliveryTag field.
//
// Applications can provide mock implementations in tests of Delivery handlers.
type Acknowledger interface {
Ack(tag uint64, multiple bool) error
Nack(tag uint64, multiple bool, requeue bool) error
Reject(tag uint64, requeue bool) error
}
// Delivery captures the fields for a previously delivered message resident in
// a queue to be delivered by the server to a consumer from Channel.Consume or
// Channel.Get.
type Delivery struct {
Acknowledger Acknowledger // the channel from which this delivery arrived
Headers Table // Application or header exchange table
// Properties
ContentType string // MIME content type
ContentEncoding string // MIME content encoding
DeliveryMode uint8 // queue implementation use - non-persistent (1) or persistent (2)
Priority uint8 // queue implementation use - 0 to 9
CorrelationId string // application use - correlation identifier
ReplyTo string // application use - address to reply to (ex: RPC)
Expiration string // implementation use - message expiration spec
MessageId string // application use - message identifier
Timestamp time.Time // application use - message timestamp
Type string // application use - message type name
UserId string // application use - creating user - should be authenticated user
AppId string // application use - creating application id
// Valid only with Channel.Consume
ConsumerTag string
// Valid only with Channel.Get
MessageCount uint32
DeliveryTag uint64
Redelivered bool
Exchange string // basic.publish exchange
RoutingKey string // basic.publish routing key
Body []byte
}
func newDelivery(channel *Channel, msg messageWithContent) *Delivery {
props, body := msg.getContent()
delivery := Delivery{
Acknowledger: channel,
Headers: props.Headers,
ContentType: props.ContentType,
ContentEncoding: props.ContentEncoding,
DeliveryMode: props.DeliveryMode,
Priority: props.Priority,
CorrelationId: props.CorrelationId,
ReplyTo: props.ReplyTo,
Expiration: props.Expiration,
MessageId: props.MessageId,
Timestamp: props.Timestamp,
Type: props.Type,
UserId: props.UserId,
AppId: props.AppId,
Body: body,
}
// Properties for the delivery types
switch m := msg.(type) {
case *basicDeliver:
delivery.ConsumerTag = m.ConsumerTag
delivery.DeliveryTag = m.DeliveryTag
delivery.Redelivered = m.Redelivered
delivery.Exchange = m.Exchange
delivery.RoutingKey = m.RoutingKey
case *basicGetOk:
delivery.MessageCount = m.MessageCount
delivery.DeliveryTag = m.DeliveryTag
delivery.Redelivered = m.Redelivered
delivery.Exchange = m.Exchange
delivery.RoutingKey = m.RoutingKey
}
return &delivery
}
/*
Ack delegates an acknowledgement through the Acknowledger interface that the
client or server has finished work on a delivery.
All deliveries in AMQP must be acknowledged. If you called Channel.Consume
with autoAck true then the server will automatically ack each message and
this method should not be called. Otherwise, you must call Delivery.Ack after
you have successfully processed this delivery.
When multiple is true, this delivery and all prior unacknowledged deliveries
on the same channel will be acknowledged. This is useful for batch processing
of deliveries.
An error will indicate that the acknowledge could not be delivered to the
channel it was sent from.
Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every
delivery that is not automatically acknowledged.
*/
func (me Delivery) Ack(multiple bool) error {
if me.Acknowledger == nil {
return errDeliveryNotInitialized
}
return me.Acknowledger.Ack(me.DeliveryTag, multiple)
}
/*
Reject delegates a negative acknowledgement through the Acknowledger interface.
When requeue is true, queue this message to be delivered to a consumer on a
different channel. When requeue is false or the server is unable to queue this
message, it will be dropped.
If you are batch processing deliveries, and your server supports it, prefer
Delivery.Nack.
Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every
delivery that is not automatically acknowledged.
*/
func (me Delivery) Reject(requeue bool) error {
if me.Acknowledger == nil {
return errDeliveryNotInitialized
}
return me.Acknowledger.Reject(me.DeliveryTag, requeue)
}
/*
Nack negatively acknowledges the delivery of message(s) identified by the
delivery tag from either the client or server.
When multiple is true, nack all unacknowledged messages up to and including
the message identified by this delivery tag on the same channel.
When requeue is true, request the server to deliver this message to a different
consumer. If it is not possible or requeue is false, the message will be
dropped or delivered to a server configured dead-letter queue.
This method must not be used to select or requeue messages the client wishes
not to handle, rather it is to inform the server that the client is incapable
of handling this message at this time.
Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every
delivery that is not automatically acknowledged.
*/
func (me Delivery) Nack(multiple, requeue bool) error {
if me.Acknowledger == nil {
return errDeliveryNotInitialized
}
return me.Acknowledger.Nack(me.DeliveryTag, multiple, requeue)
}
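
In practice the acknowledgement rules above look like this. A minimal consumer sketch against the public API, assuming a reachable broker at localhost and an existing queue named "task_queue" (both illustrative):

```go
package main

import (
	"log"

	"github.com/streadway/amqp"
)

func process(body []byte) error {
	log.Printf("got %d bytes", len(body))
	return nil
}

func main() {
	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}

	// autoAck is false, so every Delivery must be Ack'd, Nack'd or Rejected.
	deliveries, err := ch.Consume("task_queue", "", false, false, false, false, nil)
	if err != nil {
		log.Fatal(err)
	}

	for d := range deliveries {
		if err := process(d.Body); err != nil {
			// Requeue so another consumer can retry this delivery.
			d.Nack(false, true)
			continue
		}
		d.Ack(false)
	}
}
```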

108
vendor/github.com/streadway/amqp/doc.go generated vendored Normal file
View File

@ -0,0 +1,108 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp
/*
AMQP 0.9.1 client with RabbitMQ extensions
Understand the AMQP 0.9.1 messaging model by reviewing these links first. Much
of the terminology in this library directly relates to AMQP concepts.
Resources
http://www.rabbitmq.com/tutorials/amqp-concepts.html
http://www.rabbitmq.com/getstarted.html
http://www.rabbitmq.com/amqp-0-9-1-reference.html
Design
Most other broker clients publish to queues, but in AMQP, clients publish to
Exchanges instead. AMQP is programmable, meaning that both the producers and
consumers agree on the configuration of the broker, instead of requiring an
operator or system configuration that declares the logical topology in the
broker. The routing between producers and consumer queues is via Bindings.
These bindings form the logical topology of the broker.
In this library, a message sent from a publisher is called a "Publishing" and a
message received by a consumer is called a "Delivery". The fields of
Publishings and Deliveries are close but not exact mappings to the underlying
wire format to maintain stronger types. Many other libraries will combine
message properties with message headers. In this library, the message well
known properties are strongly typed fields on the Publishings and Deliveries,
whereas the user defined headers are in the Headers field.
The method naming closely matches the protocol's method name with positional
parameters mapping to named protocol message fields. The motivation here is to
present a comprehensive view over all possible interactions with the server.
Generally, methods that map to protocol methods of the "basic" class will be
elided in this interface, and "select" methods of various channel mode selectors
will be elided, for example Channel.Confirm and Channel.Tx.
The library is intentionally designed to be synchronous, where responses for
each protocol message are required to be received in an RPC manner. Some
methods have a noWait parameter like Channel.QueueDeclare, and some methods are
asynchronous like Channel.Publish. The error values should still be checked for
these methods as they will indicate IO failures like when the underlying
connection closes.
Asynchronous Events
Clients of this library may be interested in receiving some of the protocol
messages other than Deliveries like basic.ack methods while a channel is in
confirm mode.
The Notify* methods with Connection and Channel receivers model the pattern of
asynchronous events like closes due to exceptions, or messages that are sent out
of band from an RPC call like basic.ack or basic.flow.
Any asynchronous events, including Deliveries and Publishings, must always have
a receiver until the corresponding chans are closed. Without asynchronous
receivers, the synchronous methods will block.
Use Case
It's important as a client to an AMQP topology to ensure the state of the
broker matches your expectations. For both publish and consume use cases,
make sure you declare the queues, exchanges and bindings you expect to exist
prior to calling Channel.Publish or Channel.Consume.
// Connections start with amqp.Dial() typically from a command line argument
// or environment variable.
connection, err := amqp.Dial(os.Getenv("AMQP_URL"))
// To cleanly shutdown by flushing kernel buffers, make sure to close and
// wait for the response.
defer connection.Close()
// Most operations happen on a channel. If any error is returned on a
// channel, the channel will no longer be valid, throw it away and try with
// a different channel. If you use many channels, it's useful for the
// server to
channel, err := connection.Channel()
// Declare your topology here, if it doesn't exist, it will be created, if
// it existed already and is not what you expect, then that's considered an
// error.
// Use your connection on this topology with either Publish or Consume, or
// inspect your queues with QueueInspect. It's unwise to mix Publish and
// Consume to let TCP do its job well.
SSL/TLS - Secure connections
When Dial encounters an amqps:// scheme, it will use the zero value of a
tls.Config. This will only perform server certificate and host verification.
Use DialTLS when you wish to provide a client certificate (recommended),
include a private certificate authority's certificate in the cert chain for
server validity, or run insecure by not verifying the server certificate when
you dial your own connection. DialTLS will use the provided tls.Config when it
encounters an amqps:// scheme and will dial a plain connection when it
encounters an amqp:// scheme.
SSL/TLS in RabbitMQ is documented here: http://www.rabbitmq.com/ssl.html
*/
package amqp
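
A hedged sketch of the DialTLS path described above, assuming illustrative certificate paths and broker hostname:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"

	"github.com/streadway/amqp"
)

func main() {
	cfg := &tls.Config{RootCAs: x509.NewCertPool()}

	// Trust a private CA for server verification (path is illustrative).
	if ca, err := ioutil.ReadFile("/etc/ssl/cacert.pem"); err == nil {
		cfg.RootCAs.AppendCertsFromPEM(ca)
	}

	// Present a client certificate (recommended; paths are illustrative).
	if cert, err := tls.LoadX509KeyPair("/etc/ssl/client.pem", "/etc/ssl/client.key"); err == nil {
		cfg.Certificates = append(cfg.Certificates, cert)
	}

	conn, err := amqp.DialTLS("amqps://broker.example.com/", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```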

16
vendor/github.com/streadway/amqp/fuzz.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// +build gofuzz
package amqp
import "bytes"
func Fuzz(data []byte) int {
r := reader{bytes.NewReader(data)}
frame, err := r.ReadFrame()
if err != nil {
if frame != nil {
panic("frame is not nil")
}
return 0
}
return 1
}

2
vendor/github.com/streadway/amqp/gen.sh generated vendored Executable file
View File

@ -0,0 +1,2 @@
#!/bin/sh
go run spec/gen.go < spec/amqp0-9-1.stripped.extended.xml | gofmt > spec091.go

447
vendor/github.com/streadway/amqp/read.go generated vendored Normal file
View File

@ -0,0 +1,447 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp
package amqp
import (
"bytes"
"encoding/binary"
"errors"
"io"
"time"
)
/*
Reads a frame from an input stream and returns an interface that can be cast into
one of the following:
methodFrame
PropertiesFrame
bodyFrame
heartbeatFrame
2.3.5 frame Details
All frames consist of a header (7 octets), a payload of arbitrary size, and a
'frame-end' octet that detects malformed frames:
0 1 3 7 size+7 size+8
+------+---------+-------------+ +------------+ +-----------+
| type | channel | size | | payload | | frame-end |
+------+---------+-------------+ +------------+ +-----------+
octet short long size octets octet
To read a frame, we:
1. Read the header and check the frame type and channel.
2. Depending on the frame type, we read the payload and process it.
3. Read the frame end octet.
In realistic implementations where performance is a concern, we would use
read-ahead buffering or gathering reads to avoid doing three separate
system calls to read a frame.
*/
func (me *reader) ReadFrame() (frame frame, err error) {
var scratch [7]byte
if _, err = io.ReadFull(me.r, scratch[:7]); err != nil {
return
}
typ := uint8(scratch[0])
channel := binary.BigEndian.Uint16(scratch[1:3])
size := binary.BigEndian.Uint32(scratch[3:7])
switch typ {
case frameMethod:
if frame, err = me.parseMethodFrame(channel, size); err != nil {
return
}
case frameHeader:
if frame, err = me.parseHeaderFrame(channel, size); err != nil {
return
}
case frameBody:
if frame, err = me.parseBodyFrame(channel, size); err != nil {
return nil, err
}
case frameHeartbeat:
if frame, err = me.parseHeartbeatFrame(channel, size); err != nil {
return
}
default:
return nil, ErrFrame
}
if _, err = io.ReadFull(me.r, scratch[:1]); err != nil {
return nil, err
}
if scratch[0] != frameEnd {
return nil, ErrFrame
}
return
}
func readShortstr(r io.Reader) (v string, err error) {
var length uint8
if err = binary.Read(r, binary.BigEndian, &length); err != nil {
return
}
bytes := make([]byte, length)
if _, err = io.ReadFull(r, bytes); err != nil {
return
}
return string(bytes), nil
}
func readLongstr(r io.Reader) (v string, err error) {
var length uint32
if err = binary.Read(r, binary.BigEndian, &length); err != nil {
return
}
bytes := make([]byte, length)
if _, err = io.ReadFull(r, bytes); err != nil {
return
}
return string(bytes), nil
}
func readDecimal(r io.Reader) (v Decimal, err error) {
if err = binary.Read(r, binary.BigEndian, &v.Scale); err != nil {
return
}
if err = binary.Read(r, binary.BigEndian, &v.Value); err != nil {
return
}
return
}
func readFloat32(r io.Reader) (v float32, err error) {
if err = binary.Read(r, binary.BigEndian, &v); err != nil {
return
}
return
}
func readFloat64(r io.Reader) (v float64, err error) {
if err = binary.Read(r, binary.BigEndian, &v); err != nil {
return
}
return
}
func readTimestamp(r io.Reader) (v time.Time, err error) {
var sec int64
if err = binary.Read(r, binary.BigEndian, &sec); err != nil {
return
}
return time.Unix(sec, 0), nil
}
/*
'A': []interface{}
'D': Decimal
'F': Table
'I': int32
'S': string
'T': time.Time
'V': nil
'b': byte
'd': float64
'f': float32
'l': int64
's': int16
't': bool
'x': []byte
*/
func readField(r io.Reader) (v interface{}, err error) {
var typ byte
if err = binary.Read(r, binary.BigEndian, &typ); err != nil {
return
}
switch typ {
case 't':
var value uint8
if err = binary.Read(r, binary.BigEndian, &value); err != nil {
return
}
return (value != 0), nil
case 'b':
var value [1]byte
if _, err = io.ReadFull(r, value[0:1]); err != nil {
return
}
return value[0], nil
case 's':
var value int16
if err = binary.Read(r, binary.BigEndian, &value); err != nil {
return
}
return value, nil
case 'I':
var value int32
if err = binary.Read(r, binary.BigEndian, &value); err != nil {
return
}
return value, nil
case 'l':
var value int64
if err = binary.Read(r, binary.BigEndian, &value); err != nil {
return
}
return value, nil
case 'f':
var value float32
if err = binary.Read(r, binary.BigEndian, &value); err != nil {
return
}
return value, nil
case 'd':
var value float64
if err = binary.Read(r, binary.BigEndian, &value); err != nil {
return
}
return value, nil
case 'D':
return readDecimal(r)
case 'S':
return readLongstr(r)
case 'A':
return readArray(r)
case 'T':
return readTimestamp(r)
case 'F':
return readTable(r)
case 'x':
var len int32
if err = binary.Read(r, binary.BigEndian, &len); err != nil {
return nil, err
}
value := make([]byte, len)
if _, err = io.ReadFull(r, value); err != nil {
return nil, err
}
return value, err
case 'V':
return nil, nil
}
return nil, ErrSyntax
}
/*
Field tables are long strings that contain packed name-value pairs. The
name-value pairs are encoded as short string defining the name, and octet
defining the values type and then the value itself. The valid field types for
tables are an extension of the native integer, bit, string, and timestamp
types, and are shown in the grammar. Multi-octet integer fields are always
held in network byte order.
*/
func readTable(r io.Reader) (table Table, err error) {
var nested bytes.Buffer
var str string
if str, err = readLongstr(r); err != nil {
return
}
nested.Write([]byte(str))
table = make(Table)
for nested.Len() > 0 {
var key string
var value interface{}
if key, err = readShortstr(&nested); err != nil {
return
}
if value, err = readField(&nested); err != nil {
return
}
table[key] = value
}
return
}
func readArray(r io.Reader) ([]interface{}, error) {
var size uint32
var err error
if err = binary.Read(r, binary.BigEndian, &size); err != nil {
return nil, err
}
lim := &io.LimitedReader{R: r, N: int64(size)}
arr := make([]interface{}, 0)
var field interface{}
for {
if field, err = readField(lim); err != nil {
if err == io.EOF {
break
}
return nil, err
}
arr = append(arr, field)
}
return arr, nil
}
// Checks if this bit mask matches the flags bitset
func hasProperty(mask uint16, prop int) bool {
return int(mask)&prop > 0
}
func (me *reader) parseHeaderFrame(channel uint16, size uint32) (frame frame, err error) {
hf := &headerFrame{
ChannelId: channel,
}
if err = binary.Read(me.r, binary.BigEndian, &hf.ClassId); err != nil {
return
}
if err = binary.Read(me.r, binary.BigEndian, &hf.weight); err != nil {
return
}
if err = binary.Read(me.r, binary.BigEndian, &hf.Size); err != nil {
return
}
var flags uint16
if err = binary.Read(me.r, binary.BigEndian, &flags); err != nil {
return
}
if hasProperty(flags, flagContentType) {
if hf.Properties.ContentType, err = readShortstr(me.r); err != nil {
return
}
}
if hasProperty(flags, flagContentEncoding) {
if hf.Properties.ContentEncoding, err = readShortstr(me.r); err != nil {
return
}
}
if hasProperty(flags, flagHeaders) {
if hf.Properties.Headers, err = readTable(me.r); err != nil {
return
}
}
if hasProperty(flags, flagDeliveryMode) {
if err = binary.Read(me.r, binary.BigEndian, &hf.Properties.DeliveryMode); err != nil {
return
}
}
if hasProperty(flags, flagPriority) {
if err = binary.Read(me.r, binary.BigEndian, &hf.Properties.Priority); err != nil {
return
}
}
if hasProperty(flags, flagCorrelationId) {
if hf.Properties.CorrelationId, err = readShortstr(me.r); err != nil {
return
}
}
if hasProperty(flags, flagReplyTo) {
if hf.Properties.ReplyTo, err = readShortstr(me.r); err != nil {
return
}
}
if hasProperty(flags, flagExpiration) {
if hf.Properties.Expiration, err = readShortstr(me.r); err != nil {
return
}
}
if hasProperty(flags, flagMessageId) {
if hf.Properties.MessageId, err = readShortstr(me.r); err != nil {
return
}
}
if hasProperty(flags, flagTimestamp) {
if hf.Properties.Timestamp, err = readTimestamp(me.r); err != nil {
return
}
}
if hasProperty(flags, flagType) {
if hf.Properties.Type, err = readShortstr(me.r); err != nil {
return
}
}
if hasProperty(flags, flagUserId) {
if hf.Properties.UserId, err = readShortstr(me.r); err != nil {
return
}
}
if hasProperty(flags, flagAppId) {
if hf.Properties.AppId, err = readShortstr(me.r); err != nil {
return
}
}
if hasProperty(flags, flagReserved1) {
if hf.Properties.reserved1, err = readShortstr(me.r); err != nil {
return
}
}
return hf, nil
}
func (me *reader) parseBodyFrame(channel uint16, size uint32) (frame frame, err error) {
bf := &bodyFrame{
ChannelId: channel,
Body: make([]byte, size),
}
if _, err = io.ReadFull(me.r, bf.Body); err != nil {
return nil, err
}
return bf, nil
}
var errHeartbeatPayload = errors.New("Heartbeats should not have a payload")
func (me *reader) parseHeartbeatFrame(channel uint16, size uint32) (frame frame, err error) {
hf := &heartbeatFrame{
ChannelId: channel,
}
if size > 0 {
return nil, errHeartbeatPayload
}
return hf, nil
}
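
The 7-octet header layout above can be parsed in isolation. A standalone sketch (parseFrameHeader is an illustrative helper, not part of this package):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// parseFrameHeader reads the fixed 7-octet header described above:
// type (octet), channel (short), size (long), all big-endian.
func parseFrameHeader(r io.Reader) (typ uint8, channel uint16, size uint32, err error) {
	var scratch [7]byte
	if _, err = io.ReadFull(r, scratch[:]); err != nil {
		return
	}
	typ = scratch[0]
	channel = binary.BigEndian.Uint16(scratch[1:3])
	size = binary.BigEndian.Uint32(scratch[3:7])
	return
}

func main() {
	// A heartbeat frame header: type 8, channel 0, size 0 (per AMQP 0.9.1).
	typ, ch, size, err := parseFrameHeader(bytes.NewReader([]byte{8, 0, 0, 0, 0, 0, 0}))
	fmt.Println(typ, ch, size, err) // 8 0 0 <nil>
}
```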

64
vendor/github.com/streadway/amqp/return.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp
package amqp
import (
"time"
)
// Return captures a flattened struct of fields returned by the server when a
// Publishing is unable to be delivered either due to the `mandatory` flag set
// and no route found, or `immediate` flag set and no free consumer.
type Return struct {
ReplyCode uint16 // reason
ReplyText string // description
Exchange string // basic.publish exchange
RoutingKey string // basic.publish routing key
// Properties
ContentType string // MIME content type
ContentEncoding string // MIME content encoding
Headers Table // Application or header exchange table
DeliveryMode uint8 // queue implementation use - non-persistent (1) or persistent (2)
Priority uint8 // queue implementation use - 0 to 9
CorrelationId string // application use - correlation identifier
ReplyTo string // application use - address to reply to (ex: RPC)
Expiration string // implementation use - message expiration spec
MessageId string // application use - message identifier
Timestamp time.Time // application use - message timestamp
Type string // application use - message type name
UserId string // application use - creating user id
AppId string // application use - creating application
Body []byte
}
func newReturn(msg basicReturn) *Return {
props, body := msg.getContent()
return &Return{
ReplyCode: msg.ReplyCode,
ReplyText: msg.ReplyText,
Exchange: msg.Exchange,
RoutingKey: msg.RoutingKey,
Headers: props.Headers,
ContentType: props.ContentType,
ContentEncoding: props.ContentEncoding,
DeliveryMode: props.DeliveryMode,
Priority: props.Priority,
CorrelationId: props.CorrelationId,
ReplyTo: props.ReplyTo,
Expiration: props.Expiration,
MessageId: props.MessageId,
Timestamp: props.Timestamp,
Type: props.Type,
UserId: props.UserId,
AppId: props.AppId,
Body: body,
}
}
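
Returns only arrive if the publisher listens for them. A sketch using the public NotifyReturn and Publish APIs, assuming a reachable broker at localhost (illustrative):

```go
package main

import (
	"log"
	"time"

	"github.com/streadway/amqp"
)

func main() {
	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}

	// Returned messages arrive asynchronously and must be drained.
	returns := ch.NotifyReturn(make(chan amqp.Return, 1))
	go func() {
		for r := range returns {
			log.Printf("returned %q: %d %s", r.RoutingKey, r.ReplyCode, r.ReplyText)
		}
	}()

	// mandatory=true: with no queue bound to this key, the server returns
	// the message instead of silently dropping it.
	if err := ch.Publish("", "no-such-queue", true, false, amqp.Publishing{Body: []byte("hi")}); err != nil {
		log.Fatal(err)
	}
	time.Sleep(time.Second) // give the return a moment to arrive
}
```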

3306
vendor/github.com/streadway/amqp/spec091.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

390
vendor/github.com/streadway/amqp/types.go generated vendored Normal file
View File

@ -0,0 +1,390 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp
package amqp
import (
"fmt"
"io"
"time"
)
var (
// Errors that this library could return/emit from a channel or connection
ErrClosed = &Error{Code: ChannelError, Reason: "channel/connection is not open"}
ErrChannelMax = &Error{Code: ChannelError, Reason: "channel id space exhausted"}
ErrSASL = &Error{Code: AccessRefused, Reason: "SASL could not negotiate a shared mechanism"}
ErrCredentials = &Error{Code: AccessRefused, Reason: "username or password not allowed"}
ErrVhost = &Error{Code: AccessRefused, Reason: "no access to this vhost"}
ErrSyntax = &Error{Code: SyntaxError, Reason: "invalid field or value inside of a frame"}
ErrFrame = &Error{Code: FrameError, Reason: "frame could not be parsed"}
ErrCommandInvalid = &Error{Code: CommandInvalid, Reason: "unexpected command received"}
ErrUnexpectedFrame = &Error{Code: UnexpectedFrame, Reason: "unexpected frame received"}
ErrFieldType = &Error{Code: SyntaxError, Reason: "unsupported table field type"}
)
// Error captures the code and reason a channel or connection has been closed
// by the server.
type Error struct {
Code int // constant code from the specification
Reason string // description of the error
Server bool // true when initiated from the server, false when from this library
Recover bool // true when this error can be recovered by retrying later or with different parameters
}
func newError(code uint16, text string) *Error {
return &Error{
Code: int(code),
Reason: text,
Recover: isSoftExceptionCode(int(code)),
Server: true,
}
}
func (me Error) Error() string {
return fmt.Sprintf("Exception (%d) Reason: %q", me.Code, me.Reason)
}
// Used by header frames to capture routing and header information
type properties struct {
ContentType string // MIME content type
ContentEncoding string // MIME content encoding
Headers Table // Application or header exchange table
DeliveryMode uint8 // queue implementation use - Transient (1) or Persistent (2)
Priority uint8 // queue implementation use - 0 to 9
CorrelationId string // application use - correlation identifier
ReplyTo string // application use - address to reply to (ex: RPC)
Expiration string // implementation use - message expiration spec
MessageId string // application use - message identifier
Timestamp time.Time // application use - message timestamp
Type string // application use - message type name
UserId string // application use - creating user id
AppId string // application use - creating application
reserved1 string // was cluster-id - process for buffer consumption
}
// DeliveryMode. Transient means higher throughput but messages will not be
// restored on broker restart. The delivery mode of publishings is unrelated
// to the durability of the queues they reside on. Transient messages will
// not be restored to durable queues, persistent messages will be restored to
// durable queues and lost on non-durable queues during server restart.
//
// This remains typed as uint8 to match Publishing.DeliveryMode. Other
// delivery modes specific to custom queue implementations are not enumerated
// here.
const (
Transient uint8 = 1
Persistent uint8 = 2
)
// The property flags are an array of bits that indicate the presence or
// absence of each property value in sequence. The bits are ordered from the
// most significant to the least significant - bit 15 indicates the first property.
const (
flagContentType = 0x8000
flagContentEncoding = 0x4000
flagHeaders = 0x2000
flagDeliveryMode = 0x1000
flagPriority = 0x0800
flagCorrelationId = 0x0400
flagReplyTo = 0x0200
flagExpiration = 0x0100
flagMessageId = 0x0080
flagTimestamp = 0x0040
flagType = 0x0020
flagUserId = 0x0010
flagAppId = 0x0008
flagReserved1 = 0x0004
)
// Queue captures the current server state of the queue on the server returned
// from Channel.QueueDeclare or Channel.QueueInspect.
type Queue struct {
Name string // server confirmed or generated name
Messages int // count of messages not awaiting acknowledgment
Consumers int // number of consumers receiving deliveries
}
// Publishing captures the client message sent to the server. The fields
// outside of the Headers table included in this struct mirror the underlying
// fields in the content frame. They use native types for convenience and
// efficiency.
type Publishing struct {
// Application or exchange specific fields,
// the headers exchange will inspect this field.
Headers Table
// Properties
ContentType string // MIME content type
ContentEncoding string // MIME content encoding
DeliveryMode uint8 // Transient (0 or 1) or Persistent (2)
Priority uint8 // 0 to 9
CorrelationId string // correlation identifier
ReplyTo string // address to reply to (ex: RPC)
Expiration string // message expiration spec
MessageId string // message identifier
Timestamp time.Time // message timestamp
Type string // message type name
UserId string // creating user id - ex: "guest"
AppId string // creating application id
// The application specific payload of the message
Body []byte
}
// Blocking notifies the server's TCP flow control of the Connection. When a
// server hits a memory or disk alarm it will block all connections until the
// resources are reclaimed. Use NotifyBlock on the Connection to receive these
// events.
type Blocking struct {
Active bool // TCP pushback active/inactive on server
Reason string // Server reason for activation
}
// Confirmation notifies the acknowledgment or negative acknowledgement of a
// publishing identified by its delivery tag. Use NotifyPublish on the Channel
// to consume these events.
type Confirmation struct {
DeliveryTag uint64 // A 1 based counter of publishings from when the channel was put in Confirm mode
Ack bool // True when the server successfully received the publishing
}
// Decimal matches the AMQP decimal type. Scale is the number of decimal
// digits; e.g. Scale == 2, Value == 12345 represents 123.45
type Decimal struct {
Scale uint8
Value int32
}
// Table stores user supplied fields of the following types:
//
// bool
// byte
// float32
// float64
// int16
// int32
// int64
// nil
// string
// time.Time
// amqp.Decimal
// amqp.Table
// []byte
// []interface{} - containing above types
//
// Functions taking a table will immediately fail when the table contains a
// value of an unsupported type.
//
// The caller must be specific in which precision of integer it wishes to
// encode.
//
// Use a type assertion when reading values from a table for type conversion.
//
// RabbitMQ expects int32 for integer values.
//
type Table map[string]interface{}
func validateField(f interface{}) error {
switch fv := f.(type) {
case nil, bool, byte, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time:
return nil
case []interface{}:
for _, v := range fv {
if err := validateField(v); err != nil {
return fmt.Errorf("in array %s", err)
}
}
return nil
case Table:
for k, v := range fv {
if err := validateField(v); err != nil {
return fmt.Errorf("table field %q %s", k, err)
}
}
return nil
}
return fmt.Errorf("value %t not supported", f)
}
func (t Table) Validate() error {
return validateField(t)
}
// Heap interface for maintaining delivery tags
type tagSet []uint64
func (me tagSet) Len() int { return len(me) }
func (me tagSet) Less(i, j int) bool { return (me)[i] < (me)[j] }
func (me tagSet) Swap(i, j int) { (me)[i], (me)[j] = (me)[j], (me)[i] }
func (me *tagSet) Push(tag interface{}) { *me = append(*me, tag.(uint64)) }
func (me *tagSet) Pop() interface{} {
val := (*me)[len(*me)-1]
*me = (*me)[:len(*me)-1]
return val
}
type message interface {
id() (uint16, uint16)
wait() bool
read(io.Reader) error
write(io.Writer) error
}
type messageWithContent interface {
message
getContent() (properties, []byte)
setContent(properties, []byte)
}
/*
The base interface implemented as:
2.3.5 frame Details
All frames consist of a header (7 octets), a payload of arbitrary size, and a 'frame-end' octet that detects
malformed frames:
0 1 3 7 size+7 size+8
+------+---------+-------------+ +------------+ +-----------+
| type | channel | size | | payload | | frame-end |
+------+---------+-------------+ +------------+ +-----------+
octet short long size octets octet
To read a frame, we:
1. Read the header and check the frame type and channel.
2. Depending on the frame type, we read the payload and process it.
3. Read the frame end octet.
In realistic implementations where performance is a concern, we would use
read-ahead buffering or gathering reads to avoid doing three separate
system calls to read a frame.
*/
type frame interface {
write(io.Writer) error
channel() uint16
}
type reader struct {
r io.Reader
}
type writer struct {
w io.Writer
}
// Implements the frame interface for Connection RPC
type protocolHeader struct{}
func (protocolHeader) write(w io.Writer) error {
_, err := w.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1})
return err
}
func (protocolHeader) channel() uint16 {
panic("only valid as initial handshake")
}
/*
Method frames carry the high-level protocol commands (which we call "methods").
One method frame carries one command. The method frame payload has this format:
0 2 4
+----------+-----------+-------------- - -
| class-id | method-id | arguments...
+----------+-----------+-------------- - -
short short ...
To process a method frame, we:
1. Read the method frame payload.
2. Unpack it into a structure. A given method always has the same structure,
so we can unpack the method rapidly.
3. Check that the method is allowed in the current context.
4. Check that the method arguments are valid.
5. Execute the method.
Method frame bodies are constructed as a list of AMQP data fields (bits,
integers, strings and string tables). The marshalling code is trivially
generated directly from the protocol specifications, and can be very rapid.
*/
type methodFrame struct {
ChannelId uint16
ClassId uint16
MethodId uint16
Method message
}
func (me *methodFrame) channel() uint16 { return me.ChannelId }
/*
Heartbeating is a technique designed to undo one of TCP/IP's features, namely
its ability to recover from a broken physical connection by closing only after
a quite long time-out. In some scenarios we need to know very rapidly if a
peer is disconnected or not responding for other reasons (e.g. it is looping).
Since heartbeating can be done at a low level, we implement this as a special
type of frame that peers exchange at the transport level, rather than as a
class method.
*/
type heartbeatFrame struct {
ChannelId uint16
}
func (me *heartbeatFrame) channel() uint16 { return me.ChannelId }
/*
Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally
defined as carrying content. When a peer sends such a method frame, it always
follows it with a content header and zero or more content body frames.
A content header frame has this format:
0 2 4 12 14
+----------+--------+-----------+----------------+------------- - -
| class-id | weight | body size | property flags | property list...
+----------+--------+-----------+----------------+------------- - -
short short long long short remainder...
We place content body in distinct frames (rather than including it in the
method) so that AMQP may support "zero copy" techniques in which content is
never marshalled or encoded. We place the content properties in their own
frame so that recipients can selectively discard contents they do not want to
process.
*/
type headerFrame struct {
ChannelId uint16
ClassId uint16
weight uint16
Size uint64
Properties properties
}
func (me *headerFrame) channel() uint16 { return me.ChannelId }
/*
Content is the application data we carry from client-to-client via the AMQP
server. Content is, roughly speaking, a set of properties plus a binary data
part. The set of allowed properties are defined by the Basic class, and these
form the "content header frame". The data can be any size, and MAY be broken
into several (or many) chunks, each forming a "content body frame".
Looking at the frames for a specific channel, as they pass on the wire, we
might see something like this:
[method]
[method] [header] [body] [body]
[method]
...
*/
type bodyFrame struct {
ChannelId uint16
Body []byte
}
func (me *bodyFrame) channel() uint16 { return me.ChannelId }
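
A short sketch of validating a Table up front, per the supported-types list above; the argument keys are illustrative RabbitMQ extensions:

```go
package main

import (
	"fmt"

	"github.com/streadway/amqp"
)

func main() {
	// RabbitMQ expects int32 for integer arguments, so be explicit.
	args := amqp.Table{
		"x-message-ttl":          int32(60000),
		"x-dead-letter-exchange": "dlx",
	}
	fmt.Println(args.Validate()) // <nil>

	// Unsupported field types are rejected up front.
	bad := amqp.Table{"count": uint64(1)}
	fmt.Println(bad.Validate()) // non-nil error
}
```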

170
vendor/github.com/streadway/amqp/uri.go generated vendored Normal file
View File

@ -0,0 +1,170 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp
package amqp
import (
"errors"
"fmt"
"net/url"
"strconv"
"strings"
)
var errURIScheme = errors.New("AMQP scheme must be either 'amqp://' or 'amqps://'")
var schemePorts = map[string]int{
"amqp": 5672,
"amqps": 5671,
}
var defaultURI = URI{
Scheme: "amqp",
Host: "localhost",
Port: 5672,
Username: "guest",
Password: "guest",
Vhost: "/",
}
// URI represents a parsed AMQP URI string.
type URI struct {
Scheme string
Host string
Port int
Username string
Password string
Vhost string
}
// ParseURI attempts to parse the given AMQP URI according to the spec.
// See http://www.rabbitmq.com/uri-spec.html.
//
// Default values for the fields are:
//
// Scheme: amqp
// Host: localhost
// Port: 5672
// Username: guest
// Password: guest
// Vhost: /
//
func ParseURI(uri string) (URI, error) {
me := defaultURI
u, err := url.Parse(uri)
if err != nil {
return me, err
}
defaultPort, okScheme := schemePorts[u.Scheme]
if okScheme {
me.Scheme = u.Scheme
} else {
return me, errURIScheme
}
host, port := splitHostPort(u.Host)
if host != "" {
me.Host = host
}
if port != "" {
port32, err := strconv.ParseInt(port, 10, 32)
if err != nil {
return me, err
}
me.Port = int(port32)
} else {
me.Port = defaultPort
}
if u.User != nil {
me.Username = u.User.Username()
if password, ok := u.User.Password(); ok {
me.Password = password
}
}
if u.Path != "" {
if strings.HasPrefix(u.Path, "/") {
if u.Host == "" && strings.HasPrefix(u.Path, "///") {
// net/url doesn't handle local context authorities and leaves that up
// to the scheme handler. In our case, we translate amqp:/// into the
// default host and whatever the vhost should be
if len(u.Path) > 3 {
me.Vhost = u.Path[3:]
}
} else if len(u.Path) > 1 {
me.Vhost = u.Path[1:]
}
} else {
me.Vhost = u.Path
}
}
return me, nil
}
// Splits host:port, host, [ho:st]:port, or [ho:st]. Unlike net.SplitHostPort
// which splits :port, host:port or [host]:port
//
// Handles hosts that have colons that are in brackets like [::1]:http
func splitHostPort(addr string) (host, port string) {
i := strings.LastIndex(addr, ":")
if i >= 0 {
host, port = addr[:i], addr[i+1:]
if len(port) > 0 && port[len(port)-1] == ']' && addr[0] == '[' {
// we've split on an inner colon, the port was missing outside of the
// brackets so use the full addr. We could assert that host should not
// contain any colons here
host, port = addr, ""
}
} else {
host = addr
}
return
}
// PlainAuth returns a PlainAuth structure based on the parsed URI's
// Username and Password fields.
func (me URI) PlainAuth() *PlainAuth {
return &PlainAuth{
Username: me.Username,
Password: me.Password,
}
}
func (me URI) String() string {
var authority string
if me.Username != defaultURI.Username || me.Password != defaultURI.Password {
authority += me.Username
if me.Password != defaultURI.Password {
authority += ":" + me.Password
}
authority += "@"
}
authority += me.Host
if defaultPort, found := schemePorts[me.Scheme]; !found || defaultPort != me.Port {
authority += ":" + strconv.FormatInt(int64(me.Port), 10)
}
var vhost string
if me.Vhost != defaultURI.Vhost {
vhost = me.Vhost
}
return fmt.Sprintf("%s://%s/%s", me.Scheme, authority, url.QueryEscape(vhost))
}
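
A sketch of ParseURI and the defaulting rules documented above (hostname and credentials are illustrative):

```go
package main

import (
	"fmt"

	"github.com/streadway/amqp"
)

func main() {
	uri, err := amqp.ParseURI("amqps://user:pass@broker.example.com/myvhost")
	if err != nil {
		panic(err)
	}
	// The amqps scheme defaults the port to 5671.
	fmt.Println(uri.Scheme, uri.Host, uri.Port, uri.Vhost)
	fmt.Println(uri.String())
}
```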

411
vendor/github.com/streadway/amqp/write.go generated vendored Normal file
View File

@ -0,0 +1,411 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp
package amqp
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"io"
"math"
"time"
)
func (me *writer) WriteFrame(frame frame) (err error) {
if err = frame.write(me.w); err != nil {
return
}
if buf, ok := me.w.(*bufio.Writer); ok {
err = buf.Flush()
}
return
}
func (me *methodFrame) write(w io.Writer) (err error) {
var payload bytes.Buffer
if me.Method == nil {
return errors.New("malformed frame: missing method")
}
class, method := me.Method.id()
if err = binary.Write(&payload, binary.BigEndian, class); err != nil {
return
}
if err = binary.Write(&payload, binary.BigEndian, method); err != nil {
return
}
if err = me.Method.write(&payload); err != nil {
return
}
return writeFrame(w, frameMethod, me.ChannelId, payload.Bytes())
}
// Heartbeat
//
// Payload is empty
func (me *heartbeatFrame) write(w io.Writer) (err error) {
return writeFrame(w, frameHeartbeat, me.ChannelId, []byte{})
}
// CONTENT HEADER
// 0 2 4 12 14
// +----------+--------+-----------+----------------+------------- - -
// | class-id | weight | body size | property flags | property list...
// +----------+--------+-----------+----------------+------------- - -
// short short long long short remainder...
//
func (me *headerFrame) write(w io.Writer) (err error) {
var payload bytes.Buffer
var zeroTime time.Time
if err = binary.Write(&payload, binary.BigEndian, me.ClassId); err != nil {
return
}
if err = binary.Write(&payload, binary.BigEndian, me.weight); err != nil {
return
}
if err = binary.Write(&payload, binary.BigEndian, me.Size); err != nil {
return
}
// First pass will build the mask to be serialized, second pass will serialize
// each of the fields that appear in the mask.
var mask uint16
if len(me.Properties.ContentType) > 0 {
mask = mask | flagContentType
}
if len(me.Properties.ContentEncoding) > 0 {
mask = mask | flagContentEncoding
}
if me.Properties.Headers != nil && len(me.Properties.Headers) > 0 {
mask = mask | flagHeaders
}
if me.Properties.DeliveryMode > 0 {
mask = mask | flagDeliveryMode
}
if me.Properties.Priority > 0 {
mask = mask | flagPriority
}
if len(me.Properties.CorrelationId) > 0 {
mask = mask | flagCorrelationId
}
if len(me.Properties.ReplyTo) > 0 {
mask = mask | flagReplyTo
}
if len(me.Properties.Expiration) > 0 {
mask = mask | flagExpiration
}
if len(me.Properties.MessageId) > 0 {
mask = mask | flagMessageId
}
if me.Properties.Timestamp != zeroTime {
mask = mask | flagTimestamp
}
if len(me.Properties.Type) > 0 {
mask = mask | flagType
}
if len(me.Properties.UserId) > 0 {
mask = mask | flagUserId
}
if len(me.Properties.AppId) > 0 {
mask = mask | flagAppId
}
if err = binary.Write(&payload, binary.BigEndian, mask); err != nil {
return
}
if hasProperty(mask, flagContentType) {
if err = writeShortstr(&payload, me.Properties.ContentType); err != nil {
return
}
}
if hasProperty(mask, flagContentEncoding) {
if err = writeShortstr(&payload, me.Properties.ContentEncoding); err != nil {
return
}
}
if hasProperty(mask, flagHeaders) {
if err = writeTable(&payload, me.Properties.Headers); err != nil {
return
}
}
if hasProperty(mask, flagDeliveryMode) {
if err = binary.Write(&payload, binary.BigEndian, me.Properties.DeliveryMode); err != nil {
return
}
}
if hasProperty(mask, flagPriority) {
if err = binary.Write(&payload, binary.BigEndian, me.Properties.Priority); err != nil {
return
}
}
if hasProperty(mask, flagCorrelationId) {
if err = writeShortstr(&payload, me.Properties.CorrelationId); err != nil {
return
}
}
if hasProperty(mask, flagReplyTo) {
if err = writeShortstr(&payload, me.Properties.ReplyTo); err != nil {
return
}
}
if hasProperty(mask, flagExpiration) {
if err = writeShortstr(&payload, me.Properties.Expiration); err != nil {
return
}
}
if hasProperty(mask, flagMessageId) {
if err = writeShortstr(&payload, me.Properties.MessageId); err != nil {
return
}
}
if hasProperty(mask, flagTimestamp) {
if err = binary.Write(&payload, binary.BigEndian, uint64(me.Properties.Timestamp.Unix())); err != nil {
return
}
}
if hasProperty(mask, flagType) {
if err = writeShortstr(&payload, me.Properties.Type); err != nil {
return
}
}
if hasProperty(mask, flagUserId) {
if err = writeShortstr(&payload, me.Properties.UserId); err != nil {
return
}
}
if hasProperty(mask, flagAppId) {
if err = writeShortstr(&payload, me.Properties.AppId); err != nil {
return
}
}
return writeFrame(w, frameHeader, me.ChannelId, payload.Bytes())
}
// Body
//
// Payload is one byte range from the full body whose size is declared in the
// Header frame
func (me *bodyFrame) write(w io.Writer) (err error) {
return writeFrame(w, frameBody, me.ChannelId, me.Body)
}
func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err error) {
end := []byte{frameEnd}
size := uint(len(payload))
_, err = w.Write([]byte{
byte(typ),
byte((channel & 0xff00) >> 8),
byte((channel & 0x00ff) >> 0),
byte((size & 0xff000000) >> 24),
byte((size & 0x00ff0000) >> 16),
byte((size & 0x0000ff00) >> 8),
byte((size & 0x000000ff) >> 0),
})
if err != nil {
return
}
if _, err = w.Write(payload); err != nil {
return
}
if _, err = w.Write(end); err != nil {
return
}
return
}
func writeShortstr(w io.Writer, s string) (err error) {
b := []byte(s)
length := uint8(len(b))
if err = binary.Write(w, binary.BigEndian, length); err != nil {
return
}
if _, err = w.Write(b[:length]); err != nil {
return
}
return
}
func writeLongstr(w io.Writer, s string) (err error) {
b := []byte(s)
length := uint32(len(b))
if err = binary.Write(w, binary.BigEndian, length); err != nil {
return
}
if _, err = w.Write(b[:length]); err != nil {
return
}
return
}
/*
'A': []interface{}
'D': Decimal
'F': Table
'I': int32
'S': string
'T': time.Time
'V': nil
'b': byte
'd': float64
'f': float32
'l': int64
's': int16
't': bool
'x': []byte
*/
func writeField(w io.Writer, value interface{}) (err error) {
var buf [9]byte
var enc []byte
switch v := value.(type) {
case bool:
buf[0] = 't'
if v {
buf[1] = byte(1)
} else {
buf[1] = byte(0)
}
enc = buf[:2]
case byte:
buf[0] = 'b'
buf[1] = byte(v)
enc = buf[:2]
case int16:
buf[0] = 's'
binary.BigEndian.PutUint16(buf[1:3], uint16(v))
enc = buf[:3]
case int32:
buf[0] = 'I'
binary.BigEndian.PutUint32(buf[1:5], uint32(v))
enc = buf[:5]
case int64:
buf[0] = 'l'
binary.BigEndian.PutUint64(buf[1:9], uint64(v))
enc = buf[:9]
case float32:
buf[0] = 'f'
binary.BigEndian.PutUint32(buf[1:5], math.Float32bits(v))
enc = buf[:5]
case float64:
buf[0] = 'd'
binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(v))
enc = buf[:9]
case Decimal:
buf[0] = 'D'
buf[1] = byte(v.Scale)
binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value))
enc = buf[:6]
case string:
buf[0] = 'S'
binary.BigEndian.PutUint32(buf[1:5], uint32(len(v)))
enc = append(buf[:5], []byte(v)...)
case []interface{}: // field-array
buf[0] = 'A'
sec := new(bytes.Buffer)
for _, val := range v {
if err = writeField(sec, val); err != nil {
return
}
}
binary.BigEndian.PutUint32(buf[1:5], uint32(sec.Len()))
if _, err = w.Write(buf[:5]); err != nil {
return
}
if _, err = w.Write(sec.Bytes()); err != nil {
return
}
return
case time.Time:
buf[0] = 'T'
binary.BigEndian.PutUint64(buf[1:9], uint64(v.Unix()))
enc = buf[:9]
case Table:
if _, err = w.Write([]byte{'F'}); err != nil {
return
}
return writeTable(w, v)
case []byte:
buf[0] = 'x'
binary.BigEndian.PutUint32(buf[1:5], uint32(len(v)))
if _, err = w.Write(buf[0:5]); err != nil {
return
}
if _, err = w.Write(v); err != nil {
return
}
return
case nil:
buf[0] = 'V'
enc = buf[:1]
default:
return ErrFieldType
}
_, err = w.Write(enc)
return
}
func writeTable(w io.Writer, table Table) (err error) {
var buf bytes.Buffer
for key, val := range table {
if err = writeShortstr(&buf, key); err != nil {
return
}
if err = writeField(&buf, val); err != nil {
return
}
}
return writeLongstr(w, buf.String())
}
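
A standalone sketch of the wire layout produced by writeFrame above; encodeFrame is an illustrative mirror, and 8/0xCE are the AMQP 0.9.1 heartbeat frame type and frame-end octet:

```go
package main

import (
	"bytes"
	"fmt"
)

// encodeFrame mirrors the writeFrame layout above: type octet, big-endian
// channel and size, payload, then the frame-end octet.
func encodeFrame(typ byte, channel uint16, payload []byte) []byte {
	size := uint32(len(payload))
	var buf bytes.Buffer
	buf.WriteByte(typ)
	buf.WriteByte(byte(channel >> 8))
	buf.WriteByte(byte(channel))
	buf.WriteByte(byte(size >> 24))
	buf.WriteByte(byte(size >> 16))
	buf.WriteByte(byte(size >> 8))
	buf.WriteByte(byte(size))
	buf.Write(payload)
	buf.WriteByte(0xCE) // frame-end
	return buf.Bytes()
}

func main() {
	// A heartbeat is frame type 8 on channel 0 with an empty payload.
	fmt.Printf("% x\n", encodeFrame(8, 0, nil)) // 08 00 00 00 00 00 00 ce
}
```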

363
vendor/gopkg.in/olivere/elastic.v3/CHANGELOG-3.0.md generated vendored Normal file
View File

@ -0,0 +1,363 @@
# Elastic 3.0
Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes.
We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft.
So, to summarize:
1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained.
2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch.
The rest of the document is a list of all changes in Elastic 3.0.
## Pointer types
All types have changed to be pointer types, not value types. This not only is cleaner but also simplifies the API as illustrated by the following example:
Example for Elastic 2.0 (old):
```go
q := elastic.NewMatchAllQuery()
res, err := elastic.Search("one").Query(&q).Do() // notice the & here
```
Example for Elastic 3.0 (new):
```go
q := elastic.NewMatchAllQuery()
res, err := elastic.Search("one").Query(q).Do() // no more &
// ... which can be simplified as:
res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do()
```
It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046).
## Query/filter merge
One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`).
The practical aspect of the merge is that you can now basically use queries where once you had to use filters instead. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay!
Notice that some methods still carry "filter" in their name, e.g. `PostFilter`. However, they accept a `Query` now where they used to accept a `Filter` before.
Example for Elastic 2.0 (old):
```go
q := elastic.NewMatchAllQuery()
f := elastic.NewTermFilter("tag", "important")
res, err := elastic.Search().Index("one").Query(&q).PostFilter(f)
```
Example for Elastic 3.0 (new):
```go
q := elastic.NewMatchAllQuery()
f := elastic.NewTermQuery("tag", "important") // it's a query now!
res, err := elastic.Search().Index("one").Query(q).PostFilter(f)
```
## Facets are removed
[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now.
## Errors
Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible by the consumer.
Errors and all its details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59).
### HTTP Status 404 (Not Found)
When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0.
Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to e.g. check for existence of indices or documents. All other responses will return an error.
To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below).
The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0.
Example for Elastic 2.0 (old):
```go
res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
if err != nil {
// Something else went wrong (but 404 is NOT an error in Elastic 2.0)
}
if !res.Found {
// Document has not been found
}
```
Example for Elastic 3.0 (new):
```go
res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
if err != nil {
if elastic.IsNotFound(err) {
// Document has not been found
} else {
// Something else went wrong
}
}
```
### HTTP Status 408 (Timeouts)
Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is raised. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API.
To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper.
Example for Elastic 2.0 (old):
```go
health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
if err != nil {
// ...
}
if health.TimedOut {
// We have a timeout
}
```
Example for Elastic 3.0 (new):
```go
health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
if elastic.IsTimeout(err) {
// We have a timeout
}
```
### Bulk Errors
The error response of a bulk operation used to be a simple string in Elasticsearch 1.x.
In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error.
These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206).
### Removed specific Elastic errors
The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message.
## Numeric types
Elastic 3.0 has settled to use `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`.
## Pluralization
Some services accept zero, one or more indices or types to operate on.
E.g. the `SearchService` accepts a list of zero, one, or more indices to
search and therefore had a func called `Index(index string)` and a func
called `Indices(indices ...string)`.
Elastic 3.0 now only uses the singular form that, when applicable, accepts a
variadic type. E.g. in the case of the `SearchService`, you now only have
one func with the following signature: `Index(indices ...string)`.
Notice this is only limited to `Index(...)` and `Type(...)`. There are other
services with variadic functions. These have not been changed.
## Multiple calls to variadic functions
Some services with variadic functions used to clear the underlying slice when
called, while other services just added to the existing slice. This has now
been normalized to always add to the underlying slice.
Example for Elastic 2.0 (old):
```go
// Would only clear scroll id "two"
// because ScrollId cleared the values when called multiple times
client.ClearScroll().ScrollId("one").ScrollId("two").Do()
```
Example for Elastic 3.0 (new):
```go
// Now (correctly) clears both scroll id "one" and "two"
// because ScrollId no longer clears the values when called multiple times
client.ClearScroll().ScrollId("one").ScrollId("two").Do()
```
## Ping service requires URL
The `Ping` service raised some issues because it is different from all
other services. If not explicitly given a URL, it always pings `127.0.0.1:9200`.
Users expected to ping the cluster, but that is not possible as the cluster
can be a set of many nodes: so which node do we ping then?
To make it more clear, the `Ping` function on the client now requires users
to explicitly set the URL of the node to ping.
## Meta fields
Many of the meta fields e.g. `_parent` or `_routing` are now
[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields)
and are no longer returned as parts of the `fields` object. We had to change
larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0.
Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default).
## HasParentQuery / HasChildQuery
`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. It is now in line with the Java API.
Example for Elastic 2.0 (old):
```go
allQ := elastic.NewMatchAllQuery()
q := elastic.NewHasChildFilter("tweet").Query(&allQ)
```
Example for Elastic 3.0 (new):
```go
q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery())
```
## SetBasicAuth client option
You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html).
Example:
```go
client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret"))
if err != nil {
log.Fatal(err)
}
```
## Delete-by-Query API
The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService` you will most probably get a 404.
An older version of this document stated the following:
> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed.
>
> Example for Elastic 3.0 (new):
>
> ```go
> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do()
> if err == elastic.ErrPluginNotFound {
> // Delete By Query API is not available
> }
> ```
I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check if the plugin is missing in case of an error. This is not only slow, but it also puts logic into a service where it should really be just opaque and return the response of Elasticsearch.
If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play.
## HasPlugin and SetRequiredPlugins
Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client.
Example for Elastic 3.0 (new):
```go
found, err := client.HasPlugin("delete-by-query")
if err == nil && found {
// ... Delete By Query API is available
}
```
To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If a required plugin is not installed, the client will not be created in the first place.
```go
// Will raise an error if the "delete-by-query" plugin is NOT installed
client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query"))
if err != nil {
log.Fatal(err)
}
```
Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file.
## Common Query has been renamed to Common Terms Query
The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring).
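A minimal sketch of the renamed query (field name and search text are made up for illustration):
```go
q := elastic.NewCommonTermsQuery("message", "the quick brown fox")
res, err := client.Search().Index("twitter").Query(q).Do()
```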
## Remove `MoreLikeThis` and `MoreLikeThisField`
The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`.
## Remove Filtered Query
With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite much of your application code, it is a good chance to get rid of deprecated things as well. So you might simply replace your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated), as sketched below.
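A minimal sketch of such a rewrite (field and value are made up):
```go
// The former filter moves into the bool query's filter clause,
// which is not scored and can be cached by Elasticsearch.
q := elastic.NewBoolQuery().
	Must(elastic.NewMatchAllQuery()).
	Filter(elastic.NewTermQuery("user", "olivere"))
res, err := client.Search().Index("twitter").Query(q).Do()
```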
## Remove FuzzyLikeThis and FuzzyLikeThisField
Both have been removed from Elasticsearch 2.0 as well.
## Remove LimitFilter
The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects.
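As a hedged sketch of the replacement, assuming `SearchSource` exposes a `TerminateAfter` setter for the `terminate_after` parameter (verify against your Elastic version):
```go
ss := elastic.NewSearchSource().
	Query(elastic.NewTermQuery("user", "olivere")).
	TerminateAfter(1000) // assumed setter; maps to terminate_after
res, err := client.Search().Index("twitter").SearchSource(ss).Do()
```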
## Remove `_cache` and `_cache_key` from filters
Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching).
## Partial fields are gone
Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html).
## Scripting
A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. aggregations) where you could just add the script as a string, specify the scripting language, add parameters etc. With Elastic 3.0, you should now always use the `Script` type.
Example for Elastic 2.0 (old):
```go
update, err := client.Update().Index("twitter").Type("tweet").Id("1").
Script("ctx._source.retweets += num").
ScriptParams(map[string]interface{}{"num": 1}).
Upsert(map[string]interface{}{"retweets": 0}).
Do()
```
Example for Elastic 3.0 (new):
```go
update, err := client.Update().Index("twitter").Type("tweet").Id("1").
Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)).
Upsert(map[string]interface{}{"retweets": 0}).
Do()
```
## Cluster State
The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`.
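A minimal sketch of the new variadic form (metric names chosen for illustration):
```go
res, err := client.ClusterState().Metric("metadata", "nodes").Do()
```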
## Unexported structs in response
Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0, however, we changed this: (most) sub-structs are now unexported, meaning you can only pass around the whole response, not sub-structures of it. This makes it easier to restructure responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example.
## Add offset to Histogram aggregation
Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option.
## Services
### REST API specification
As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure.
Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process.
This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes.
At the same time, the file names of the services are renamed to match the REST API specification naming.
### REST API Test Suite
The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well.
This process is not yet complete, though.
40
vendor/gopkg.in/olivere/elastic.v3/CONTRIBUTING.md generated vendored Normal file
View File
@ -0,0 +1,40 @@
# How to contribute
Elastic is an open-source project and we are looking forward to each
contribution.
Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level
overview of the features of Elasticsearch. Elastic, however, tries to resemble
the Java API of Elasticsearch, which you can find [on GitHub](https://github.com/elastic/elasticsearch).
This explains why some options might seem strange or missing
in Elastic, while often they're just different. Please check the Java API first.
Having said that: Elasticsearch is moving fast, and it is quite likely
that we missed some features or changes. Feel free to fix that.
## Your Pull Request
To make it easy to review and understand your changes, please keep the
following things in mind before submitting your pull request:
* You compared the existing implementation with the Java API, didn't you?
* Please work on the latest possible state of `olivere/elastic`.
Use `release-branch.v2` for targeting Elasticsearch 1.x and
`release-branch.v3` for targeting 2.x.
* Create a branch dedicated to your change.
* If possible, write a test case which confirms your change.
* Make sure your changes and your tests work with all recent versions of
Elasticsearch. We currently support Elasticsearch 1.7.x in the
release-branch.v2 and Elasticsearch 2.x in the release-branch.v3.
* Test your changes before creating a pull request (`go test ./...`).
* Don't mix several features or bug fixes in one pull request.
* Create a meaningful commit message.
* Explain your change, e.g. provide a link to the issue you are fixing and
probably a link to the Elasticsearch documentation and/or source code.
* Format your source with `go fmt`.
## Additional Resources
* [GitHub documentation](http://help.github.com/)
* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)
60
vendor/gopkg.in/olivere/elastic.v3/CONTRIBUTORS generated vendored Normal file
View File
@ -0,0 +1,60 @@
# This is a list of people who have contributed code
# to the Elastic repository.
#
# It is just my small "thank you" to all those that helped
# making Elastic what it is.
#
# Please keep this list sorted.
Adam Alix [@adamalix](https://github.com/adamalix)
Adam Weiner [@adamweiner](https://github.com/adamweiner)
Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux)
Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va)
Brady Love [@bradylove](https://github.com/bradylove)
Bruce Zhou [@brucez-isell](https://github.com/brucez-isell)
Chris M [@tebriel](https://github.com/tebriel)
Christophe Courtaut [@kri5](https://github.com/kri5)
Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
Corey Scott [@corsc](https://github.com/corsc)
Daniel Barrett [@shendaras](https://github.com/shendaras)
Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath)
Daniel Imfeld [@dimfeld](https://github.com/dimfeld)
Dwayne Schultz [@myshkin5](https://github.com/myshkin5)
Faolan C-P [@fcheslack](https://github.com/fcheslack)
Gerhard Häring [@ghaering](https://github.com/ghaering)
Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
Guillaume J. Charmes [@creack](https://github.com/creack)
Han Yu [@MoonighT](https://github.com/MoonighT)
Harrison Wright [@wright8191](https://github.com/wright8191)
Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy)
Isaac Saldana [@isaldana](https://github.com/isaldana)
Jack Lindamood [@cep21](https://github.com/cep21)
Joe Buck [@four2five](https://github.com/four2five)
John Barker [@j16r](https://github.com/j16r)
John Goodall [@jgoodall](https://github.com/jgoodall)
Junpei Tsuji [@jun06t](https://github.com/jun06t)
Kenta SUZUKI [@suzuken](https://github.com/suzuken)
Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh)
Mara Kim [@autochthe](https://github.com/autochthe)
Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato)
Medhi Bechina [@mdzor](https://github.com/mdzor)
naimulhaider [@naimulhaider](https://github.com/naimulhaider)
navins [@ishare](https://github.com/ishare)
Naoya Tsutsumi [@tutuming](https://github.com/tutuming)
Nicholas Wolff [@nwolff](https://github.com/nwolff)
Nick Whyte [@nickw444](https://github.com/nickw444)
Orne Brocaar [@brocaar](https://github.com/brocaar)
Radoslaw Wesolowski [r--w](https://github.com/r--w)
Ryan Schmukler [@rschmukler](https://github.com/rschmukler)
Sacheendra talluri [@sacheendra](https://github.com/sacheendra)
Sean DuBois [@Sean-Der](https://github.com/Sean-Der)
Shalin LK [@shalinlk](https://github.com/shalinlk)
Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic)
Stuart Warren [@Woz](https://github.com/stuart-warren)
Sundar [@sundarv85](https://github.com/sundarv85)
Tetsuya Morimoto [@t2y](https://github.com/t2y)
TimeEmit [@TimeEmit](https://github.com/timeemit)
TusharM [@tusharm](https://github.com/tusharm)
wolfkdy [@wolfkdy](https://github.com/wolfkdy)
zakthomas [@zakthomas](https://github.com/zakthomas)
16
vendor/gopkg.in/olivere/elastic.v3/ISSUE_TEMPLATE.md generated vendored Normal file
View File
@ -0,0 +1,16 @@
Please use the following questions as a guideline to help me answer
your issue/question without further inquiry. Thank you.
### Which version of Elastic are you using?
[ ] elastic.v2 (for Elasticsearch 1.x)
[ ] elastic.v3 (for Elasticsearch 2.x)
### Please describe the expected behavior
### Please describe the actual behavior
### Any steps to reproduce the behavior?
View File
@ -1,21 +1,20 @@
The MIT License (MIT)
Copyright (c) 2014 Ben Johnson
Copyright © 2012-2015 Oliver Eilhard
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
421
vendor/gopkg.in/olivere/elastic.v3/README.md generated vendored Normal file
View File
@ -0,0 +1,421 @@
# Elastic
Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
[Go](http://www.golang.org/) programming language.
[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v3)](https://travis-ci.org/olivere/elastic)
[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v3)
[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
## Releases
**The release branches (e.g. [`release-branch.v3`](https://github.com/olivere/elastic/tree/release-branch.v3)) are actively being worked on and can break at any time. If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).**
Here's the version matrix:
Elasticsearch version | Elastic version  | Package URL
----------------------|------------------|------------
2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3))
1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
**Example:**
You have installed Elasticsearch 2.1.1 and want to use Elastic. As listed above, you should use Elastic 3.0. So you first install the stable release of Elastic 3.0 from gopkg.in.
```sh
$ go get gopkg.in/olivere/elastic.v3
```
You then import it with this import path:
```go
import "gopkg.in/olivere/elastic.v3"
```
### Elastic 3.0
Elastic 3.0 targets Elasticsearch 2.0 and later. Elasticsearch 2.0.0 was [released on 28th October 2015](https://www.elastic.co/blog/elasticsearch-2-0-0-released).
Notice that there are a lot of [breaking changes in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html) and we used this as an opportunity to [clean up and refactor Elastic as well](https://github.com/olivere/elastic/blob/release-branch.v3/CHANGELOG-3.0.md).
### Elastic 2.0
Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
### Elastic 1.0
Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
to a recent version.
However, if you cannot update for some reason, don't worry. Version 1.0 is
still available. All you need to do is go-get it and change your import path
as described above.
## Status
We have been using Elastic in production since 2012. Elastic is stable, but the API changes
now and then. We strive for API compatibility.
However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html)
and we sometimes have to adapt.
Having said that, there have been no big API changes that required you
to rewrite large parts of your application. More often than not it's renaming APIs
and adding/removing features so that Elastic stays in sync with Elasticsearch.
Elastic has been used in production with the following Elasticsearch versions:
0.90, 1.0-1.7. Furthermore, we use [Travis CI](https://travis-ci.org/)
to test Elastic with the most recent versions of Elasticsearch and Go.
See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
for the results.
Elasticsearch has quite a few features. Most of them are implemented
by Elastic. I add features and APIs as required. It's straightforward
to implement missing pieces. I'm accepting pull requests :-)
Having said that, I hope you find the project useful.
## Getting Started
The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go). The client connects to Elasticsearch on `http://127.0.0.1:9200` by default.
You typically create one client for your app. Here's a complete example of
creating a client, creating an index, adding a document, executing a search etc.
```go
// Create a client
client, err := elastic.NewClient()
if err != nil {
// Handle error
}
// Create an index
_, err = client.CreateIndex("twitter").Do()
if err != nil {
// Handle error
panic(err)
}
// Add a document to the index
tweet := Tweet{User: "olivere", Message: "Take Five"}
_, err = client.Index().
Index("twitter").
Type("tweet").
Id("1").
BodyJson(tweet).
Refresh(true).
Do()
if err != nil {
// Handle error
panic(err)
}
// Search with a term query
termQuery := elastic.NewTermQuery("user", "olivere")
searchResult, err := client.Search().
Index("twitter"). // search in index "twitter"
Query(termQuery). // specify the query
Sort("user", true). // sort by "user" field, ascending
From(0).Size(10). // take documents 0-9
Pretty(true). // pretty print request and response JSON
Do() // execute
if err != nil {
// Handle error
panic(err)
}
// searchResult is of type SearchResult and returns hits, suggestions,
// and all kinds of other information from Elasticsearch.
fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
// Each is a convenience function that iterates over hits in a search result.
// It makes sure you don't need to check for nil values in the response.
// However, it ignores errors in serialization. If you want full control
// over iterating the hits, see below.
var ttyp Tweet
for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
if t, ok := item.(Tweet); ok {
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
}
}
// TotalHits is another convenience function that works even when something goes wrong.
fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
// Here's how you iterate through results with full control over each step.
if searchResult.Hits.TotalHits > 0 {
fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
// Iterate through results
for _, hit := range searchResult.Hits.Hits {
// hit.Index contains the name of the index
// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
var t Tweet
err := json.Unmarshal(*hit.Source, &t)
if err != nil {
// Deserialization failed
}
// Work with tweet
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
}
} else {
// No hits
fmt.Print("Found no tweets\n")
}
// Delete the index again
_, err = client.DeleteIndex("twitter").Do()
if err != nil {
// Handle error
panic(err)
}
```
Here's a [link to a complete working example](https://gist.github.com/olivere/114347ff9d9cfdca7bdc0ecea8b82263).
See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
## API Status
### Document APIs
- [x] Index API
- [x] Get API
- [x] Delete API
- [x] Delete By Query API
- [x] Update API
- [x] Update By Query API
- [x] Multi Get API
- [x] Bulk API
- [x] Reindex API
- [x] Term Vectors
- [x] Multi termvectors API
### Search APIs
- [x] Search
- [x] Search Template
- [ ] Search Shards API
- [x] Suggesters
- [x] Term Suggester
- [x] Phrase Suggester
- [x] Completion Suggester
- [x] Context Suggester
- [x] Multi Search API
- [x] Count API
- [ ] Search Exists API
- [ ] Validate API
- [x] Explain API
- [x] Percolator API
- [x] Field Stats API
### Aggregations
- Metrics Aggregations
- [x] Avg
- [x] Cardinality
- [x] Extended Stats
- [x] Geo Bounds
- [x] Max
- [x] Min
- [x] Percentiles
- [x] Percentile Ranks
- [ ] Scripted Metric
- [x] Stats
- [x] Sum
- [x] Top Hits
- [x] Value Count
- Bucket Aggregations
- [x] Children
- [x] Date Histogram
- [x] Date Range
- [x] Filter
- [x] Filters
- [x] Geo Distance
- [ ] GeoHash Grid
- [x] Global
- [x] Histogram
- [x] IPv4 Range
- [x] Missing
- [x] Nested
- [x] Range
- [x] Reverse Nested
- [x] Sampler
- [x] Significant Terms
- [x] Terms
- Pipeline Aggregations
- [x] Avg Bucket
- [x] Derivative
- [x] Max Bucket
- [x] Min Bucket
- [x] Sum Bucket
- [x] Moving Average
- [x] Cumulative Sum
- [x] Bucket Script
- [x] Bucket Selector
- [x] Serial Differencing
- [x] Aggregation Metadata
### Indices APIs
- [x] Create Index
- [x] Delete Index
- [x] Get Index
- [x] Indices Exists
- [x] Open / Close Index
- [x] Put Mapping
- [x] Get Mapping
- [ ] Get Field Mapping
- [ ] Types Exists
- [x] Index Aliases
- [x] Update Indices Settings
- [x] Get Settings
- [ ] Analyze
- [x] Index Templates
- [x] Warmers
- [x] Indices Stats
- [ ] Indices Segments
- [ ] Indices Recovery
- [ ] Clear Cache
- [x] Flush
- [x] Refresh
- [x] Optimize
- [ ] Shadow Replica Indices
- [ ] Upgrade
### cat APIs
The cat APIs are not implemented as of now. We think they are better suited for operating with Elasticsearch on the command line.
- [ ] cat aliases
- [ ] cat allocation
- [ ] cat count
- [ ] cat fielddata
- [ ] cat health
- [ ] cat indices
- [ ] cat master
- [ ] cat nodes
- [ ] cat pending tasks
- [ ] cat plugins
- [ ] cat recovery
- [ ] cat thread pool
- [ ] cat shards
- [ ] cat segments
### Cluster APIs
- [x] Cluster Health
- [x] Cluster State
- [x] Cluster Stats
- [ ] Pending Cluster Tasks
- [ ] Cluster Reroute
- [ ] Cluster Update Settings
- [ ] Nodes Stats
- [x] Nodes Info
- [x] Task Management API
- [ ] Nodes hot_threads
### Query DSL
- [x] Match All Query
- [x] Inner hits
- Full text queries
- [x] Match Query
- [x] Multi Match Query
- [x] Common Terms Query
- [x] Query String Query
- [x] Simple Query String Query
- Term level queries
- [x] Term Query
- [x] Terms Query
- [x] Range Query
- [x] Exists Query
- [x] Missing Query
- [x] Prefix Query
- [x] Wildcard Query
- [x] Regexp Query
- [x] Fuzzy Query
- [x] Type Query
- [x] Ids Query
- Compound queries
- [x] Constant Score Query
- [x] Bool Query
- [x] Dis Max Query
- [x] Function Score Query
- [x] Boosting Query
- [x] Indices Query
- [x] And Query (deprecated)
- [x] Not Query
- [x] Or Query (deprecated)
- [ ] Filtered Query (deprecated)
- [ ] Limit Query (deprecated)
- Joining queries
- [x] Nested Query
- [x] Has Child Query
- [x] Has Parent Query
- Geo queries
- [ ] GeoShape Query
- [x] Geo Bounding Box Query
- [x] Geo Distance Query
- [ ] Geo Distance Range Query
- [x] Geo Polygon Query
- [ ] Geohash Cell Query
- Specialized queries
- [x] More Like This Query
- [x] Template Query
- [x] Script Query
- Span queries
- [ ] Span Term Query
- [ ] Span Multi Term Query
- [ ] Span First Query
- [ ] Span Near Query
- [ ] Span Or Query
- [ ] Span Not Query
- [ ] Span Containing Query
- [ ] Span Within Query
### Modules
- [ ] Snapshot and Restore
### Sorting
- [x] Sort by score
- [x] Sort by field
- [x] Sort by geo distance
- [x] Sort by script
### Scan
Scrolling through documents (e.g. `search_type=scan`) is implemented via
the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well.
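A hedged sketch of scanning with a cursor (API shape as we recall it; index and size are made up):
```go
cursor, err := client.Scan().Index("twitter").Size(100).Do()
if err != nil {
	// Handle error
}
for {
	res, err := cursor.Next()
	if err == elastic.EOS {
		break // no more documents
	}
	if err != nil {
		// Handle error
		break
	}
	_ = res // process res.Hits.Hits
}
```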
## How to contribute
Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
## Credits
Thanks a lot for the great folks working hard on
[Elasticsearch](http://www.elasticsearch.org/)
and
[Go](http://www.golang.org/).
Elastic uses portions of the
[uritemplates](https://github.com/jtacoma/uritemplates) library
by Joshua Tacoma and
[backoff](https://github.com/cenkalti/backoff) by Cenk Altı.
## LICENSE
MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
or the LICENSE file provided in the repository for details.
22
vendor/gopkg.in/olivere/elastic.v3/backoff/LICENSE generated vendored Normal file
View File
@ -0,0 +1,22 @@
Portions of this code rely on this LICENSE:
The MIT License (MIT)
Copyright (c) 2014 Cenk Altı
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
159
vendor/gopkg.in/olivere/elastic.v3/backoff/backoff.go generated vendored Normal file
View File
@ -0,0 +1,159 @@
// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package backoff
import (
"math"
"math/rand"
"sync"
"sync/atomic"
"time"
)
// Backoff is an interface for different types of backoff algorithms.
type Backoff interface {
Next() time.Duration
Reset()
}
// Stop is used as a signal to indicate that no more retries should be made.
const Stop time.Duration = -1
// -- Simple Backoff --
// SimpleBackoff takes a list of fixed values for backoff intervals.
// Each call to Next returns the next value from that fixed list.
// After all values have been returned, subsequent calls to Next will only
// return the last element. The caller may specify whether the values are "jittered".
type SimpleBackoff struct {
sync.Mutex
ticks []int
index int
jitter bool
stop bool
}
// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
// list of fixed intervals in milliseconds.
func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
return &SimpleBackoff{
ticks: ticks,
index: 0,
jitter: false,
stop: false,
}
}
// Jitter, when enabled, randomizes the returned value within [0.5*value .. 1.5*value].
func (b *SimpleBackoff) Jitter(doJitter bool) *SimpleBackoff {
b.Lock()
defer b.Unlock()
b.jitter = doJitter
return b
}
// SendStop, when enabled, makes Next return Stop once
// the list of values is exhausted.
func (b *SimpleBackoff) SendStop(doStop bool) *SimpleBackoff {
b.Lock()
defer b.Unlock()
b.stop = doStop
return b
}
// Next returns the next wait interval.
func (b *SimpleBackoff) Next() time.Duration {
b.Lock()
defer b.Unlock()
i := b.index
if i >= len(b.ticks) {
if b.stop {
return Stop
}
i = len(b.ticks) - 1
b.index = i
} else {
b.index++
}
ms := b.ticks[i]
if b.jitter {
ms = jitter(ms)
}
return time.Duration(ms) * time.Millisecond
}
// Reset resets SimpleBackoff.
func (b *SimpleBackoff) Reset() {
b.Lock()
b.index = 0
b.Unlock()
}
// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis].
func jitter(millis int) int {
if millis <= 0 {
return 0
}
return millis/2 + rand.Intn(millis)
}
// -- Exponential --
// ExponentialBackoff implements the simple exponential backoff described by
// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
type ExponentialBackoff struct {
sync.Mutex
t float64 // initial timeout (in msec)
f float64 // exponential factor (e.g. 2)
m float64 // maximum timeout (in msec)
n int64 // number of retries
stop bool // indicates whether Next should return Stop when the max timeout is reached
}
// NewExponentialBackoff returns an ExponentialBackoff backoff policy.
// Use initialTimeout to set the first/minimal interval
// and maxTimeout to set the maximum wait interval.
func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
return &ExponentialBackoff{
t: float64(int64(initialTimeout / time.Millisecond)),
f: 2.0,
m: float64(int64(maxTimeout / time.Millisecond)),
n: 0,
stop: false,
}
}
// SendStop, when enabled, makes Next return Stop once
// the maximum timeout is reached.
func (b *ExponentialBackoff) SendStop(doStop bool) *ExponentialBackoff {
b.Lock()
defer b.Unlock()
b.stop = doStop
return b
}
// Next returns the next wait interval.
func (t *ExponentialBackoff) Next() time.Duration {
t.Lock()
defer t.Unlock()
n := float64(atomic.AddInt64(&t.n, 1))
r := 1.0 + rand.Float64() // random number in [1.0..2.0)
m := math.Min(r*t.t*math.Pow(t.f, n), t.m)
if t.stop && m >= t.m {
return Stop
}
d := time.Duration(int64(m)) * time.Millisecond
return d
}
// Reset resets the backoff policy so that it can be reused.
func (t *ExponentialBackoff) Reset() {
t.Lock()
t.n = 0
t.Unlock()
}
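As a hedged illustration of the policies above, here is a minimal sketch of driving a retry loop by hand with `SimpleBackoff` (the failing operation `doSomething` is hypothetical, and the usual `time` import is assumed):
```go
// Fixed ticks in milliseconds, jittered, returning Stop when exhausted.
b := backoff.NewSimpleBackoff(100, 200, 400, 800).Jitter(true).SendStop(true)
for {
	if err := doSomething(); err == nil {
		break // success
	}
	d := b.Next()
	if d == backoff.Stop {
		break // ticks exhausted; give up
	}
	time.Sleep(d)
}
```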
53
vendor/gopkg.in/olivere/elastic.v3/backoff/retry.go generated vendored Normal file
View File
@ -0,0 +1,53 @@
// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
// This file is (c) 2014 Cenk Altı and governed by the MIT license.
// See https://github.com/cenkalti/backoff for original source.
package backoff
import "time"
// An Operation is executed by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error
// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy signals to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)
// Retry the function f until it does not return error or BackOff stops.
// f is guaranteed to be run at least once.
// It is the caller's responsibility to reset b after Retry returns.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) }
// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b Backoff, notify Notify) error {
var err error
var next time.Duration
b.Reset()
for {
if err = operation(); err == nil {
return nil
}
if next = b.Next(); next == Stop {
return err
}
if notify != nil {
notify(err, next)
}
time.Sleep(next)
}
}
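A minimal sketch of `Retry` combined with the exponential policy from backoff.go (`pingNode` is a hypothetical operation that may fail transiently):
```go
b := backoff.NewExponentialBackoff(200*time.Millisecond, 10*time.Second).SendStop(true)
err := backoff.Retry(func() error {
	return pingNode() // retried until it succeeds or the policy returns Stop
}, b)
if err != nil {
	// All retries failed, or the policy returned Stop.
}
```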
353
vendor/gopkg.in/olivere/elastic.v3/bulk.go generated vendored Normal file
View File
@ -0,0 +1,353 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"bytes"
"errors"
"fmt"
"net/url"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// BulkService allows for batching bulk requests and sending them to
// Elasticsearch in one roundtrip. Use the Add method with BulkIndexRequest,
// BulkUpdateRequest, and BulkDeleteRequest to add bulk requests to a batch,
// then use Do to send them to Elasticsearch.
//
// BulkService will be reset after each Do call. In other words, you can
// reuse BulkService to send many batches. You do not have to create a new
// BulkService for each batch.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-bulk.html
// for more details.
type BulkService struct {
client *Client
index string
typ string
requests []BulkableRequest
timeout string
refresh *bool
pretty bool
sizeInBytes int64
}
// NewBulkService initializes a new BulkService.
func NewBulkService(client *Client) *BulkService {
builder := &BulkService{
client: client,
requests: make([]BulkableRequest, 0),
}
return builder
}
func (s *BulkService) reset() {
s.requests = make([]BulkableRequest, 0)
s.sizeInBytes = 0
}
// Index specifies the index to use for all batches. You may also leave
// this blank and specify the index in the individual bulk requests.
func (s *BulkService) Index(index string) *BulkService {
s.index = index
return s
}
// Type specifies the type to use for all batches. You may also leave
// this blank and specify the type in the individual bulk requests.
func (s *BulkService) Type(typ string) *BulkService {
s.typ = typ
return s
}
// Timeout is a global timeout for processing bulk requests. This is a
// server-side timeout, i.e. it tells Elasticsearch the time after which
// it should stop processing.
func (s *BulkService) Timeout(timeout string) *BulkService {
s.timeout = timeout
return s
}
// Refresh, when set to true, tells Elasticsearch to make the bulk requests
// available to search immediately after being processed. Normally, this
// only happens after a specified refresh interval.
func (s *BulkService) Refresh(refresh bool) *BulkService {
s.refresh = &refresh
return s
}
// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *BulkService) Pretty(pretty bool) *BulkService {
s.pretty = pretty
return s
}
// Add adds bulkable requests, i.e. BulkIndexRequest, BulkUpdateRequest,
// and/or BulkDeleteRequest.
func (s *BulkService) Add(requests ...BulkableRequest) *BulkService {
for _, r := range requests {
s.requests = append(s.requests, r)
s.sizeInBytes += s.estimateSizeInBytes(r)
}
return s
}
// EstimatedSizeInBytes returns the estimated size of all bulkable
// requests added via Add.
func (s *BulkService) EstimatedSizeInBytes() int64 {
return s.sizeInBytes
}
// estimateSizeInBytes returns the estimated size of the given
// bulkable request, i.e. BulkIndexRequest, BulkUpdateRequest, and
// BulkDeleteRequest.
func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 {
lines, _ := r.Source()
size := 0
for _, line := range lines {
// +1 for the \n
size += len(line) + 1
}
return int64(size)
}
// NumberOfActions returns the number of bulkable requests that need to
// be sent to Elasticsearch on the next batch.
func (s *BulkService) NumberOfActions() int {
return len(s.requests)
}
func (s *BulkService) bodyAsString() (string, error) {
buf := bytes.NewBufferString("")
for _, req := range s.requests {
source, err := req.Source()
if err != nil {
return "", err
}
for _, line := range source {
_, err := buf.WriteString(fmt.Sprintf("%s\n", line))
if err != nil {
return "", err
}
}
}
return buf.String(), nil
}
// Do sends the batched requests to Elasticsearch. Note that, when successful,
// you can reuse the BulkService for the next batch as the list of bulk
// requests is cleared on success.
func (s *BulkService) Do() (*BulkResponse, error) {
// No actions?
if s.NumberOfActions() == 0 {
return nil, errors.New("elastic: No bulk actions to commit")
}
// Get body
body, err := s.bodyAsString()
if err != nil {
return nil, err
}
// Build url
path := "/"
if s.index != "" {
index, err := uritemplates.Expand("{index}", map[string]string{
"index": s.index,
})
if err != nil {
return nil, err
}
path += index + "/"
}
if s.typ != "" {
typ, err := uritemplates.Expand("{type}", map[string]string{
"type": s.typ,
})
if err != nil {
return nil, err
}
path += typ + "/"
}
path += "_bulk"
// Parameters
params := make(url.Values)
if s.pretty {
params.Set("pretty", fmt.Sprintf("%v", s.pretty))
}
if s.refresh != nil {
params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
}
// Get response
res, err := s.client.PerformRequest("POST", path, params, body)
if err != nil {
return nil, err
}
// Return results
ret := new(BulkResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
// Reset so the request can be reused
s.reset()
return ret, nil
}
// BulkResponse is a response to a bulk execution.
//
// Example:
// {
// "took":3,
// "errors":false,
// "items":[{
// "index":{
// "_index":"index1",
// "_type":"tweet",
// "_id":"1",
// "_version":3,
// "status":201
// }
// },{
// "index":{
// "_index":"index2",
// "_type":"tweet",
// "_id":"2",
// "_version":3,
// "status":200
// }
// },{
// "delete":{
// "_index":"index1",
// "_type":"tweet",
// "_id":"1",
// "_version":4,
// "status":200,
// "found":true
// }
// },{
// "update":{
// "_index":"index2",
// "_type":"tweet",
// "_id":"2",
// "_version":4,
// "status":200
// }
// }]
// }
type BulkResponse struct {
Took int `json:"took,omitempty"`
Errors bool `json:"errors,omitempty"`
Items []map[string]*BulkResponseItem `json:"items,omitempty"`
}
// BulkResponseItem is the result of a single bulk request.
type BulkResponseItem struct {
Index string `json:"_index,omitempty"`
Type string `json:"_type,omitempty"`
Id string `json:"_id,omitempty"`
Version int `json:"_version,omitempty"`
Status int `json:"status,omitempty"`
Found bool `json:"found,omitempty"`
Error *ErrorDetails `json:"error,omitempty"`
}
// Indexed returns all bulk request results of "index" actions.
func (r *BulkResponse) Indexed() []*BulkResponseItem {
return r.ByAction("index")
}
// Created returns all bulk request results of "create" actions.
func (r *BulkResponse) Created() []*BulkResponseItem {
return r.ByAction("create")
}
// Updated returns all bulk request results of "update" actions.
func (r *BulkResponse) Updated() []*BulkResponseItem {
return r.ByAction("update")
}
// Deleted returns all bulk request results of "delete" actions.
func (r *BulkResponse) Deleted() []*BulkResponseItem {
return r.ByAction("delete")
}
// ByAction returns all bulk request results of a certain action,
// e.g. "index" or "delete".
func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
if r.Items == nil {
return nil
}
items := make([]*BulkResponseItem, 0)
for _, item := range r.Items {
if result, found := item[action]; found {
items = append(items, result)
}
}
return items
}
// ById returns all bulk request results of a given document id,
// regardless of the action ("index", "delete" etc.).
func (r *BulkResponse) ById(id string) []*BulkResponseItem {
if r.Items == nil {
return nil
}
items := make([]*BulkResponseItem, 0)
for _, item := range r.Items {
for _, result := range item {
if result.Id == id {
items = append(items, result)
}
}
}
return items
}
// Failed returns those items of a bulk response that have errors,
// i.e. those that don't have a status code between 200 and 299.
func (r *BulkResponse) Failed() []*BulkResponseItem {
if r.Items == nil {
return nil
}
errors := make([]*BulkResponseItem, 0)
for _, item := range r.Items {
for _, result := range item {
if !(result.Status >= 200 && result.Status <= 299) {
errors = append(errors, result)
}
}
}
return errors
}
// Succeeded returns those items of a bulk response that have no errors,
// i.e. those that have a status code between 200 and 299.
func (r *BulkResponse) Succeeded() []*BulkResponseItem {
if r.Items == nil {
return nil
}
succeeded := make([]*BulkResponseItem, 0)
for _, item := range r.Items {
for _, result := range item {
if result.Status >= 200 && result.Status <= 299 {
succeeded = append(succeeded, result)
}
}
}
return succeeded
}
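A minimal sketch of a bulk round-trip with the service and response helpers above (index, type, and the `tweet1` document are made up):
```go
bulk := client.Bulk().Index("twitter").Type("tweet")
bulk.Add(elastic.NewBulkIndexRequest().Id("1").Doc(tweet1))
bulk.Add(elastic.NewBulkDeleteRequest().Id("2"))
res, err := bulk.Do()
if err != nil {
	// Transport-level failure; the batch was not committed.
}
if res.Errors {
	for _, item := range res.Failed() {
		_ = item // inspect item.Error for per-request failures
	}
}
```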
View File
@ -0,0 +1,158 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"encoding/json"
"fmt"
"strings"
)
// -- Bulk delete request --
// Bulk request to remove a document from Elasticsearch.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
// for details.
type BulkDeleteRequest struct {
BulkableRequest
index string
typ string
id string
parent string
routing string
refresh *bool
version int64 // default is MATCH_ANY
versionType string // default is "internal"
source []string
}
// NewBulkDeleteRequest returns a new BulkDeleteRequest.
func NewBulkDeleteRequest() *BulkDeleteRequest {
return &BulkDeleteRequest{}
}
// Index specifies the Elasticsearch index to use for this delete request.
// If unspecified, the index set on the BulkService will be used.
func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
r.index = index
r.source = nil
return r
}
// Type specifies the Elasticsearch type to use for this delete request.
// If unspecified, the type set on the BulkService will be used.
func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
r.typ = typ
r.source = nil
return r
}
// Id specifies the identifier of the document to delete.
func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
r.id = id
r.source = nil
return r
}
// Parent specifies the parent of the request, which is used in parent/child
// mappings.
func (r *BulkDeleteRequest) Parent(parent string) *BulkDeleteRequest {
r.parent = parent
r.source = nil
return r
}
// Routing specifies a routing value for the request.
func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
r.routing = routing
r.source = nil
return r
}
// Refresh indicates whether to update the shards immediately after
// the delete has been processed. Deleted documents will disappear
// in search immediately at the cost of slower bulk performance.
func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest {
r.refresh = &refresh
r.source = nil
return r
}
// Version indicates the version to be deleted as part of an optimistic
// concurrency model.
func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
r.version = version
r.source = nil
return r
}
// VersionType can be "internal" (default), "external", "external_gte",
// "external_gt", or "force".
func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
r.versionType = versionType
r.source = nil
return r
}
// String returns the on-wire representation of the delete request,
// concatenated as a single string.
func (r *BulkDeleteRequest) String() string {
lines, err := r.Source()
if err != nil {
return fmt.Sprintf("error: %v", err)
}
return strings.Join(lines, "\n")
}
// Source returns the on-wire representation of the delete request,
// split into an action-and-meta-data line and an (optional) source line.
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
// for details.
func (r *BulkDeleteRequest) Source() ([]string, error) {
if r.source != nil {
return r.source, nil
}
lines := make([]string, 1)
source := make(map[string]interface{})
deleteCommand := make(map[string]interface{})
if r.index != "" {
deleteCommand["_index"] = r.index
}
if r.typ != "" {
deleteCommand["_type"] = r.typ
}
if r.id != "" {
deleteCommand["_id"] = r.id
}
if r.parent != "" {
deleteCommand["_parent"] = r.parent
}
if r.routing != "" {
deleteCommand["_routing"] = r.routing
}
if r.version > 0 {
deleteCommand["_version"] = r.version
}
if r.versionType != "" {
deleteCommand["_version_type"] = r.versionType
}
if r.refresh != nil {
deleteCommand["refresh"] = *r.refresh
}
source["delete"] = deleteCommand
body, err := json.Marshal(source)
if err != nil {
return nil, err
}
lines[0] = string(body)
r.source = lines
return lines, nil
}
View File
@ -0,0 +1,232 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"encoding/json"
"fmt"
"strings"
)
// Bulk request to add a document to Elasticsearch.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
// for details.
type BulkIndexRequest struct {
BulkableRequest
index string
typ string
id string
opType string
routing string
parent string
timestamp string
ttl int64
refresh *bool
version int64 // default is MATCH_ANY
versionType string // default is "internal"
doc interface{}
source []string
}
// NewBulkIndexRequest returns a new BulkIndexRequest.
// The operation type is "index" by default.
func NewBulkIndexRequest() *BulkIndexRequest {
return &BulkIndexRequest{
opType: "index",
}
}
// Index specifies the Elasticsearch index to use for this index request.
// If unspecified, the index set on the BulkService will be used.
func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
r.index = index
r.source = nil
return r
}
// Type specifies the Elasticsearch type to use for this index request.
// If unspecified, the type set on the BulkService will be used.
func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
r.typ = typ
r.source = nil
return r
}
// Id specifies the identifier of the document to index.
func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
r.id = id
r.source = nil
return r
}
// OpType specifies if this request should follow create-only or upsert
// behavior. This follows the OpType of the standard document index API.
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#operation-type
// for details.
func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
r.opType = opType
r.source = nil
return r
}
// Routing specifies a routing value for the request.
func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
r.routing = routing
r.source = nil
return r
}
// Parent specifies the identifier of the parent document (if available).
func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
r.parent = parent
r.source = nil
return r
}
// Timestamp can be used to index a document with a timestamp.
// This is deprecated as of 2.0.0-beta2; you should use a normal date field
// and set its value explicitly.
func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest {
r.timestamp = timestamp
r.source = nil
return r
}
// Ttl (time to live) sets an expiration date for the document. Expired
// documents will be expunged automatically.
// This is deprecated as of 2.0.0-beta2 and will be replaced by a different
// implementation in a future version.
func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest {
r.ttl = ttl
r.source = nil
return r
}
// Refresh indicates whether to update the shards immediately after
// the request has been processed. Newly added documents will appear
// in search immediately at the cost of slower bulk performance.
func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest {
r.refresh = &refresh
r.source = nil
return r
}
// Version indicates the version of the document as part of an optimistic
// concurrency model.
func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
r.version = version
r.source = nil
return r
}
// VersionType specifies how versions are created. It can be e.g. internal,
// external, external_gte, or force.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning
// for details.
func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
r.versionType = versionType
r.source = nil
return r
}
// Doc specifies the document to index.
func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
r.doc = doc
r.source = nil
return r
}
// String returns the on-wire representation of the index request,
// concatenated as a single string.
func (r *BulkIndexRequest) String() string {
lines, err := r.Source()
if err != nil {
return fmt.Sprintf("error: %v", err)
}
return strings.Join(lines, "\n")
}
// Source returns the on-wire representation of the index request,
// split into an action-and-meta-data line and an (optional) source line.
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
// for details.
func (r *BulkIndexRequest) Source() ([]string, error) {
// { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
// { "field1" : "value1" }
if r.source != nil {
return r.source, nil
}
lines := make([]string, 2)
// "index" ...
command := make(map[string]interface{})
indexCommand := make(map[string]interface{})
if r.index != "" {
indexCommand["_index"] = r.index
}
if r.typ != "" {
indexCommand["_type"] = r.typ
}
if r.id != "" {
indexCommand["_id"] = r.id
}
if r.routing != "" {
indexCommand["_routing"] = r.routing
}
if r.parent != "" {
indexCommand["_parent"] = r.parent
}
if r.timestamp != "" {
indexCommand["_timestamp"] = r.timestamp
}
if r.ttl > 0 {
indexCommand["_ttl"] = r.ttl
}
if r.version > 0 {
indexCommand["_version"] = r.version
}
if r.versionType != "" {
indexCommand["_version_type"] = r.versionType
}
if r.refresh != nil {
indexCommand["refresh"] = *r.refresh
}
command[r.opType] = indexCommand
line, err := json.Marshal(command)
if err != nil {
return nil, err
}
lines[0] = string(line)
// "field1" ...
if r.doc != nil {
switch t := r.doc.(type) {
default:
body, err := json.Marshal(r.doc)
if err != nil {
return nil, err
}
lines[1] = string(body)
case json.RawMessage:
lines[1] = string(t)
case *json.RawMessage:
lines[1] = string(*t)
case string:
lines[1] = t
case *string:
lines[1] = *t
}
} else {
lines[1] = "{}"
}
r.source = lines
return lines, nil
}
541
vendor/gopkg.in/olivere/elastic.v3/bulk_processor.go generated vendored Normal file
View File
@ -0,0 +1,541 @@
// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"sync"
"sync/atomic"
"time"
"gopkg.in/olivere/elastic.v3/backoff"
)
// BulkProcessorService makes it easy to process bulk requests. It allows you
// to set policies for when to flush new bulk requests, e.g. based on the
// number of actions, the size of the actions, and/or a periodic flush. It
// also lets you control the number of concurrent bulk requests allowed to be
// executed in parallel.
//
// BulkProcessorService, by default, commits either every 1000 requests or when the
// (estimated) size of the bulk requests exceeds 5 MB. However, it does not
// commit periodically. BulkProcessorService also retries by default, using
// an exponential backoff algorithm.
//
// The caller is responsible for setting the index and type on every
// bulk request added to BulkProcessorService.
//
// BulkProcessorService takes ideas from the BulkProcessor of the
// Elasticsearch Java API as documented in
// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.
type BulkProcessorService struct {
c *Client
beforeFn BulkBeforeFunc
afterFn BulkAfterFunc
name string // name of processor
numWorkers int // # of workers (>= 1)
bulkActions int // # of requests after which to commit
bulkSize int // # of bytes after which to commit
flushInterval time.Duration // periodic flush interval
wantStats bool // indicates whether to gather statistics
initialTimeout time.Duration // initial wait time before retry on errors
maxTimeout time.Duration // max time to wait for retry on errors
}
// NewBulkProcessorService creates a new BulkProcessorService.
func NewBulkProcessorService(client *Client) *BulkProcessorService {
return &BulkProcessorService{
c: client,
numWorkers: 1,
bulkActions: 1000,
bulkSize: 5 << 20, // 5 MB
initialTimeout: time.Duration(200) * time.Millisecond,
maxTimeout: time.Duration(10000) * time.Millisecond,
}
}
// BulkBeforeFunc defines the signature of callbacks that are executed
// before a commit to Elasticsearch.
type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)
// BulkAfterFunc defines the signature of callbacks that are executed
// after a commit to Elasticsearch. The err parameter signals an error.
type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)
// Before specifies a function to be executed before bulk requests get committed
// to Elasticsearch.
func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService {
s.beforeFn = fn
return s
}
// After specifies a function to be executed when bulk requests have been
// comitted to Elasticsearch. The After callback executes both when the
// commit was successful as well as on failures.
func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService {
s.afterFn = fn
return s
}
// Name is an optional name to identify this bulk processor.
func (s *BulkProcessorService) Name(name string) *BulkProcessorService {
s.name = name
return s
}
// Workers is the number of concurrent workers allowed to be
// executed. Defaults to 1 and must be greater than or equal to 1.
func (s *BulkProcessorService) Workers(num int) *BulkProcessorService {
s.numWorkers = num
return s
}
// BulkActions specifies when to flush based on the number of actions
// currently added. Defaults to 1000 and can be set to -1 to be disabled.
func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService {
s.bulkActions = bulkActions
return s
}
// BulkSize specifies when to flush based on the size (in bytes) of the actions
// currently added. Defaults to 5 MB and can be set to -1 to be disabled.
func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService {
s.bulkSize = bulkSize
return s
}
// FlushInterval specifies when to flush at the end of the given interval.
// This is disabled by default. If you want the bulk processor to
// operate completely asynchronously, set both BulkActions and BulkSize to
// -1 and set the FlushInterval to a meaningful interval.
func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService {
s.flushInterval = interval
return s
}
// Stats tells the bulk processor to gather stats while running.
// Use the Stats func on BulkProcessor to retrieve them. This is disabled by default.
func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService {
s.wantStats = wantStats
return s
}
// Do creates a new BulkProcessor and starts it.
// Consider the BulkProcessor as a running instance that accepts bulk requests
// and commits them to Elasticsearch, spreading the work across one or more
// workers.
//
// You can interoperate with the BulkProcessor returned by Do, e.g. Start and
// Stop (or Close) it.
//
// Calling Do several times returns new BulkProcessors. You probably don't
// want to do this. BulkProcessorService implements just a builder pattern.
func (s *BulkProcessorService) Do() (*BulkProcessor, error) {
p := newBulkProcessor(
s.c,
s.beforeFn,
s.afterFn,
s.name,
s.numWorkers,
s.bulkActions,
s.bulkSize,
s.flushInterval,
s.wantStats,
s.initialTimeout,
s.maxTimeout)
err := p.Start()
if err != nil {
return nil, err
}
return p, nil
}
// -- Bulk Processor Statistics --
// BulkProcessorStats contains various statistics of a bulk processor
// while it is running. Use the Stats func to return it while running.
type BulkProcessorStats struct {
Flushed int64 // number of times the flush interval has been invoked
Committed int64 // # of times workers committed bulk requests
Indexed int64 // # of requests indexed
Created int64 // # of requests that ES reported as creates (201)
Updated int64 // # of requests that ES reported as updates
Deleted int64 // # of requests that ES reported as deletes
Succeeded int64 // # of requests that ES reported as successful
Failed int64 // # of requests that ES reported as failed
Workers []*BulkProcessorWorkerStats // stats for each worker
}
// BulkProcessorWorkerStats represents per-worker statistics.
type BulkProcessorWorkerStats struct {
Queued int64 // # of requests queued in this worker
LastDuration time.Duration // duration of last commit
}
// newBulkProcessorStats initializes and returns a BulkProcessorStats struct.
func newBulkProcessorStats(workers int) *BulkProcessorStats {
stats := &BulkProcessorStats{
Workers: make([]*BulkProcessorWorkerStats, workers),
}
for i := 0; i < workers; i++ {
stats.Workers[i] = &BulkProcessorWorkerStats{}
}
return stats
}
func (st *BulkProcessorStats) dup() *BulkProcessorStats {
dst := new(BulkProcessorStats)
dst.Flushed = st.Flushed
dst.Committed = st.Committed
dst.Indexed = st.Indexed
dst.Created = st.Created
dst.Updated = st.Updated
dst.Deleted = st.Deleted
dst.Succeeded = st.Succeeded
dst.Failed = st.Failed
for _, src := range st.Workers {
dst.Workers = append(dst.Workers, src.dup())
}
return dst
}
func (st *BulkProcessorWorkerStats) dup() *BulkProcessorWorkerStats {
dst := new(BulkProcessorWorkerStats)
dst.Queued = st.Queued
dst.LastDuration = st.LastDuration
return dst
}
// -- Bulk Processor --
// BulkProcessor encapsulates a task that accepts bulk requests and
// orchestrates committing them to Elasticsearch via one or more workers.
//
// BulkProcessor is returned by setting up a BulkProcessorService and
// calling the Do method.
type BulkProcessor struct {
c *Client
beforeFn BulkBeforeFunc
afterFn BulkAfterFunc
name string
bulkActions int
bulkSize int
numWorkers int
executionId int64
requestsC chan BulkableRequest
workerWg sync.WaitGroup
workers []*bulkWorker
flushInterval time.Duration
flusherStopC chan struct{}
wantStats bool
initialTimeout time.Duration // initial wait time before retry on errors
maxTimeout time.Duration // max time to wait for retry on errors
startedMu sync.Mutex // guards the following block
started bool
statsMu sync.Mutex // guards the following block
stats *BulkProcessorStats
}
func newBulkProcessor(
client *Client,
beforeFn BulkBeforeFunc,
afterFn BulkAfterFunc,
name string,
numWorkers int,
bulkActions int,
bulkSize int,
flushInterval time.Duration,
wantStats bool,
initialTimeout time.Duration,
maxTimeout time.Duration) *BulkProcessor {
return &BulkProcessor{
c: client,
beforeFn: beforeFn,
afterFn: afterFn,
name: name,
numWorkers: numWorkers,
bulkActions: bulkActions,
bulkSize: bulkSize,
flushInterval: flushInterval,
wantStats: wantStats,
initialTimeout: initialTimeout,
maxTimeout: maxTimeout,
}
}
// Start starts the bulk processor. If the processor is already started,
// nil is returned.
func (p *BulkProcessor) Start() error {
p.startedMu.Lock()
defer p.startedMu.Unlock()
if p.started {
return nil
}
// We must have at least one worker.
if p.numWorkers < 1 {
p.numWorkers = 1
}
p.requestsC = make(chan BulkableRequest)
p.executionId = 0
p.stats = newBulkProcessorStats(p.numWorkers)
// Create and start up workers.
p.workers = make([]*bulkWorker, p.numWorkers)
for i := 0; i < p.numWorkers; i++ {
p.workerWg.Add(1)
p.workers[i] = newBulkWorker(p, i)
go p.workers[i].work()
}
// Start the ticker for flush (if enabled)
if p.flushInterval > 0 {
p.flusherStopC = make(chan struct{})
go p.flusher(p.flushInterval)
}
p.started = true
return nil
}
// Stop is an alias for Close.
func (p *BulkProcessor) Stop() error {
return p.Close()
}
// Close stops the bulk processor previously started with Do.
// If it is already stopped, this is a no-op and nil is returned.
//
// By implementing Close, BulkProcessor implements the io.Closer interface.
func (p *BulkProcessor) Close() error {
p.startedMu.Lock()
defer p.startedMu.Unlock()
// Already stopped? Do nothing.
if !p.started {
return nil
}
// Stop flusher (if enabled)
if p.flusherStopC != nil {
p.flusherStopC <- struct{}{}
<-p.flusherStopC
close(p.flusherStopC)
p.flusherStopC = nil
}
// Stop all workers.
close(p.requestsC)
p.workerWg.Wait()
p.started = false
return nil
}
// Stats returns the latest bulk processor statistics.
// Collecting stats must be enabled first by calling Stats(true) on
// the service that created this processor.
func (p *BulkProcessor) Stats() BulkProcessorStats {
p.statsMu.Lock()
defer p.statsMu.Unlock()
return *p.stats.dup()
}
// Add adds a single request to be committed by the BulkProcessor.
//
// The caller is responsible for setting the index and type on the request.
func (p *BulkProcessor) Add(request BulkableRequest) {
p.requestsC <- request
}
// Flush manually asks all workers to commit their outstanding requests.
// It returns only when all workers acknowledge completion.
func (p *BulkProcessor) Flush() error {
p.statsMu.Lock()
p.stats.Flushed++
p.statsMu.Unlock()
for _, w := range p.workers {
w.flushC <- struct{}{}
<-w.flushAckC // wait for completion
}
return nil
}
// flusher is a single goroutine that periodically asks all workers to
// commit their outstanding bulk requests. It is only started if
// FlushInterval is greater than 0.
func (p *BulkProcessor) flusher(interval time.Duration) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C: // Periodic flush
p.Flush() // TODO swallow errors here?
case <-p.flusherStopC:
p.flusherStopC <- struct{}{}
return
}
}
}
// -- Bulk Worker --
// bulkWorker encapsulates a single worker, running in a goroutine,
// receiving bulk requests and eventually committing them to Elasticsearch.
// It is strongly bound to a BulkProcessor.
type bulkWorker struct {
p *BulkProcessor
i int
bulkActions int
bulkSize int
service *BulkService
flushC chan struct{}
flushAckC chan struct{}
}
// newBulkWorker creates a new bulkWorker instance.
func newBulkWorker(p *BulkProcessor, i int) *bulkWorker {
return &bulkWorker{
p: p,
i: i,
bulkActions: p.bulkActions,
bulkSize: p.bulkSize,
service: NewBulkService(p.c),
flushC: make(chan struct{}),
flushAckC: make(chan struct{}),
}
}
// work waits for bulk requests and manual flush calls on the respective
// channels and is invoked as a goroutine when the bulk processor is started.
func (w *bulkWorker) work() {
defer func() {
w.p.workerWg.Done()
close(w.flushAckC)
close(w.flushC)
}()
var stop bool
for !stop {
select {
case req, open := <-w.p.requestsC:
if open {
// Received a new request
w.service.Add(req)
if w.commitRequired() {
w.commit() // TODO swallow errors here?
}
} else {
// Channel closed: Stop.
stop = true
if w.service.NumberOfActions() > 0 {
w.commit() // TODO swallow errors here?
}
}
case <-w.flushC:
// Commit outstanding requests
if w.service.NumberOfActions() > 0 {
w.commit() // TODO swallow errors here?
}
w.flushAckC <- struct{}{}
}
}
}
// commit commits the bulk requests in the given service,
// invoking callbacks as specified.
func (w *bulkWorker) commit() error {
var res *BulkResponse
// commitFunc will commit bulk requests and, on failure, be retried
// via exponential backoff
commitFunc := func() error {
var err error
res, err = w.service.Do()
return err
}
// notifyFunc is called after each failed commit attempt, before the next retry
notifyFunc := func(err error, d time.Duration) {
w.p.c.errorf("elastic: bulk processor %q failed but will retry in %v: %v", w.p.name, d, err)
}
id := atomic.AddInt64(&w.p.executionId, 1)
// Update # documents in queue before eventual retries
w.p.statsMu.Lock()
if w.p.wantStats {
w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests))
}
w.p.statsMu.Unlock()
// Save requests because they will be reset in commitFunc
reqs := w.service.requests
// Invoke before callback
if w.p.beforeFn != nil {
w.p.beforeFn(id, reqs)
}
// Commit bulk requests
policy := backoff.NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout).SendStop(true)
err := backoff.RetryNotify(commitFunc, policy, notifyFunc)
w.updateStats(res)
if err != nil {
w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err)
}
// Invoke after callback
if w.p.afterFn != nil {
w.p.afterFn(id, reqs, res, err)
}
return err
}
func (w *bulkWorker) updateStats(res *BulkResponse) {
// Update stats; res is nil if all retries failed.
if res != nil {
w.p.statsMu.Lock()
if w.p.wantStats {
w.p.stats.Committed++
w.p.stats.Indexed += int64(len(res.Indexed()))
w.p.stats.Created += int64(len(res.Created()))
w.p.stats.Updated += int64(len(res.Updated()))
w.p.stats.Deleted += int64(len(res.Deleted()))
w.p.stats.Succeeded += int64(len(res.Succeeded()))
w.p.stats.Failed += int64(len(res.Failed()))
w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests))
w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond
}
w.p.statsMu.Unlock()
}
}
// commitRequired returns true if the service has to commit its
// bulk requests. This can be either because the number of actions
// or the estimated size in bytes is larger than specified in the
// BulkProcessorService.
func (w *bulkWorker) commitRequired() bool {
if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions {
return true
}
if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) {
return true
}
return false
}
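Taken together, the service and processor above support a simple producer pattern. A minimal usage sketch (an editor's addition, not part of this vendored file): it assumes a reachable node on localhost and the upstream library's BulkProcessor() builder on *elastic.Client; the "orders" index, type, and document are illustrative.

package main

import (
	"log"
	"time"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}
	// Flush after 1000 actions, 5 MB of payload, or 30 seconds,
	// whichever comes first; two workers commit in parallel.
	p, err := client.BulkProcessor().
		Name("indexer-1").
		Workers(2).
		BulkActions(1000).
		BulkSize(5 << 20).
		FlushInterval(30 * time.Second).
		Stats(true).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close() // commits outstanding requests and stops the workers

	// The caller sets index and type on each request.
	p.Add(elastic.NewBulkIndexRequest().
		Index("orders").
		Type("order").
		Id("1").
		Doc(map[string]interface{}{"state": "shipped"}))

	p.Flush() // force an immediate commit
	stats := p.Stats()
	log.Printf("committed=%d indexed=%d failed=%d", stats.Committed, stats.Indexed, stats.Failed)
}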

vendor/gopkg.in/olivere/elastic.v3/bulk_request.go generated vendored Normal file

@ -0,0 +1,17 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
)
// -- Bulkable request (index/update/delete) --
// BulkableRequest is a generic interface to bulkable requests.
type BulkableRequest interface {
fmt.Stringer
Source() ([]string, error)
}
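Any type with these two methods can be handed to the bulk machinery. As a sketch (the type below is hypothetical, not part of the library), a pass-through request that ships pre-rendered action and source lines could look like:

package main

import (
	"fmt"
	"strings"

	elastic "gopkg.in/olivere/elastic.v3"
)

// rawBulkRequest is a hypothetical BulkableRequest that returns
// pre-rendered action and source lines unchanged.
type rawBulkRequest struct {
	lines []string
}

func (r rawBulkRequest) String() string            { return strings.Join(r.lines, "\n") }
func (r rawBulkRequest) Source() ([]string, error) { return r.lines, nil }

// Compile-time check that the interface is satisfied.
var _ elastic.BulkableRequest = rawBulkRequest{}

func main() {
	req := rawBulkRequest{lines: []string{
		`{"index":{"_index":"test","_type":"doc","_id":"1"}}`,
		`{"field1":"value1"}`,
	}}
	fmt.Println(req)
}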

vendor/gopkg.in/olivere/elastic.v3/bulk_update_request.go generated vendored Normal file

@ -0,0 +1,280 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"encoding/json"
"fmt"
"strings"
)
// Bulk request to update a document in Elasticsearch.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
// for details.
type BulkUpdateRequest struct {
BulkableRequest
index string
typ string
id string
routing string
parent string
script *Script
version int64 // default is MATCH_ANY
versionType string // default is "internal"
retryOnConflict *int
refresh *bool
upsert interface{}
docAsUpsert *bool
doc interface{}
ttl int64
timestamp string
source []string
}
// NewBulkUpdateRequest returns a new BulkUpdateRequest.
func NewBulkUpdateRequest() *BulkUpdateRequest {
return &BulkUpdateRequest{}
}
// Index specifies the Elasticsearch index to use for this update request.
// If unspecified, the index set on the BulkService will be used.
func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
r.index = index
r.source = nil
return r
}
// Type specifies the Elasticsearch type to use for this update request.
// If unspecified, the type set on the BulkService will be used.
func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
r.typ = typ
r.source = nil
return r
}
// Id specifies the identifier of the document to update.
func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
r.id = id
r.source = nil
return r
}
// Routing specifies a routing value for the request.
func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
r.routing = routing
r.source = nil
return r
}
// Parent specifies the identifier of the parent document (if available).
func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
r.parent = parent
r.source = nil
return r
}
// Script specifies an update script.
// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-bulk.html#bulk-update
// and https://www.elastic.co/guide/en/elasticsearch/reference/2.x/modules-scripting.html
// for details.
func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest {
r.script = script
r.source = nil
return r
}
// RetryOnConflict specifies how often to retry in case of a version conflict.
func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
r.retryOnConflict = &retryOnConflict
r.source = nil
return r
}
// Version indicates the version of the document as part of an optimistic
// concurrency model.
func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
r.version = version
r.source = nil
return r
}
// VersionType can be "internal" (default), "external", "external_gte",
// "external_gt", or "force".
func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
r.versionType = versionType
r.source = nil
return r
}
// Refresh indicates whether to update the shards immediately after
// the request has been processed. Updated documents will appear
// in search immediately at the cost of slower bulk performance.
func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest {
r.refresh = &refresh
r.source = nil
return r
}
// Doc specifies the updated document.
func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
r.doc = doc
r.source = nil
return r
}
// DocAsUpsert indicates whether the contents of Doc should be used as
// the Upsert value.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-update.html#_literal_doc_as_upsert_literal
// for details.
func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
r.docAsUpsert = &docAsUpsert
r.source = nil
return r
}
// Upsert specifies the document to use for upserts. It will be used for
// create if the original document does not exist.
func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
r.upsert = doc
r.source = nil
return r
}
// Ttl specifies the time-to-live for the document.
// This is deprecated as of 2.0.0-beta2.
func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest {
r.ttl = ttl
r.source = nil
return r
}
// Timestamp specifies a timestamp for the document.
// This is deprecated as of 2.0.0-beta2.
func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest {
r.timestamp = timestamp
r.source = nil
return r
}
// String returns the on-wire representation of the update request,
// concatenated as a single string.
func (r *BulkUpdateRequest) String() string {
lines, err := r.Source()
if err != nil {
return fmt.Sprintf("error: %v", err)
}
return strings.Join(lines, "\n")
}
func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) {
switch t := data.(type) {
default:
body, err := json.Marshal(data)
if err != nil {
return "", err
}
return string(body), nil
case json.RawMessage:
return string(t), nil
case *json.RawMessage:
return string(*t), nil
case string:
return t, nil
case *string:
return *t, nil
}
}
// Source returns the on-wire representation of the update request,
// split into an action-and-meta-data line and an (optional) source line.
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
// for details.
func (r *BulkUpdateRequest) Source() ([]string, error) {
// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
// { "doc" : { "field1" : "value1", ... } }
// or
// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
// { "script" : { ... } }
if r.source != nil {
return r.source, nil
}
lines := make([]string, 2)
// "update" ...
command := make(map[string]interface{})
updateCommand := make(map[string]interface{})
if r.index != "" {
updateCommand["_index"] = r.index
}
if r.typ != "" {
updateCommand["_type"] = r.typ
}
if r.id != "" {
updateCommand["_id"] = r.id
}
if r.routing != "" {
updateCommand["_routing"] = r.routing
}
if r.parent != "" {
updateCommand["_parent"] = r.parent
}
if r.timestamp != "" {
updateCommand["_timestamp"] = r.timestamp
}
if r.ttl > 0 {
updateCommand["_ttl"] = r.ttl
}
if r.version > 0 {
updateCommand["_version"] = r.version
}
if r.versionType != "" {
updateCommand["_version_type"] = r.versionType
}
if r.refresh != nil {
updateCommand["refresh"] = *r.refresh
}
if r.retryOnConflict != nil {
updateCommand["_retry_on_conflict"] = *r.retryOnConflict
}
command["update"] = updateCommand
line, err := json.Marshal(command)
if err != nil {
return nil, err
}
lines[0] = string(line)
// 2nd line: {"doc" : { ... }} or {"script": {...}}
source := make(map[string]interface{})
if r.docAsUpsert != nil {
source["doc_as_upsert"] = *r.docAsUpsert
}
if r.upsert != nil {
source["upsert"] = r.upsert
}
if r.doc != nil {
// {"doc":{...}}
source["doc"] = r.doc
} else if r.script != nil {
// {"script":...}
src, err := r.script.Source()
if err != nil {
return nil, err
}
source["script"] = src
}
lines[1], err = r.getSourceAsString(source)
if err != nil {
return nil, err
}
r.source = lines
return lines, nil
}
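For illustration, an editor's sketch of the two NDJSON lines a doc-as-upsert update renders to; json.Marshal sorts map keys, which fixes the key order shown in the output comment.

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	req := elastic.NewBulkUpdateRequest().
		Index("test").Type("type1").Id("1").
		DocAsUpsert(true).
		Doc(map[string]interface{}{"field1": "value1"})
	lines, err := req.Source()
	if err != nil {
		log.Fatal(err)
	}
	for _, line := range lines {
		fmt.Println(line)
	}
	// Output:
	// {"update":{"_id":"1","_index":"test","_type":"type1"}}
	// {"doc":{"field1":"value1"},"doc_as_upsert":true}
}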

vendor/gopkg.in/olivere/elastic.v3/canonicalize.go generated vendored Normal file

@ -0,0 +1,38 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import "net/url"
// canonicalize takes a list of URLs and returns their canonicalized form, i.e.
// it removes anything but scheme, userinfo, host, path, and port.
// It also removes all trailing slashes and skips invalid URLs or
// URLs that do not use protocol http or https.
//
// Example:
// http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200
// http://127.0.0.1:9200/db1/ -> http://127.0.0.1:9200/db1
// 127.0.0.1:9200 -> http://127.0.0.1:9200
func canonicalize(rawurls ...string) []string {
var canonicalized []string
for _, rawurl := range rawurls {
u, err := url.Parse(rawurl)
if err == nil {
if len(u.Scheme) == 0 {
u.Scheme = DefaultScheme
}
if u.Scheme == "http" || u.Scheme == "https" {
// Trim trailing slashes
for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' {
u.Path = u.Path[0 : len(u.Path)-1]
}
u.Fragment = ""
u.RawQuery = ""
canonicalized = append(canonicalized, u.String())
}
}
}
return canonicalized
}
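A quick sketch of the behavior above (editor's addition); since canonicalize is unexported, such a check would have to live in an in-package test.

package elastic

import (
	"reflect"
	"testing"
)

func TestCanonicalizeSketch(t *testing.T) {
	got := canonicalize(
		"http://127.0.0.1:9200/?query=1", // query string stripped
		"http://127.0.0.1:9200/db1/",     // trailing slash trimmed
		"ftp://example.com",              // skipped: neither http nor https
	)
	want := []string{
		"http://127.0.0.1:9200",
		"http://127.0.0.1:9200/db1",
	}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}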

vendor/gopkg.in/olivere/elastic.v3/clear_scroll.go generated vendored Normal file

@ -0,0 +1,101 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"strings"
)
// ClearScrollService clears one or more scroll contexts by their ids.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api
// for details.
type ClearScrollService struct {
client *Client
pretty bool
scrollId []string
}
// NewClearScrollService creates a new ClearScrollService.
func NewClearScrollService(client *Client) *ClearScrollService {
return &ClearScrollService{
client: client,
scrollId: make([]string, 0),
}
}
// ScrollId is a list of scroll IDs to clear.
// Use _all to clear all search contexts.
func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService {
s.scrollId = append(s.scrollId, scrollIds...)
return s
}
// Pretty indicates that the JSON response be indented and human readable.
func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *ClearScrollService) buildURL() (string, url.Values, error) {
// Build URL
path := "/_search/scroll/"
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *ClearScrollService) Validate() error {
var invalid []string
if len(s.scrollId) == 0 {
invalid = append(invalid, "ScrollId")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *ClearScrollService) Do() (*ClearScrollResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Setup HTTP request body
body := strings.Join(s.scrollId, ",")
// Get HTTP response
res, err := s.client.PerformRequest("DELETE", path, params, body)
if err != nil {
return nil, err
}
// Return operation response
ret := new(ClearScrollResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// ClearScrollResponse is the response of ClearScrollService.Do.
type ClearScrollResponse struct {
}
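A usage sketch (editor's addition): after a scroll search is exhausted, clearing its context releases server-side resources early. The scroll ID below is hypothetical; "_all" would clear every open context.

package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	// scrollID would normally come from a previous scroll request.
	scrollID := "c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1"
	if _, err := elastic.NewClearScrollService(client).ScrollId(scrollID).Do(); err != nil {
		log.Fatal(err)
	}
}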

vendor/gopkg.in/olivere/elastic.v3/client.go generated vendored Normal file
File diff suppressed because it is too large

vendor/gopkg.in/olivere/elastic.v3/cluster_health.go generated vendored Normal file

@ -0,0 +1,243 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// ClusterHealthService allows to get a very simple status on the health of the cluster.
//
// See http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
// for details.
type ClusterHealthService struct {
client *Client
pretty bool
indices []string
level string
local *bool
masterTimeout string
timeout string
waitForActiveShards *int
waitForNodes string
waitForRelocatingShards *int
waitForStatus string
}
// NewClusterHealthService creates a new ClusterHealthService.
func NewClusterHealthService(client *Client) *ClusterHealthService {
return &ClusterHealthService{
client: client,
indices: make([]string, 0),
}
}
// Index limits the information returned to specific indices.
func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService {
s.indices = append(s.indices, indices...)
return s
}
// Level specifies the level of detail for returned information.
func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
s.level = level
return s
}
// Local indicates whether to return local information. If it is true,
// we do not retrieve the state from the master node (default: false).
func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
s.local = &local
return s
}
// MasterTimeout specifies an explicit operation timeout for connection to master node.
func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
s.masterTimeout = masterTimeout
return s
}
// Timeout specifies an explicit operation timeout.
func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
s.timeout = timeout
return s
}
// WaitForActiveShards can be used to wait until the specified number of shards are active.
func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
s.waitForActiveShards = &waitForActiveShards
return s
}
// WaitForNodes can be used to wait until the specified number of nodes are available.
// Example: "12" to wait for exact values, ">12" and "<12" for ranges.
func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
s.waitForNodes = waitForNodes
return s
}
// WaitForRelocatingShards can be used to wait until the specified number of relocating shards is finished.
func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
s.waitForRelocatingShards = &waitForRelocatingShards
return s
}
// WaitForStatus can be used to wait until the cluster is in a specific state.
// Valid values are: green, yellow, or red.
func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
s.waitForStatus = waitForStatus
return s
}
// WaitForGreenStatus will wait for the "green" state.
func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService {
return s.WaitForStatus("green")
}
// WaitForYellowStatus will wait for the "yellow" state.
func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService {
return s.WaitForStatus("yellow")
}
// Pretty indicates that the JSON response be indented and human readable.
func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
// Build URL
var err error
var path string
if len(s.indices) > 0 {
path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{
"index": strings.Join(s.indices, ","),
})
} else {
path = "/_cluster/health"
}
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.level != "" {
params.Set("level", s.level)
}
if s.local != nil {
params.Set("local", fmt.Sprintf("%v", *s.local))
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
}
if s.waitForActiveShards != nil {
params.Set("wait_for_active_shards", fmt.Sprintf("%v", s.waitForActiveShards))
}
if s.waitForNodes != "" {
params.Set("wait_for_nodes", s.waitForNodes)
}
if s.waitForRelocatingShards != nil {
params.Set("wait_for_relocating_shards", fmt.Sprintf("%v", s.waitForRelocatingShards))
}
if s.waitForStatus != "" {
params.Set("wait_for_status", s.waitForStatus)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *ClusterHealthService) Validate() error {
return nil
}
// Do executes the operation.
func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("GET", path, params, nil)
if err != nil {
return nil, err
}
// Return operation response
ret := new(ClusterHealthResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// ClusterHealthResponse is the response of ClusterHealthService.Do.
type ClusterHealthResponse struct {
ClusterName string `json:"cluster_name"`
Status string `json:"status"`
TimedOut bool `json:"timed_out"`
NumberOfNodes int `json:"number_of_nodes"`
NumberOfDataNodes int `json:"number_of_data_nodes"`
ActivePrimaryShards int `json:"active_primary_shards"`
ActiveShards int `json:"active_shards"`
RelocatingShards int `json:"relocating_shards"`
InitializingShards int `json:"initializing_shards"`
UnassignedShards int `json:"unassigned_shards"`
DelayedUnassignedShards int `json:"delayed_unassigned_shards"`
NumberOfPendingTasks int `json:"number_of_pending_tasks"`
NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"`
TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"`
ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"`
// Validation failures -> index name -> array of validation failures
ValidationFailures []map[string][]string `json:"validation_failures"`
// Index name -> index health
Indices map[string]*ClusterIndexHealth `json:"indices"`
}
// ClusterIndexHealth will be returned as part of ClusterHealthResponse.
type ClusterIndexHealth struct {
Status string `json:"status"`
NumberOfShards int `json:"number_of_shards"`
NumberOfReplicas int `json:"number_of_replicas"`
ActivePrimaryShards int `json:"active_primary_shards"`
ActiveShards int `json:"active_shards"`
RelocatingShards int `json:"relocating_shards"`
InitializingShards int `json:"initializing_shards"`
UnassignedShards int `json:"unassigned_shards"`
// Validation failures
ValidationFailures []string `json:"validation_failures"`
// Shards by id, e.g. "0" or "1"
Shards map[string]*ClusterShardHealth `json:"shards"`
}
// ClusterShardHealth will be returned as part of ClusterHealthResponse.
type ClusterShardHealth struct {
Status string `json:"status"`
PrimaryActive bool `json:"primary_active"`
ActiveShards int `json:"active_shards"`
RelocatingShards int `json:"relocating_shards"`
InitializingShards int `json:"initializing_shards"`
UnassignedShards int `json:"unassigned_shards"`
}
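A sketch of the common wait-for-health pattern (editor's addition; the index name is illustrative):

package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	// Block for up to 30s until the "orders" index reaches at least yellow.
	res, err := elastic.NewClusterHealthService(client).
		Index("orders").
		WaitForYellowStatus().
		Timeout("30s").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("cluster %q is %s (timed out: %v)", res.ClusterName, res.Status, res.TimedOut)
}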

vendor/gopkg.in/olivere/elastic.v3/cluster_state.go generated vendored Normal file

@ -0,0 +1,283 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// ClusterStateService allows getting comprehensive state information of the whole cluster.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html
// for details.
type ClusterStateService struct {
client *Client
pretty bool
indices []string
metrics []string
allowNoIndices *bool
expandWildcards string
flatSettings *bool
ignoreUnavailable *bool
local *bool
masterTimeout string
}
// NewClusterStateService creates a new ClusterStateService.
func NewClusterStateService(client *Client) *ClusterStateService {
return &ClusterStateService{
client: client,
indices: make([]string, 0),
metrics: make([]string, 0),
}
}
// Index is a list of index names. Use _all or an empty string to
// perform the operation on all indices.
func (s *ClusterStateService) Index(indices ...string) *ClusterStateService {
s.indices = append(s.indices, indices...)
return s
}
// Metric limits the information returned to the specified metric.
// It can be one of: version, master_node, nodes, routing_table, metadata,
// blocks, or customs.
func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService {
s.metrics = append(s.metrics, metrics...)
return s
}
// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// (This includes `_all` string or when no indices have been specified).
func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService {
s.allowNoIndices = &allowNoIndices
return s
}
// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed, or both.
func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService {
s.expandWildcards = expandWildcards
return s
}
// FlatSettings, when set, returns settings in flat format (default: false).
func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
s.flatSettings = &flatSettings
return s
}
// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService {
s.ignoreUnavailable = &ignoreUnavailable
return s
}
// Local indicates whether to return local information. When set, it does not
// retrieve the state from the master node (default: false).
func (s *ClusterStateService) Local(local bool) *ClusterStateService {
s.local = &local
return s
}
// MasterTimeout specifies timeout for connection to master.
func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
s.masterTimeout = masterTimeout
return s
}
// Pretty indicates that the JSON response be indented and human readable.
func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *ClusterStateService) buildURL() (string, url.Values, error) {
// Build URL
metrics := strings.Join(s.metrics, ",")
if metrics == "" {
metrics = "_all"
}
indices := strings.Join(s.indices, ",")
if indices == "" {
indices = "_all"
}
path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
"metrics": metrics,
"indices": indices,
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
}
if s.expandWildcards != "" {
params.Set("expand_wildcards", s.expandWildcards)
}
if s.flatSettings != nil {
params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
}
if s.local != nil {
params.Set("local", fmt.Sprintf("%v", *s.local))
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *ClusterStateService) Validate() error {
return nil
}
// Do executes the operation.
func (s *ClusterStateService) Do() (*ClusterStateResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("GET", path, params, nil)
if err != nil {
return nil, err
}
// Return operation response
ret := new(ClusterStateResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// ClusterStateResponse is the response of ClusterStateService.Do.
type ClusterStateResponse struct {
ClusterName string `json:"cluster_name"`
Version int64 `json:"version"`
StateUUID string `json:"state_uuid"`
MasterNode string `json:"master_node"`
Blocks map[string]*clusterBlocks `json:"blocks"`
Nodes map[string]*discoveryNode `json:"nodes"`
Metadata *clusterStateMetadata `json:"metadata"`
RoutingTable map[string]*clusterStateRoutingTable `json:"routing_table"`
RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"`
Customs map[string]interface{} `json:"customs"`
}
type clusterBlocks struct {
Global map[string]*clusterBlock `json:"global"` // id -> cluster block
Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block
}
type clusterBlock struct {
Description string `json:"description"`
Retryable bool `json:"retryable"`
DisableStatePersistence bool `json:"disable_state_persistence"`
Levels []string `json:"levels"`
}
type clusterStateMetadata struct {
ClusterUUID string `json:"cluster_uuid"`
Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata
Indices map[string]*indexMetaData `json:"indices"` // index name -> meta data
RoutingTable struct {
Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table
} `json:"routing_table"`
RoutingNodes struct {
Unassigned []*shardRouting `json:"unassigned"`
Nodes []*shardRouting `json:"nodes"`
} `json:"routing_nodes"`
Customs map[string]interface{} `json:"customs"`
}
type discoveryNode struct {
Name string `json:"name"` // server name, e.g. "es1"
TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300]
Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true }
}
type clusterStateRoutingTable struct {
Indices map[string]interface{} `json:"indices"`
}
type clusterStateRoutingNode struct {
Unassigned []*shardRouting `json:"unassigned"`
// Node Id -> shardRouting
Nodes map[string][]*shardRouting `json:"nodes"`
}
type indexTemplateMetaData struct {
Template string `json:"template"` // e.g. "store-*"
Order int `json:"order"`
Settings map[string]interface{} `json:"settings"` // index settings
Mappings map[string]interface{} `json:"mappings"` // type name -> mapping
}
type indexMetaData struct {
State string `json:"state"`
Settings map[string]interface{} `json:"settings"`
Mappings map[string]interface{} `json:"mappings"`
Aliases []string `json:"aliases"` // e.g. [ "alias1", "alias2" ]
}
type indexRoutingTable struct {
Shards map[string]*shardRouting `json:"shards"`
}
type shardRouting struct {
State string `json:"state"`
Primary bool `json:"primary"`
Node string `json:"node"`
RelocatingNode string `json:"relocating_node"`
Shard int `json:"shard"`
Index string `json:"index"`
Version int64 `json:"version"`
RestoreSource *RestoreSource `json:"restore_source"`
AllocationId *allocationId `json:"allocation_id"`
UnassignedInfo *unassignedInfo `json:"unassigned_info"`
}
type RestoreSource struct {
Repository string `json:"repository"`
Snapshot string `json:"snapshot"`
Version string `json:"version"`
Index string `json:"index"`
}
type allocationId struct {
Id string `json:"id"`
RelocationId string `json:"relocation_id"`
}
type unassignedInfo struct {
Reason string `json:"reason"`
At string `json:"at"`
Details string `json:"details"`
}
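A sketch restricting the returned state to two metrics for a single index (editor's addition; names are illustrative):

package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	res, err := elastic.NewClusterStateService(client).
		Metric("metadata", "routing_table").
		Index("orders").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("cluster %s, state version %d, master node %s",
		res.ClusterName, res.Version, res.MasterNode)
}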

vendor/gopkg.in/olivere/elastic.v3/cluster_stats.go generated vendored Normal file

@ -0,0 +1,348 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// ClusterStatsService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-stats.html.
type ClusterStatsService struct {
client *Client
pretty bool
nodeId []string
flatSettings *bool
human *bool
}
// NewClusterStatsService creates a new ClusterStatsService.
func NewClusterStatsService(client *Client) *ClusterStatsService {
return &ClusterStatsService{
client: client,
nodeId: make([]string, 0),
}
}
// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService {
s.nodeId = nodeId
return s
}
// FlatSettings is documented as: Return settings in flat format (default: false).
func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService {
s.flatSettings = &flatSettings
return s
}
// Human is documented as: Whether to return time and byte values in human-readable format.
func (s *ClusterStatsService) Human(human bool) *ClusterStatsService {
s.human = &human
return s
}
// Pretty indicates that the JSON response be indented and human readable.
func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *ClusterStatsService) buildURL() (string, url.Values, error) {
// Build URL
var err error
var path string
if len(s.nodeId) > 0 {
path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{
"node_id": strings.Join(s.nodeId, ","),
})
if err != nil {
return "", url.Values{}, err
}
} else {
path, err = uritemplates.Expand("/_cluster/stats", map[string]string{})
if err != nil {
return "", url.Values{}, err
}
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.flatSettings != nil {
params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
}
if s.human != nil {
params.Set("human", fmt.Sprintf("%v", *s.human))
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *ClusterStatsService) Validate() error {
return nil
}
// Do executes the operation.
func (s *ClusterStatsService) Do() (*ClusterStatsResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("GET", path, params, nil)
if err != nil {
return nil, err
}
// Return operation response
ret := new(ClusterStatsResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// ClusterStatsResponse is the response of ClusterStatsService.Do.
type ClusterStatsResponse struct {
Timestamp int64 `json:"timestamp"`
ClusterName string `json:"cluster_name"`
ClusterUUID string `json:"uuid"`
Status string `json:"status"`
Indices *ClusterStatsIndices `json:"indices"`
Nodes *ClusterStatsNodes `json:"nodes"`
}
type ClusterStatsIndices struct {
Count int `json:"count"`
Shards *ClusterStatsIndicesShards `json:"shards"`
Docs *ClusterStatsIndicesDocs `json:"docs"`
Store *ClusterStatsIndicesStore `json:"store"`
FieldData *ClusterStatsIndicesFieldData `json:"fielddata"`
FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"`
IdCache *ClusterStatsIndicesIdCache `json:"id_cache"`
Completion *ClusterStatsIndicesCompletion `json:"completion"`
Segments *ClusterStatsIndicesSegments `json:"segments"`
Percolate *ClusterStatsIndicesPercolate `json:"percolate"`
}
type ClusterStatsIndicesShards struct {
Total int `json:"total"`
Primaries int `json:"primaries"`
Replication float64 `json:"replication"`
Index *ClusterStatsIndicesShardsIndex `json:"index"`
}
type ClusterStatsIndicesShardsIndex struct {
Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"`
Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"`
Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"`
}
type ClusterStatsIndicesShardsIndexIntMinMax struct {
Min int `json:"min"`
Max int `json:"max"`
Avg float64 `json:"avg"`
}
type ClusterStatsIndicesShardsIndexFloat64MinMax struct {
Min float64 `json:"min"`
Max float64 `json:"max"`
Avg float64 `json:"avg"`
}
type ClusterStatsIndicesDocs struct {
Count int `json:"count"`
Deleted int `json:"deleted"`
}
type ClusterStatsIndicesStore struct {
Size string `json:"size"` // e.g. "5.3gb"
SizeInBytes int64 `json:"size_in_bytes"`
ThrottleTime string `json:"throttle_time"` // e.g. "0s"
ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`
}
type ClusterStatsIndicesFieldData struct {
MemorySize string `json:"memory_size"` // e.g. "61.3kb"
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
Evictions int64 `json:"evictions"`
Fields map[string]struct {
MemorySize string `json:"memory_size"` // e.g. "61.3kb"
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
} `json:"fields"`
}
type ClusterStatsIndicesFilterCache struct {
MemorySize string `json:"memory_size"` // e.g. "61.3kb"
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
Evictions int64 `json:"evictions"`
}
type ClusterStatsIndicesIdCache struct {
MemorySize string `json:"memory_size"` // e.g. "61.3kb"
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
}
type ClusterStatsIndicesCompletion struct {
Size string `json:"size"` // e.g. "61.3kb"
SizeInBytes int64 `json:"size_in_bytes"`
Fields map[string]struct {
Size string `json:"size"` // e.g. "61.3kb"
SizeInBytes int64 `json:"size_in_bytes"`
} `json:"fields"`
}
type ClusterStatsIndicesSegments struct {
Count int64 `json:"count"`
Memory string `json:"memory"` // e.g. "61.3kb"
MemoryInBytes int64 `json:"memory_in_bytes"`
IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb"
IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"`
IndexWriterMaxMemory string `json:"index_writer_max_memory"` // e.g. "61.3kb"
IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"`
VersionMapMemory string `json:"version_map_memory"` // e.g. "61.3kb"
VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"`
FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb"
FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"`
}
type ClusterStatsIndicesPercolate struct {
Total int64 `json:"total"`
// TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems
Time string `json:"get_time"` // e.g. "1s"
TimeInMillis int64 `json:"time_in_millis"`
Current int64 `json:"current"`
MemorySize string `json:"memory_size"` // e.g. "61.3kb"
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
Queries int64 `json:"queries"`
}
// ---
type ClusterStatsNodes struct {
Count *ClusterStatsNodesCount `json:"count"`
Versions []string `json:"versions"`
OS *ClusterStatsNodesOsStats `json:"os"`
Process *ClusterStatsNodesProcessStats `json:"process"`
JVM *ClusterStatsNodesJvmStats `json:"jvm"`
FS *ClusterStatsNodesFsStats `json:"fs"`
Plugins []*ClusterStatsNodesPlugin `json:"plugins"`
}
type ClusterStatsNodesCount struct {
Total int `json:"total"`
MasterOnly int `json:"master_only"`
DataOnly int `json:"data_only"`
MasterData int `json:"master_data"`
Client int `json:"client"`
}
type ClusterStatsNodesOsStats struct {
AvailableProcessors int `json:"available_processors"`
Mem *ClusterStatsNodesOsStatsMem `json:"mem"`
CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"`
}
type ClusterStatsNodesOsStatsMem struct {
Total string `json:"total"` // e.g. "16gb"
TotalInBytes int64 `json:"total_in_bytes"`
}
type ClusterStatsNodesOsStatsCPU struct {
Vendor string `json:"vendor"`
Model string `json:"model"`
MHz int `json:"mhz"`
TotalCores int `json:"total_cores"`
TotalSockets int `json:"total_sockets"`
CoresPerSocket int `json:"cores_per_socket"`
CacheSize string `json:"cache_size"` // e.g. "256b"
CacheSizeInBytes int64 `json:"cache_size_in_bytes"`
Count int `json:"count"`
}
type ClusterStatsNodesProcessStats struct {
CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"`
OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"`
}
type ClusterStatsNodesProcessStatsCPU struct {
Percent float64 `json:"percent"`
}
type ClusterStatsNodesProcessStatsOpenFileDescriptors struct {
Min int64 `json:"min"`
Max int64 `json:"max"`
Avg int64 `json:"avg"`
}
type ClusterStatsNodesJvmStats struct {
MaxUptime string `json:"max_uptime"` // e.g. "5h"
MaxUptimeInMillis int64 `json:"max_uptime_in_millis"`
Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"`
Mem *ClusterStatsNodesJvmStatsMem `json:"mem"`
Threads int64 `json:"threads"`
}
type ClusterStatsNodesJvmStatsVersion struct {
Version string `json:"version"` // e.g. "1.8.0_45"
VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM"
VMVersion string `json:"vm_version"` // e.g. "25.45-b02"
VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation"
Count int `json:"count"`
}
type ClusterStatsNodesJvmStatsMem struct {
HeapUsed string `json:"heap_used"`
HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
HeapMax string `json:"heap_max"`
HeapMaxInBytes int64 `json:"heap_max_in_bytes"`
}
type ClusterStatsNodesFsStats struct {
Path string `json:"path"`
Mount string `json:"mount"`
Dev string `json:"dev"`
Total string `json:"total"` // e.g. "930.7gb"
TotalInBytes int64 `json:"total_in_bytes"`
Free string `json:"free"` // e.g. "930.7gb"
FreeInBytes int64 `json:"free_in_bytes"`
Available string `json:"available"` // e.g. "930.7gb"
AvailableInBytes int64 `json:"available_in_bytes"`
DiskReads int64 `json:"disk_reads"`
DiskWrites int64 `json:"disk_writes"`
DiskIOOp int64 `json:"disk_io_op"`
DiskReadSize string `json:"disk_read_size"` // e.g. "0b"
DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"`
DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"
DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"`
DiskIOSize string `json:"disk_io_size"` // e.g. "0b"
DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"`
DiskQueue string `json:"disk_queue"`
DiskServiceTime string `json:"disk_service_time"`
}
type ClusterStatsNodesPlugin struct {
Name string `json:"name"`
Version string `json:"version"`
Description string `json:"description"`
URL string `json:"url"`
JVM bool `json:"jvm"`
Site bool `json:"site"`
}
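A sketch of pulling cluster-wide statistics with human-readable sizes (editor's addition):

package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	res, err := elastic.NewClusterStatsService(client).Human(true).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s: status=%s indices=%d nodes=%d",
		res.ClusterName, res.Status, res.Indices.Count, res.Nodes.Count.Total)
}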

vendor/gopkg.in/olivere/elastic.v3/connection.go generated vendored Normal file

@ -0,0 +1,90 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"sync"
"time"
)
// conn represents a single connection to a node in a cluster.
type conn struct {
sync.RWMutex
nodeID string // node ID
url string
failures int
dead bool
deadSince *time.Time
}
// newConn creates a new connection to the given URL.
func newConn(nodeID, url string) *conn {
c := &conn{
nodeID: nodeID,
url: url,
}
return c
}
// String returns a representation of the connection status.
func (c *conn) String() string {
c.RLock()
defer c.RUnlock()
return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince)
}
// NodeID returns the ID of the node of this connection.
func (c *conn) NodeID() string {
c.RLock()
defer c.RUnlock()
return c.nodeID
}
// URL returns the URL of this connection.
func (c *conn) URL() string {
c.RLock()
defer c.RUnlock()
return c.url
}
// IsDead returns true if this connection is marked as dead, i.e. a previous
// request to the URL has been unsuccessful.
func (c *conn) IsDead() bool {
c.RLock()
defer c.RUnlock()
return c.dead
}
// MarkAsDead marks this connection as dead, increments the failures
// counter and stores the current time in dead since.
func (c *conn) MarkAsDead() {
c.Lock()
c.dead = true
if c.deadSince == nil {
utcNow := time.Now().UTC()
c.deadSince = &utcNow
}
c.failures++
c.Unlock()
}
// MarkAsAlive marks this connection as eligible to be returned from the
// pool of connections by the selector.
func (c *conn) MarkAsAlive() {
c.Lock()
c.dead = false
c.Unlock()
}
// MarkAsHealthy marks this connection as healthy, i.e. a request has been
// successfully performed with it.
func (c *conn) MarkAsHealthy() {
c.Lock()
c.dead = false
c.deadSince = nil
c.failures = 0
c.Unlock()
}

vendor/gopkg.in/olivere/elastic.v3/count.go generated vendored Normal file

@ -0,0 +1,309 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// CountService is a convenient service for determining the
// number of documents in an index. Use SearchService with
// a SearchType of count for counting with queries etc.
type CountService struct {
client *Client
pretty bool
index []string
typ []string
allowNoIndices *bool
analyzeWildcard *bool
analyzer string
defaultOperator string
df string
expandWildcards string
ignoreUnavailable *bool
lenient *bool
lowercaseExpandedTerms *bool
minScore interface{}
preference string
q string
query Query
routing string
bodyJson interface{}
bodyString string
}
// NewCountService creates a new CountService.
func NewCountService(client *Client) *CountService {
return &CountService{
client: client,
}
}
// Index sets the names of the indices to restrict the results.
func (s *CountService) Index(index ...string) *CountService {
if s.index == nil {
s.index = make([]string, 0)
}
s.index = append(s.index, index...)
return s
}
// Type sets the types to use to restrict the results.
func (s *CountService) Type(typ ...string) *CountService {
if s.typ == nil {
s.typ = make([]string, 0)
}
s.typ = append(s.typ, typ...)
return s
}
// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices. (This includes "_all" string
// or when no indices have been specified).
func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService {
s.allowNoIndices = &allowNoIndices
return s
}
// AnalyzeWildcard specifies whether wildcard and prefix queries should be
// analyzed (default: false).
func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService {
s.analyzeWildcard = &analyzeWildcard
return s
}
// Analyzer specifies the analyzer to use for the query string.
func (s *CountService) Analyzer(analyzer string) *CountService {
s.analyzer = analyzer
return s
}
// DefaultOperator specifies the default operator for query string query (AND or OR).
func (s *CountService) DefaultOperator(defaultOperator string) *CountService {
s.defaultOperator = defaultOperator
return s
}
// Df specifies the field to use as default where no field prefix is given
// in the query string.
func (s *CountService) Df(df string) *CountService {
s.df = df
return s
}
// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *CountService) ExpandWildcards(expandWildcards string) *CountService {
s.expandWildcards = expandWildcards
return s
}
// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService {
s.ignoreUnavailable = &ignoreUnavailable
return s
}
// Lenient specifies whether format-based query failures (such as
// providing text to a numeric field) should be ignored.
func (s *CountService) Lenient(lenient bool) *CountService {
s.lenient = &lenient
return s
}
// LowercaseExpandedTerms specifies whether query terms should be lowercased.
func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService {
s.lowercaseExpandedTerms = &lowercaseExpandedTerms
return s
}
// MinScore indicates to include only documents with a specific `_score`
// value in the result.
func (s *CountService) MinScore(minScore interface{}) *CountService {
s.minScore = minScore
return s
}
// Preference specifies the node or shard the operation should be
// performed on (default: random).
func (s *CountService) Preference(preference string) *CountService {
s.preference = preference
return s
}
// Q specifies the query in the Lucene query string syntax. You can also use Query to pass
// a Query struct.
func (s *CountService) Q(q string) *CountService {
s.q = q
return s
}
// Query specifies the query to pass. You can also pass a query string with Q.
func (s *CountService) Query(query Query) *CountService {
s.query = query
return s
}
// Routing specifies the routing value.
func (s *CountService) Routing(routing string) *CountService {
s.routing = routing
return s
}
// Pretty indicates that the JSON response be indented and human readable.
func (s *CountService) Pretty(pretty bool) *CountService {
s.pretty = pretty
return s
}
// BodyJson specifies the query to restrict the results specified with the
// Query DSL (optional). The interface{} will be serialized to a JSON document,
// so use a map[string]interface{}.
func (s *CountService) BodyJson(body interface{}) *CountService {
s.bodyJson = body
return s
}
// BodyString specifies a query to restrict the results specified with
// the Query DSL (optional).
func (s *CountService) BodyString(body string) *CountService {
s.bodyString = body
return s
}
// buildURL builds the URL for the operation.
func (s *CountService) buildURL() (string, url.Values, error) {
var err error
var path string
if len(s.index) > 0 && len(s.typ) > 0 {
path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{
"index": strings.Join(s.index, ","),
"type": strings.Join(s.typ, ","),
})
} else if len(s.index) > 0 {
path, err = uritemplates.Expand("/{index}/_count", map[string]string{
"index": strings.Join(s.index, ","),
})
} else if len(s.typ) > 0 {
path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{
"type": strings.Join(s.typ, ","),
})
} else {
path = "/_all/_count"
}
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
}
if s.analyzeWildcard != nil {
params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
}
if s.analyzer != "" {
params.Set("analyzer", s.analyzer)
}
if s.defaultOperator != "" {
params.Set("default_operator", s.defaultOperator)
}
if s.df != "" {
params.Set("df", s.df)
}
if s.expandWildcards != "" {
params.Set("expand_wildcards", s.expandWildcards)
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
}
if s.lenient != nil {
params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
}
if s.lowercaseExpandedTerms != nil {
params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
}
if s.minScore != nil {
params.Set("min_score", fmt.Sprintf("%v", s.minScore))
}
if s.preference != "" {
params.Set("preference", s.preference)
}
if s.q != "" {
params.Set("q", s.q)
}
if s.routing != "" {
params.Set("routing", s.routing)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *CountService) Validate() error {
return nil
}
// Do executes the operation.
func (s *CountService) Do() (int64, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return 0, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return 0, err
}
// Setup HTTP request body
var body interface{}
if s.query != nil {
src, err := s.query.Source()
if err != nil {
return 0, err
}
query := make(map[string]interface{})
query["query"] = src
body = query
} else if s.bodyJson != nil {
body = s.bodyJson
} else if s.bodyString != "" {
body = s.bodyString
}
// Get HTTP response
res, err := s.client.PerformRequest("POST", path, params, body)
if err != nil {
return 0, err
}
// Return result
ret := new(CountResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return 0, err
}
return ret.Count, nil
}
// CountResponse is the response of using the Count API.
type CountResponse struct {
Count int64 `json:"count"`
Shards shardsInfo `json:"_shards,omitempty"`
}
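A counting sketch with a term query (editor's addition; index, type, field, and value are illustrative):

package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	n, err := elastic.NewCountService(client).
		Index("orders").
		Type("order").
		Query(elastic.NewTermQuery("state", "shipped")).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d matching documents", n)
}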

vendor/gopkg.in/olivere/elastic.v3/decoder.go generated vendored Normal file

@ -0,0 +1,26 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"encoding/json"
)
// Decoder is used to decode responses from Elasticsearch.
// Users of elastic can implement their own decoder for advanced purposes
// and set it per Client (see SetDecoder). If none is specified,
// DefaultDecoder is used.
type Decoder interface {
Decode(data []byte, v interface{}) error
}
// DefaultDecoder uses json.Unmarshal from the Go standard library
// to decode JSON data.
type DefaultDecoder struct{}
// Decode decodes with json.Unmarshal from the Go standard library.
func (u *DefaultDecoder) Decode(data []byte, v interface{}) error {
return json.Unmarshal(data, v)
}
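A Decoder can be swapped per client. A minimal sketch of a custom implementation, assuming the library's SetDecoder client option (defined outside this file); the counter is purely illustrative:

	// countingDecoder delegates to json.Unmarshal and counts decoded responses.
	type countingDecoder struct {
		n int64
	}

	func (d *countingDecoder) Decode(data []byte, v interface{}) error {
		atomic.AddInt64(&d.n, 1) // import "sync/atomic"
		return json.Unmarshal(data, v)
	}

	// client, err := elastic.NewClient(elastic.SetDecoder(&countingDecoder{}))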

vendor/gopkg.in/olivere/elastic.v3/delete.go generated vendored Normal file
@@ -0,0 +1,213 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// DeleteService deletes a typed JSON document from a specified
// index based on its ID.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html
// for details.
type DeleteService struct {
client *Client
pretty bool
id string
index string
typ string
routing string
timeout string
version interface{}
versionType string
consistency string
parent string
refresh *bool
replication string
}
// NewDeleteService creates a new DeleteService.
func NewDeleteService(client *Client) *DeleteService {
return &DeleteService{
client: client,
}
}
// Type is the type of the document.
func (s *DeleteService) Type(typ string) *DeleteService {
s.typ = typ
return s
}
// Id is the document ID.
func (s *DeleteService) Id(id string) *DeleteService {
s.id = id
return s
}
// Index is the name of the index.
func (s *DeleteService) Index(index string) *DeleteService {
s.index = index
return s
}
// Replication specifies a replication type.
func (s *DeleteService) Replication(replication string) *DeleteService {
s.replication = replication
return s
}
// Routing is a specific routing value.
func (s *DeleteService) Routing(routing string) *DeleteService {
s.routing = routing
return s
}
// Timeout is an explicit operation timeout.
func (s *DeleteService) Timeout(timeout string) *DeleteService {
s.timeout = timeout
return s
}
// Version is an explicit version number for concurrency control.
func (s *DeleteService) Version(version interface{}) *DeleteService {
s.version = version
return s
}
// VersionType is a specific version type.
func (s *DeleteService) VersionType(versionType string) *DeleteService {
s.versionType = versionType
return s
}
// Consistency defines a specific write consistency setting for the operation.
func (s *DeleteService) Consistency(consistency string) *DeleteService {
s.consistency = consistency
return s
}
// Parent is the ID of parent document.
func (s *DeleteService) Parent(parent string) *DeleteService {
s.parent = parent
return s
}
// Refresh the index after performing the operation.
func (s *DeleteService) Refresh(refresh bool) *DeleteService {
s.refresh = &refresh
return s
}
// Pretty indicates that the JSON response be indented and human readable.
func (s *DeleteService) Pretty(pretty bool) *DeleteService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *DeleteService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
"index": s.index,
"type": s.typ,
"id": s.id,
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.refresh != nil {
params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
}
if s.replication != "" {
params.Set("replication", s.replication)
}
if s.routing != "" {
params.Set("routing", s.routing)
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
}
if s.version != nil {
params.Set("version", fmt.Sprintf("%v", s.version))
}
if s.versionType != "" {
params.Set("version_type", s.versionType)
}
if s.consistency != "" {
params.Set("consistency", s.consistency)
}
if s.parent != "" {
params.Set("parent", s.parent)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *DeleteService) Validate() error {
var invalid []string
if s.typ == "" {
invalid = append(invalid, "Type")
}
if s.id == "" {
invalid = append(invalid, "Id")
}
if s.index == "" {
invalid = append(invalid, "Index")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *DeleteService) Do() (*DeleteResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("DELETE", path, params, nil)
if err != nil {
return nil, err
}
// Return operation response
ret := new(DeleteResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// -- Result of a delete request.
// DeleteResponse is the outcome of running DeleteService.Do.
type DeleteResponse struct {
// TODO _shards { total, failed, successful }
Found bool `json:"found"`
Index string `json:"_index"`
Type string `json:"_type"`
Id string `json:"_id"`
Version int64 `json:"_version"`
}
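A minimal usage sketch of the Delete API above, using the constructor defined in this file; index, type, and ID are placeholders:

	res, err := elastic.NewDeleteService(client).
		Index("twitter").
		Type("tweet").
		Id("1").
		Do()
	if err != nil {
		// Handle error (elastic.IsNotFound(err) reports a missing document)
	}
	if res.Found {
		// The document existed and has been deleted.
	}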

vendor/gopkg.in/olivere/elastic.v3/delete_by_query.go generated vendored Normal file
@@ -0,0 +1,301 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// DeleteByQueryService deletes documents that match a query.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html.
type DeleteByQueryService struct {
client *Client
indices []string
types []string
analyzer string
consistency string
defaultOper string
df string
ignoreUnavailable *bool
allowNoIndices *bool
expandWildcards string
replication string
routing string
timeout string
pretty bool
q string
query Query
}
// NewDeleteByQueryService creates a new DeleteByQueryService.
// You typically use the client's DeleteByQuery to get a reference to
// the service.
func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
builder := &DeleteByQueryService{
client: client,
}
return builder
}
// Index sets the indices on which to perform the delete operation.
func (s *DeleteByQueryService) Index(indices ...string) *DeleteByQueryService {
if s.indices == nil {
s.indices = make([]string, 0)
}
s.indices = append(s.indices, indices...)
return s
}
// Type limits the delete operation to the given types.
func (s *DeleteByQueryService) Type(types ...string) *DeleteByQueryService {
if s.types == nil {
s.types = make([]string, 0)
}
s.types = append(s.types, types...)
return s
}
// Analyzer to use for the query string.
func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
s.analyzer = analyzer
return s
}
// Consistency represents the specific write consistency setting for the operation.
// It can be one, quorum, or all.
func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService {
s.consistency = consistency
return s
}
// DefaultOperator for query string query (AND or OR).
func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
s.defaultOper = defaultOperator
return s
}
// DF is the field to use as default where no field prefix is given in the query string.
func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
s.df = defaultField
return s
}
// DefaultField is the field to use as default where no field prefix is given in the query string.
// It is an alias to the DF func.
func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
s.df = defaultField
return s
}
// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
s.ignoreUnavailable = &ignore
return s
}
// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices (including the _all string
// or when no indices have been specified).
func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
s.allowNoIndices = &allow
return s
}
// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both. It can be "open" or "closed".
func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
s.expandWildcards = expand
return s
}
// Replication sets a specific replication type (sync or async).
func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService {
s.replication = replication
return s
}
// Q specifies the query in Lucene query string syntax. You can also use
// Query to programmatically specify the query.
func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
s.q = query
return s
}
// QueryString is an alias to Q. Notice that you can also use Query to
// programmatically set the query.
func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
s.q = query
return s
}
// Routing sets a specific routing value.
func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService {
s.routing = routing
return s
}
// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms".
func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
s.timeout = timeout
return s
}
// Pretty indents the JSON output from Elasticsearch.
func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
s.pretty = pretty
return s
}
// Query sets the query programmatically.
func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
s.query = query
return s
}
// Do executes the delete-by-query operation.
func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) {
var err error
// Build url
path := "/"
// Indices part
indexPart := make([]string, 0)
for _, index := range s.indices {
index, err = uritemplates.Expand("{index}", map[string]string{
"index": index,
})
if err != nil {
return nil, err
}
indexPart = append(indexPart, index)
}
if len(indexPart) > 0 {
path += strings.Join(indexPart, ",")
}
// Types part
typesPart := make([]string, 0)
for _, typ := range s.types {
typ, err = uritemplates.Expand("{type}", map[string]string{
"type": typ,
})
if err != nil {
return nil, err
}
typesPart = append(typesPart, typ)
}
if len(typesPart) > 0 {
path += "/" + strings.Join(typesPart, ",")
}
// Search
path += "/_query"
// Parameters
params := make(url.Values)
if s.analyzer != "" {
params.Set("analyzer", s.analyzer)
}
if s.consistency != "" {
params.Set("consistency", s.consistency)
}
if s.defaultOper != "" {
params.Set("default_operator", s.defaultOper)
}
if s.df != "" {
params.Set("df", s.df)
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
}
if s.expandWildcards != "" {
params.Set("expand_wildcards", s.expandWildcards)
}
if s.replication != "" {
params.Set("replication", s.replication)
}
if s.routing != "" {
params.Set("routing", s.routing)
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
}
if s.pretty {
params.Set("pretty", fmt.Sprintf("%v", s.pretty))
}
if s.q != "" {
params.Set("q", s.q)
}
// Set body if there is a query set
var body interface{}
if s.query != nil {
src, err := s.query.Source()
if err != nil {
return nil, err
}
query := make(map[string]interface{})
query["query"] = src
body = query
}
// Get response
res, err := s.client.PerformRequest("DELETE", path, params, body)
if err != nil {
return nil, err
}
// Return result
ret := new(DeleteByQueryResult)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService.
type DeleteByQueryResult struct {
Took int64 `json:"took"`
TimedOut bool `json:"timed_out"`
Indices map[string]IndexDeleteByQueryResult `json:"_indices"`
Failures []shardOperationFailure `json:"failures"`
}
// IndexNames returns the names of the indices the DeleteByQuery touched.
func (res DeleteByQueryResult) IndexNames() []string {
var indices []string
for index := range res.Indices {
indices = append(indices, index)
}
return indices
}
// All returns the index delete-by-query result of all indices.
func (res DeleteByQueryResult) All() IndexDeleteByQueryResult {
all := res.Indices["_all"]
return all
}
// IndexDeleteByQueryResult is the result of a delete-by-query for a specific
// index.
type IndexDeleteByQueryResult struct {
// Found documents, matching the query.
Found int `json:"found"`
// Deleted documents, successfully, from the given index.
Deleted int `json:"deleted"`
// Missing documents when trying to delete them.
Missing int `json:"missing"`
// Failed documents to be deleted for the given index.
Failed int `json:"failed"`
}
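A minimal usage sketch of the delete-by-query API above; note that Elasticsearch 2.x serves this endpoint only with the delete-by-query plugin installed. The index and query are placeholders, and elastic.NewTermQuery is assumed from the query DSL:

	res, err := elastic.NewDeleteByQueryService(client).
		Index("twitter").
		Query(elastic.NewTermQuery("user", "olivere")).
		Do()
	if err != nil {
		// Handle error
	}
	for _, index := range res.IndexNames() {
		r := res.Indices[index]
		fmt.Println(index, r.Deleted, r.Failed)
	}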

vendor/gopkg.in/olivere/elastic.v3/delete_template.go generated vendored Normal file
@@ -0,0 +1,117 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// DeleteTemplateService deletes a search template. More information can
// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
type DeleteTemplateService struct {
client *Client
pretty bool
id string
version *int
versionType string
}
// NewDeleteTemplateService creates a new DeleteTemplateService.
func NewDeleteTemplateService(client *Client) *DeleteTemplateService {
return &DeleteTemplateService{
client: client,
}
}
// Id is the template ID.
func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService {
s.id = id
return s
}
// Version is an explicit version number for concurrency control.
func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService {
s.version = &version
return s
}
// VersionType specifies a version type.
func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService {
s.versionType = versionType
return s
}
// buildURL builds the URL for the operation.
func (s *DeleteTemplateService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
"id": s.id,
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.version != nil {
params.Set("version", fmt.Sprintf("%d", *s.version))
}
if s.versionType != "" {
params.Set("version_type", s.versionType)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *DeleteTemplateService) Validate() error {
var invalid []string
if s.id == "" {
invalid = append(invalid, "Id")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("DELETE", path, params, nil)
if err != nil {
return nil, err
}
// Return operation response
ret := new(DeleteTemplateResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// DeleteTemplateResponse is the response of DeleteTemplateService.Do.
type DeleteTemplateResponse struct {
Found bool `json:"found"`
Index string `json:"_index"`
Type string `json:"_type"`
Id string `json:"_id"`
Version int `json:"_version"`
}
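A minimal usage sketch of deleting a stored search template with the service above; the template ID is a placeholder:

	res, err := elastic.NewDeleteTemplateService(client).
		Id("my_template").
		Do()
	if err != nil {
		// Handle error
	}
	_ = res.Found // true if the template existed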

vendor/gopkg.in/olivere/elastic.v3/doc.go generated vendored Normal file
@@ -0,0 +1,51 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
/*
Package elastic provides an interface to the Elasticsearch server
(http://www.elasticsearch.org/).
The first thing you do is to create a Client. If you have Elasticsearch
installed and running with its default settings
(i.e. available at http://127.0.0.1:9200), all you need to do is:
client, err := elastic.NewClient()
if err != nil {
// Handle error
}
If your Elasticsearch server is running on a different IP and/or port,
just provide a URL to NewClient:
// Create a client and connect to http://192.168.2.10:9201
client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
if err != nil {
// Handle error
}
You can pass many more configuration parameters to NewClient. Review the
documentation of NewClient for more information.
If no Elasticsearch server is available, services will fail when creating
a new request and will return ErrNoClient.
A Client provides services. The services usually come with a variety of
methods to prepare the query and a Do function to execute it against the
Elasticsearch REST interface and return a response. Here is an example
of the IndexExists service that checks if a given index already exists.
exists, err := client.IndexExists("twitter").Do()
if err != nil {
// Handle error
}
if !exists {
// Index does not exist yet.
}
Look up the documentation for Client to get an idea of the services provided
and what kinds of responses you get when executing the Do function of a service.
Also see the wiki on Github for more details.
*/
package elastic

vendor/gopkg.in/olivere/elastic.v3/errors.go generated vendored Normal file
@@ -0,0 +1,141 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// checkResponse will return an error if the request/response indicates
// an error returned from Elasticsearch.
//
// HTTP status codes in the range [200..299] are considered successful.
// All other status codes are considered errors unless they are listed in
// ignoreErrors. This is necessary because for some services, HTTP status 404
// is a valid response from Elasticsearch (e.g. the Exists service).
//
// The func tries to parse error details as returned from Elasticsearch
// and encapsulates them in type elastic.Error.
func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error {
// 200-299 are valid status codes
if res.StatusCode >= 200 && res.StatusCode <= 299 {
return nil
}
// Ignore certain errors?
for _, code := range ignoreErrors {
if code == res.StatusCode {
return nil
}
}
return createResponseError(res)
}
// createResponseError creates an Error structure from the HTTP response,
// its status code and the error information sent by Elasticsearch.
func createResponseError(res *http.Response) error {
if res.Body == nil {
return &Error{Status: res.StatusCode}
}
data, err := ioutil.ReadAll(res.Body)
if err != nil {
return &Error{Status: res.StatusCode}
}
errReply := new(Error)
err = json.Unmarshal(data, errReply)
if err != nil {
return &Error{Status: res.StatusCode}
}
if errReply != nil {
if errReply.Status == 0 {
errReply.Status = res.StatusCode
}
return errReply
}
return &Error{Status: res.StatusCode}
}
// Error encapsulates error details as returned from Elasticsearch.
type Error struct {
Status int `json:"status"`
Details *ErrorDetails `json:"error,omitempty"`
}
// ErrorDetails encapsulate error details from Elasticsearch.
// It is used in e.g. elastic.Error and elastic.BulkResponseItem.
type ErrorDetails struct {
Type string `json:"type"`
Reason string `json:"reason"`
ResourceType string `json:"resource.type,omitempty"`
ResourceId string `json:"resource.id,omitempty"`
Index string `json:"index,omitempty"`
Phase string `json:"phase,omitempty"`
Grouped bool `json:"grouped,omitempty"`
CausedBy map[string]interface{} `json:"caused_by,omitempty"`
RootCause []*ErrorDetails `json:"root_cause,omitempty"`
FailedShards []map[string]interface{} `json:"failed_shards,omitempty"`
}
// Error returns a string representation of the error.
func (e *Error) Error() string {
if e.Details != nil && e.Details.Reason != "" {
return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type)
} else {
return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
}
}
// IsNotFound returns true if the given error indicates that Elasticsearch
// returned HTTP status 404. The err parameter can be of type *elastic.Error,
// elastic.Error, *http.Response or int (indicating the HTTP status code).
func IsNotFound(err interface{}) bool {
switch e := err.(type) {
case *http.Response:
return e.StatusCode == http.StatusNotFound
case *Error:
return e.Status == http.StatusNotFound
case Error:
return e.Status == http.StatusNotFound
case int:
return e == http.StatusNotFound
}
return false
}
// IsTimeout returns true if the given error indicates that Elasticsearch
// returned HTTP status 408. The err parameter can be of type *elastic.Error,
// elastic.Error, *http.Response or int (indicating the HTTP status code).
func IsTimeout(err interface{}) bool {
switch e := err.(type) {
case *http.Response:
return e.StatusCode == http.StatusRequestTimeout
case *Error:
return e.Status == http.StatusRequestTimeout
case Error:
return e.Status == http.StatusRequestTimeout
case int:
return e == http.StatusRequestTimeout
}
return false
}
// -- General errors --
// shardsInfo represents information from a shard.
type shardsInfo struct {
Total int `json:"total"`
Successful int `json:"successful"`
Failed int `json:"failed"`
}
// shardOperationFailure represents a shard failure.
type shardOperationFailure struct {
Shard int `json:"shard"`
Index string `json:"index"`
Status string `json:"status"`
// "reason"
}
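A minimal sketch of inspecting errors surfaced through checkResponse, assuming a configured *elastic.Client named client; the lookup itself is a placeholder:

	_, err := elastic.NewGetService(client).
		Index("twitter").Type("tweet").Id("no-such-id").
		Do()
	if err != nil {
		if elastic.IsNotFound(err) {
			// 404: document or index does not exist.
		} else if e, ok := err.(*elastic.Error); ok {
			fmt.Println(e.Status, e.Details) // structured details from Elasticsearch
		}
	}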

vendor/gopkg.in/olivere/elastic.v3/exists.go generated vendored Normal file
@@ -0,0 +1,175 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/http"
"net/url"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// ExistsService checks for the existence of a document using HEAD.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
// for details.
type ExistsService struct {
client *Client
pretty bool
id string
index string
typ string
preference string
realtime *bool
refresh *bool
routing string
parent string
}
// NewExistsService creates a new ExistsService.
func NewExistsService(client *Client) *ExistsService {
return &ExistsService{
client: client,
}
}
// Id is the document ID.
func (s *ExistsService) Id(id string) *ExistsService {
s.id = id
return s
}
// Index is the name of the index.
func (s *ExistsService) Index(index string) *ExistsService {
s.index = index
return s
}
// Type is the type of the document (use `_all` to fetch the first document
// matching the ID across all types).
func (s *ExistsService) Type(typ string) *ExistsService {
s.typ = typ
return s
}
// Preference specifies the node or shard the operation should be performed on (default: random).
func (s *ExistsService) Preference(preference string) *ExistsService {
s.preference = preference
return s
}
// Realtime specifies whether to perform the operation in realtime or search mode.
func (s *ExistsService) Realtime(realtime bool) *ExistsService {
s.realtime = &realtime
return s
}
// Refresh the shard containing the document before performing the operation.
func (s *ExistsService) Refresh(refresh bool) *ExistsService {
s.refresh = &refresh
return s
}
// Routing is a specific routing value.
func (s *ExistsService) Routing(routing string) *ExistsService {
s.routing = routing
return s
}
// Parent is the ID of the parent document.
func (s *ExistsService) Parent(parent string) *ExistsService {
s.parent = parent
return s
}
// Pretty indicates that the JSON response be indented and human readable.
func (s *ExistsService) Pretty(pretty bool) *ExistsService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *ExistsService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
"id": s.id,
"index": s.index,
"type": s.typ,
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.realtime != nil {
params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
}
if s.refresh != nil {
params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
}
if s.routing != "" {
params.Set("routing", s.routing)
}
if s.parent != "" {
params.Set("parent", s.parent)
}
if s.preference != "" {
params.Set("preference", s.preference)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *ExistsService) Validate() error {
var invalid []string
if s.id == "" {
invalid = append(invalid, "Id")
}
if s.index == "" {
invalid = append(invalid, "Index")
}
if s.typ == "" {
invalid = append(invalid, "Type")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *ExistsService) Do() (bool, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return false, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return false, err
}
// Get HTTP response
res, err := s.client.PerformRequest("HEAD", path, params, nil, 404)
if err != nil {
return false, err
}
// Return operation response
switch res.StatusCode {
case http.StatusOK:
return true, nil
case http.StatusNotFound:
return false, nil
default:
return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
}
}
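A minimal usage sketch of the Exists API above; index, type, and ID are placeholders:

	exists, err := elastic.NewExistsService(client).
		Index("twitter").
		Type("tweet").
		Id("1").
		Do()
	if err != nil {
		// Handle error
	}
	if !exists {
		// Document does not exist.
	}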

vendor/gopkg.in/olivere/elastic.v3/explain.go generated vendored Normal file
@@ -0,0 +1,329 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"log"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
var (
_ = fmt.Print
_ = log.Print
_ = strings.Index
_ = uritemplates.Expand
_ = url.Parse
)
// ExplainService computes a score explanation for a query and
// a specific document.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html.
type ExplainService struct {
client *Client
pretty bool
id string
index string
typ string
q string
routing string
lenient *bool
analyzer string
df string
fields []string
lowercaseExpandedTerms *bool
xSourceInclude []string
analyzeWildcard *bool
parent string
preference string
xSource []string
defaultOperator string
xSourceExclude []string
source string
bodyJson interface{}
bodyString string
}
// NewExplainService creates a new ExplainService.
func NewExplainService(client *Client) *ExplainService {
return &ExplainService{
client: client,
xSource: make([]string, 0),
xSourceExclude: make([]string, 0),
fields: make([]string, 0),
xSourceInclude: make([]string, 0),
}
}
// Id is the document ID.
func (s *ExplainService) Id(id string) *ExplainService {
s.id = id
return s
}
// Index is the name of the index.
func (s *ExplainService) Index(index string) *ExplainService {
s.index = index
return s
}
// Type is the type of the document.
func (s *ExplainService) Type(typ string) *ExplainService {
s.typ = typ
return s
}
// Source is the URL-encoded query definition (instead of using the request body).
func (s *ExplainService) Source(source string) *ExplainService {
s.source = source
return s
}
// XSourceExclude is a list of fields to exclude from the returned _source field.
func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService {
s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
return s
}
// Lenient specifies whether format-based query failures
// (such as providing text to a numeric field) should be ignored.
func (s *ExplainService) Lenient(lenient bool) *ExplainService {
s.lenient = &lenient
return s
}
// Query in the Lucene query string syntax.
func (s *ExplainService) Q(q string) *ExplainService {
s.q = q
return s
}
// Routing sets a specific routing value.
func (s *ExplainService) Routing(routing string) *ExplainService {
s.routing = routing
return s
}
// AnalyzeWildcard specifies whether wildcards and prefix queries
// in the query string query should be analyzed (default: false).
func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService {
s.analyzeWildcard = &analyzeWildcard
return s
}
// Analyzer is the analyzer for the query string query.
func (s *ExplainService) Analyzer(analyzer string) *ExplainService {
s.analyzer = analyzer
return s
}
// Df is the default field for query string query (default: _all).
func (s *ExplainService) Df(df string) *ExplainService {
s.df = df
return s
}
// Fields is a list of fields to return in the response.
func (s *ExplainService) Fields(fields ...string) *ExplainService {
s.fields = append(s.fields, fields...)
return s
}
// LowercaseExpandedTerms specifies whether query terms should be lowercased.
func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService {
s.lowercaseExpandedTerms = &lowercaseExpandedTerms
return s
}
// XSourceInclude is a list of fields to extract and return from the _source field.
func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService {
s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
return s
}
// DefaultOperator is the default operator for query string query (AND or OR).
func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService {
s.defaultOperator = defaultOperator
return s
}
// Parent is the ID of the parent document.
func (s *ExplainService) Parent(parent string) *ExplainService {
s.parent = parent
return s
}
// Preference specifies the node or shard the operation should be performed on (default: random).
func (s *ExplainService) Preference(preference string) *ExplainService {
s.preference = preference
return s
}
// XSource is true or false to return the _source field or not, or a list of fields to return.
func (s *ExplainService) XSource(xSource ...string) *ExplainService {
s.xSource = append(s.xSource, xSource...)
return s
}
// Pretty indicates that the JSON response be indented and human readable.
func (s *ExplainService) Pretty(pretty bool) *ExplainService {
s.pretty = pretty
return s
}
// Query sets a query definition using the Query DSL.
func (s *ExplainService) Query(query Query) *ExplainService {
src, err := query.Source()
if err != nil {
// Do nothing in case of an error
return s
}
body := make(map[string]interface{})
body["query"] = src
s.bodyJson = body
return s
}
// BodyJson sets the query definition using the Query DSL.
func (s *ExplainService) BodyJson(body interface{}) *ExplainService {
s.bodyJson = body
return s
}
// BodyString sets the query definition using the Query DSL as a string.
func (s *ExplainService) BodyString(body string) *ExplainService {
s.bodyString = body
return s
}
// buildURL builds the URL for the operation.
func (s *ExplainService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{
"id": s.id,
"index": s.index,
"type": s.typ,
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if len(s.xSource) > 0 {
params.Set("_source", strings.Join(s.xSource, ","))
}
if s.defaultOperator != "" {
params.Set("default_operator", s.defaultOperator)
}
if s.parent != "" {
params.Set("parent", s.parent)
}
if s.preference != "" {
params.Set("preference", s.preference)
}
if s.source != "" {
params.Set("source", s.source)
}
if len(s.xSourceExclude) > 0 {
params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
}
if s.lenient != nil {
params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
}
if s.q != "" {
params.Set("q", s.q)
}
if s.routing != "" {
params.Set("routing", s.routing)
}
if len(s.fields) > 0 {
params.Set("fields", strings.Join(s.fields, ","))
}
if s.lowercaseExpandedTerms != nil {
params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
}
if len(s.xSourceInclude) > 0 {
params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
}
if s.analyzeWildcard != nil {
params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
}
if s.analyzer != "" {
params.Set("analyzer", s.analyzer)
}
if s.df != "" {
params.Set("df", s.df)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *ExplainService) Validate() error {
var invalid []string
if s.index == "" {
invalid = append(invalid, "Index")
}
if s.typ == "" {
invalid = append(invalid, "Type")
}
if s.id == "" {
invalid = append(invalid, "Id")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *ExplainService) Do() (*ExplainResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Setup HTTP request body
var body interface{}
if s.bodyJson != nil {
body = s.bodyJson
} else {
body = s.bodyString
}
// Get HTTP response
res, err := s.client.PerformRequest("GET", path, params, body)
if err != nil {
return nil, err
}
// Return operation response
ret := new(ExplainResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// ExplainResponse is the response of ExplainService.Do.
type ExplainResponse struct {
Index string `json:"_index"`
Type string `json:"_type"`
Id string `json:"_id"`
Matched bool `json:"matched"`
Explanation map[string]interface{} `json:"explanation"`
}
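A minimal usage sketch of the Explain API above; names are placeholders, and elastic.NewTermQuery is assumed from the query DSL:

	res, err := elastic.NewExplainService(client).
		Index("twitter").
		Type("tweet").
		Id("1").
		Query(elastic.NewTermQuery("message", "elasticsearch")).
		Do()
	if err != nil {
		// Handle error
	}
	if res.Matched {
		// res.Explanation holds the score breakdown.
	}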

vendor/gopkg.in/olivere/elastic.v3/fetch_source_context.go generated vendored Normal file
@@ -0,0 +1,74 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"net/url"
"strings"
)
type FetchSourceContext struct {
fetchSource bool
transformSource bool
includes []string
excludes []string
}
func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
return &FetchSourceContext{
fetchSource: fetchSource,
includes: make([]string, 0),
excludes: make([]string, 0),
}
}
func (fsc *FetchSourceContext) FetchSource() bool {
return fsc.fetchSource
}
func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
fsc.fetchSource = fetchSource
}
func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
fsc.includes = append(fsc.includes, includes...)
return fsc
}
func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
fsc.excludes = append(fsc.excludes, excludes...)
return fsc
}
func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext {
fsc.transformSource = transformSource
return fsc
}
func (fsc *FetchSourceContext) Source() (interface{}, error) {
if !fsc.fetchSource {
return false, nil
}
return map[string]interface{}{
"includes": fsc.includes,
"excludes": fsc.excludes,
}, nil
}
// Query returns the parameters in a form suitable for a URL query string.
func (fsc *FetchSourceContext) Query() url.Values {
params := url.Values{}
if !fsc.fetchSource {
params.Add("_source", "false")
return params
}
if len(fsc.includes) > 0 {
params.Add("_source_include", strings.Join(fsc.includes, ","))
}
if len(fsc.excludes) > 0 {
params.Add("_source_exclude", strings.Join(fsc.excludes, ","))
}
return params
}
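A minimal sketch of restricting the returned _source with FetchSourceContext, combined with the GetService defined later in this commit; field names are placeholders:

	fsc := elastic.NewFetchSourceContext(true).
		Include("user", "message").
		Exclude("retweets")
	doc, err := elastic.NewGetService(client).
		Index("twitter").Type("tweet").Id("1").
		FetchSourceContext(fsc).
		Do()
	if err != nil {
		// Handle error
	}
	_ = doc.Source // filtered _source as raw JSON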

vendor/gopkg.in/olivere/elastic.v3/field_stats.go generated vendored Normal file
@@ -0,0 +1,255 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/http"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
const (
FieldStatsClusterLevel = "cluster"
FieldStatsIndicesLevel = "indices"
)
// FieldStatsService allows finding statistical properties of a field without executing a search,
// by looking up measurements that are natively available in the Lucene index.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-stats.html
// for details
type FieldStatsService struct {
client *Client
pretty bool
level string
index []string
allowNoIndices *bool
expandWildcards string
fields []string
ignoreUnavailable *bool
bodyJson interface{}
bodyString string
}
// NewFieldStatsService creates a new FieldStatsService
func NewFieldStatsService(client *Client) *FieldStatsService {
return &FieldStatsService{
client: client,
index: make([]string, 0),
fields: make([]string, 0),
}
}
// Index is a list of index names; use `_all` or empty string to perform
// the operation on all indices.
func (s *FieldStatsService) Index(index ...string) *FieldStatsService {
s.index = append(s.index, index...)
return s
}
// AllowNoIndices indicates whether to ignore if a wildcard indices expression
// resolves into no concrete indices.
// (This includes `_all` string or when no indices have been specified).
func (s *FieldStatsService) AllowNoIndices(allowNoIndices bool) *FieldStatsService {
s.allowNoIndices = &allowNoIndices
return s
}
// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *FieldStatsService) ExpandWildcards(expandWildcards string) *FieldStatsService {
s.expandWildcards = expandWildcards
return s
}
// Fields is a list of fields to get field statistics for
// (min value, max value, and more).
func (s *FieldStatsService) Fields(fields ...string) *FieldStatsService {
s.fields = append(s.fields, fields...)
return s
}
// IgnoreUnavailable indicates whether specified concrete indices should be ignored when unavailable (missing or closed).
func (s *FieldStatsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldStatsService {
s.ignoreUnavailable = &ignoreUnavailable
return s
}
// Level sets whether stats should be returned on a per-index or on a
// cluster-wide level; it must be either "cluster" or "indices" and defaults to "cluster".
func (s *FieldStatsService) Level(level string) *FieldStatsService {
s.level = level
return s
}
// ClusterLevel is a helper that sets Level to "cluster".
func (s *FieldStatsService) ClusterLevel() *FieldStatsService {
s.level = FieldStatsClusterLevel
return s
}
// IndicesLevel is a helper that sets Level to "indices".
func (s *FieldStatsService) IndicesLevel() *FieldStatsService {
s.level = FieldStatsIndicesLevel
return s
}
// Pretty indicates that the JSON response be indented and human readable.
func (s *FieldStatsService) Pretty(pretty bool) *FieldStatsService {
s.pretty = pretty
return s
}
// BodyJson sets the request body: field JSON objects containing the field name and, optionally, a range to filter out indices whose results lie outside the defined bounds.
func (s *FieldStatsService) BodyJson(body interface{}) *FieldStatsService {
s.bodyJson = body
return s
}
// BodyString sets the request body as a string: field JSON objects containing the field name and, optionally, a range to filter out indices whose results lie outside the defined bounds.
func (s *FieldStatsService) BodyString(body string) *FieldStatsService {
s.bodyString = body
return s
}
// buildURL builds the URL for the operation.
func (s *FieldStatsService) buildURL() (string, url.Values, error) {
// Build URL
var err error
var path string
if len(s.index) > 0 {
path, err = uritemplates.Expand("/{index}/_field_stats", map[string]string{
"index": strings.Join(s.index, ","),
})
} else {
path = "/_field_stats"
}
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
}
if s.expandWildcards != "" {
params.Set("expand_wildcards", s.expandWildcards)
}
if len(s.fields) > 0 {
params.Set("fields", strings.Join(s.fields, ","))
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
}
if s.level != "" {
params.Set("level", s.level)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *FieldStatsService) Validate() error {
var invalid []string
if s.level != "" && (s.level != FieldStatsIndicesLevel && s.level != FieldStatsClusterLevel) {
invalid = append(invalid, "Level")
}
if len(invalid) != 0 {
return fmt.Errorf("missing or invalid required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *FieldStatsService) Do() (*FieldStatsResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Setup HTTP request body
var body interface{}
if s.bodyJson != nil {
body = s.bodyJson
} else {
body = s.bodyString
}
// Get HTTP response
res, err := s.client.PerformRequest("POST", path, params, body, http.StatusNotFound)
if err != nil {
return nil, err
}
// TODO(oe): Is 404 really a valid response here?
if res.StatusCode == http.StatusNotFound {
return &FieldStatsResponse{make(map[string]IndexFieldStats)}, nil
}
// Return operation response
ret := new(FieldStatsResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// -- Request --
// FieldStatsRequest can be used to set up the body to be used in the
// Field Stats API.
type FieldStatsRequest struct {
Fields []string `json:"fields"`
IndexConstraints map[string]*FieldStatsConstraints `json:"index_constraints,omitempty"`
}
// FieldStatsConstraints is a constraint on a field.
type FieldStatsConstraints struct {
Min *FieldStatsComparison `json:"min_value,omitempty"`
Max *FieldStatsComparison `json:"max_value,omitempty"`
}
// FieldStatsComparison contain all comparison operations that can be used
// in FieldStatsConstraints.
type FieldStatsComparison struct {
Lte interface{} `json:"lte,omitempty"`
Lt interface{} `json:"lt,omitempty"`
Gte interface{} `json:"gte,omitempty"`
Gt interface{} `json:"gt,omitempty"`
}
// -- Response --
// FieldStatsResponse is the response body content
type FieldStatsResponse struct {
Indices map[string]IndexFieldStats `json:"indices,omitempty"`
}
// IndexFieldStats contains field stats for an index
type IndexFieldStats struct {
Fields map[string]FieldStats `json:"fields,omitempty"`
}
// FieldStats contains stats of an individual field
type FieldStats struct {
MaxDoc int64 `json:"max_doc"`
DocCount int64 `json:"doc_count"`
Density int64 `json:"density"`
SumDocFrequeny int64 `json:"sum_doc_freq"`
SumTotalTermFrequency int64 `json:"sum_total_term_freq"`
MinValue interface{} `json:"min_value"`
MinValueAsString string `json:"min_value_as_string"`
MaxValue interface{} `json:"max_value"`
MaxValueAsString string `json:"max_value_as_string"`
}
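A minimal usage sketch of the field stats API above; index and field names are placeholders:

	res, err := elastic.NewFieldStatsService(client).
		Index("twitter").
		Fields("created_at").
		IndicesLevel().
		Do()
	if err != nil {
		// Handle error
	}
	for index, stats := range res.Indices {
		fmt.Println(index, stats.Fields["created_at"].DocCount)
	}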

vendor/gopkg.in/olivere/elastic.v3/geo_point.go generated vendored Normal file
@@ -0,0 +1,48 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"strconv"
"strings"
)
// GeoPoint is a geographic position described via latitude and longitude.
type GeoPoint struct {
Lat float64 `json:"lat"`
Lon float64 `json:"lon"`
}
// Source returns the object to be serialized in Elasticsearch DSL.
func (pt *GeoPoint) Source() map[string]float64 {
return map[string]float64{
"lat": pt.Lat,
"lon": pt.Lon,
}
}
// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude.
func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
return &GeoPoint{Lat: lat, Lon: lon}
}
// GeoPointFromString initializes a new GeoPoint by a string that is
// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091".
func GeoPointFromString(latLon string) (*GeoPoint, error) {
latlon := strings.SplitN(latLon, ",", 2)
if len(latlon) != 2 {
return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
}
lat, err := strconv.ParseFloat(latlon[0], 64)
if err != nil {
return nil, err
}
lon, err := strconv.ParseFloat(latlon[1], 64)
if err != nil {
return nil, err
}
return &GeoPoint{Lat: lat, Lon: lon}, nil
}
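A minimal sketch of parsing a GeoPoint from its string form; the coordinates are arbitrary:

	pt, err := elastic.GeoPointFromString("40.10210,-70.12091")
	if err != nil {
		// Handle malformed input, e.g. a missing comma.
	}
	fmt.Println(pt.Source()) // map[lat:40.1021 lon:-70.12091]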

vendor/gopkg.in/olivere/elastic.v3/get.go generated vendored Normal file
@@ -0,0 +1,271 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"encoding/json"
"fmt"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// GetService retrieves a typed JSON document from the index based
// on its ID.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
// for details.
type GetService struct {
client *Client
pretty bool
index string
typ string
id string
routing string
preference string
fields []string
refresh *bool
realtime *bool
fsc *FetchSourceContext
version interface{}
versionType string
parent string
ignoreErrorsOnGeneratedFields *bool
}
// NewGetService creates a new GetService.
func NewGetService(client *Client) *GetService {
return &GetService{
client: client,
typ: "_all",
}
}
/*
// String returns a string representation of the GetService request.
func (s *GetService) String() string {
return fmt.Sprintf("[%v][%v][%v]: routing [%v]",
s.index,
s.typ,
s.id,
s.routing)
}
*/
// Index is the name of the index.
func (s *GetService) Index(index string) *GetService {
s.index = index
return s
}
// Type is the type of the document (use `_all` to fetch the first document
// matching the ID across all types).
func (s *GetService) Type(typ string) *GetService {
s.typ = typ
return s
}
// Id is the document ID.
func (s *GetService) Id(id string) *GetService {
s.id = id
return s
}
// Parent is the ID of the parent document.
func (s *GetService) Parent(parent string) *GetService {
s.parent = parent
return s
}
// Routing is the specific routing value.
func (s *GetService) Routing(routing string) *GetService {
s.routing = routing
return s
}
// Preference specifies the node or shard the operation should be performed on (default: random).
func (s *GetService) Preference(preference string) *GetService {
s.preference = preference
return s
}
// Fields is a list of fields to return in the response.
func (s *GetService) Fields(fields ...string) *GetService {
if s.fields == nil {
s.fields = make([]string, 0)
}
s.fields = append(s.fields, fields...)
return s
}
func (s *GetService) FetchSource(fetchSource bool) *GetService {
if s.fsc == nil {
s.fsc = NewFetchSourceContext(fetchSource)
} else {
s.fsc.SetFetchSource(fetchSource)
}
return s
}
func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
s.fsc = fetchSourceContext
return s
}
// Refresh the shard containing the document before performing the operation.
func (s *GetService) Refresh(refresh bool) *GetService {
s.refresh = &refresh
return s
}
// Realtime specifies whether to perform the operation in realtime or search mode.
func (s *GetService) Realtime(realtime bool) *GetService {
s.realtime = &realtime
return s
}
// VersionType is the specific version type.
func (s *GetService) VersionType(versionType string) *GetService {
s.versionType = versionType
return s
}
// Version is an explicit version number for concurrency control.
func (s *GetService) Version(version interface{}) *GetService {
s.version = version
return s
}
// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that
// are generated if the transaction log is accessed.
func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService {
s.ignoreErrorsOnGeneratedFields = &ignore
return s
}
// Pretty indicates that the JSON response be indented and human readable.
func (s *GetService) Pretty(pretty bool) *GetService {
s.pretty = pretty
return s
}
// Validate checks if the operation is valid.
func (s *GetService) Validate() error {
var invalid []string
if s.id == "" {
invalid = append(invalid, "Id")
}
if s.index == "" {
invalid = append(invalid, "Index")
}
if s.typ == "" {
invalid = append(invalid, "Type")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// buildURL builds the URL for the operation.
func (s *GetService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
"id": s.id,
"index": s.index,
"type": s.typ,
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.routing != "" {
params.Set("routing", s.routing)
}
if s.parent != "" {
params.Set("parent", s.parent)
}
if s.preference != "" {
params.Set("preference", s.preference)
}
if len(s.fields) > 0 {
params.Set("fields", strings.Join(s.fields, ","))
}
if s.refresh != nil {
params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
}
if s.version != nil {
params.Set("version", fmt.Sprintf("%v", s.version))
}
if s.versionType != "" {
params.Set("version_type", s.versionType)
}
if s.realtime != nil {
params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
}
if s.ignoreErrorsOnGeneratedFields != nil {
params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields))
}
if s.fsc != nil {
for k, values := range s.fsc.Query() {
params.Add(k, strings.Join(values, ","))
}
}
return path, params, nil
}
// Do executes the operation.
func (s *GetService) Do() (*GetResult, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("GET", path, params, nil)
if err != nil {
return nil, err
}
// Return operation response
ret := new(GetResult)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// -- Result of a get request.
// GetResult is the outcome of GetService.Do.
type GetResult struct {
Index string `json:"_index"` // index meta field
Type string `json:"_type"` // type meta field
Id string `json:"_id"` // id meta field
Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields)
Timestamp int64 `json:"_timestamp"` // timestamp meta field
TTL int64 `json:"_ttl"` // ttl meta field
Routing string `json:"_routing"` // routing meta field
Parent string `json:"_parent"` // parent meta field
Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService
Source *json.RawMessage `json:"_source,omitempty"`
Found bool `json:"found,omitempty"`
Fields map[string]interface{} `json:"fields,omitempty"`
//Error string `json:"error,omitempty"` // used only in MultiGet
// TODO double-check that MultiGet now returns details error information
Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
}
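A minimal usage sketch of the Get API above, decoding the raw _source; names and the target type are placeholders:

	doc, err := elastic.NewGetService(client).
		Index("twitter").
		Type("tweet").
		Id("1").
		Do()
	if err != nil {
		// Handle error (elastic.IsNotFound(err) reports a missing document)
	}
	if doc.Found && doc.Source != nil {
		var tweet map[string]interface{}
		if err := json.Unmarshal(*doc.Source, &tweet); err != nil {
			// Handle decode error
		}
	}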

vendor/gopkg.in/olivere/elastic.v3/get_template.go generated vendored Normal file
@@ -0,0 +1,112 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// GetTemplateService reads a search template.
// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
type GetTemplateService struct {
client *Client
pretty bool
id string
version interface{}
versionType string
}
// NewGetTemplateService creates a new GetTemplateService.
func NewGetTemplateService(client *Client) *GetTemplateService {
return &GetTemplateService{
client: client,
}
}
// Id is the template ID.
func (s *GetTemplateService) Id(id string) *GetTemplateService {
s.id = id
return s
}
// Version is an explicit version number for concurrency control.
func (s *GetTemplateService) Version(version interface{}) *GetTemplateService {
s.version = version
return s
}
// VersionType is a specific version type.
func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService {
s.versionType = versionType
return s
}
// buildURL builds the URL for the operation.
func (s *GetTemplateService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
"id": s.id,
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.version != nil {
params.Set("version", fmt.Sprintf("%v", s.version))
}
if s.versionType != "" {
params.Set("version_type", s.versionType)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *GetTemplateService) Validate() error {
var invalid []string
if s.id == "" {
invalid = append(invalid, "Id")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation and returns the template.
func (s *GetTemplateService) Do() (*GetTemplateResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("GET", path, params, nil)
if err != nil {
return nil, err
}
// Return result
ret := new(GetTemplateResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
type GetTemplateResponse struct {
Template string `json:"template"`
}
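A minimal usage sketch of reading back a stored search template with the service above; the template ID is a placeholder:

	res, err := elastic.NewGetTemplateService(client).
		Id("my_template").
		Do()
	if err != nil {
		// Handle error
	}
	fmt.Println(res.Template) // the raw template body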

vendor/gopkg.in/olivere/elastic.v3/highlight.go generated vendored Normal file
@@ -0,0 +1,455 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
// Highlight allows highlighting search results on one or more fields.
// For details, see:
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
type Highlight struct {
fields []*HighlighterField
tagsSchema *string
highlightFilter *bool
fragmentSize *int
numOfFragments *int
preTags []string
postTags []string
order *string
encoder *string
requireFieldMatch *bool
boundaryMaxScan *int
boundaryChars []rune
highlighterType *string
fragmenter *string
highlightQuery Query
noMatchSize *int
phraseLimit *int
options map[string]interface{}
forceSource *bool
useExplicitFieldOrder bool
}
func NewHighlight() *Highlight {
hl := &Highlight{
fields: make([]*HighlighterField, 0),
preTags: make([]string, 0),
postTags: make([]string, 0),
boundaryChars: make([]rune, 0),
options: make(map[string]interface{}),
}
return hl
}
func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
hl.fields = append(hl.fields, fields...)
return hl
}
func (hl *Highlight) Field(name string) *Highlight {
field := NewHighlighterField(name)
hl.fields = append(hl.fields, field)
return hl
}
func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
hl.tagsSchema = &schemaName
return hl
}
func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
hl.highlightFilter = &highlightFilter
return hl
}
func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
hl.fragmentSize = &fragmentSize
return hl
}
func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
hl.numOfFragments = &numOfFragments
return hl
}
func (hl *Highlight) Encoder(encoder string) *Highlight {
hl.encoder = &encoder
return hl
}
func (hl *Highlight) PreTags(preTags ...string) *Highlight {
hl.preTags = append(hl.preTags, preTags...)
return hl
}
func (hl *Highlight) PostTags(postTags ...string) *Highlight {
hl.postTags = append(hl.postTags, postTags...)
return hl
}
func (hl *Highlight) Order(order string) *Highlight {
hl.order = &order
return hl
}
func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
hl.requireFieldMatch = &requireFieldMatch
return hl
}
func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
hl.boundaryMaxScan = &boundaryMaxScan
return hl
}
func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight {
hl.boundaryChars = append(hl.boundaryChars, boundaryChars...)
return hl
}
func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
hl.highlighterType = &highlighterType
return hl
}
func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
hl.fragmenter = &fragmenter
return hl
}
// HighlighQuery sets a query to be used for highlighting. (The missing "t"
// in the method name is an upstream typo, preserved here as vendored.)
func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
hl.highlightQuery = highlightQuery
return hl
}
func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
hl.noMatchSize = &noMatchSize
return hl
}
func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
hl.options = options
return hl
}
func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
hl.forceSource = &forceSource
return hl
}
func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
hl.useExplicitFieldOrder = useExplicitFieldOrder
return hl
}
// Source creates the JSON object for the "highlight" section of a search request.
func (hl *Highlight) Source() (interface{}, error) {
// Returns the map inside of "highlight":
// "highlight":{
// ... this ...
// }
source := make(map[string]interface{})
if hl.tagsSchema != nil {
source["tags_schema"] = *hl.tagsSchema
}
if hl.preTags != nil && len(hl.preTags) > 0 {
source["pre_tags"] = hl.preTags
}
if hl.postTags != nil && len(hl.postTags) > 0 {
source["post_tags"] = hl.postTags
}
if hl.order != nil {
source["order"] = *hl.order
}
if hl.highlightFilter != nil {
source["highlight_filter"] = *hl.highlightFilter
}
if hl.fragmentSize != nil {
source["fragment_size"] = *hl.fragmentSize
}
if hl.numOfFragments != nil {
source["number_of_fragments"] = *hl.numOfFragments
}
if hl.encoder != nil {
source["encoder"] = *hl.encoder
}
if hl.requireFieldMatch != nil {
source["require_field_match"] = *hl.requireFieldMatch
}
if hl.boundaryMaxScan != nil {
source["boundary_max_scan"] = *hl.boundaryMaxScan
}
if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 {
source["boundary_chars"] = hl.boundaryChars
}
if hl.highlighterType != nil {
source["type"] = *hl.highlighterType
}
if hl.fragmenter != nil {
source["fragmenter"] = *hl.fragmenter
}
if hl.highlightQuery != nil {
src, err := hl.highlightQuery.Source()
if err != nil {
return nil, err
}
source["highlight_query"] = src
}
if hl.noMatchSize != nil {
source["no_match_size"] = *hl.noMatchSize
}
if hl.phraseLimit != nil {
source["phrase_limit"] = *hl.phraseLimit
}
if hl.options != nil && len(hl.options) > 0 {
source["options"] = hl.options
}
if hl.forceSource != nil {
source["force_source"] = *hl.forceSource
}
if hl.fields != nil && len(hl.fields) > 0 {
if hl.useExplicitFieldOrder {
// Use a slice for the fields
fields := make([]map[string]interface{}, 0)
for _, field := range hl.fields {
src, err := field.Source()
if err != nil {
return nil, err
}
fmap := make(map[string]interface{})
fmap[field.Name] = src
fields = append(fields, fmap)
}
source["fields"] = fields
} else {
// Use a map for the fields
fields := make(map[string]interface{})
for _, field := range hl.fields {
src, err := field.Source()
if err != nil {
return nil, err
}
fields[field.Name] = src
}
source["fields"] = fields
}
}
return source, nil
}
// HighlighterField specifies a highlighted field.
type HighlighterField struct {
Name string
preTags []string
postTags []string
fragmentSize int
fragmentOffset int
numOfFragments int
highlightFilter *bool
order *string
requireFieldMatch *bool
boundaryMaxScan int
boundaryChars []rune
highlighterType *string
fragmenter *string
highlightQuery Query
noMatchSize *int
matchedFields []string
phraseLimit *int
options map[string]interface{}
forceSource *bool
}
// NewHighlighterField creates a field entry for the highlight section,
// initialized with the library's "unset" defaults (-1 for numeric options).
func NewHighlighterField(name string) *HighlighterField {
return &HighlighterField{
Name: name,
preTags: make([]string, 0),
postTags: make([]string, 0),
fragmentSize: -1,
fragmentOffset: -1,
numOfFragments: -1,
boundaryMaxScan: -1,
boundaryChars: make([]rune, 0),
matchedFields: make([]string, 0),
options: make(map[string]interface{}),
}
}
func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
f.preTags = append(f.preTags, preTags...)
return f
}
func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
f.postTags = append(f.postTags, postTags...)
return f
}
func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
f.fragmentSize = fragmentSize
return f
}
func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
f.fragmentOffset = fragmentOffset
return f
}
func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
f.numOfFragments = numOfFragments
return f
}
func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
f.highlightFilter = &highlightFilter
return f
}
func (f *HighlighterField) Order(order string) *HighlighterField {
f.order = &order
return f
}
func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
f.requireFieldMatch = &requireFieldMatch
return f
}
func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
f.boundaryMaxScan = boundaryMaxScan
return f
}
func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
f.boundaryChars = append(f.boundaryChars, boundaryChars...)
return f
}
func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
f.highlighterType = &highlighterType
return f
}
func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
f.fragmenter = &fragmenter
return f
}
func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
f.highlightQuery = highlightQuery
return f
}
func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
f.noMatchSize = &noMatchSize
return f
}
func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
f.options = options
return f
}
func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
f.matchedFields = append(f.matchedFields, matchedFields...)
return f
}
func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
f.phraseLimit = &phraseLimit
return f
}
func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
f.forceSource = &forceSource
return f
}
// Source creates the JSON object for this field inside the "fields"
// part of the highlight section.
func (f *HighlighterField) Source() (interface{}, error) {
source := make(map[string]interface{})
if f.preTags != nil && len(f.preTags) > 0 {
source["pre_tags"] = f.preTags
}
if f.postTags != nil && len(f.postTags) > 0 {
source["post_tags"] = f.postTags
}
if f.fragmentSize != -1 {
source["fragment_size"] = f.fragmentSize
}
if f.numOfFragments != -1 {
source["number_of_fragments"] = f.numOfFragments
}
if f.fragmentOffset != -1 {
source["fragment_offset"] = f.fragmentOffset
}
if f.highlightFilter != nil {
source["highlight_filter"] = *f.highlightFilter
}
if f.order != nil {
source["order"] = *f.order
}
if f.requireFieldMatch != nil {
source["require_field_match"] = *f.requireFieldMatch
}
if f.boundaryMaxScan != -1 {
source["boundary_max_scan"] = f.boundaryMaxScan
}
if f.boundaryChars != nil && len(f.boundaryChars) > 0 {
source["boundary_chars"] = f.boundaryChars
}
if f.highlighterType != nil {
source["type"] = *f.highlighterType
}
if f.fragmenter != nil {
source["fragmenter"] = *f.fragmenter
}
if f.highlightQuery != nil {
src, err := f.highlightQuery.Source()
if err != nil {
return nil, err
}
source["highlight_query"] = src
}
if f.noMatchSize != nil {
source["no_match_size"] = *f.noMatchSize
}
if f.matchedFields != nil && len(f.matchedFields) > 0 {
source["matched_fields"] = f.matchedFields
}
if f.phraseLimit != nil {
source["phrase_limit"] = *f.phraseLimit
}
if f.options != nil && len(f.options) > 0 {
source["options"] = f.options
}
if f.forceSource != nil {
source["force_source"] = *f.forceSource
}
return source, nil
}
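To see what the builder above emits, here is a short sketch that assembles a Highlight and prints the JSON object it contributes under the "highlight" key of a search request. The field name and tags are illustrative; only methods defined in this file are used.

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	// Highlight matches in the "message" field, wrapped in <em>...</em>,
	// returning at most three fragments of roughly 150 characters each.
	hl := elastic.NewHighlight().
		Field("message").
		PreTags("<em>").
		PostTags("</em>").
		FragmentSize(150).
		NumOfFragments(3)
	src, err := hl.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.MarshalIndent(src, "", "  ")
	fmt.Println(string(out))
}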

vendor/gopkg.in/olivere/elastic.v3/index.go generated vendored Normal file
@@ -0,0 +1,283 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// IndexService adds or updates a typed JSON document in a specified index,
// making it searchable.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html
// for details.
type IndexService struct {
client *Client
pretty bool
id string
index string
typ string
parent string
replication string
routing string
timeout string
timestamp string
ttl string
version interface{}
opType string
versionType string
refresh *bool
consistency string
bodyJson interface{}
bodyString string
}
// NewIndexService creates a new IndexService.
func NewIndexService(client *Client) *IndexService {
return &IndexService{
client: client,
}
}
// Id is the document ID.
func (s *IndexService) Id(id string) *IndexService {
s.id = id
return s
}
// Index is the name of the index.
func (s *IndexService) Index(index string) *IndexService {
s.index = index
return s
}
// Type is the type of the document.
func (s *IndexService) Type(typ string) *IndexService {
s.typ = typ
return s
}
// Consistency is an explicit write consistency setting for the operation.
func (s *IndexService) Consistency(consistency string) *IndexService {
s.consistency = consistency
return s
}
// Refresh the index after performing the operation.
func (s *IndexService) Refresh(refresh bool) *IndexService {
s.refresh = &refresh
return s
}
// Ttl is an expiration time for the document.
func (s *IndexService) Ttl(ttl string) *IndexService {
s.ttl = ttl
return s
}
// TTL is an expiration time for the document (alias for Ttl).
func (s *IndexService) TTL(ttl string) *IndexService {
s.ttl = ttl
return s
}
// Version is an explicit version number for concurrency control.
func (s *IndexService) Version(version interface{}) *IndexService {
s.version = version
return s
}
// OpType is an explicit operation type, i.e. "create" or "index" (default).
func (s *IndexService) OpType(opType string) *IndexService {
s.opType = opType
return s
}
// Parent is the ID of the parent document.
func (s *IndexService) Parent(parent string) *IndexService {
s.parent = parent
return s
}
// Replication is a specific replication type.
func (s *IndexService) Replication(replication string) *IndexService {
s.replication = replication
return s
}
// Routing is a specific routing value.
func (s *IndexService) Routing(routing string) *IndexService {
s.routing = routing
return s
}
// Timeout is an explicit operation timeout.
func (s *IndexService) Timeout(timeout string) *IndexService {
s.timeout = timeout
return s
}
// Timestamp is an explicit timestamp for the document.
func (s *IndexService) Timestamp(timestamp string) *IndexService {
s.timestamp = timestamp
return s
}
// VersionType is a specific version type.
func (s *IndexService) VersionType(versionType string) *IndexService {
s.versionType = versionType
return s
}
// Pretty indicates that the JSON response should be indented and human readable.
func (s *IndexService) Pretty(pretty bool) *IndexService {
s.pretty = pretty
return s
}
// BodyJson is the document as a serializable JSON interface.
func (s *IndexService) BodyJson(body interface{}) *IndexService {
s.bodyJson = body
return s
}
// BodyString is the document encoded as a string.
func (s *IndexService) BodyString(body string) *IndexService {
s.bodyString = body
return s
}
// buildURL builds the URL for the operation.
func (s *IndexService) buildURL() (string, string, url.Values, error) {
var err error
var method, path string
if s.id != "" {
// Create document with manual id
method = "PUT"
path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
"id": s.id,
"index": s.index,
"type": s.typ,
})
} else {
// Automatic ID generation
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation
method = "POST"
path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{
"index": s.index,
"type": s.typ,
})
}
if err != nil {
return "", "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.consistency != "" {
params.Set("consistency", s.consistency)
}
if s.refresh != nil {
params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
}
if s.opType != "" {
params.Set("op_type", s.opType)
}
if s.parent != "" {
params.Set("parent", s.parent)
}
if s.replication != "" {
params.Set("replication", s.replication)
}
if s.routing != "" {
params.Set("routing", s.routing)
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
}
if s.timestamp != "" {
params.Set("timestamp", s.timestamp)
}
if s.ttl != "" {
params.Set("ttl", s.ttl)
}
if s.version != nil {
params.Set("version", fmt.Sprintf("%v", s.version))
}
if s.versionType != "" {
params.Set("version_type", s.versionType)
}
return method, path, params, nil
}
// Validate checks if the operation is valid.
func (s *IndexService) Validate() error {
var invalid []string
if s.index == "" {
invalid = append(invalid, "Index")
}
if s.typ == "" {
invalid = append(invalid, "Type")
}
if s.bodyString == "" && s.bodyJson == nil {
invalid = append(invalid, "BodyJson")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *IndexService) Do() (*IndexResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
method, path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Setup HTTP request body
var body interface{}
if s.bodyJson != nil {
body = s.bodyJson
} else {
body = s.bodyString
}
// Get HTTP response
res, err := s.client.PerformRequest(method, path, params, body)
if err != nil {
return nil, err
}
// Return operation response
ret := new(IndexResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// IndexResponse is the result of indexing a document in Elasticsearch.
type IndexResponse struct {
// TODO _shards { total, failed, successful }
Index string `json:"_index"`
Type string `json:"_type"`
Id string `json:"_id"`
Version int `json:"_version"`
Created bool `json:"created"`
}
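A minimal sketch of indexing one document with IndexService; elastic.NewClient and elastic.SetURL come from the library's client setup (not part of this hunk), and the URL, index, type, and document are illustrative.

package main

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		panic(err)
	}
	doc := map[string]interface{}{"user": "alice", "message": "hello"}
	// With an explicit Id this issues PUT /tweets/tweet/1; Refresh(true)
	// makes the document searchable before Do returns.
	res, err := elastic.NewIndexService(client).
		Index("tweets").
		Type("tweet").
		Id("1").
		BodyJson(doc).
		Refresh(true).
		Do()
	if err != nil {
		panic(err)
	}
	fmt.Printf("indexed %s/%s/%s at version %d\n", res.Index, res.Type, res.Id, res.Version)
}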

vendor/gopkg.in/olivere/elastic.v3/indices_close.go generated vendored Normal file
@@ -0,0 +1,152 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// IndicesCloseService closes an index.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html
// for details.
type IndicesCloseService struct {
client *Client
pretty bool
index string
timeout string
masterTimeout string
ignoreUnavailable *bool
allowNoIndices *bool
expandWildcards string
}
// NewIndicesCloseService creates and initializes a new IndicesCloseService.
func NewIndicesCloseService(client *Client) *IndicesCloseService {
return &IndicesCloseService{client: client}
}
// Index is the name of the index to close.
func (s *IndicesCloseService) Index(index string) *IndicesCloseService {
s.index = index
return s
}
// Timeout is an explicit operation timeout.
func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService {
s.timeout = timeout
return s
}
// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService {
s.masterTimeout = masterTimeout
return s
}
// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService {
s.ignoreUnavailable = &ignoreUnavailable
return s
}
// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices. (This includes the `_all`
// string or when no indices have been specified.)
func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService {
s.allowNoIndices = &allowNoIndices
return s
}
// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService {
s.expandWildcards = expandWildcards
return s
}
// Pretty indicates that the JSON response should be indented and human readable.
func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *IndicesCloseService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/{index}/_close", map[string]string{
"index": s.index,
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
}
if s.expandWildcards != "" {
params.Set("expand_wildcards", s.expandWildcards)
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *IndicesCloseService) Validate() error {
var invalid []string
if s.index == "" {
invalid = append(invalid, "Index")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *IndicesCloseService) Do() (*IndicesCloseResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("POST", path, params, nil)
if err != nil {
return nil, err
}
// Return operation response
ret := new(IndicesCloseResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// IndicesCloseResponse is the response of IndicesCloseService.Do.
type IndicesCloseResponse struct {
Acknowledged bool `json:"acknowledged"`
}
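A sketch of closing an index with the service above, assuming an already-connected *elastic.Client; the helper name closeIndex is hypothetical.

package example

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

// closeIndex closes the named index and fails if the cluster
// did not acknowledge the request.
func closeIndex(client *elastic.Client, name string) error {
	res, err := elastic.NewIndicesCloseService(client).Index(name).Do()
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("close of index %q was not acknowledged", name)
	}
	return nil
}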

vendor/gopkg.in/olivere/elastic.v3/indices_create.go generated vendored Normal file
@@ -0,0 +1,128 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"errors"
"net/url"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// IndicesCreateService creates a new index.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
// for details.
type IndicesCreateService struct {
client *Client
pretty bool
index string
timeout string
masterTimeout string
bodyJson interface{}
bodyString string
}
// NewIndicesCreateService returns a new IndicesCreateService.
func NewIndicesCreateService(client *Client) *IndicesCreateService {
return &IndicesCreateService{client: client}
}
// Index is the name of the index to create.
func (b *IndicesCreateService) Index(index string) *IndicesCreateService {
b.index = index
return b
}
// Timeout the explicit operation timeout, e.g. "5s".
func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService {
s.timeout = timeout
return s
}
// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService {
s.masterTimeout = masterTimeout
return s
}
// Body specifies the configuration of the index as a string.
// It is an alias for BodyString.
func (b *IndicesCreateService) Body(body string) *IndicesCreateService {
b.bodyString = body
return b
}
// BodyString specifies the configuration of the index as a string.
func (b *IndicesCreateService) BodyString(body string) *IndicesCreateService {
b.bodyString = body
return b
}
// BodyJson specifies the configuration of the index. The interface{} will
// be serialized as a JSON document, so use a map[string]interface{}.
func (b *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService {
b.bodyJson = body
return b
}
// Pretty indicates that the JSON response should be indented and human readable.
func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService {
b.pretty = pretty
return b
}
// Do executes the operation.
func (b *IndicesCreateService) Do() (*IndicesCreateResult, error) {
if b.index == "" {
return nil, errors.New("missing index name")
}
// Build url
path, err := uritemplates.Expand("/{index}", map[string]string{
"index": b.index,
})
if err != nil {
return nil, err
}
params := make(url.Values)
if b.pretty {
params.Set("pretty", "1")
}
if b.masterTimeout != "" {
params.Set("master_timeout", b.masterTimeout)
}
if b.timeout != "" {
params.Set("timeout", b.timeout)
}
// Setup HTTP request body
var body interface{}
if b.bodyJson != nil {
body = b.bodyJson
} else {
body = b.bodyString
}
// Get response
res, err := b.client.PerformRequest("PUT", path, params, body)
if err != nil {
return nil, err
}
ret := new(IndicesCreateResult)
if err := b.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// -- Result of a create index request.
// IndicesCreateResult is the outcome of creating a new index.
type IndicesCreateResult struct {
Acknowledged bool `json:"acknowledged"`
}
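A sketch of creating an index with explicit settings, again assuming a connected *elastic.Client; the helper name and the single-shard settings map are illustrative.

package example

import (
	elastic "gopkg.in/olivere/elastic.v3"
)

// createIndex creates the named index with a single shard and reports
// whether the cluster acknowledged the request.
func createIndex(client *elastic.Client, name string) (bool, error) {
	body := map[string]interface{}{
		"settings": map[string]interface{}{
			"number_of_shards": 1,
		},
	}
	res, err := elastic.NewIndicesCreateService(client).
		Index(name).
		BodyJson(body).
		Do()
	if err != nil {
		return false, err
	}
	return res.Acknowledged, nil
}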

vendor/gopkg.in/olivere/elastic.v3/indices_delete.go generated vendored Normal file
@@ -0,0 +1,128 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// IndicesDeleteService deletes existing indices.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
// for details.
type IndicesDeleteService struct {
client *Client
pretty bool
index []string
timeout string
masterTimeout string
}
// NewIndicesDeleteService creates and initializes a new IndicesDeleteService.
func NewIndicesDeleteService(client *Client) *IndicesDeleteService {
return &IndicesDeleteService{
client: client,
index: make([]string, 0),
}
}
// Index adds the list of indices to delete.
// Use `_all` or `*` string to delete all indices.
func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService {
s.index = index
return s
}
// Timeout is an explicit operation timeout.
func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService {
s.timeout = timeout
return s
}
// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService {
s.masterTimeout = masterTimeout
return s
}
// Pretty indicates that the JSON response should be indented and human readable.
func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *IndicesDeleteService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/{index}", map[string]string{
"index": strings.Join(s.index, ","),
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *IndicesDeleteService) Validate() error {
var invalid []string
if len(s.index) == 0 {
invalid = append(invalid, "Index")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *IndicesDeleteService) Do() (*IndicesDeleteResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("DELETE", path, params, nil)
if err != nil {
return nil, err
}
// Return operation response
ret := new(IndicesDeleteResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// -- Result of a delete index request.
// IndicesDeleteResponse is the response of IndicesDeleteService.Do.
type IndicesDeleteResponse struct {
Acknowledged bool `json:"acknowledged"`
}
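A sketch of deleting several indices in one request (hypothetical helper, connected client assumed):

package example

import (
	elastic "gopkg.in/olivere/elastic.v3"
)

// deleteIndices deletes the given indices in a single request; per the
// service docs, `_all` or `*` would delete every index, so call with care.
func deleteIndices(client *elastic.Client, names []string) (bool, error) {
	res, err := elastic.NewIndicesDeleteService(client).Index(names).Do()
	if err != nil {
		return false, err
	}
	return res.Acknowledged, nil
}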

@@ -0,0 +1,121 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// IndicesDeleteTemplateService deletes index templates.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
type IndicesDeleteTemplateService struct {
client *Client
pretty bool
name string
timeout string
masterTimeout string
}
// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
return &IndicesDeleteTemplateService{
client: client,
}
}
// Name is the name of the template.
func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
s.name = name
return s
}
// Timeout is an explicit operation timeout.
func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
s.timeout = timeout
return s
}
// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
s.masterTimeout = masterTimeout
return s
}
// Pretty indicates that the JSON response should be indented and human readable.
func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/_template/{name}", map[string]string{
"name": s.name,
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *IndicesDeleteTemplateService) Validate() error {
var invalid []string
if s.name == "" {
invalid = append(invalid, "Name")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("DELETE", path, params, nil)
if err != nil {
return nil, err
}
// Return operation response
ret := new(IndicesDeleteTemplateResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
type IndicesDeleteTemplateResponse struct {
Acknowledged bool `json:"acknowledged,omitempty"`
}
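The same shape works for removing an index template (hypothetical helper, connected client assumed):

package example

import (
	elastic "gopkg.in/olivere/elastic.v3"
)

// deleteTemplate removes the index template registered under name and
// reports whether the cluster acknowledged the deletion.
func deleteTemplate(client *elastic.Client, name string) (bool, error) {
	res, err := elastic.NewIndicesDeleteTemplateService(client).Name(name).Do()
	if err != nil {
		return false, err
	}
	return res.Acknowledged, nil
}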

@@ -0,0 +1,130 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// IndicesDeleteWarmerService deletes one or more warmers.
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html.
type IndicesDeleteWarmerService struct {
client *Client
pretty bool
index []string
name []string
masterTimeout string
}
// NewIndicesDeleteWarmerService creates a new IndicesDeleteWarmerService.
func NewIndicesDeleteWarmerService(client *Client) *IndicesDeleteWarmerService {
return &IndicesDeleteWarmerService{
client: client,
index: make([]string, 0),
name: make([]string, 0),
}
}
// Index is a list of index names to delete warmers from (supports
// wildcards); use `_all` or omit to target all indices.
func (s *IndicesDeleteWarmerService) Index(indices ...string) *IndicesDeleteWarmerService {
s.index = append(s.index, indices...)
return s
}
// Name is a list of warmer names to delete (supports wildcards);
// use `_all` to delete all warmers in the specified indices.
func (s *IndicesDeleteWarmerService) Name(name ...string) *IndicesDeleteWarmerService {
s.name = append(s.name, name...)
return s
}
// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesDeleteWarmerService) MasterTimeout(masterTimeout string) *IndicesDeleteWarmerService {
s.masterTimeout = masterTimeout
return s
}
// Pretty indicates that the JSON response should be indented and human readable.
func (s *IndicesDeleteWarmerService) Pretty(pretty bool) *IndicesDeleteWarmerService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *IndicesDeleteWarmerService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{
"index": strings.Join(s.index, ","),
"name": strings.Join(s.name, ","),
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
}
if len(s.name) > 0 {
params.Set("name", strings.Join(s.name, ","))
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *IndicesDeleteWarmerService) Validate() error {
var invalid []string
if len(s.index) == 0 {
invalid = append(invalid, "Index")
}
if len(s.name) == 0 {
invalid = append(invalid, "Name")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *IndicesDeleteWarmerService) Do() (*DeleteWarmerResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Get HTTP response
res, err := s.client.PerformRequest("DELETE", path, params, nil)
if err != nil {
return nil, err
}
// Return operation response
ret := new(DeleteWarmerResponse)
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// DeleteWarmerResponse is the response of IndicesDeleteWarmerService.Do.
type DeleteWarmerResponse struct {
Acknowledged bool `json:"acknowledged"`
}
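A sketch of deleting warmers across indices; both arguments support wildcards and `_all`, as noted in the setters above (hypothetical helper, connected client assumed).

package example

import (
	elastic "gopkg.in/olivere/elastic.v3"
)

// deleteWarmers removes the named warmers from the given indices.
func deleteWarmers(client *elastic.Client, indices, names []string) (bool, error) {
	res, err := elastic.NewIndicesDeleteWarmerService(client).
		Index(indices...).
		Name(names...).
		Do()
	if err != nil {
		return false, err
	}
	return res.Acknowledged, nil
}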

vendor/gopkg.in/olivere/elastic.v3/indices_exists.go generated vendored Normal file
@@ -0,0 +1,149 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/http"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// IndicesExistsService checks if an index or indices exist or not.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html
// for details.
type IndicesExistsService struct {
client *Client
pretty bool
index []string
ignoreUnavailable *bool
allowNoIndices *bool
expandWildcards string
local *bool
}
// NewIndicesExistsService creates and initializes a new IndicesExistsService.
func NewIndicesExistsService(client *Client) *IndicesExistsService {
return &IndicesExistsService{
client: client,
index: make([]string, 0),
}
}
// Index is a list of one or more indices to check.
func (s *IndicesExistsService) Index(index []string) *IndicesExistsService {
s.index = index
return s
}
// AllowNoIndices indicates whether to ignore if a wildcard indices expression
// resolves into no concrete indices. (This includes the `_all` string or
// when no indices have been specified.)
func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService {
s.allowNoIndices = &allowNoIndices
return s
}
// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService {
s.expandWildcards = expandWildcards
return s
}
// Local, when set, returns local information and does not retrieve the state
// from master node (default: false).
func (s *IndicesExistsService) Local(local bool) *IndicesExistsService {
s.local = &local
return s
}
// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService {
s.ignoreUnavailable = &ignoreUnavailable
return s
}
// Pretty indicates that the JSON response should be indented and human readable.
func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *IndicesExistsService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/{index}", map[string]string{
"index": strings.Join(s.index, ","),
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.local != nil {
params.Set("local", fmt.Sprintf("%v", *s.local))
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
}
if s.expandWildcards != "" {
params.Set("expand_wildcards", s.expandWildcards)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *IndicesExistsService) Validate() error {
var invalid []string
if len(s.index) == 0 {
invalid = append(invalid, "Index")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *IndicesExistsService) Do() (bool, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return false, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return false, err
}
// Get HTTP response
res, err := s.client.PerformRequest("HEAD", path, params, nil, 404)
if err != nil {
return false, err
}
// Return operation response
switch res.StatusCode {
case http.StatusOK:
return true, nil
case http.StatusNotFound:
return false, nil
default:
return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
}
}
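Because Do maps HTTP 200 to true and 404 to false, an existence check reduces to a one-liner (hypothetical helper, connected client assumed):

package example

import (
	elastic "gopkg.in/olivere/elastic.v3"
)

// indexExists reports whether every index in names exists.
func indexExists(client *elastic.Client, names ...string) (bool, error) {
	return elastic.NewIndicesExistsService(client).Index(names).Do()
}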

@@ -0,0 +1,112 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/http"
"net/url"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// IndicesExistsTemplateService checks if a given template exists.
// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists
// for documentation.
type IndicesExistsTemplateService struct {
client *Client
pretty bool
name string
local *bool
}
// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService.
func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService {
return &IndicesExistsTemplateService{
client: client,
}
}
// Name is the name of the template.
func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService {
s.name = name
return s
}
// Local indicates whether to return local information, i.e. do not retrieve
// the state from master node (default: false).
func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService {
s.local = &local
return s
}
// Pretty indicates that the JSON response should be indented and human readable.
func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/_template/{name}", map[string]string{
"name": s.name,
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.local != nil {
params.Set("local", fmt.Sprintf("%v", *s.local))
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *IndicesExistsTemplateService) Validate() error {
var invalid []string
if s.name == "" {
invalid = append(invalid, "Name")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *IndicesExistsTemplateService) Do() (bool, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return false, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return false, err
}
// Get HTTP response
res, err := s.client.PerformRequest("HEAD", path, params, nil, 404)
if err != nil {
return false, err
}
// Return operation response
switch res.StatusCode {
case http.StatusOK:
return true, nil
case http.StatusNotFound:
return false, nil
default:
return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
}
}
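The template check follows the same pattern (hypothetical helper, connected client assumed):

package example

import (
	elastic "gopkg.in/olivere/elastic.v3"
)

// templateExists reports whether an index template with the given name
// is registered in the cluster.
func templateExists(client *elastic.Client, name string) (bool, error) {
	return elastic.NewIndicesExistsTemplateService(client).Name(name).Do()
}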

@@ -0,0 +1,161 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/http"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
// IndicesExistsTypeService checks if one or more types exist in one or more indices.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-types-exists.html
// for details.
type IndicesExistsTypeService struct {
client *Client
pretty bool
typ []string
index []string
expandWildcards string
local *bool
ignoreUnavailable *bool
allowNoIndices *bool
}
// NewIndicesExistsTypeService creates a new IndicesExistsTypeService.
func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService {
return &IndicesExistsTypeService{
client: client,
index: make([]string, 0),
typ: make([]string, 0),
}
}
// Index is a list of index names; use `_all` to check the types across all indices.
func (s *IndicesExistsTypeService) Index(indices ...string) *IndicesExistsTypeService {
s.index = append(s.index, indices...)
return s
}
// Type is a list of document types to check.
func (s *IndicesExistsTypeService) Type(types ...string) *IndicesExistsTypeService {
s.typ = append(s.typ, types...)
return s
}
// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService {
s.ignoreUnavailable = &ignoreUnavailable
return s
}
// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// (This includes the `_all` string or when no indices have been specified.)
func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService {
s.allowNoIndices = &allowNoIndices
return s
}
// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService {
s.expandWildcards = expandWildcards
return s
}
// Local specifies whether to return local information, i.e. do not retrieve
// the state from master node (default: false).
func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService {
s.local = &local
return s
}
// Pretty indicates that the JSON response should be indented and human readable.
func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService {
s.pretty = pretty
return s
}
// buildURL builds the URL for the operation.
func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
// Build URL
path, err := uritemplates.Expand("/{index}/{type}", map[string]string{
"index": strings.Join(s.index, ","),
"type": strings.Join(s.typ, ","),
})
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
}
if s.expandWildcards != "" {
params.Set("expand_wildcards", s.expandWildcards)
}
if s.local != nil {
params.Set("local", fmt.Sprintf("%v", *s.local))
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *IndicesExistsTypeService) Validate() error {
var invalid []string
if len(s.index) == 0 {
invalid = append(invalid, "Index")
}
if len(s.typ) == 0 {
invalid = append(invalid, "Type")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *IndicesExistsTypeService) Do() (bool, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return false, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return false, err
}
// Get HTTP response
res, err := s.client.PerformRequest("HEAD", path, params, nil, 404)
if err != nil {
return false, err
}
// Return operation response
switch res.StatusCode {
case http.StatusOK:
return true, nil
case http.StatusNotFound:
return false, nil
default:
return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
}
}
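And likewise for checking a mapping type within an index (hypothetical helper, connected client assumed):

package example

import (
	elastic "gopkg.in/olivere/elastic.v3"
)

// typeExists reports whether the given mapping type exists in the index.
func typeExists(client *elastic.Client, index, typ string) (bool, error) {
	return elastic.NewIndicesExistsTypeService(client).
		Index(index).
		Type(typ).
		Do()
}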

Some files were not shown because too many files have changed in this diff.