diff --git a/api-errors.go b/api-errors.go index 7d7c22efc..10f596b86 100644 --- a/api-errors.go +++ b/api-errors.go @@ -105,6 +105,12 @@ const ( ErrBucketAlreadyOwnedByYou // Add new error codes here. + // Bucket notification related errors. + ErrEventNotification + ErrARNNotification + ErrRegionNotification + ErrOverlappingFilterNotification + // S3 extended errors. ErrContentSHA256Mismatch @@ -411,6 +417,28 @@ var errorCodeResponse = map[APIErrorCode]APIError{ HTTPStatusCode: http.StatusConflict, }, + /// Bucket notification related errors. + ErrEventNotification: { + Code: "InvalidArgument", + Description: "A specified event is not supported for notifications.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrARNNotification: { + Code: "InvalidArgument", + Description: "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrRegionNotification: { + Code: "InvalidArgument", + Description: "A specified destination is in a different region than the bucket. You must use a destination that resides in the same region as the bucket.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrOverlappingFilterNotification: { + Code: "InvalidArgument", + Description: "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.", + HTTPStatusCode: http.StatusBadRequest, + }, + /// S3 extensions. 
ErrContentSHA256Mismatch: { Code: "XAmzContentSHA256Mismatch", diff --git a/api-router.go b/api-router.go index 6ad0696d2..e69095cb6 100644 --- a/api-router.go +++ b/api-router.go @@ -60,6 +60,8 @@ func registerAPIRouter(mux *router.Router, api objectAPIHandlers) { bucket.Methods("GET").HandlerFunc(api.GetBucketLocationHandler).Queries("location", "") // GetBucketPolicy bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "") + // GetBucketNotification + bucket.Methods("GET").HandlerFunc(api.GetBucketNotificationHandler).Queries("notification", "") // ListMultipartUploads bucket.Methods("GET").HandlerFunc(api.ListMultipartUploadsHandler).Queries("uploads", "") // ListObjectsV2 @@ -68,6 +70,8 @@ func registerAPIRouter(mux *router.Router, api objectAPIHandlers) { bucket.Methods("GET").HandlerFunc(api.ListObjectsV1Handler) // PutBucketPolicy bucket.Methods("PUT").HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "") + // PutBucketNotification + bucket.Methods("PUT").HandlerFunc(api.PutBucketNotificationHandler).Queries("notification", "") // PutBucket bucket.Methods("PUT").HandlerFunc(api.PutBucketHandler) // HeadBucket diff --git a/bucket-handlers.go b/bucket-handlers.go index 504101915..f1511f89c 100644 --- a/bucket-handlers.go +++ b/bucket-handlers.go @@ -23,7 +23,6 @@ import ( "encoding/xml" "io" "io/ioutil" - "mime/multipart" "net/http" "net/url" "strings" @@ -319,9 +318,10 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req // requests which do not follow valid region requirements. if s3Error := isValidLocationConstraint(r); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } - // Make bucket. + + // Proceed to creating a bucket.
err := api.ObjectAPI.MakeBucket(bucket) if err != nil { errorIf(err, "Unable to create a bucket.") @@ -333,32 +332,6 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req writeSuccessResponse(w, nil) } -func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, error) { - /// HTML Form values - formValues := make(map[string]string) - filePart := new(bytes.Buffer) - var err error - for err == nil { - var part *multipart.Part - part, err = reader.NextPart() - if part != nil { - if part.FileName() == "" { - var buffer []byte - buffer, err = ioutil.ReadAll(part) - if err != nil { - return nil, nil, err - } - formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer) - } else { - if _, err = io.Copy(filePart, part); err != nil { - return nil, nil, err - } - } - } - } - return filePart, formValues, nil -} - // PostPolicyBucketHandler - POST policy // ---------- // This implementation of the POST operation handles object creation with a specified @@ -415,6 +388,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h }) setCommonHeaders(w) writeSuccessResponse(w, encodedSuccessResponse) + + // Load notification config if any. + nConfig, err := api.loadNotificationConfig(bucket) + if err != nil { + errorIf(err, "Unable to load notification config for bucket: \"%s\"", bucket) + return + } + + size := int64(0) // FIXME: support notify size. + // Notify event. + notifyObjectCreatedEvent(nConfig, ObjectCreatedPost, bucket, object, md5Sum, size) } // HeadBucketHandler - HEAD Bucket @@ -464,6 +448,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. vars := mux.Vars(r) bucket := vars["bucket"] + // Attempt to delete bucket. 
if err := api.ObjectAPI.DeleteBucket(bucket); err != nil { errorIf(err, "Unable to delete a bucket.") writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) diff --git a/bucket-notification-datatypes.go b/bucket-notification-datatypes.go new file mode 100644 index 000000000..36cc0f06a --- /dev/null +++ b/bucket-notification-datatypes.go @@ -0,0 +1,155 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import "encoding/xml" + +type filterRule struct { + Name string `xml:"FilterRuleName"` + Value string +} + +type keyFilter struct { + FilterRules []filterRule `xml:"FilterRule"` +} + +type notificationConfigFilter struct { + Key keyFilter `xml:"S3Key"` +} + +// Queue SQS configuration. +type queueConfig struct { + Events []string `xml:"Event"` + Filter notificationConfigFilter + ID string `xml:"Id"` + QueueArn string `xml:"Queue"` +} + +// Topic SNS configuration, this is a compliance field +// not used by minio yet. +type topicConfig struct { + Events []string `xml:"Event"` + Filter notificationConfigFilter + ID string `xml:"Id"` + TopicArn string `xml:"Topic"` +} + +// Lambda function configuration, this is a compliance field +// not used by minio yet. 
+type lambdaFuncConfig struct { + Events []string `xml:"Event"` + Filter notificationConfigFilter + ID string `xml:"Id"` + LambdaFunctionArn string `xml:"CloudFunction"` +} + +// Notification configuration structure represents the XML format of +// notification configuration of buckets. +type notificationConfig struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + QueueConfigurations []queueConfig `xml:"QueueConfiguration"` + TopicConfigurations []topicConfig `xml:"TopicConfiguration"` + LambdaConfigurations []lambdaFuncConfig `xml:"CloudFunctionConfiguration"` +} + +// EventName is AWS S3 event type: +// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +type EventName int + +const ( + // ObjectCreatedPut is s3:ObjectCreated:Put + ObjectCreatedPut EventName = iota + // ObjectCreatedPost is s3:ObjectCreated:Post + ObjectCreatedPost + // ObjectCreatedCopy is s3:ObjectCreated:Copy + ObjectCreatedCopy + // ObjectCreatedCompleteMultipartUpload is s3:ObjectCreated:CompleteMultipartUpload + ObjectCreatedCompleteMultipartUpload + // ObjectRemovedDelete is s3:ObjectRemoved:Delete + ObjectRemovedDelete +) + +// Stringer interface for event name. +func (eventName EventName) String() string { + switch eventName { + case ObjectCreatedPut: + return "s3:ObjectCreated:Put" + case ObjectCreatedPost: + return "s3:ObjectCreated:Post" + case ObjectCreatedCopy: + return "s3:ObjectCreated:Copy" + case ObjectCreatedCompleteMultipartUpload: + return "s3:ObjectCreated:CompleteMultipartUpload" + case ObjectRemovedDelete: + return "s3:ObjectRemoved:Delete" + default: + return "s3:Unknown" + } +} + +// Identity represents the user id, this is a compliance field.
+type identity struct { + PrincipalID string `json:"principalId"` +} + +func defaultIdentity() identity { + return identity{"minio"} +} + +type s3BucketReference struct { + Name string `json:"name"` + OwnerIdentity identity `json:"ownerIdentity"` + ARN string `json:"arn"` +} + +type s3ObjectReference struct { + Key string `json:"key"` + Size int64 `json:"size,omitempty"` + ETag string `json:"eTag,omitempty"` + VersionID string `json:"versionId,omitempty"` + Sequencer string `json:"sequencer"` +} + +type s3Reference struct { + SchemaVersion string `json:"s3SchemaVersion"` + ConfigurationID string `json:"configurationId"` + Bucket s3BucketReference `json:"bucket"` + Object s3ObjectReference `json:"object"` +} + +// NotificationEvent represents an Amazon S3 bucket notification event. +type NotificationEvent struct { + EventVersion string `json:"eventVersion"` + EventSource string `json:"eventSource"` + AwsRegion string `json:"awsRegion"` + EventTime string `json:"eventTime"` + EventName string `json:"eventName"` + UserIdentity identity `json:"userIdentity"` + RequestParameters map[string]string `json:"requestParameters"` + ResponseElements map[string]string `json:"responseElements"` + S3 s3Reference `json:"s3"` +} + +// Represents the minio sqs type and inputs. +type arnMinioSqs struct { + sqsType string +} + +// Stringer for constructing AWS ARN compatible string. +func (m arnMinioSqs) String() string { + return minioSqs + serverConfig.GetRegion() + ":" + m.sqsType +} diff --git a/bucket-notification-handlers.go b/bucket-notification-handlers.go new file mode 100644 index 000000000..6254cf9cf --- /dev/null +++ b/bucket-notification-handlers.go @@ -0,0 +1,211 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "bytes" + "encoding/xml" + "io" + "net/http" + "path" + + "github.com/gorilla/mux" +) + +const ( + bucketConfigPrefix = "buckets" + bucketNotificationConfig = "notification.xml" +) + +// loads notification config if any for a given bucket, returns back structured notification config. +func (api objectAPIHandlers) loadNotificationConfig(bucket string) (nConfig notificationConfig, err error) { + notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig) + var objInfo ObjectInfo + objInfo, err = api.ObjectAPI.GetObjectInfo(minioMetaBucket, notificationConfigPath) + if err != nil { + switch err.(type) { + case ObjectNotFound: + return notificationConfig{}, nil + } + return notificationConfig{}, err + } + var buffer bytes.Buffer + err = api.ObjectAPI.GetObject(minioMetaBucket, notificationConfigPath, 0, objInfo.Size, &buffer) + if err != nil { + switch err.(type) { + case ObjectNotFound: + return notificationConfig{}, nil + } + return notificationConfig{}, err + } + + // Unmarshal notification bytes. + notificationConfigBytes := buffer.Bytes() + if err = xml.Unmarshal(notificationConfigBytes, &nConfig); err != nil { + return notificationConfig{}, err + } // Successfully unmarshalled notification configuration. + + return nConfig, nil +} + +// GetBucketNotificationHandler - This implementation of the GET +// operation uses the notification subresource to return the +// notification configuration of a bucket.
If notifications are +// not enabled on the bucket, the operation returns an empty +// NotificationConfiguration element. +func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) { + // Validate request authorization. + if s3Error := checkAuth(r); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) + return + } + vars := mux.Vars(r) + bucket := vars["bucket"] + notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig) + objInfo, err := api.ObjectAPI.GetObjectInfo(minioMetaBucket, notificationConfigPath) + if err != nil { + switch err.(type) { + case ObjectNotFound: + writeSuccessResponse(w, nil) + return + } + errorIf(err, "Unable to read notification configuration.", notificationConfigPath) + writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + return + } + + // Indicates if any data was written to the http.ResponseWriter + dataWritten := false + + // io.Writer type which keeps track if any data was written. + writer := funcToWriter(func(p []byte) (int, error) { + if !dataWritten { + // Set headers on the first write. + // Set standard object headers. + setObjectHeaders(w, objInfo, nil) + + // Set any additional requested response headers. + setGetRespHeaders(w, r.URL.Query()) + + dataWritten = true + } + return w.Write(p) + }) + + // Reads the object at startOffset and writes to func writer.. + err = api.ObjectAPI.GetObject(minioMetaBucket, notificationConfigPath, 0, objInfo.Size, writer) + if err != nil { + if !dataWritten { + switch err.(type) { + case ObjectNotFound: + writeSuccessResponse(w, nil) + return + } + // Error response only if no data has been written to client yet. i.e if + // partial data has already been written before an error + // occurred then no point in setting StatusCode and + // sending error XML. 
+ apiErr := toAPIErrorCode(err) + writeErrorResponse(w, r, apiErr, r.URL.Path) + } + errorIf(err, "Unable to write to client.") + return + } + if !dataWritten { + // If ObjectAPI.GetObject did not return error and no data has + // been written it would mean that it is a 0-byte object. + // call writer.Write(nil) to set appropriate headers. + writer.Write(nil) + } + +} + +// PutBucketNotificationHandler - Minio notification feature enables +// you to receive notifications when certain events happen in your bucket. +// Using this API, you can replace an existing notification configuration. +// The configuration is an XML file that defines the event types that you +// want Minio to publish and the destination where you want Minio to publish +// an event notification when it detects an event of the specified type. +// By default, your bucket has no event notifications configured. That is, +// the notification configuration will be an empty NotificationConfiguration. +func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) { + // Validate request authorization. + if s3Error := checkAuth(r); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) + return + } + vars := mux.Vars(r) + bucket := vars["bucket"] + + _, err := api.ObjectAPI.GetBucketInfo(bucket) + if err != nil { + errorIf(err, "Unable to get bucket info.") + writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + return + } + + // If Content-Length is unknown, deny the request. PutBucketNotification + // always needs a Content-Length if incoming request is not chunked. + if !contains(r.TransferEncoding, "chunked") { + if r.ContentLength == -1 { + writeErrorResponse(w, r, ErrMissingContentLength, r.URL.Path) + return + } + } + + // Reads the incoming notification configuration.
+ var buffer bytes.Buffer + if r.ContentLength >= 0 { + _, err = io.CopyN(&buffer, r.Body, r.ContentLength) + } else { + _, err = io.Copy(&buffer, r.Body) + } + if err != nil { + errorIf(err, "Unable to read incoming body.") + writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + return + } + + var notificationCfg notificationConfig + // Unmarshal notification bytes. + notificationConfigBytes := buffer.Bytes() + if err = xml.Unmarshal(notificationConfigBytes, ¬ificationCfg); err != nil { + errorIf(err, "Unable to parse notification configuration XML.") + writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path) + return + } // Successfully marshalled notification configuration. + + // Validate unmarshalled bucket notification configuration. + if s3Error := validateNotificationConfig(notificationCfg); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) + return + } + + // Proceed to save notification configuration. + size := int64(len(notificationConfigBytes)) + data := bytes.NewReader(notificationConfigBytes) + notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig) + _, err = api.ObjectAPI.PutObject(minioMetaBucket, notificationConfigPath, size, data, nil) + if err != nil { + errorIf(err, "Unable to write bucket notification configuration.", notificationConfigPath) + writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + return + } + + // Success. + writeSuccessResponse(w, nil) +} diff --git a/bucket-notification-utils.go b/bucket-notification-utils.go new file mode 100644 index 000000000..e57597ebf --- /dev/null +++ b/bucket-notification-utils.go @@ -0,0 +1,128 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import "strings" + +// List of valid event types. +var supportedEventTypes = map[string]struct{}{ + // Object created event types. + "s3:ObjectCreated:*": {}, + "s3:ObjectCreated:Put": {}, + "s3:ObjectCreated:Post": {}, + "s3:ObjectCreated:Copy": {}, + "s3:ObjectCreated:CompleteMultipartUpload": {}, + // Object removed event types. + "s3:ObjectRemoved:*": {}, + "s3:ObjectRemoved:Delete": {}, +} + +// checkEvent - checks if an event is supported. +func checkEvent(event string) APIErrorCode { + _, ok := supportedEventTypes[event] + if !ok { + return ErrEventNotification + } + return ErrNone +} + +// checkEvents - checks given list of events if all of them are valid. +// given if one of them is invalid, this function returns an error. +func checkEvents(events []string) APIErrorCode { + for _, event := range events { + if s3Error := checkEvent(event); s3Error != ErrNone { + return s3Error + } + } + return ErrNone +} + +// checkQueueArn - check if the queue arn is valid. +func checkQueueArn(queueArn string) APIErrorCode { + if !strings.HasPrefix(queueArn, minioSqs) { + return ErrARNNotification + } + if !strings.HasPrefix(queueArn, minioSqs+serverConfig.GetRegion()+":") { + return ErrRegionNotification + } + return ErrNone +} + +// checkQueueConfig - validates queue configuration and returns error if any. +func checkQueueConfig(qConfig queueConfig) APIErrorCode { + // Check queue arn is valid. + if s3Error := checkQueueArn(qConfig.QueueArn); s3Error != ErrNone { + return s3Error + } + + // Unmarshals QueueArn into structured object.
+ sqsArn := unmarshalSqsArn(qConfig.QueueArn) + // Validate if sqsArn requested any of the known supported queues. + // Reject only when the ARN matches none of the supported queue types + // (an ARN can match at most one type, so OR-ing the negations here + // would reject every ARN). + if !isAMQPQueue(sqsArn) && !isElasticQueue(sqsArn) && !isRedisQueue(sqsArn) { + return ErrARNNotification + } + + // Check if valid events are set in queue config. + if s3Error := checkEvents(qConfig.Events); s3Error != ErrNone { + return s3Error + } + + // Success. + return ErrNone +} + +// Validates all incoming queue configs, checkQueueConfig validates if the +// input fields for each queue are not malformed and has valid configuration +// information. If validation fails bucket notifications are not enabled. +func validateQueueConfigs(queueConfigs []queueConfig) APIErrorCode { + for _, qConfig := range queueConfigs { + if s3Error := checkQueueConfig(qConfig); s3Error != ErrNone { + return s3Error + } + } + // Success. + return ErrNone +} + +// Validates all the bucket notification configuration for their validity, +// if one of the config is malformed or has invalid data it is rejected. +// Configuration is never applied partially. +func validateNotificationConfig(nConfig notificationConfig) APIErrorCode { + if s3Error := validateQueueConfigs(nConfig.QueueConfigurations); s3Error != ErrNone { + return s3Error + } + // Add validation for other configurations. + return ErrNone +} + +// Unmarshals input value of AWS ARN format into minioSqs object. +// Returned value represents minio sqs types, currently supported are +// - amqp +// - elasticsearch +func unmarshalSqsArn(queueArn string) (mSqs arnMinioSqs) { + sqsType := strings.TrimPrefix(queueArn, minioSqs+serverConfig.GetRegion()+":") + mSqs = arnMinioSqs{} + switch sqsType { + case queueTypeAMQP: + mSqs.sqsType = queueTypeAMQP + case queueTypeElastic: + mSqs.sqsType = queueTypeElastic + case queueTypeRedis: + mSqs.sqsType = queueTypeRedis + } // Add more cases here.
+ return mSqs +} diff --git a/config-migrate.go b/config-migrate.go index e54dde104..f5f571c72 100644 --- a/config-migrate.go +++ b/config-migrate.go @@ -32,6 +32,8 @@ func migrateConfig() { migrateV2ToV3() // Migrate version '3' to '4'. migrateV3ToV4() + // Migrate version '4' to '5'. + migrateV4ToV5() } // Version '1' is not supported anymore and deprecated, safe to delete. @@ -124,8 +126,8 @@ func migrateV3ToV4() { } // Save only the new fields, ignore the rest. - srvConfig := &serverConfigV4{} - srvConfig.Version = globalMinioConfigVersion + srvConfig := &configV4{} + srvConfig.Version = "4" srvConfig.Credential = cv3.Credential srvConfig.Region = cv3.Region if srvConfig.Region == "" { @@ -146,3 +148,49 @@ func migrateV3ToV4() { console.Println("Migration from version ‘" + cv3.Version + "’ to ‘" + srvConfig.Version + "’ completed successfully.") } + +// Version '4' to '5' migrates config, removes previous fields related +// to backend types and server address. This change further simplifies +// the config for future additions. +func migrateV4ToV5() { + cv4, err := loadConfigV4() + if err != nil && os.IsNotExist(err) { + return + } + fatalIf(err, "Unable to load config version ‘4’.") + if cv4.Version != "4" { + return + } + + // Save only the new fields, ignore the rest. + srvConfig := &serverConfigV5{} + srvConfig.Version = globalMinioConfigVersion + srvConfig.Credential = cv4.Credential + srvConfig.Region = cv4.Region + if srvConfig.Region == "" { + // Region needs to be set for AWS Signature Version 4. 
+ srvConfig.Region = "us-east-1" + } + srvConfig.Logger.Console = cv4.Logger.Console + srvConfig.Logger.File = cv4.Logger.File + srvConfig.Logger.Syslog = cv4.Logger.Syslog + srvConfig.Logger.AMQP = amqpLogger{ + Enable: false, + } + srvConfig.Logger.ElasticSearch = elasticSearchLogger{ + Enable: false, + } + srvConfig.Logger.Redis = redisLogger{ + Enable: false, + } + + qc, err := quick.New(srvConfig) + fatalIf(err, "Unable to initialize the quick config.") + configFile, err := getConfigFile() + fatalIf(err, "Unable to get config file.") + + err = qc.Save(configFile) + fatalIf(err, "Failed to migrate config from ‘"+cv4.Version+"’ to ‘"+srvConfig.Version+"’ failed.") + + console.Println("Migration from version ‘" + cv4.Version + "’ to ‘" + srvConfig.Version + "’ completed successfully.") +} diff --git a/config-old.go b/config-old.go index eaa0d29e5..d8bced22b 100644 --- a/config-old.go +++ b/config-old.go @@ -145,3 +145,53 @@ func loadConfigV3() (*configV3, error) { } return c, nil } + +type loggerV4 struct { + Console struct { + Enable bool `json:"enable"` + Level string `json:"level"` + } `json:"console"` + File struct { + Enable bool `json:"enable"` + Filename string `json:"fileName"` + Level string `json:"level"` + } `json:"file"` + Syslog struct { + Enable bool `json:"enable"` + Addr string `json:"address"` + Level string `json:"level"` + } `json:"syslog"` +} + +// configV4 server configuration version '4'. +type configV4 struct { + Version string `json:"version"` + + // S3 API configuration. + Credential credential `json:"credential"` + Region string `json:"region"` + + // Additional error logging configuration. + Logger loggerV4 `json:"logger"` +} + +// loadConfigV4 load config version '4'. 
+func loadConfigV4() (*configV4, error) { + configFile, err := getConfigFile() + if err != nil { + return nil, err + } + if _, err = os.Stat(configFile); err != nil { + return nil, err + } + c := &configV4{} + c.Version = "4" + qc, err := quick.New(c) + if err != nil { + return nil, err + } + if err := qc.Load(configFile); err != nil { + return nil, err + } + return c, nil +} diff --git a/config-v4.go b/config-v5.go similarity index 67% rename from config-v4.go rename to config-v5.go index abe4167a6..126e1499e 100644 --- a/config-v4.go +++ b/config-v5.go @@ -23,8 +23,8 @@ import ( "github.com/minio/minio/pkg/quick" ) -// serverConfigV4 server configuration version '4'. -type serverConfigV4 struct { +// serverConfigV5 server configuration version '5'. +type serverConfigV5 struct { Version string `json:"version"` // S3 API configuration. @@ -41,7 +41,7 @@ type serverConfigV4 struct { // initConfig - initialize server config. config version (called only once). func initConfig() error { if !isConfigFileExists() { - srvCfg := &serverConfigV4{} + srvCfg := &serverConfigV5{} srvCfg.Version = globalMinioConfigVersion srvCfg.Region = "us-east-1" srvCfg.Credential = mustGenAccessKeys() @@ -76,7 +76,7 @@ func initConfig() error { if _, err = os.Stat(configFile); err != nil { return err } - srvCfg := &serverConfigV4{} + srvCfg := &serverConfigV5{} srvCfg.Version = globalMinioConfigVersion srvCfg.rwMutex = &sync.RWMutex{} qc, err := quick.New(srvCfg) @@ -90,14 +90,15 @@ func initConfig() error { serverConfig = srvCfg // Set the version properly after the unmarshalled json is loaded. serverConfig.Version = globalMinioConfigVersion + return nil } // serverConfig server config. -var serverConfig *serverConfigV4 +var serverConfig *serverConfigV5 // GetVersion get current config version. 
-func (s serverConfigV4) GetVersion() string { +func (s serverConfigV5) GetVersion() string { s.rwMutex.RLock() defer s.rwMutex.RUnlock() return s.Version @@ -105,78 +106,117 @@ func (s serverConfigV4) GetVersion() string { /// Logger related. +func (s *serverConfigV5) SetAMQPLogger(amqpl amqpLogger) { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + s.Logger.AMQP = amqpl +} + +// GetAMQPLogger get current AMQP logger. +func (s serverConfigV5) GetAMQPLogger() amqpLogger { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + return s.Logger.AMQP +} + +func (s *serverConfigV5) SetElasticSearchLogger(esLogger elasticSearchLogger) { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + s.Logger.ElasticSearch = esLogger +} + +// GetElasticSearchLogger get current ElasicSearch logger. +func (s serverConfigV5) GetElasticSearchLogger() elasticSearchLogger { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + return s.Logger.ElasticSearch +} + +func (s *serverConfigV5) SetRedisLogger(rLogger redisLogger) { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + s.Logger.Redis = rLogger +} + +// GetRedisLogger get current Redis logger. +func (s serverConfigV5) GetRedisLogger() redisLogger { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + return s.Logger.Redis +} + // SetFileLogger set new file logger. -func (s *serverConfigV4) SetFileLogger(flogger fileLogger) { +func (s *serverConfigV5) SetFileLogger(flogger fileLogger) { s.rwMutex.Lock() defer s.rwMutex.Unlock() s.Logger.File = flogger } // GetFileLogger get current file logger. -func (s serverConfigV4) GetFileLogger() fileLogger { +func (s serverConfigV5) GetFileLogger() fileLogger { s.rwMutex.RLock() defer s.rwMutex.RUnlock() return s.Logger.File } // SetConsoleLogger set new console logger. 
-func (s *serverConfigV4) SetConsoleLogger(clogger consoleLogger) { +func (s *serverConfigV5) SetConsoleLogger(clogger consoleLogger) { s.rwMutex.Lock() defer s.rwMutex.Unlock() s.Logger.Console = clogger } // GetConsoleLogger get current console logger. -func (s serverConfigV4) GetConsoleLogger() consoleLogger { +func (s serverConfigV5) GetConsoleLogger() consoleLogger { s.rwMutex.RLock() defer s.rwMutex.RUnlock() return s.Logger.Console } // SetSyslogLogger set new syslog logger. -func (s *serverConfigV4) SetSyslogLogger(slogger syslogLogger) { +func (s *serverConfigV5) SetSyslogLogger(slogger syslogLogger) { s.rwMutex.Lock() defer s.rwMutex.Unlock() s.Logger.Syslog = slogger } // GetSyslogLogger get current syslog logger. -func (s *serverConfigV4) GetSyslogLogger() syslogLogger { +func (s *serverConfigV5) GetSyslogLogger() syslogLogger { s.rwMutex.RLock() defer s.rwMutex.RUnlock() return s.Logger.Syslog } // SetRegion set new region. -func (s *serverConfigV4) SetRegion(region string) { +func (s *serverConfigV5) SetRegion(region string) { s.rwMutex.Lock() defer s.rwMutex.Unlock() s.Region = region } // GetRegion get current region. -func (s serverConfigV4) GetRegion() string { +func (s serverConfigV5) GetRegion() string { s.rwMutex.RLock() defer s.rwMutex.RUnlock() return s.Region } // SetCredentials set new credentials. -func (s *serverConfigV4) SetCredential(creds credential) { +func (s *serverConfigV5) SetCredential(creds credential) { s.rwMutex.Lock() defer s.rwMutex.Unlock() s.Credential = creds } // GetCredentials get current credentials. -func (s serverConfigV4) GetCredential() credential { +func (s serverConfigV5) GetCredential() credential { s.rwMutex.RLock() defer s.rwMutex.RUnlock() return s.Credential } // Save config. 
-func (s serverConfigV4) Save() error { +func (s serverConfigV5) Save() error { s.rwMutex.RLock() defer s.rwMutex.RUnlock() diff --git a/fs-v1.go b/fs-v1.go index d02c60c48..0092d9cbe 100644 --- a/fs-v1.go +++ b/fs-v1.go @@ -64,17 +64,29 @@ func loadFormatFS(storageDisk StorageAPI) (format formatConfigV1, err error) { // Should be called when process shuts down. func shutdownFS(storage StorageAPI) { + // List if there are any multipart entries. _, err := storage.ListDir(minioMetaBucket, mpartMetaPrefix) if err != errFileNotFound { - // Multipart directory is not empty hence do not remove .minio volume. + // Multipart directory is not empty hence do not remove '.minio.sys' volume. os.Exit(0) } + // List if there are any bucket configuration entries. + _, err = storage.ListDir(minioMetaBucket, bucketConfigPrefix) + if err != errFileNotFound { + // Bucket config directory is not empty hence do not remove '.minio.sys' volume. + os.Exit(0) + } + // Cleanup everything else. prefix := "" - if err := cleanupDir(storage, minioMetaBucket, prefix); err != nil { - os.Exit(0) - return + if err = cleanupDir(storage, minioMetaBucket, prefix); err != nil { + errorIf(err, "Unable to cleanup minio meta bucket") + os.Exit(1) } - storage.DeleteVol(minioMetaBucket) + if err = storage.DeleteVol(minioMetaBucket); err != nil { + errorIf(err, "Unable to delete minio meta bucket", minioMetaBucket) + os.Exit(1) + } + // Successful exit. os.Exit(0) } @@ -184,6 +196,10 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) { if !IsValidBucketName(vol.Name) { continue } + // Ignore the volume special bucket. 
+ if vol.Name == minioMetaBucket { + continue + } bucketInfos = append(bucketInfos, BucketInfo{ Name: vol.Name, Created: vol.Created, diff --git a/generic-handlers.go b/generic-handlers.go index 836bbfd1f..e9baff18e 100644 --- a/generic-handlers.go +++ b/generic-handlers.go @@ -275,7 +275,6 @@ var notimplementedBucketResourceNames = map[string]bool{ "cors": true, "lifecycle": true, "logging": true, - "notification": true, "replication": true, "tagging": true, "versions": true, diff --git a/globals.go b/globals.go index 3cc5aec27..5ebb1ca8f 100644 --- a/globals.go +++ b/globals.go @@ -28,7 +28,7 @@ const ( // minio configuration related constants. const ( - globalMinioConfigVersion = "4" + globalMinioConfigVersion = "5" globalMinioConfigDir = ".minio" globalMinioCertsDir = "certs" globalMinioCertFile = "public.crt" diff --git a/handler-utils.go b/handler-utils.go index 085fb2850..120afd50d 100644 --- a/handler-utils.go +++ b/handler-utils.go @@ -17,7 +17,10 @@ package main import ( + "bytes" "io" + "io/ioutil" + "mime/multipart" "net/http" "strings" ) @@ -95,3 +98,30 @@ func extractMetadataFromHeader(header http.Header) map[string]string { // Return. 
return metadata } + +func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, error) { + /// HTML Form values + formValues := make(map[string]string) + filePart := new(bytes.Buffer) + var err error + for err == nil { + var part *multipart.Part + part, err = reader.NextPart() + if part != nil { + if part.FileName() == "" { + var buffer []byte + buffer, err = ioutil.ReadAll(part) + if err != nil { + return nil, nil, err + } + formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer) + } else { + if _, err = io.Copy(filePart, part); err != nil { + return nil, nil, err + } + } + } + } + return filePart, formValues, nil + +} diff --git a/logger-amqp.go b/logger-amqp.go new file mode 100644 index 000000000..3de0d2ac3 --- /dev/null +++ b/logger-amqp.go @@ -0,0 +1,151 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "errors" + + "github.com/Sirupsen/logrus" + "github.com/streadway/amqp" +) + +// amqpLogger - represents logrus compatible AMQP hook. +// All fields represent AMQP configuration details. 
+type amqpLogger struct { + Enable bool `json:"enable"` + Level string `json:"level"` + URL string `json:"url"` + Exchange string `json:"exchange"` + RoutingKey string `json:"routineKey"` + ExchangeType string `json:"exchangeType"` + Mandatory bool `json:"mandatory"` + Immediate bool `json:"immediate"` + Durable bool `json:"durable"` + Internal bool `json:"internal"` + NoWait bool `json:"noWait"` + AutoDeleted bool `json:"autoDeleted"` +} + +type amqpConn struct { + params amqpLogger + *amqp.Connection +} + +func dialAMQP(amqpL amqpLogger) (amqpConn, error) { + conn, err := amqp.Dial(amqpL.URL) + if err != nil { + return amqpConn{}, err + } + return amqpConn{Connection: conn, params: amqpL}, nil +} + +var errLoggerNotEnabled = errors.New("logger type not enabled") + +func enableAMQPLogger() error { + amqpL := serverConfig.GetAMQPLogger() + if !amqpL.Enable { + return errLoggerNotEnabled + } + + // Connect to amqp server. + amqpC, err := dialAMQP(amqpL) + if err != nil { + return err + } + + lvl, err := logrus.ParseLevel(amqpL.Level) + fatalIf(err, "Unknown log level found in the config file.") + + // Add a amqp hook. + log.Hooks.Add(amqpC) + + // Set default JSON formatter. + log.Formatter = new(logrus.JSONFormatter) + + // Set default log level to info. + log.Level = lvl + + // Successfully enabled. + return nil +} + +// Fire is called when an event should be sent to the message broker. +func (q amqpConn) Fire(entry *logrus.Entry) error { + ch, err := q.Connection.Channel() + if err != nil { + // Any other error other than connection closed, return. + if err != amqp.ErrClosed { + return err + } + // Attempt to connect again. 
+ var conn *amqp.Connection + conn, err = amqp.Dial(q.params.URL) + if err != nil { + return err + } + ch, err = conn.Channel() + if err != nil { + return err + } + } + defer ch.Close() + + err = ch.ExchangeDeclare( + q.params.Exchange, + q.params.ExchangeType, + q.params.Durable, + q.params.AutoDeleted, + q.params.Internal, + q.params.NoWait, + nil, + ) + if err != nil { + return err + } + + body, err := entry.String() + if err != nil { + return err + } + + err = ch.Publish( + q.params.Exchange, + q.params.RoutingKey, + q.params.Mandatory, + q.params.Immediate, + amqp.Publishing{ + ContentType: "application/json", + Body: []byte(body), + }) + if err != nil { + return err + } + + return nil +} + +// Levels is available logging levels. +func (q amqpConn) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/logger-elasticsearch.go b/logger-elasticsearch.go new file mode 100644 index 000000000..f077227fe --- /dev/null +++ b/logger-elasticsearch.go @@ -0,0 +1,116 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "errors" + + "github.com/Sirupsen/logrus" + "gopkg.in/olivere/elastic.v3" +) + +// elasticQueue is a elasticsearch event notification queue. 
+type elasticSearchLogger struct { + Enable bool `json:"enable"` + Level string `json:"level"` + URL string `json:"url"` + Index string `json:"index"` +} + +type elasticClient struct { + *elastic.Client + params elasticSearchLogger +} + +// Connects to elastic search instance at URL. +func dialElastic(url string) (*elastic.Client, error) { + client, err := elastic.NewClient(elastic.SetURL(url), elastic.SetSniff(false)) + if err != nil { + return nil, err + } + return client, nil +} + +// Enables elasticsearch logger. +func enableElasticLogger() error { + esLogger := serverConfig.GetElasticSearchLogger() + if !esLogger.Enable { + return errLoggerNotEnabled + } + client, err := dialElastic(esLogger.URL) + if err != nil { + return err + } + + // Use the IndexExists service to check if a specified index exists. + exists, err := client.IndexExists(esLogger.Index).Do() + if err != nil { + return err + } + // Index does not exist, attempt to create it. + if !exists { + var createIndex *elastic.IndicesCreateResult + createIndex, err = client.CreateIndex(esLogger.Index).Do() + if err != nil { + return err + } + if !createIndex.Acknowledged { + return errors.New("index not created") + } + } + + elasticCl := elasticClient{ + Client: client, + params: esLogger, + } + + lvl, err := logrus.ParseLevel(esLogger.Level) + fatalIf(err, "Unknown log level found in the config file.") + + // Add a elasticsearch hook. + log.Hooks.Add(elasticCl) + + // Set default JSON formatter. + log.Formatter = new(logrus.JSONFormatter) + + // Set default log level to info. + log.Level = lvl + + return nil +} + +// Fire is required to implement logrus hook +func (q elasticClient) Fire(entry *logrus.Entry) error { + _, err := q.Client.Index().Index(q.params.Index). + Type("event"). + BodyJson(entry.Data). 
+ Do() + + return err +} + +// Required for logrus hook implementation +func (q elasticClient) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/logger-redis.go b/logger-redis.go new file mode 100644 index 000000000..ed019e334 --- /dev/null +++ b/logger-redis.go @@ -0,0 +1,136 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "time" + + "github.com/Sirupsen/logrus" + "github.com/minio/redigo/redis" +) + +// redisLogger to send logs to Redis server +type redisLogger struct { + Enable bool `json:"enable"` + Level string `json:"level"` + Addr string `json:"address"` + Password string `json:"password"` + Key string `json:"key"` +} + +type redisConn struct { + *redis.Pool + params redisLogger +} + +// Dial a new connection to redis instance at addr, optionally with a password if any. 
+func dialRedis(addr, password string) (*redis.Pool, error) { + rPool := &redis.Pool{ + MaxIdle: 3, + IdleTimeout: 240 * time.Second, + Dial: func() (redis.Conn, error) { + c, err := redis.Dial("tcp", addr) + if err != nil { + return nil, err + } + if password != "" { + if _, err := c.Do("AUTH", password); err != nil { + c.Close() + return nil, err + } + } + return c, err + }, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + } + + // Test if connection with REDIS can be established. + rConn := rPool.Get() + defer rConn.Close() + + // Check connection. + _, err := rConn.Do("PING") + if err != nil { + return nil, err + } + + // Return pool. + return rPool, nil +} + +func enableRedisLogger() error { + rLogger := serverConfig.GetRedisLogger() + if !rLogger.Enable { + return errLoggerNotEnabled + } + + // Dial redis. + rPool, err := dialRedis(rLogger.Addr, rLogger.Password) + if err != nil { + return err + } + + rrConn := redisConn{ + Pool: rPool, + params: rLogger, + } + + lvl, err := logrus.ParseLevel(rLogger.Level) + fatalIf(err, "Unknown log level found in the config file.") + + // Add a elasticsearch hook. + log.Hooks.Add(rrConn) + + // Set default JSON formatter. + log.Formatter = new(logrus.JSONFormatter) + + // Set default log level to info. 
+ log.Level = lvl + + return nil +} + +func (r redisConn) Fire(entry *logrus.Entry) error { + rConn := r.Pool.Get() + defer rConn.Close() + + data, err := entry.String() + if err != nil { + return err + } + + _, err = rConn.Do("RPUSH", r.params.Key, data) + if err != nil { + return err + } + return nil +} + +// Required for logrus hook implementation +func (r redisConn) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/logger.go b/logger.go index 422326987..9b38e6a11 100644 --- a/logger.go +++ b/logger.go @@ -20,7 +20,6 @@ import ( "bufio" "bytes" "os" - "reflect" "runtime" "runtime/debug" "strconv" @@ -40,11 +39,16 @@ var log = logrus.New() // Default console logger. // - console [default] // - file // - syslog +// - amqp +// - elasticsearch // type logger struct { - Console consoleLogger `json:"console"` - File fileLogger `json:"file"` - Syslog syslogLogger `json:"syslog"` + Console consoleLogger `json:"console"` + File fileLogger `json:"file"` + Syslog syslogLogger `json:"syslog"` + AMQP amqpLogger `json:"amqp"` + ElasticSearch elasticSearchLogger `json:"elasticsearch"` + Redis redisLogger `json:"redis"` // Add new loggers here. 
} @@ -91,11 +95,8 @@ func errorIf(err error, msg string, data ...interface{}) { if err == nil { return } - sysInfo := sysInfo() fields := logrus.Fields{ - "cause": err.Error(), - "type": reflect.TypeOf(err), - "sysInfo": sysInfo, + "cause": err.Error(), } if globalTrace { fields["stack"] = "\n" + stackInfo() @@ -108,11 +109,8 @@ func fatalIf(err error, msg string, data ...interface{}) { if err == nil { return } - sysInfo := sysInfo() fields := logrus.Fields{ - "cause": err.Error(), - "type": reflect.TypeOf(err), - "sysInfo": sysInfo, + "cause": err.Error(), } if globalTrace { fields["stack"] = "\n" + stackInfo() diff --git a/main.go b/main.go index 5645e23a1..f60445516 100644 --- a/main.go +++ b/main.go @@ -77,6 +77,10 @@ func enableLoggers() { enableConsoleLogger() enableFileLogger() + // Adding new bucket notification related loggers. + enableAMQPLogger() + enableElasticLogger() + // Add your logger here. } diff --git a/object-handlers.go b/object-handlers.go index 1259389ac..ceec951e8 100644 --- a/object-handlers.go +++ b/object-handlers.go @@ -157,6 +157,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req } return w.Write(p) }) + // Reads the object at startOffset and writes to mw. if err := api.ObjectAPI.GetObject(bucket, object, startOffset, length, writer); err != nil { errorIf(err, "Unable to write to client.") @@ -353,6 +354,16 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re writeSuccessResponse(w, encodedSuccessResponse) // Explicitly close the reader, to avoid fd leaks. pipeReader.Close() + + // Load notification config if any. + nConfig, err := api.loadNotificationConfig(bucket) + if err != nil { + errorIf(err, "Unable to load notification config for bucket: \"%s\"", bucket) + return + } + + // Notify object created event. 
+ notifyObjectCreatedEvent(nConfig, ObjectCreatedCopy, bucket, object, objInfo.MD5Sum, objInfo.Size) } // PutObjectHandler - PUT Object @@ -422,6 +433,16 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req w.Header().Set("ETag", "\""+md5Sum+"\"") } writeSuccessResponse(w, nil) + + // Load notification config if any. + nConfig, err := api.loadNotificationConfig(bucket) + if err != nil { + errorIf(err, "Unable to load notification config for bucket: \"%s\"", bucket) + return + } + + // Notify object created event. + notifyObjectCreatedEvent(nConfig, ObjectCreatedPut, bucket, object, md5Sum, size) } /// Multipart objectAPIHandlers @@ -645,7 +666,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite uploadID, _, _, _ := getObjectResources(r.URL.Query()) var md5Sum string - var err error switch getRequestAuthType(r) { default: // For all unknown auth types return error. @@ -711,7 +731,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite }(doneCh) sendWhiteSpaceChars(w, doneCh) - if err != nil { errorIf(err, "Unable to complete multipart upload.") switch oErr := err.(type) { @@ -735,9 +754,21 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) return } - // write success response. + + // Write success response. w.Write(encodedSuccessResponse) w.(http.Flusher).Flush() + + // Load notification config if any. + nConfig, err := api.loadNotificationConfig(bucket) + if err != nil { + errorIf(err, "Unable to load notification config for bucket: \"%s\"", bucket) + return + } + + // Notify object created event. + size := int64(0) // FIXME: support event size. + notifyObjectCreatedEvent(nConfig, ObjectCreatedCompleteMultipartUpload, bucket, object, md5Sum, size) } /// Delete objectAPIHandlers @@ -768,6 +799,19 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http. 
/// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html /// Ignore delete object errors, since we are suppposed to reply /// only 204. - api.ObjectAPI.DeleteObject(bucket, object) + if err := api.ObjectAPI.DeleteObject(bucket, object); err != nil { + writeSuccessNoContent(w) + return + } writeSuccessNoContent(w) + + // Load notification config if any. + nConfig, err := api.loadNotificationConfig(bucket) + if err != nil { + errorIf(err, "Unable to load notification config for bucket: \"%s\"", bucket) + return + } + + // Notify object deleted event. + notifyObjectDeletedEvent(nConfig, bucket, object) } diff --git a/object-utils.go b/object-utils.go index 7327b4914..7c5a4c62f 100644 --- a/object-utils.go +++ b/object-utils.go @@ -46,6 +46,10 @@ var isIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) // and periods, but must begin and end with a lowercase letter or a number. // See: http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html func IsValidBucketName(bucket string) bool { + // Special case when bucket is equal to 'metaBucket'. + if bucket == minioMetaBucket { + return true + } if len(bucket) < 3 || len(bucket) > 63 { return false } diff --git a/queues.go b/queues.go new file mode 100644 index 000000000..cbcf1b269 --- /dev/null +++ b/queues.go @@ -0,0 +1,193 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "fmt" + "net/url" + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + minioSqs = "arn:minio:sqs:" + // Static string indicating queue type 'amqp'. + queueTypeAMQP = "1:amqp" + // Static string indicating queue type 'elasticsearch'. + queueTypeElastic = "1:elasticsearch" + // Static string indicating queue type 'redis'. + queueTypeRedis = "1:redis" +) + +// Returns true if queueArn is for an AMQP queue. +func isAMQPQueue(sqsArn arnMinioSqs) bool { + if sqsArn.sqsType == queueTypeAMQP { + amqpL := serverConfig.GetAMQPLogger() + if !amqpL.Enable { + return false + } + // Connect to amqp server to validate. + amqpC, err := dialAMQP(amqpL) + if err != nil { + errorIf(err, "Unable to connect to amqp service.", amqpL) + return false + } + defer amqpC.Close() + } + return true +} + +// Returns true if queueArn is for an Redis queue. +func isRedisQueue(sqsArn arnMinioSqs) bool { + if sqsArn.sqsType == queueTypeRedis { + rLogger := serverConfig.GetRedisLogger() + if !rLogger.Enable { + return false + } + // Connect to redis server to validate. + rPool, err := dialRedis(rLogger.Addr, rLogger.Password) + if err != nil { + errorIf(err, "Unable to connect to redis service.", rLogger) + return false + } + defer rPool.Close() + } + return true +} + +// Returns true if queueArn is for an ElasticSearch queue. +func isElasticQueue(sqsArn arnMinioSqs) bool { + if sqsArn.sqsType == queueTypeElastic { + esLogger := serverConfig.GetElasticSearchLogger() + if !esLogger.Enable { + return false + } + elasticC, err := dialElastic(esLogger.URL) + if err != nil { + errorIf(err, "Unable to connect to elasticsearch service.", esLogger.URL) + return false + } + defer elasticC.Stop() + } + return true +} + +// Match function matches wild cards in 'pattern' for events. 
+func eventMatch(eventType EventName, events []string) (ok bool) { + for _, event := range events { + ok = wildCardMatch(event, eventType.String()) + if ok { + break + } + } + return ok +} + +// NotifyObjectCreatedEvent - notifies a new 's3:ObjectCreated' event. +// List of events reported through this function are +// - s3:ObjectCreated:Put +// - s3:ObjectCreated:Post +// - s3:ObjectCreated:Copy +// - s3:ObjectCreated:CompleteMultipartUpload +func notifyObjectCreatedEvent(nConfig notificationConfig, eventType EventName, bucket string, object string, etag string, size int64) { + /// Construct a new object created event. + region := serverConfig.GetRegion() + tnow := time.Now().UTC() + sequencer := fmt.Sprintf("%X", tnow.UnixNano()) + // Following blocks fills in all the necessary details of s3 event message structure. + // http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html + events := []*NotificationEvent{ + &NotificationEvent{ + EventVersion: "2.0", + EventSource: "aws:s3", + AwsRegion: region, + EventTime: tnow.Format(iso8601Format), + EventName: eventType.String(), + UserIdentity: defaultIdentity(), + RequestParameters: map[string]string{}, + ResponseElements: map[string]string{}, + S3: s3Reference{ + SchemaVersion: "1.0", + ConfigurationID: "Config", + Bucket: s3BucketReference{ + Name: bucket, + OwnerIdentity: defaultIdentity(), + ARN: "arn:aws:s3:::" + bucket, + }, + Object: s3ObjectReference{ + Key: url.QueryEscape(object), + ETag: etag, + Size: size, + Sequencer: sequencer, + }, + }, + }, + } + // Notify to all the configured queues. + for _, qConfig := range nConfig.QueueConfigurations { + if eventMatch(eventType, qConfig.Events) { + log.WithFields(logrus.Fields{ + "Records": events, + }).Info() + } + } +} + +// NotifyObjectRemovedEvent - notifies a new 's3:ObjectRemoved' event. 
+// List of events reported through this function are +// - s3:ObjectRemoved:Delete +func notifyObjectDeletedEvent(nConfig notificationConfig, bucket string, object string) { + region := serverConfig.GetRegion() + tnow := time.Now().UTC() + sequencer := fmt.Sprintf("%X", tnow.UnixNano()) + // Following blocks fills in all the necessary details of s3 event message structure. + // http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html + events := []*NotificationEvent{ + &NotificationEvent{ + EventVersion: "2.0", + EventSource: "aws:s3", + AwsRegion: region, + EventTime: tnow.Format(iso8601Format), + EventName: ObjectRemovedDelete.String(), + UserIdentity: defaultIdentity(), + RequestParameters: map[string]string{}, + ResponseElements: map[string]string{}, + S3: s3Reference{ + SchemaVersion: "1.0", + ConfigurationID: "Config", + Bucket: s3BucketReference{ + Name: bucket, + OwnerIdentity: defaultIdentity(), + ARN: "arn:aws:s3:::" + bucket, + }, + Object: s3ObjectReference{ + Key: url.QueryEscape(object), + Sequencer: sequencer, + }, + }, + }, + } + // Notify to all the configured queues. + for _, qConfig := range nConfig.QueueConfigurations { + if eventMatch(ObjectRemovedDelete, qConfig.Events) { + log.WithFields(logrus.Fields{ + "Records": events, + }).Info() + } + } +} diff --git a/server_test.go b/server_test.go index a9e05a92e..658793d4c 100644 --- a/server_test.go +++ b/server_test.go @@ -758,6 +758,7 @@ func (s *TestSuiteCommon) TestPutObject(c *C) { c.Assert(n, Equals, int64(len([]byte("hello world")))) // asserted the contents of the fetched object with the expected result. c.Assert(true, Equals, bytes.Equal(buffer2.Bytes(), []byte("hello world"))) + } // TestListBuckets - Make request for listing of all buckets. @@ -2141,4 +2142,5 @@ func (s *TestSuiteCommon) TestObjectMultipart(c *C) { c.Assert(err, IsNil) // verify whether complete multipart was successfull. 
c.Assert(response.StatusCode, Equals, http.StatusOK) + } diff --git a/vendor/github.com/facebookgo/clock/README.md b/vendor/github.com/facebookgo/clock/README.md deleted file mode 100644 index 5d4f4fe72..000000000 --- a/vendor/github.com/facebookgo/clock/README.md +++ /dev/null @@ -1,104 +0,0 @@ -clock [![Build Status](https://drone.io/github.com/benbjohnson/clock/status.png)](https://drone.io/github.com/benbjohnson/clock/latest) [![Coverage Status](https://coveralls.io/repos/benbjohnson/clock/badge.png?branch=master)](https://coveralls.io/r/benbjohnson/clock?branch=master) [![GoDoc](https://godoc.org/github.com/benbjohnson/clock?status.png)](https://godoc.org/github.com/benbjohnson/clock) ![Project status](http://img.shields.io/status/experimental.png?color=red) -===== - -Clock is a small library for mocking time in Go. It provides an interface -around the standard library's [`time`][time] package so that the application -can use the realtime clock while tests can use the mock clock. - -[time]: http://golang.org/pkg/time/ - - -## Usage - -### Realtime Clock - -Your application can maintain a `Clock` variable that will allow realtime and -mock clocks to be interchangable. For example, if you had an `Application` type: - -```go -import "github.com/benbjohnson/clock" - -type Application struct { - Clock clock.Clock -} -``` - -You could initialize it to use the realtime clock like this: - -```go -var app Application -app.Clock = clock.New() -... -``` - -Then all timers and time-related functionality should be performed from the -`Clock` variable. - - -### Mocking time - -In your tests, you will want to use a `Mock` clock: - -```go -import ( - "testing" - - "github.com/benbjohnson/clock" -) - -func TestApplication_DoSomething(t *testing.T) { - mock := clock.NewMock() - app := Application{Clock: mock} - ... -} -``` - -Now that you've initialized your application to use the mock clock, you can -adjust the time programmatically. 
The mock clock always starts from the Unix -epoch (midnight, Jan 1, 1970 UTC). - - -### Controlling time - -The mock clock provides the same functions that the standard library's `time` -package provides. For example, to find the current time, you use the `Now()` -function: - -```go -mock := clock.NewMock() - -// Find the current time. -mock.Now().UTC() // 1970-01-01 00:00:00 +0000 UTC - -// Move the clock forward. -mock.Add(2 * time.Hour) - -// Check the time again. It's 2 hours later! -mock.Now().UTC() // 1970-01-01 02:00:00 +0000 UTC -``` - -Timers and Tickers are also controlled by this same mock clock. They will only -execute when the clock is moved forward: - -``` -mock := clock.NewMock() -count := 0 - -// Kick off a timer to increment every 1 mock second. -go func() { - ticker := clock.Ticker(1 * time.Second) - for { - <-ticker.C - count++ - } -}() -runtime.Gosched() - -// Move the clock forward 10 second. -mock.Add(10 * time.Second) - -// This prints 10. -fmt.Println(count) -``` - - diff --git a/vendor/github.com/facebookgo/clock/clock.go b/vendor/github.com/facebookgo/clock/clock.go deleted file mode 100644 index bca1a7ba8..000000000 --- a/vendor/github.com/facebookgo/clock/clock.go +++ /dev/null @@ -1,363 +0,0 @@ -package clock - -import ( - "runtime" - "sort" - "sync" - "time" -) - -// Clock represents an interface to the functions in the standard library time -// package. Two implementations are available in the clock package. The first -// is a real-time clock which simply wraps the time package's functions. The -// second is a mock clock which will only make forward progress when -// programmatically adjusted. -type Clock interface { - After(d time.Duration) <-chan time.Time - AfterFunc(d time.Duration, f func()) *Timer - Now() time.Time - Sleep(d time.Duration) - Tick(d time.Duration) <-chan time.Time - Ticker(d time.Duration) *Ticker - Timer(d time.Duration) *Timer -} - -// New returns an instance of a real-time clock. 
-func New() Clock { - return &clock{} -} - -// clock implements a real-time clock by simply wrapping the time package functions. -type clock struct{} - -func (c *clock) After(d time.Duration) <-chan time.Time { return time.After(d) } - -func (c *clock) AfterFunc(d time.Duration, f func()) *Timer { - return &Timer{timer: time.AfterFunc(d, f)} -} - -func (c *clock) Now() time.Time { return time.Now() } - -func (c *clock) Sleep(d time.Duration) { time.Sleep(d) } - -func (c *clock) Tick(d time.Duration) <-chan time.Time { return time.Tick(d) } - -func (c *clock) Ticker(d time.Duration) *Ticker { - t := time.NewTicker(d) - return &Ticker{C: t.C, ticker: t} -} - -func (c *clock) Timer(d time.Duration) *Timer { - t := time.NewTimer(d) - return &Timer{C: t.C, timer: t} -} - -// Mock represents a mock clock that only moves forward programmically. -// It can be preferable to a real-time clock when testing time-based functionality. -type Mock struct { - mu sync.Mutex - now time.Time // current time - timers clockTimers // tickers & timers - - calls Calls - waiting []waiting - callsMutex sync.Mutex -} - -// NewMock returns an instance of a mock clock. -// The current time of the mock clock on initialization is the Unix epoch. -func NewMock() *Mock { - return &Mock{now: time.Unix(0, 0)} -} - -// Add moves the current time of the mock clock forward by the duration. -// This should only be called from a single goroutine at a time. -func (m *Mock) Add(d time.Duration) { - // Calculate the final current time. - t := m.now.Add(d) - - // Continue to execute timers until there are no more before the new time. - for { - if !m.runNextTimer(t) { - break - } - } - - // Ensure that we end with the new time. - m.mu.Lock() - m.now = t - m.mu.Unlock() - - // Give a small buffer to make sure the other goroutines get handled. - gosched() -} - -// runNextTimer executes the next timer in chronological order and moves the -// current time to the timer's next tick time. 
The next time is not executed if -// it's next time if after the max time. Returns true if a timer is executed. -func (m *Mock) runNextTimer(max time.Time) bool { - m.mu.Lock() - - // Sort timers by time. - sort.Sort(m.timers) - - // If we have no more timers then exit. - if len(m.timers) == 0 { - m.mu.Unlock() - return false - } - - // Retrieve next timer. Exit if next tick is after new time. - t := m.timers[0] - if t.Next().After(max) { - m.mu.Unlock() - return false - } - - // Move "now" forward and unlock clock. - m.now = t.Next() - m.mu.Unlock() - - // Execute timer. - t.Tick(m.now) - return true -} - -// After waits for the duration to elapse and then sends the current time on the returned channel. -func (m *Mock) After(d time.Duration) <-chan time.Time { - defer m.inc(&m.calls.After) - return m.Timer(d).C -} - -// AfterFunc waits for the duration to elapse and then executes a function. -// A Timer is returned that can be stopped. -func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer { - defer m.inc(&m.calls.AfterFunc) - t := m.Timer(d) - t.C = nil - t.fn = f - return t -} - -// Now returns the current wall time on the mock clock. -func (m *Mock) Now() time.Time { - defer m.inc(&m.calls.Now) - m.mu.Lock() - defer m.mu.Unlock() - return m.now -} - -// Sleep pauses the goroutine for the given duration on the mock clock. -// The clock must be moved forward in a separate goroutine. -func (m *Mock) Sleep(d time.Duration) { - defer m.inc(&m.calls.Sleep) - <-m.After(d) -} - -// Tick is a convenience function for Ticker(). -// It will return a ticker channel that cannot be stopped. -func (m *Mock) Tick(d time.Duration) <-chan time.Time { - defer m.inc(&m.calls.Tick) - return m.Ticker(d).C -} - -// Ticker creates a new instance of Ticker. 
-func (m *Mock) Ticker(d time.Duration) *Ticker { - defer m.inc(&m.calls.Ticker) - m.mu.Lock() - defer m.mu.Unlock() - ch := make(chan time.Time) - t := &Ticker{ - C: ch, - c: ch, - mock: m, - d: d, - next: m.now.Add(d), - } - m.timers = append(m.timers, (*internalTicker)(t)) - return t -} - -// Timer creates a new instance of Timer. -func (m *Mock) Timer(d time.Duration) *Timer { - defer m.inc(&m.calls.Timer) - m.mu.Lock() - defer m.mu.Unlock() - ch := make(chan time.Time) - t := &Timer{ - C: ch, - c: ch, - mock: m, - next: m.now.Add(d), - } - m.timers = append(m.timers, (*internalTimer)(t)) - return t -} - -func (m *Mock) removeClockTimer(t clockTimer) { - m.mu.Lock() - defer m.mu.Unlock() - for i, timer := range m.timers { - if timer == t { - copy(m.timers[i:], m.timers[i+1:]) - m.timers[len(m.timers)-1] = nil - m.timers = m.timers[:len(m.timers)-1] - break - } - } - sort.Sort(m.timers) -} - -func (m *Mock) inc(addr *uint32) { - m.callsMutex.Lock() - defer m.callsMutex.Unlock() - *addr++ - var newWaiting []waiting - for _, w := range m.waiting { - if m.calls.atLeast(w.expected) { - close(w.done) - continue - } - newWaiting = append(newWaiting, w) - } - m.waiting = newWaiting -} - -// Wait waits for at least the relevant calls before returning. The expected -// Calls are always over the lifetime of the Mock. Values in the Calls struct -// are used as the minimum number of calls, this allows you to wait for only -// the calls you care about. -func (m *Mock) Wait(s Calls) { - m.callsMutex.Lock() - if m.calls.atLeast(s) { - m.callsMutex.Unlock() - return - } - done := make(chan struct{}) - m.waiting = append(m.waiting, waiting{expected: s, done: done}) - m.callsMutex.Unlock() - <-done -} - -// clockTimer represents an object with an associated start time. -type clockTimer interface { - Next() time.Time - Tick(time.Time) -} - -// clockTimers represents a list of sortable timers. 
-type clockTimers []clockTimer - -func (a clockTimers) Len() int { return len(a) } -func (a clockTimers) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a clockTimers) Less(i, j int) bool { return a[i].Next().Before(a[j].Next()) } - -// Timer represents a single event. -// The current time will be sent on C, unless the timer was created by AfterFunc. -type Timer struct { - C <-chan time.Time - c chan time.Time - timer *time.Timer // realtime impl, if set - next time.Time // next tick time - mock *Mock // mock clock, if set - fn func() // AfterFunc function, if set -} - -// Stop turns off the ticker. -func (t *Timer) Stop() { - if t.timer != nil { - t.timer.Stop() - } else { - t.mock.removeClockTimer((*internalTimer)(t)) - } -} - -type internalTimer Timer - -func (t *internalTimer) Next() time.Time { return t.next } -func (t *internalTimer) Tick(now time.Time) { - if t.fn != nil { - t.fn() - } else { - t.c <- now - } - t.mock.removeClockTimer((*internalTimer)(t)) - gosched() -} - -// Ticker holds a channel that receives "ticks" at regular intervals. -type Ticker struct { - C <-chan time.Time - c chan time.Time - ticker *time.Ticker // realtime impl, if set - next time.Time // next tick time - mock *Mock // mock clock, if set - d time.Duration // time between ticks -} - -// Stop turns off the ticker. -func (t *Ticker) Stop() { - if t.ticker != nil { - t.ticker.Stop() - } else { - t.mock.removeClockTimer((*internalTicker)(t)) - } -} - -type internalTicker Ticker - -func (t *internalTicker) Next() time.Time { return t.next } -func (t *internalTicker) Tick(now time.Time) { - select { - case t.c <- now: - case <-time.After(1 * time.Millisecond): - } - t.next = now.Add(t.d) - gosched() -} - -// Sleep momentarily so that other goroutines can process. -func gosched() { runtime.Gosched() } - -// Calls keeps track of the count of calls for each of the methods on the Clock -// interface. 
-type Calls struct { - After uint32 - AfterFunc uint32 - Now uint32 - Sleep uint32 - Tick uint32 - Ticker uint32 - Timer uint32 -} - -// atLeast returns true if at least the number of calls in o have been made. -func (c Calls) atLeast(o Calls) bool { - if c.After < o.After { - return false - } - if c.AfterFunc < o.AfterFunc { - return false - } - if c.Now < o.Now { - return false - } - if c.Sleep < o.Sleep { - return false - } - if c.Tick < o.Tick { - return false - } - if c.Ticker < o.Ticker { - return false - } - if c.Timer < o.Timer { - return false - } - return true -} - -type waiting struct { - expected Calls - done chan struct{} -} diff --git a/vendor/github.com/minio/redigo/LICENSE b/vendor/github.com/minio/redigo/LICENSE new file mode 100644 index 000000000..67db85882 --- /dev/null +++ b/vendor/github.com/minio/redigo/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/minio/redigo/redis/commandinfo.go b/vendor/github.com/minio/redigo/redis/commandinfo.go new file mode 100644 index 000000000..0ad2af5df --- /dev/null +++ b/vendor/github.com/minio/redigo/redis/commandinfo.go @@ -0,0 +1,54 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "strings" +) + +const ( + WatchState = 1 << iota + MultiState + SubscribeState + MonitorState +) + +type CommandInfo struct { + Set, Clear int +} + +var commandInfos = map[string]CommandInfo{ + "WATCH": {Set: WatchState}, + "UNWATCH": {Clear: WatchState}, + "MULTI": {Set: MultiState}, + "EXEC": {Clear: WatchState | MultiState}, + "DISCARD": {Clear: WatchState | MultiState}, + "PSUBSCRIBE": {Set: SubscribeState}, + "SUBSCRIBE": {Set: SubscribeState}, + "MONITOR": {Set: MonitorState}, +} + +func init() { + for n, ci := range commandInfos { + commandInfos[strings.ToLower(n)] = ci + } +} + +func LookupCommandInfo(commandName string) CommandInfo { + if ci, ok := commandInfos[commandName]; ok { + return ci + } + return commandInfos[strings.ToUpper(commandName)] +} diff --git a/vendor/github.com/minio/redigo/redis/conn.go b/vendor/github.com/minio/redigo/redis/conn.go new file mode 100644 index 000000000..ed358c601 --- /dev/null +++ b/vendor/github.com/minio/redigo/redis/conn.go @@ -0,0 +1,570 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "net" + "net/url" + "regexp" + "strconv" + "sync" + "time" +) + +// conn is the low-level implementation of Conn +type conn struct { + + // Shared + mu sync.Mutex + pending int + err error + conn net.Conn + + // Read + readTimeout time.Duration + br *bufio.Reader + + // Write + writeTimeout time.Duration + bw *bufio.Writer + + // Scratch space for formatting argument length. + // '*' or '$', length, "\r\n" + lenScratch [32]byte + + // Scratch space for formatting integers and floats. + numScratch [40]byte +} + +// DialTimeout acts like Dial but takes timeouts for establishing the +// connection to the server, writing a command and reading a reply. +// +// Deprecated: Use Dial with options instead. +func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { + return Dial(network, address, + DialConnectTimeout(connectTimeout), + DialReadTimeout(readTimeout), + DialWriteTimeout(writeTimeout)) +} + +// DialOption specifies an option for dialing a Redis server. +type DialOption struct { + f func(*dialOptions) +} + +type dialOptions struct { + readTimeout time.Duration + writeTimeout time.Duration + dial func(network, addr string) (net.Conn, error) + db int + password string +} + +// DialReadTimeout specifies the timeout for reading a single command reply. +func DialReadTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.readTimeout = d + }} +} + +// DialWriteTimeout specifies the timeout for writing a single command. +func DialWriteTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.writeTimeout = d + }} +} + +// DialConnectTimeout specifies the timeout for connecting to the Redis server. 
+func DialConnectTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + dialer := net.Dialer{Timeout: d} + do.dial = dialer.Dial + }} +} + +// DialNetDial specifies a custom dial function for creating TCP +// connections. If this option is left out, then net.Dial is +// used. DialNetDial overrides DialConnectTimeout. +func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption { + return DialOption{func(do *dialOptions) { + do.dial = dial + }} +} + +// DialDatabase specifies the database to select when dialing a connection. +func DialDatabase(db int) DialOption { + return DialOption{func(do *dialOptions) { + do.db = db + }} +} + +// DialPassword specifies the password to use when connecting to +// the Redis server. +func DialPassword(password string) DialOption { + return DialOption{func(do *dialOptions) { + do.password = password + }} +} + +// Dial connects to the Redis server at the given network and +// address using the specified options. +func Dial(network, address string, options ...DialOption) (Conn, error) { + do := dialOptions{ + dial: net.Dial, + } + for _, option := range options { + option.f(&do) + } + + netConn, err := do.dial(network, address) + if err != nil { + return nil, err + } + c := &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: do.readTimeout, + writeTimeout: do.writeTimeout, + } + + if do.password != "" { + if _, err := c.Do("AUTH", do.password); err != nil { + netConn.Close() + return nil, err + } + } + + if do.db != 0 { + if _, err := c.Do("SELECT", do.db); err != nil { + netConn.Close() + return nil, err + } + } + + return c, nil +} + +var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`) + +// DialURL connects to a Redis server at the given URL using the Redis +// URI scheme. URLs should follow the draft IANA specification for the +// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis). 
+func DialURL(rawurl string, options ...DialOption) (Conn, error) { + u, err := url.Parse(rawurl) + if err != nil { + return nil, err + } + + if u.Scheme != "redis" { + return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) + } + + // As per the IANA draft spec, the host defaults to localhost and + // the port defaults to 6379. + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // assume port is missing + host = u.Host + port = "6379" + } + if host == "" { + host = "localhost" + } + address := net.JoinHostPort(host, port) + + if u.User != nil { + password, isSet := u.User.Password() + if isSet { + options = append(options, DialPassword(password)) + } + } + + match := pathDBRegexp.FindStringSubmatch(u.Path) + if len(match) == 2 { + db := 0 + if len(match[1]) > 0 { + db, err = strconv.Atoi(match[1]) + if err != nil { + return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) + } + } + if db != 0 { + options = append(options, DialDatabase(db)) + } + } else if u.Path != "" { + return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) + } + + return Dial("tcp", address, options...) +} + +// NewConn returns a new Redigo connection for the given net connection. +func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: readTimeout, + writeTimeout: writeTimeout, + } +} + +func (c *conn) Close() error { + c.mu.Lock() + err := c.err + if c.err == nil { + c.err = errors.New("redigo: closed") + err = c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) fatal(err error) error { + c.mu.Lock() + if c.err == nil { + c.err = err + // Close connection to force errors on subsequent calls and to unblock + // other reader or writer. 
+ c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) Err() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func (c *conn) writeLen(prefix byte, n int) error { + c.lenScratch[len(c.lenScratch)-1] = '\n' + c.lenScratch[len(c.lenScratch)-2] = '\r' + i := len(c.lenScratch) - 3 + for { + c.lenScratch[i] = byte('0' + n%10) + i -= 1 + n = n / 10 + if n == 0 { + break + } + } + c.lenScratch[i] = prefix + _, err := c.bw.Write(c.lenScratch[i:]) + return err +} + +func (c *conn) writeString(s string) error { + c.writeLen('$', len(s)) + c.bw.WriteString(s) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeBytes(p []byte) error { + c.writeLen('$', len(p)) + c.bw.Write(p) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeInt64(n int64) error { + return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) +} + +func (c *conn) writeFloat64(n float64) error { + return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) +} + +func (c *conn) writeCommand(cmd string, args []interface{}) (err error) { + c.writeLen('*', 1+len(args)) + err = c.writeString(cmd) + for _, arg := range args { + if err != nil { + break + } + switch arg := arg.(type) { + case string: + err = c.writeString(arg) + case []byte: + err = c.writeBytes(arg) + case int: + err = c.writeInt64(int64(arg)) + case int64: + err = c.writeInt64(arg) + case float64: + err = c.writeFloat64(arg) + case bool: + if arg { + err = c.writeString("1") + } else { + err = c.writeString("0") + } + case nil: + err = c.writeString("") + default: + var buf bytes.Buffer + fmt.Fprint(&buf, arg) + err = c.writeBytes(buf.Bytes()) + } + } + return err +} + +type protocolError string + +func (pe protocolError) Error() string { + return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) +} + +func (c *conn) readLine() ([]byte, error) { + p, err := c.br.ReadSlice('\n') + if err == 
bufio.ErrBufferFull { + return nil, protocolError("long response line") + } + if err != nil { + return nil, err + } + i := len(p) - 2 + if i < 0 || p[i] != '\r' { + return nil, protocolError("bad response line terminator") + } + return p[:i], nil +} + +// parseLen parses bulk string and array lengths. +func parseLen(p []byte) (int, error) { + if len(p) == 0 { + return -1, protocolError("malformed length") + } + + if p[0] == '-' && len(p) == 2 && p[1] == '1' { + // handle $-1 and $-1 null replies. + return -1, nil + } + + var n int + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return -1, protocolError("illegal bytes in length") + } + n += int(b - '0') + } + + return n, nil +} + +// parseInt parses an integer reply. +func parseInt(p []byte) (interface{}, error) { + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + + var negate bool + if p[0] == '-' { + negate = true + p = p[1:] + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + } + + var n int64 + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return 0, protocolError("illegal bytes in length") + } + n += int64(b - '0') + } + + if negate { + n = -n + } + return n, nil +} + +var ( + okReply interface{} = "OK" + pongReply interface{} = "PONG" +) + +func (c *conn) readReply() (interface{}, error) { + line, err := c.readLine() + if err != nil { + return nil, err + } + if len(line) == 0 { + return nil, protocolError("short response line") + } + switch line[0] { + case '+': + switch { + case len(line) == 3 && line[1] == 'O' && line[2] == 'K': + // Avoid allocation for frequent "+OK" response. 
+ return okReply, nil + case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': + // Avoid allocation in PING command benchmarks :) + return pongReply, nil + default: + return string(line[1:]), nil + } + case '-': + return Error(string(line[1:])), nil + case ':': + return parseInt(line[1:]) + case '$': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + p := make([]byte, n) + _, err = io.ReadFull(c.br, p) + if err != nil { + return nil, err + } + if line, err := c.readLine(); err != nil { + return nil, err + } else if len(line) != 0 { + return nil, protocolError("bad bulk string format") + } + return p, nil + case '*': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + r := make([]interface{}, n) + for i := range r { + r[i], err = c.readReply() + if err != nil { + return nil, err + } + } + return r, nil + } + return nil, protocolError("unexpected response line") +} + +func (c *conn) Send(cmd string, args ...interface{}) error { + c.mu.Lock() + c.pending += 1 + c.mu.Unlock() + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.writeCommand(cmd, args); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Flush() error { + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.bw.Flush(); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Receive() (reply interface{}, err error) { + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + if reply, err = c.readReply(); err != nil { + return nil, c.fatal(err) + } + // When using pub/sub, the number of receives can be greater than the + // number of sends. To enable normal use of the connection after + // unsubscribing from all channels, we do not decrement pending to a + // negative value. 
+ // + // The pending field is decremented after the reply is read to handle the + // case where Receive is called before Send. + c.mu.Lock() + if c.pending > 0 { + c.pending -= 1 + } + c.mu.Unlock() + if err, ok := reply.(Error); ok { + return nil, err + } + return +} + +func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { + c.mu.Lock() + pending := c.pending + c.pending = 0 + c.mu.Unlock() + + if cmd == "" && pending == 0 { + return nil, nil + } + + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + + if cmd != "" { + if err := c.writeCommand(cmd, args); err != nil { + return nil, c.fatal(err) + } + } + + if err := c.bw.Flush(); err != nil { + return nil, c.fatal(err) + } + + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + + if cmd == "" { + reply := make([]interface{}, pending) + for i := range reply { + r, e := c.readReply() + if e != nil { + return nil, c.fatal(e) + } + reply[i] = r + } + return reply, nil + } + + var err error + var reply interface{} + for i := 0; i <= pending; i++ { + var e error + if reply, e = c.readReply(); e != nil { + return nil, c.fatal(e) + } + if e, ok := reply.(Error); ok && err == nil { + err = e + } + } + return reply, err +} diff --git a/vendor/github.com/minio/redigo/redis/doc.go b/vendor/github.com/minio/redigo/redis/doc.go new file mode 100644 index 000000000..6571c9de2 --- /dev/null +++ b/vendor/github.com/minio/redigo/redis/doc.go @@ -0,0 +1,168 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package redis is a client for the Redis database.
+//
+// The Redigo FAQ (https://github.com/minio/redigo/wiki/FAQ) contains more
+// documentation about this package.
+//
+// Connections
+//
+// The Conn interface is the primary interface for working with Redis.
+// Applications create connections by calling the Dial, DialTimeout or
+// NewConn functions. In the future, functions will be added for creating
+// sharded and other types of connections.
+//
+// The application must call the connection Close method when the application
+// is done with the connection.
+//
+// Executing Commands
+//
+// The Conn interface has a generic method for executing Redis commands:
+//
+// Do(commandName string, args ...interface{}) (reply interface{}, err error)
+//
+// The Redis command reference (http://redis.io/commands) lists the available
+// commands. An example of using the Redis APPEND command is:
+//
+// n, err := conn.Do("APPEND", "key", "value")
+//
+// The Do method converts command arguments to binary strings for transmission
+// to the server as follows:
+//
+// Go Type Conversion
+// []byte Sent as is
+// string Sent as is
+// int, int64 strconv.FormatInt(v)
+// float64 strconv.FormatFloat(v, 'g', -1, 64)
+// bool true -> "1", false -> "0"
+// nil ""
+// all other types fmt.Print(v)
+//
+// Redis command reply types are represented using the following Go types:
+//
+// Redis type Go type
+// error redis.Error
+// integer int64
+// simple string string
+// bulk string []byte or nil if value not present.
+// array []interface{} or nil if value not present.
+//
+// Use type assertions or the reply helper functions to convert from
+// interface{} to the specific Go type for the command result.
+//
+// Pipelining
+//
+// Connections support pipelining using the Send, Flush and Receive methods. 
+
+// Send(commandName string, args ...interface{}) error
+// Flush() error
+// Receive() (reply interface{}, err error)
+//
+// Send writes the command to the connection's output buffer. Flush flushes the
+// connection's output buffer to the server. Receive reads a single reply from
+// the server. The following example shows a simple pipeline.
+//
+// c.Send("SET", "foo", "bar")
+// c.Send("GET", "foo")
+// c.Flush()
+// c.Receive() // reply from SET
+// v, err = c.Receive() // reply from GET
+//
+// The Do method combines the functionality of the Send, Flush and Receive
+// methods. The Do method starts by writing the command and flushing the output
+// buffer. Next, the Do method receives all pending replies including the reply
+// for the command just sent by Do. If any of the received replies is an error,
+// then Do returns the error. If there are no errors, then Do returns the last
+// reply. If the command argument to the Do method is "", then the Do method
+// will flush the output buffer and receive pending replies without sending a
+// command.
+//
+// Use the Send and Do methods to implement pipelined transactions.
+//
+// c.Send("MULTI")
+// c.Send("INCR", "foo")
+// c.Send("INCR", "bar")
+// r, err := c.Do("EXEC")
+// fmt.Println(r) // prints [1, 1]
+//
+// Concurrency
+//
+// Connections support one concurrent caller to the Receive method and one
+// concurrent caller to the Send and Flush methods. No other concurrency is
+// supported including concurrent calls to the Do method.
+//
+// For full concurrent access to Redis, use the thread-safe Pool to get, use
+// and release a connection from within a goroutine. Connections returned from
+// a Pool have the concurrency restrictions described in the previous
+// paragraph.
+//
+// Publish and Subscribe
+//
+// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. 
+// +// c.Send("SUBSCRIBE", "example") +// c.Flush() +// for { +// reply, err := c.Receive() +// if err != nil { +// return err +// } +// // process pushed message +// } +// +// The PubSubConn type wraps a Conn with convenience methods for implementing +// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods +// send and flush a subscription management command. The receive method +// converts a pushed message to convenient types for use in a type switch. +// +// psc := redis.PubSubConn{c} +// psc.Subscribe("example") +// for { +// switch v := psc.Receive().(type) { +// case redis.Message: +// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) +// case redis.Subscription: +// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) +// case error: +// return v +// } +// } +// +// Reply Helpers +// +// The Bool, Int, Bytes, String, Strings and Values functions convert a reply +// to a value of a specific type. To allow convenient wrapping of calls to the +// connection Do and Receive methods, the functions take a second argument of +// type error. If the error is non-nil, then the helper function returns the +// error. If the error is nil, the function converts the reply to the specified +// type: +// +// exists, err := redis.Bool(c.Do("EXISTS", "foo")) +// if err != nil { +// // handle error return from c.Do or type conversion error. 
+// } +// +// The Scan function converts elements of a array reply to Go types: +// +// var value1 int +// var value2 string +// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) +// if err != nil { +// // handle error +// } +// if _, err := redis.Scan(reply, &value1, &value2); err != nil { +// // handle error +// } +package redis // import "github.com/minio/redigo/redis" diff --git a/vendor/github.com/minio/redigo/redis/log.go b/vendor/github.com/minio/redigo/redis/log.go new file mode 100644 index 000000000..129b86d67 --- /dev/null +++ b/vendor/github.com/minio/redigo/redis/log.go @@ -0,0 +1,117 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "fmt" + "log" +) + +// NewLoggingConn returns a logging wrapper around a connection. +func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { + if prefix != "" { + prefix = prefix + "." 
+ } + return &loggingConn{conn, logger, prefix} +} + +type loggingConn struct { + Conn + logger *log.Logger + prefix string +} + +func (c *loggingConn) Close() error { + err := c.Conn.Close() + var buf bytes.Buffer + fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) + c.logger.Output(2, buf.String()) + return err +} + +func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { + const chop = 32 + switch v := v.(type) { + case []byte: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case string: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case []interface{}: + if len(v) == 0 { + buf.WriteString("[]") + } else { + sep := "[" + fin := "]" + if len(v) > chop { + v = v[:chop] + fin = "...]" + } + for _, vv := range v { + buf.WriteString(sep) + c.printValue(buf, vv) + sep = ", " + } + buf.WriteString(fin) + } + default: + fmt.Fprint(buf, v) + } +} + +func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s%s(", c.prefix, method) + if method != "Receive" { + buf.WriteString(commandName) + for _, arg := range args { + buf.WriteString(", ") + c.printValue(&buf, arg) + } + } + buf.WriteString(") -> (") + if method != "Send" { + c.printValue(&buf, reply) + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%v)", err) + c.logger.Output(3, buf.String()) +} + +func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { + reply, err := c.Conn.Do(commandName, args...) + c.print("Do", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) Send(commandName string, args ...interface{}) error { + err := c.Conn.Send(commandName, args...) 
+ c.print("Send", commandName, args, nil, err) + return err +} + +func (c *loggingConn) Receive() (interface{}, error) { + reply, err := c.Conn.Receive() + c.print("Receive", "", nil, reply, err) + return reply, err +} diff --git a/vendor/github.com/minio/redigo/redis/pool.go b/vendor/github.com/minio/redigo/redis/pool.go new file mode 100644 index 000000000..749d8a9c5 --- /dev/null +++ b/vendor/github.com/minio/redigo/redis/pool.go @@ -0,0 +1,391 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "container/list" + "crypto/rand" + "crypto/sha1" + "errors" + "io" + "strconv" + "sync" + "time" +) + +var nowFunc = time.Now // for testing + +// ErrPoolExhausted is returned from a pool connection method (Do, Send, +// Receive, Flush, Err) when the maximum number of database connections in the +// pool has been reached. +var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") + +var ( + errPoolClosed = errors.New("redigo: connection pool closed") + errConnClosed = errors.New("redigo: connection closed") +) + +// Pool maintains a pool of connections. The application calls the Get method +// to get a connection from the pool and the connection's Close method to +// return the connection's resources to the pool. +// +// The following example shows how to use a pool in a web application. 
The +// application creates a pool at application startup and makes it available to +// request handlers using a global variable. +// +// func newPool(server, password string) *redis.Pool { +// return &redis.Pool{ +// MaxIdle: 3, +// IdleTimeout: 240 * time.Second, +// Dial: func () (redis.Conn, error) { +// c, err := redis.Dial("tcp", server) +// if err != nil { +// return nil, err +// } +// if _, err := c.Do("AUTH", password); err != nil { +// c.Close() +// return nil, err +// } +// return c, err +// }, +// TestOnBorrow: func(c redis.Conn, t time.Time) error { +// _, err := c.Do("PING") +// return err +// }, +// } +// } +// +// var ( +// pool *redis.Pool +// redisServer = flag.String("redisServer", ":6379", "") +// redisPassword = flag.String("redisPassword", "", "") +// ) +// +// func main() { +// flag.Parse() +// pool = newPool(*redisServer, *redisPassword) +// ... +// } +// +// A request handler gets a connection from the pool and closes the connection +// when the handler is done: +// +// func serveHome(w http.ResponseWriter, r *http.Request) { +// conn := pool.Get() +// defer conn.Close() +// .... +// } +// +type Pool struct { + + // Dial is an application supplied function for creating and configuring a + // connection. + // + // The connection returned from Dial must not be in a special state + // (subscribed to pubsub channel, transaction started, ...). + Dial func() (Conn, error) + + // TestOnBorrow is an optional application supplied function for checking + // the health of an idle connection before the connection is used again by + // the application. Argument t is the time that the connection was returned + // to the pool. If the function returns an error, then the connection is + // closed. + TestOnBorrow func(c Conn, t time.Time) error + + // Maximum number of idle connections in the pool. + MaxIdle int + + // Maximum number of connections allocated by the pool at a given time. 
+ // When zero, there is no limit on the number of connections in the pool. + MaxActive int + + // Close connections after remaining idle for this duration. If the value + // is zero, then idle connections are not closed. Applications should set + // the timeout to a value less than the server's timeout. + IdleTimeout time.Duration + + // If Wait is true and the pool is at the MaxActive limit, then Get() waits + // for a connection to be returned to the pool before returning. + Wait bool + + // mu protects fields defined below. + mu sync.Mutex + cond *sync.Cond + closed bool + active int + + // Stack of idleConn with most recently used at the front. + idle list.List +} + +type idleConn struct { + c Conn + t time.Time +} + +// NewPool creates a new pool. +// +// Deprecated: Initialize the Pool directly as shown in the example. +func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { + return &Pool{Dial: newFn, MaxIdle: maxIdle} +} + +// Get gets a connection. The application must close the returned connection. +// This method always returns a valid connection so that applications can defer +// error handling to the first use of the connection. If there is an error +// getting an underlying connection, then the connection Err, Do, Send, Flush +// and Receive methods return that error. +func (p *Pool) Get() Conn { + c, err := p.get() + if err != nil { + return errorConnection{err} + } + return &pooledConnection{p: p, c: c} +} + +// ActiveCount returns the number of active connections in the pool. +func (p *Pool) ActiveCount() int { + p.mu.Lock() + active := p.active + p.mu.Unlock() + return active +} + +// Close releases the resources used by the pool. 
+func (p *Pool) Close() error { + p.mu.Lock() + idle := p.idle + p.idle.Init() + p.closed = true + p.active -= idle.Len() + if p.cond != nil { + p.cond.Broadcast() + } + p.mu.Unlock() + for e := idle.Front(); e != nil; e = e.Next() { + e.Value.(idleConn).c.Close() + } + return nil +} + +// release decrements the active count and signals waiters. The caller must +// hold p.mu during the call. +func (p *Pool) release() { + p.active -= 1 + if p.cond != nil { + p.cond.Signal() + } +} + +// get prunes stale connections and returns a connection from the idle list or +// creates a new connection. +func (p *Pool) get() (Conn, error) { + p.mu.Lock() + + // Prune stale connections. + + if timeout := p.IdleTimeout; timeout > 0 { + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Back() + if e == nil { + break + } + ic := e.Value.(idleConn) + if ic.t.Add(timeout).After(nowFunc()) { + break + } + p.idle.Remove(e) + p.release() + p.mu.Unlock() + ic.c.Close() + p.mu.Lock() + } + } + + for { + + // Get idle connection. + + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Front() + if e == nil { + break + } + ic := e.Value.(idleConn) + p.idle.Remove(e) + test := p.TestOnBorrow + p.mu.Unlock() + if test == nil || test(ic.c, ic.t) == nil { + return ic.c, nil + } + ic.c.Close() + p.mu.Lock() + p.release() + } + + // Check for pool closed before dialing a new connection. + + if p.closed { + p.mu.Unlock() + return nil, errors.New("redigo: get on closed pool") + } + + // Dial new connection if under limit. 
+ + if p.MaxActive == 0 || p.active < p.MaxActive { + dial := p.Dial + p.active += 1 + p.mu.Unlock() + c, err := dial() + if err != nil { + p.mu.Lock() + p.release() + p.mu.Unlock() + c = nil + } + return c, err + } + + if !p.Wait { + p.mu.Unlock() + return nil, ErrPoolExhausted + } + + if p.cond == nil { + p.cond = sync.NewCond(&p.mu) + } + p.cond.Wait() + } +} + +func (p *Pool) put(c Conn, forceClose bool) error { + err := c.Err() + p.mu.Lock() + if !p.closed && err == nil && !forceClose { + p.idle.PushFront(idleConn{t: nowFunc(), c: c}) + if p.idle.Len() > p.MaxIdle { + c = p.idle.Remove(p.idle.Back()).(idleConn).c + } else { + c = nil + } + } + + if c == nil { + if p.cond != nil { + p.cond.Signal() + } + p.mu.Unlock() + return nil + } + + p.release() + p.mu.Unlock() + return c.Close() +} + +type pooledConnection struct { + p *Pool + c Conn + state int +} + +var ( + sentinel []byte + sentinelOnce sync.Once +) + +func initSentinel() { + p := make([]byte, 64) + if _, err := rand.Read(p); err == nil { + sentinel = p + } else { + h := sha1.New() + io.WriteString(h, "Oops, rand failed. Use time instead.") + io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) + sentinel = h.Sum(nil) + } +} + +func (pc *pooledConnection) Close() error { + c := pc.c + if _, ok := c.(errorConnection); ok { + return nil + } + pc.c = errorConnection{errConnClosed} + + if pc.state&MultiState != 0 { + c.Send("DISCARD") + pc.state &^= (MultiState | WatchState) + } else if pc.state&WatchState != 0 { + c.Send("UNWATCH") + pc.state &^= WatchState + } + if pc.state&SubscribeState != 0 { + c.Send("UNSUBSCRIBE") + c.Send("PUNSUBSCRIBE") + // To detect the end of the message stream, ask the server to echo + // a sentinel value and read until we see that value. 
+ sentinelOnce.Do(initSentinel) + c.Send("ECHO", sentinel) + c.Flush() + for { + p, err := c.Receive() + if err != nil { + break + } + if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { + pc.state &^= SubscribeState + break + } + } + } + c.Do("") + pc.p.put(c, pc.state != 0) + return nil +} + +func (pc *pooledConnection) Err() error { + return pc.c.Err() +} + +func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + ci := LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Do(commandName, args...) +} + +func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { + ci := LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Send(commandName, args...) +} + +func (pc *pooledConnection) Flush() error { + return pc.c.Flush() +} + +func (pc *pooledConnection) Receive() (reply interface{}, err error) { + return pc.c.Receive() +} + +type errorConnection struct{ err error } + +func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } +func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } +func (ec errorConnection) Err() error { return ec.err } +func (ec errorConnection) Close() error { return ec.err } +func (ec errorConnection) Flush() error { return ec.err } +func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } diff --git a/vendor/github.com/minio/redigo/redis/pubsub.go b/vendor/github.com/minio/redigo/redis/pubsub.go new file mode 100644 index 000000000..c0ecce824 --- /dev/null +++ b/vendor/github.com/minio/redigo/redis/pubsub.go @@ -0,0 +1,144 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import "errors" + +// Subscription represents a subscribe or unsubscribe notification. +type Subscription struct { + + // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" + Kind string + + // The channel that was changed. + Channel string + + // The current number of subscriptions for connection. + Count int +} + +// Message represents a message notification. +type Message struct { + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// PMessage represents a pmessage notification. +type PMessage struct { + + // The matched pattern. + Pattern string + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// Pong represents a pubsub pong notification. +type Pong struct { + Data string +} + +// PubSubConn wraps a Conn with convenience methods for subscribers. +type PubSubConn struct { + Conn Conn +} + +// Close closes the connection. +func (c PubSubConn) Close() error { + return c.Conn.Close() +} + +// Subscribe subscribes the connection to the specified channels. +func (c PubSubConn) Subscribe(channel ...interface{}) error { + c.Conn.Send("SUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PSubscribe subscribes the connection to the given patterns. +func (c PubSubConn) PSubscribe(channel ...interface{}) error { + c.Conn.Send("PSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Unsubscribe unsubscribes the connection from the given channels, or from all +// of them if none is given. 
+func (c PubSubConn) Unsubscribe(channel ...interface{}) error { + c.Conn.Send("UNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PUnsubscribe unsubscribes the connection from the given patterns, or from all +// of them if none is given. +func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { + c.Conn.Send("PUNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Ping sends a PING to the server with the specified data. +func (c PubSubConn) Ping(data string) error { + c.Conn.Send("PING", data) + return c.Conn.Flush() +} + +// Receive returns a pushed message as a Subscription, Message, PMessage, Pong +// or error. The return value is intended to be used directly in a type switch +// as illustrated in the PubSubConn example. +func (c PubSubConn) Receive() interface{} { + reply, err := Values(c.Conn.Receive()) + if err != nil { + return err + } + + var kind string + reply, err = Scan(reply, &kind) + if err != nil { + return err + } + + switch kind { + case "message": + var m Message + if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { + return err + } + return m + case "pmessage": + var pm PMessage + if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil { + return err + } + return pm + case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": + s := Subscription{Kind: kind} + if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { + return err + } + return s + case "pong": + var p Pong + if _, err := Scan(reply, &p.Data); err != nil { + return err + } + return p + } + return errors.New("redigo: unknown pubsub notification") +} diff --git a/vendor/github.com/minio/redigo/redis/redis.go b/vendor/github.com/minio/redigo/redis/redis.go new file mode 100644 index 000000000..c90a48ed4 --- /dev/null +++ b/vendor/github.com/minio/redigo/redis/redis.go @@ -0,0 +1,44 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance 
with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +// Error represents an error returned in a command reply. +type Error string + +func (err Error) Error() string { return string(err) } + +// Conn represents a connection to a Redis server. +type Conn interface { + // Close closes the connection. + Close() error + + // Err returns a non-nil value if the connection is broken. The returned + // value is either the first non-nil value returned from the underlying + // network connection or a protocol parsing error. Applications should + // close broken connections. + Err() error + + // Do sends a command to the server and returns the received reply. + Do(commandName string, args ...interface{}) (reply interface{}, err error) + + // Send writes the command to the client's output buffer. + Send(commandName string, args ...interface{}) error + + // Flush flushes the output buffer to the Redis server. + Flush() error + + // Receive receives a single reply from the Redis server + Receive() (reply interface{}, err error) +} diff --git a/vendor/github.com/minio/redigo/redis/reply.go b/vendor/github.com/minio/redigo/redis/reply.go new file mode 100644 index 000000000..57896147f --- /dev/null +++ b/vendor/github.com/minio/redigo/redis/reply.go @@ -0,0 +1,393 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "strconv" +) + +// ErrNil indicates that a reply value is nil. +var ErrNil = errors.New("redigo: nil returned") + +// Int is a helper that converts a command reply to an integer. If err is not +// equal to nil, then Int returns 0, err. Otherwise, Int converts the +// reply to an int as follows: +// +// Reply type Result +// integer int(reply), nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Int(reply interface{}, err error) (int, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + x := int(reply) + if int64(x) != reply { + return 0, strconv.ErrRange + } + return x, nil + case []byte: + n, err := strconv.ParseInt(string(reply), 10, 0) + return int(n), err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply) +} + +// Int64 is a helper that converts a command reply to 64 bit integer. If err is +// not equal to nil, then Int returns 0, err. 
Otherwise, Int64 converts the +// reply to an int64 as follows: +// +// Reply type Result +// integer reply, nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Int64(reply interface{}, err error) (int64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + return reply, nil + case []byte: + n, err := strconv.ParseInt(string(reply), 10, 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply) +} + +var errNegativeInt = errors.New("redigo: unexpected value for Uint64") + +// Uint64 is a helper that converts a command reply to 64 bit integer. If err is +// not equal to nil, then Uint64 returns 0, err. Otherwise, Uint64 converts the +// reply to a uint64 as follows: +// +// Reply type Result +// integer reply, nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Uint64(reply interface{}, err error) (uint64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + if reply < 0 { + return 0, errNegativeInt + } + return uint64(reply), nil + case []byte: + n, err := strconv.ParseUint(string(reply), 10, 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply) +} + +// Float64 is a helper that converts a command reply to 64 bit float. If err is +// not equal to nil, then Float64 returns 0, err. 
Otherwise, Float64 converts +// the reply to an int as follows: +// +// Reply type Result +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Float64(reply interface{}, err error) (float64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case []byte: + n, err := strconv.ParseFloat(string(reply), 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply) +} + +// String is a helper that converts a command reply to a string. If err is not +// equal to nil, then String returns "", err. Otherwise String converts the +// reply to a string as follows: +// +// Reply type Result +// bulk string string(reply), nil +// simple string reply, nil +// nil "", ErrNil +// other "", error +func String(reply interface{}, err error) (string, error) { + if err != nil { + return "", err + } + switch reply := reply.(type) { + case []byte: + return string(reply), nil + case string: + return reply, nil + case nil: + return "", ErrNil + case Error: + return "", reply + } + return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) +} + +// Bytes is a helper that converts a command reply to a slice of bytes. If err +// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts +// the reply to a slice of bytes as follows: +// +// Reply type Result +// bulk string reply, nil +// simple string []byte(reply), nil +// nil nil, ErrNil +// other nil, error +func Bytes(reply interface{}, err error) ([]byte, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []byte: + return reply, nil + case string: + return []byte(reply), nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) +} + +// Bool is a helper that converts a command reply to a boolean. 
If err is not +// equal to nil, then Bool returns false, err. Otherwise Bool converts the +// reply to boolean as follows: +// +// Reply type Result +// integer value != 0, nil +// bulk string strconv.ParseBool(reply) +// nil false, ErrNil +// other false, error +func Bool(reply interface{}, err error) (bool, error) { + if err != nil { + return false, err + } + switch reply := reply.(type) { + case int64: + return reply != 0, nil + case []byte: + return strconv.ParseBool(string(reply)) + case nil: + return false, ErrNil + case Error: + return false, reply + } + return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) +} + +// MultiBulk is a helper that converts an array command reply to a []interface{}. +// +// Deprecated: Use Values instead. +func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } + +// Values is a helper that converts an array command reply to a []interface{}. +// If err is not equal to nil, then Values returns nil, err. Otherwise, Values +// converts the reply as follows: +// +// Reply type Result +// array reply, nil +// nil nil, ErrNil +// other nil, error +func Values(reply interface{}, err error) ([]interface{}, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + return reply, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) +} + +// Strings is a helper that converts an array command reply to a []string. If +// err is not equal to nil, then Strings returns nil, err. Nil array items are +// converted to "" in the output slice. Strings returns an error if an array +// item is not a bulk string or nil. 
+func Strings(reply interface{}, err error) ([]string, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + result := make([]string, len(reply)) + for i := range reply { + if reply[i] == nil { + continue + } + p, ok := reply[i].([]byte) + if !ok { + return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i]) + } + result[i] = string(p) + } + return result, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply) +} + +// ByteSlices is a helper that converts an array command reply to a [][]byte. +// If err is not equal to nil, then ByteSlices returns nil, err. Nil array +// items stay nil. ByteSlices returns an error if an array item is not a +// bulk string or nil. +func ByteSlices(reply interface{}, err error) ([][]byte, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + result := make([][]byte, len(reply)) + for i := range reply { + if reply[i] == nil { + continue + } + p, ok := reply[i].([]byte) + if !ok { + return nil, fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", reply[i]) + } + result[i] = p + } + return result, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for ByteSlices, got type %T", reply) +} + +// Ints is a helper that converts an array command reply to a []int. If +// err is not equal to nil, then Ints returns nil, err. +func Ints(reply interface{}, err error) ([]int, error) { + var ints []int + values, err := Values(reply, err) + if err != nil { + return ints, err + } + if err := ScanSlice(values, &ints); err != nil { + return ints, err + } + return ints, nil +} + +// StringMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]string. 
The HGETALL and CONFIG GET commands return replies in this format. +// Requires an even number of values in result. +func StringMap(result interface{}, err error) (map[string]string, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: StringMap expects even number of values result") + } + m := make(map[string]string, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, okKey := values[i].([]byte) + value, okValue := values[i+1].([]byte) + if !okKey || !okValue { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + m[string(key)] = string(value) + } + return m, nil +} + +// IntMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]int. The HGETALL commands return replies in this format. +// Requires an even number of values in result. +func IntMap(result interface{}, err error) (map[string]int, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: IntMap expects even number of values result") + } + m := make(map[string]int, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + value, err := Int(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} + +// Int64Map is a helper that converts an array of strings (alternating key, value) +// into a map[string]int64. The HGETALL commands return replies in this format. +// Requires an even number of values in result. 
+func Int64Map(result interface{}, err error) (map[string]int64, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: Int64Map expects even number of values result") + } + m := make(map[string]int64, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + value, err := Int64(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} diff --git a/vendor/github.com/minio/redigo/redis/scan.go b/vendor/github.com/minio/redigo/redis/scan.go new file mode 100644 index 000000000..962e94bcc --- /dev/null +++ b/vendor/github.com/minio/redigo/redis/scan.go @@ -0,0 +1,555 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package redis + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" +) + +func ensureLen(d reflect.Value, n int) { + if n > d.Cap() { + d.Set(reflect.MakeSlice(d.Type(), n, n)) + } else { + d.SetLen(n) + } +} + +func cannotConvert(d reflect.Value, s interface{}) error { + var sname string + switch s.(type) { + case string: + sname = "Redis simple string" + case Error: + sname = "Redis error" + case int64: + sname = "Redis integer" + case []byte: + sname = "Redis bulk string" + case []interface{}: + sname = "Redis array" + default: + sname = reflect.TypeOf(s).String() + } + return fmt.Errorf("cannot convert from %s to %s", sname, d.Type()) +} + +func convertAssignBulkString(d reflect.Value, s []byte) (err error) { + switch d.Type().Kind() { + case reflect.Float32, reflect.Float64: + var x float64 + x, err = strconv.ParseFloat(string(s), d.Type().Bits()) + d.SetFloat(x) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var x int64 + x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) + d.SetInt(x) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var x uint64 + x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) + d.SetUint(x) + case reflect.Bool: + var x bool + x, err = strconv.ParseBool(string(s)) + d.SetBool(x) + case reflect.String: + d.SetString(string(s)) + case reflect.Slice: + if d.Type().Elem().Kind() != reflect.Uint8 { + err = cannotConvert(d, s) + } else { + d.SetBytes(s) + } + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignInt(d reflect.Value, s int64) (err error) { + switch d.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + d.SetInt(s) + if d.Int() != s { + err = strconv.ErrRange + d.SetInt(0) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if s < 0 { + err = strconv.ErrRange + } else { + x := uint64(s) + d.SetUint(x) + if d.Uint() != x 
{ + err = strconv.ErrRange + d.SetUint(0) + } + } + case reflect.Bool: + d.SetBool(s != 0) + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignValue(d reflect.Value, s interface{}) (err error) { + switch s := s.(type) { + case []byte: + err = convertAssignBulkString(d, s) + case int64: + err = convertAssignInt(d, s) + default: + err = cannotConvert(d, s) + } + return err +} + +func convertAssignArray(d reflect.Value, s []interface{}) error { + if d.Type().Kind() != reflect.Slice { + return cannotConvert(d, s) + } + ensureLen(d, len(s)) + for i := 0; i < len(s); i++ { + if err := convertAssignValue(d.Index(i), s[i]); err != nil { + return err + } + } + return nil +} + +func convertAssign(d interface{}, s interface{}) (err error) { + // Handle the most common destination types using type switches and + // fall back to reflection for all other types. + switch s := s.(type) { + case nil: + // ingore + case []byte: + switch d := d.(type) { + case *string: + *d = string(s) + case *int: + *d, err = strconv.Atoi(string(s)) + case *bool: + *d, err = strconv.ParseBool(string(s)) + case *[]byte: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignBulkString(d.Elem(), s) + } + } + case int64: + switch d := d.(type) { + case *int: + x := int(s) + if int64(x) != s { + err = strconv.ErrRange + x = 0 + } + *d = x + case *bool: + *d = s != 0 + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignInt(d.Elem(), s) + } + } + case string: + switch d := d.(type) { + case *string: + *d = string(s) + default: + err = cannotConvert(reflect.ValueOf(d), s) + } + case []interface{}: + switch d := d.(type) { + case *[]interface{}: + *d = s + case *interface{}: + *d = s + case nil: + // 
skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignArray(d.Elem(), s) + } + } + case Error: + err = s + default: + err = cannotConvert(reflect.ValueOf(d), s) + } + return +} + +// Scan copies from src to the values pointed at by dest. +// +// The values pointed at by dest must be an integer, float, boolean, string, +// []byte, interface{} or slices of these types. Scan uses the standard strconv +// package to convert bulk strings to numeric and boolean types. +// +// If a dest value is nil, then the corresponding src value is skipped. +// +// If a src element is nil, then the corresponding dest value is not modified. +// +// To enable easy use of Scan in a loop, Scan returns the slice of src +// following the copied values. +func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { + if len(src) < len(dest) { + return nil, errors.New("redigo.Scan: array short") + } + var err error + for i, d := range dest { + err = convertAssign(d, src[i]) + if err != nil { + err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err) + break + } + } + return src[len(dest):], err +} + +type fieldSpec struct { + name string + index []int + omitEmpty bool +} + +type structSpec struct { + m map[string]*fieldSpec + l []*fieldSpec +} + +func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { + return ss.m[string(name)] +} + +func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + switch { + case f.PkgPath != "" && !f.Anonymous: + // Ignore unexported fields. + case f.Anonymous: + // TODO: Handle pointers. Requires change to decoder and + // protection against infinite recursion. 
+ if f.Type.Kind() == reflect.Struct { + compileStructSpec(f.Type, depth, append(index, i), ss) + } + default: + fs := &fieldSpec{name: f.Name} + tag := f.Tag.Get("redis") + p := strings.Split(tag, ",") + if len(p) > 0 { + if p[0] == "-" { + continue + } + if len(p[0]) > 0 { + fs.name = p[0] + } + for _, s := range p[1:] { + switch s { + case "omitempty": + fs.omitEmpty = true + default: + panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name())) + } + } + } + d, found := depth[fs.name] + if !found { + d = 1 << 30 + } + switch { + case len(index) == d: + // At same depth, remove from result. + delete(ss.m, fs.name) + j := 0 + for i := 0; i < len(ss.l); i++ { + if fs.name != ss.l[i].name { + ss.l[j] = ss.l[i] + j += 1 + } + } + ss.l = ss.l[:j] + case len(index) < d: + fs.index = make([]int, len(index)+1) + copy(fs.index, index) + fs.index[len(index)] = i + depth[fs.name] = len(index) + ss.m[fs.name] = fs + ss.l = append(ss.l, fs) + } + } + } +} + +var ( + structSpecMutex sync.RWMutex + structSpecCache = make(map[reflect.Type]*structSpec) + defaultFieldSpec = &fieldSpec{} +) + +func structSpecForType(t reflect.Type) *structSpec { + + structSpecMutex.RLock() + ss, found := structSpecCache[t] + structSpecMutex.RUnlock() + if found { + return ss + } + + structSpecMutex.Lock() + defer structSpecMutex.Unlock() + ss, found = structSpecCache[t] + if found { + return ss + } + + ss = &structSpec{m: make(map[string]*fieldSpec)} + compileStructSpec(t, make(map[string]int), nil, ss) + structSpecCache[t] = ss + return ss +} + +var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct") + +// ScanStruct scans alternating names and values from src to a struct. The +// HGETALL and CONFIG GET commands return replies in this format. +// +// ScanStruct uses exported field names to match values in the response. 
Use +// 'redis' field tag to override the name: +// +// Field int `redis:"myName"` +// +// Fields with the tag redis:"-" are ignored. +// +// Integer, float, boolean, string and []byte fields are supported. Scan uses the +// standard strconv package to convert bulk string values to numeric and +// boolean types. +// +// If a src element is nil, then the corresponding field is not modified. +func ScanStruct(src []interface{}, dest interface{}) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanStructValue + } + d = d.Elem() + if d.Kind() != reflect.Struct { + return errScanStructValue + } + ss := structSpecForType(d.Type()) + + if len(src)%2 != 0 { + return errors.New("redigo.ScanStruct: number of values not a multiple of 2") + } + + for i := 0; i < len(src); i += 2 { + s := src[i+1] + if s == nil { + continue + } + name, ok := src[i].([]byte) + if !ok { + return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i) + } + fs := ss.fieldSpec(name) + if fs == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err) + } + } + return nil +} + +var ( + errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a struct") +) + +// ScanSlice scans src to the slice pointed to by dest. The elements the dest +// slice must be integer, float, boolean, string, struct or pointer to struct +// values. +// +// Struct fields must be integer, float, boolean or string values. All struct +// fields are used unless a subset is specified using fieldNames. 
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanSliceValue + } + d = d.Elem() + if d.Kind() != reflect.Slice { + return errScanSliceValue + } + + isPtr := false + t := d.Type().Elem() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + isPtr = true + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + ensureLen(d, len(src)) + for i, s := range src { + if s == nil { + continue + } + if err := convertAssignValue(d.Index(i), s); err != nil { + return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err) + } + } + return nil + } + + ss := structSpecForType(t) + fss := ss.l + if len(fieldNames) > 0 { + fss = make([]*fieldSpec, len(fieldNames)) + for i, name := range fieldNames { + fss[i] = ss.m[name] + if fss[i] == nil { + return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name) + } + } + } + + if len(fss) == 0 { + return errors.New("redigo.ScanSlice: no struct fields") + } + + n := len(src) / len(fss) + if n*len(fss) != len(src) { + return errors.New("redigo.ScanSlice: length not a multiple of struct field count") + } + + ensureLen(d, n) + for i := 0; i < n; i++ { + d := d.Index(i) + if isPtr { + if d.IsNil() { + d.Set(reflect.New(t)) + } + d = d.Elem() + } + for j, fs := range fss { + s := src[i*len(fss)+j] + if s == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err) + } + } + } + return nil +} + +// Args is a helper for constructing command arguments from structured values. +type Args []interface{} + +// Add returns the result of appending value to args. +func (args Args) Add(value ...interface{}) Args { + return append(args, value...) +} + +// AddFlat returns the result of appending the flattened value of v to args. 
+// +// Maps are flattened by appending the alternating keys and map values to args. +// +// Slices are flattened by appending the slice elements to args. +// +// Structs are flattened by appending the alternating names and values of +// exported fields to args. If v is a nil struct pointer, then nothing is +// appended. The 'redis' field tag overrides struct field names. See ScanStruct +// for more information on the use of the 'redis' field tag. +// +// Other types are appended to args as is. +func (args Args) AddFlat(v interface{}) Args { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Struct: + args = flattenStruct(args, rv) + case reflect.Slice: + for i := 0; i < rv.Len(); i++ { + args = append(args, rv.Index(i).Interface()) + } + case reflect.Map: + for _, k := range rv.MapKeys() { + args = append(args, k.Interface(), rv.MapIndex(k).Interface()) + } + case reflect.Ptr: + if rv.Type().Elem().Kind() == reflect.Struct { + if !rv.IsNil() { + args = flattenStruct(args, rv.Elem()) + } + } else { + args = append(args, v) + } + default: + args = append(args, v) + } + return args +} + +func flattenStruct(args Args, v reflect.Value) Args { + ss := structSpecForType(v.Type()) + for _, fs := range ss.l { + fv := v.FieldByIndex(fs.index) + if fs.omitEmpty { + var empty = false + switch fv.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + empty = fv.Len() == 0 + case reflect.Bool: + empty = !fv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + empty = fv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + empty = fv.Uint() == 0 + case reflect.Float32, reflect.Float64: + empty = fv.Float() == 0 + case reflect.Interface, reflect.Ptr: + empty = fv.IsNil() + } + if empty { + continue + } + } + args = append(args, fs.name, fv.Interface()) + } + return args +} diff --git a/vendor/github.com/minio/redigo/redis/script.go 
b/vendor/github.com/minio/redigo/redis/script.go new file mode 100644 index 000000000..78605a90a --- /dev/null +++ b/vendor/github.com/minio/redigo/redis/script.go @@ -0,0 +1,86 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +// Script encapsulates the source, hash and key count for a Lua script. See +// http://redis.io/commands/eval for information on scripts in Redis. +type Script struct { + keyCount int + src string + hash string +} + +// NewScript returns a new script object. If keyCount is greater than or equal +// to zero, then the count is automatically inserted in the EVAL command +// argument list. If keyCount is less than zero, then the application supplies +// the count as the first value in the keysAndArgs argument to the Do, Send and +// SendHash methods. +func NewScript(keyCount int, src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} +} + +func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { + var args []interface{} + if s.keyCount < 0 { + args = make([]interface{}, 1+len(keysAndArgs)) + args[0] = spec + copy(args[1:], keysAndArgs) + } else { + args = make([]interface{}, 2+len(keysAndArgs)) + args[0] = spec + args[1] = s.keyCount + copy(args[2:], keysAndArgs) + } + return args +} + +// Do evaluates the script. 
Under the covers, Do optimistically evaluates the +// script using the EVALSHA command. If the command fails because the script is +// not loaded, then Do evaluates the script using the EVAL command (thus +// causing the script to load). +func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { + v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) + if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { + v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) + } + return v, err +} + +// SendHash evaluates the script without waiting for the reply. The script is +// evaluated with the EVALSHA command. The application must ensure that the +// script is loaded by a previous call to Send, Do or Load methods. +func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) +} + +// Send evaluates the script without waiting for the reply. +func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVAL", s.args(s.src, keysAndArgs)...) +} + +// Load loads the script without evaluating it. +func (s *Script) Load(c Conn) error { + _, err := c.Do("SCRIPT", "LOAD", s.src) + return err +} diff --git a/vendor/github.com/streadway/amqp/LICENSE b/vendor/github.com/streadway/amqp/LICENSE new file mode 100644 index 000000000..243c0ce7c --- /dev/null +++ b/vendor/github.com/streadway/amqp/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. 
+ +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/streadway/amqp/README.md b/vendor/github.com/streadway/amqp/README.md new file mode 100644 index 000000000..7869af81e --- /dev/null +++ b/vendor/github.com/streadway/amqp/README.md @@ -0,0 +1,81 @@ +# AMQP + +AMQP 0.9.1 client with RabbitMQ extensions in Go. + +# Status + +*Beta* + +[![Build Status](https://secure.travis-ci.org/streadway/amqp.png)](http://travis-ci.org/streadway/amqp) + +API changes unlikely and will be discussed on [Github +issues](https://github.com/streadway/amqp/issues) along with any bugs or +enhancements. + +# Goals + +Provide a functional interface that closely represents the AMQP 0.9.1 model +targeted to RabbitMQ as a server. This includes the minimum necessary to +interact the semantics of the protocol. + +# Non-goals + +Things not intended to be supported. + + * Auto reconnect and re-synchronization of client and server topologies. + * Reconnection would require understanding the error paths when the + topology cannot be declared on reconnect. 
This would require a new set + of types and code paths that are best suited at the call-site of this + package. AMQP has a dynamic topology that needs all peers to agree. If + this doesn't happen, the behavior is undefined. Instead of producing a + possible interface with undefined behavior, this package is designed to + be simple for the caller to implement the necessary connection-time + topology declaration so that reconnection is trivial and encapsulated in + the caller's application code. + * AMQP Protocol negotiation for forward or backward compatibility. + * 0.9.1 is stable and widely deployed. Versions 0.10 and 1.0 are divergent + specifications that change the semantics and wire format of the protocol. + We will accept patches for other protocol support but have no plans for + implementation ourselves. + * Anything other than PLAIN and EXTERNAL authentication mechanisms. + * Keeping the mechanisms interface modular makes it possible to extend + outside of this package. If other mechanisms prove to be popular, then + we would accept patches to include them in this pacakge. + +# Usage + +See the 'examples' subdirectory for simple producers and consumers executables. +If you have a use-case in mind which isn't well-represented by the examples, +please file an issue. + +# Documentation + +Use [Godoc documentation](http://godoc.org/github.com/streadway/amqp) for +reference and usage. + +[RabbitMQ tutorials in +Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) are also +available. + +# Contributing + +Pull requests are very much welcomed. Create your pull request on a non-master +branch, make sure a test or example is included that covers your change and +your commits represent coherent changes that include a reason for the change. + +To run the integration tests, make sure you have RabbitMQ running on any host, +export the environment variable `AMQP_URL=amqp://host/` and run `go test -tags +integration`. 
TravisCI will also run the integration tests. + +Thanks to the [community of contributors](https://github.com/streadway/amqp/graphs/contributors). + +# External packages + + * Google App Engine Dialer support: [https://github.com/soundtrackyourbrand/gaeamqp](https://github.com/soundtrackyourbrand/gaeamqp) + * RabbitMQ examples in Go: [https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) + +# License + +BSD 2 clause - see LICENSE for more details. + + diff --git a/vendor/github.com/streadway/amqp/allocator.go b/vendor/github.com/streadway/amqp/allocator.go new file mode 100644 index 000000000..928418826 --- /dev/null +++ b/vendor/github.com/streadway/amqp/allocator.go @@ -0,0 +1,106 @@ +package amqp + +import ( + "bytes" + "fmt" + "math/big" +) + +const ( + free = 0 + allocated = 1 +) + +// allocator maintains a bitset of allocated numbers. +type allocator struct { + pool *big.Int + last int + low int + high int +} + +// NewAllocator reserves and frees integers out of a range between low and +// high. +// +// O(N) worst case space used, where N is maximum allocated, divided by +// sizeof(big.Word) +func newAllocator(low, high int) *allocator { + return &allocator{ + pool: big.NewInt(0), + last: low, + low: low, + high: high, + } +} + +// String returns a string describing the contents of the allocator like +// "allocator[low..high] reserved..until" +// +// O(N) where N is high-low +func (a allocator) String() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "allocator[%d..%d]", a.low, a.high) + + for low := a.low; low <= a.high; low++ { + high := low + for a.reserved(high) && high <= a.high { + high++ + } + + if high > low+1 { + fmt.Fprintf(b, " %d..%d", low, high-1) + } else if high > low { + fmt.Fprintf(b, " %d", high-1) + } + + low = high + } + return b.String() +} + +// Next reserves and returns the next available number out of the range between +// low and high. 
If no number is available, false is returned. +// +// O(N) worst case runtime where N is allocated, but usually O(1) due to a +// rolling index into the oldest allocation. +func (a *allocator) next() (int, bool) { + wrapped := a.last + + // Find trailing bit + for ; a.last <= a.high; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + // Find preceeding free'd pool + a.last = a.low + + for ; a.last < wrapped; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + return 0, false +} + +// reserve claims the bit if it is not already claimed, returning true if +// succesfully claimed. +func (a *allocator) reserve(n int) bool { + if a.reserved(n) { + return false + } + a.pool.SetBit(a.pool, n-a.low, allocated) + return true +} + +// reserved returns true if the integer has been allocated +func (a *allocator) reserved(n int) bool { + return a.pool.Bit(n-a.low) == allocated +} + +// release frees the use of the number for another allocation +func (a *allocator) release(n int) { + a.pool.SetBit(a.pool, n-a.low, free) +} diff --git a/vendor/github.com/streadway/amqp/auth.go b/vendor/github.com/streadway/amqp/auth.go new file mode 100644 index 000000000..bff7d7948 --- /dev/null +++ b/vendor/github.com/streadway/amqp/auth.go @@ -0,0 +1,44 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" +) + +// Authentication interface provides a means for different SASL authentication +// mechanisms to be used during connection tuning. +type Authentication interface { + Mechanism() string + Response() string +} + +// PlainAuth is a similar to Basic Auth in HTTP. 
+type PlainAuth struct { + Username string + Password string +} + +func (me *PlainAuth) Mechanism() string { + return "PLAIN" +} + +func (me *PlainAuth) Response() string { + return fmt.Sprintf("\000%s\000%s", me.Username, me.Password) +} + +// Finds the first mechanism preferred by the client that the server supports. +func pickSASLMechanism(client []Authentication, serverMechanisms []string) (auth Authentication, ok bool) { + for _, auth = range client { + for _, mech := range serverMechanisms { + if auth.Mechanism() == mech { + return auth, true + } + } + } + + return +} diff --git a/vendor/github.com/streadway/amqp/certs.sh b/vendor/github.com/streadway/amqp/certs.sh new file mode 100755 index 000000000..834f42242 --- /dev/null +++ b/vendor/github.com/streadway/amqp/certs.sh @@ -0,0 +1,159 @@ +#!/bin/sh +# +# Creates the CA, server and client certs to be used by tls_test.go +# http://www.rabbitmq.com/ssl.html +# +# Copy stdout into the const section of tls_test.go or use for RabbitMQ +# +root=$PWD/certs + +if [ -f $root/ca/serial ]; then + echo >&2 "Previous installation found" + echo >&2 "Remove $root/ca and rerun to overwrite" + exit 1 +fi + +mkdir -p $root/ca/private +mkdir -p $root/ca/certs +mkdir -p $root/server +mkdir -p $root/client + +cd $root/ca + +chmod 700 private +touch index.txt +echo 'unique_subject = no' > index.txt.attr +echo '01' > serial +echo >openssl.cnf ' +[ ca ] +default_ca = testca + +[ testca ] +dir = . 
+certificate = $dir/cacert.pem +database = $dir/index.txt +new_certs_dir = $dir/certs +private_key = $dir/private/cakey.pem +serial = $dir/serial + +default_crl_days = 7 +default_days = 3650 +default_md = sha1 + +policy = testca_policy +x509_extensions = certificate_extensions + +[ testca_policy ] +commonName = supplied +stateOrProvinceName = optional +countryName = optional +emailAddress = optional +organizationName = optional +organizationalUnitName = optional + +[ certificate_extensions ] +basicConstraints = CA:false + +[ req ] +default_bits = 2048 +default_keyfile = ./private/cakey.pem +default_md = sha1 +prompt = yes +distinguished_name = root_ca_distinguished_name +x509_extensions = root_ca_extensions + +[ root_ca_distinguished_name ] +commonName = hostname + +[ root_ca_extensions ] +basicConstraints = CA:true +keyUsage = keyCertSign, cRLSign + +[ client_ca_extensions ] +basicConstraints = CA:false +keyUsage = digitalSignature +extendedKeyUsage = 1.3.6.1.5.5.7.3.2 + +[ server_ca_extensions ] +basicConstraints = CA:false +keyUsage = keyEncipherment +extendedKeyUsage = 1.3.6.1.5.5.7.3.1 +subjectAltName = @alt_names + +[ alt_names ] +IP.1 = 127.0.0.1 +' + +openssl req \ + -x509 \ + -nodes \ + -config openssl.cnf \ + -newkey rsa:2048 \ + -days 3650 \ + -subj "/CN=MyTestCA/" \ + -out cacert.pem \ + -outform PEM + +openssl x509 \ + -in cacert.pem \ + -out cacert.cer \ + -outform DER + +openssl genrsa -out $root/server/key.pem 2048 +openssl genrsa -out $root/client/key.pem 2048 + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=server/" \ + -key $root/server/key.pem \ + -out $root/server/req.pem \ + -outform PEM + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=client/" \ + -key $root/client/key.pem \ + -out $root/client/req.pem \ + -outform PEM + +openssl ca \ + -config openssl.cnf \ + -in $root/server/req.pem \ + -out $root/server/cert.pem \ + -notext \ + -batch \ + -extensions 
server_ca_extensions + +openssl ca \ + -config openssl.cnf \ + -in $root/client/req.pem \ + -out $root/client/cert.pem \ + -notext \ + -batch \ + -extensions client_ca_extensions + +cat <<-END +const caCert = \` +`cat $root/ca/cacert.pem` +\` + +const serverCert = \` +`cat $root/server/cert.pem` +\` + +const serverKey = \` +`cat $root/server/key.pem` +\` + +const clientCert = \` +`cat $root/client/cert.pem` +\` + +const clientKey = \` +`cat $root/client/key.pem` +\` +END diff --git a/vendor/github.com/streadway/amqp/channel.go b/vendor/github.com/streadway/amqp/channel.go new file mode 100644 index 000000000..7ac6ec98f --- /dev/null +++ b/vendor/github.com/streadway/amqp/channel.go @@ -0,0 +1,1557 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "reflect" + "sync" +) + +// 0 1 3 7 size+7 size+8 +// +------+---------+-------------+ +------------+ +-----------+ +// | type | channel | size | | payload | | frame-end | +// +------+---------+-------------+ +------------+ +-----------+ +// octet short long size octets octet +const frameHeaderSize = 1 + 2 + 4 + 1 + +/* +Channel represents an AMQP channel. Used as a context for valid message +exchange. Errors on methods with this Channel as a receiver means this channel +should be discarded and a new channel established. + +*/ +type Channel struct { + destructor sync.Once + sendM sync.Mutex // sequence channel frames + m sync.Mutex // struct field mutex + + connection *Connection + + rpc chan message + consumers *consumers + + id uint16 + + // true when we will never notify again + noNotify bool + + // Channel and Connection exceptions will be broadcast on these listeners. + closes []chan *Error + + // Listeners for active=true flow control. 
When true is sent to a listener, + // publishing should pause until false is sent to listeners. + flows []chan bool + + // Listeners for returned publishings for unroutable messages on mandatory + // publishings or undeliverable messages on immediate publishings. + returns []chan Return + + // Listeners for when the server notifies the client that + // a consumer has been cancelled. + cancels []chan string + + // Allocated when in confirm mode in order to track publish counter and order confirms + confirms *confirms + confirming bool + + // Selects on any errors from shutdown during RPC + errors chan *Error + + // State machine that manages frame order, must only be mutated by the connection + recv func(*Channel, frame) error + + // State that manages the send behavior after before and after shutdown, must + // only be mutated in shutdown() + send func(*Channel, message) error + + // Current state for frame re-assembly, only mutated from recv + message messageWithContent + header *headerFrame + body []byte +} + +// Constructs a new channel with the given framing rules +func newChannel(c *Connection, id uint16) *Channel { + return &Channel{ + connection: c, + id: id, + rpc: make(chan message), + consumers: makeConsumers(), + confirms: newConfirms(), + recv: (*Channel).recvMethod, + send: (*Channel).sendOpen, + errors: make(chan *Error, 1), + } +} + +// shutdown is called by Connection after the channel has been removed from the +// connection registry. 
+func (me *Channel) shutdown(e *Error) { + me.destructor.Do(func() { + me.m.Lock() + defer me.m.Unlock() + + // Broadcast abnormal shutdown + if e != nil { + for _, c := range me.closes { + c <- e + } + } + + me.send = (*Channel).sendClosed + + // Notify RPC if we're selecting + if e != nil { + me.errors <- e + } + + me.consumers.closeAll() + + for _, c := range me.closes { + close(c) + } + + for _, c := range me.flows { + close(c) + } + + for _, c := range me.returns { + close(c) + } + + for _, c := range me.cancels { + close(c) + } + + if me.confirms != nil { + me.confirms.Close() + } + + me.noNotify = true + }) +} + +func (me *Channel) open() error { + return me.call(&channelOpen{}, &channelOpenOk{}) +} + +// Performs a request/response call for when the message is not NoWait and is +// specified as Synchronous. +func (me *Channel) call(req message, res ...message) error { + if err := me.send(me, req); err != nil { + return err + } + + if req.wait() { + select { + case e := <-me.errors: + return e + + case msg := <-me.rpc: + if msg != nil { + for _, try := range res { + if reflect.TypeOf(msg) == reflect.TypeOf(try) { + // *res = *msg + vres := reflect.ValueOf(try).Elem() + vmsg := reflect.ValueOf(msg).Elem() + vres.Set(vmsg) + return nil + } + } + return ErrCommandInvalid + } else { + // RPC channel has been closed without an error, likely due to a hard + // error on the Connection. This indicates we have already been + // shutdown and if were waiting, will have returned from the errors chan. 
+ return ErrClosed + } + } + } + + return nil +} + +func (me *Channel) sendClosed(msg message) (err error) { + me.sendM.Lock() + defer me.sendM.Unlock() + + // After a 'channel.close' is sent or received the only valid response is + // channel.close-ok + if _, ok := msg.(*channelCloseOk); ok { + return me.connection.send(&methodFrame{ + ChannelId: me.id, + Method: msg, + }) + } + + return ErrClosed +} + +func (me *Channel) sendOpen(msg message) (err error) { + me.sendM.Lock() + defer me.sendM.Unlock() + + if content, ok := msg.(messageWithContent); ok { + props, body := content.getContent() + class, _ := content.id() + + // catch client max frame size==0 and server max frame size==0 + // set size to length of what we're trying to publish + var size int + if me.connection.Config.FrameSize > 0 { + size = me.connection.Config.FrameSize - frameHeaderSize + } else { + size = len(body) + } + + if err = me.connection.send(&methodFrame{ + ChannelId: me.id, + Method: content, + }); err != nil { + return + } + + if err = me.connection.send(&headerFrame{ + ChannelId: me.id, + ClassId: class, + Size: uint64(len(body)), + Properties: props, + }); err != nil { + return + } + + // chunk body into size (max frame size - frame header size) + for i, j := 0, size; i < len(body); i, j = j, j+size { + if j > len(body) { + j = len(body) + } + + if err = me.connection.send(&bodyFrame{ + ChannelId: me.id, + Body: body[i:j], + }); err != nil { + return + } + } + } else { + err = me.connection.send(&methodFrame{ + ChannelId: me.id, + Method: msg, + }) + } + + return +} + +// Eventually called via the state machine from the connection's reader +// goroutine, so assumes serialized access. 
+func (me *Channel) dispatch(msg message) { + switch m := msg.(type) { + case *channelClose: + me.connection.closeChannel(me, newError(m.ReplyCode, m.ReplyText)) + me.send(me, &channelCloseOk{}) + + case *channelFlow: + for _, c := range me.flows { + c <- m.Active + } + me.send(me, &channelFlowOk{Active: m.Active}) + + case *basicCancel: + for _, c := range me.cancels { + c <- m.ConsumerTag + } + me.send(me, &basicCancelOk{ConsumerTag: m.ConsumerTag}) + + case *basicReturn: + ret := newReturn(*m) + for _, c := range me.returns { + c <- *ret + } + + case *basicAck: + if me.confirming { + if m.Multiple { + me.confirms.Multiple(Confirmation{m.DeliveryTag, true}) + } else { + me.confirms.One(Confirmation{m.DeliveryTag, true}) + } + } + + case *basicNack: + if me.confirming { + if m.Multiple { + me.confirms.Multiple(Confirmation{m.DeliveryTag, false}) + } else { + me.confirms.One(Confirmation{m.DeliveryTag, false}) + } + } + + case *basicDeliver: + me.consumers.send(m.ConsumerTag, newDelivery(me, m)) + // TODO log failed consumer and close channel, this can happen when + // deliveries are in flight and a no-wait cancel has happened + + default: + me.rpc <- msg + } +} + +func (me *Channel) transition(f func(*Channel, frame) error) error { + me.recv = f + return nil +} + +func (me *Channel) recvMethod(f frame) error { + switch frame := f.(type) { + case *methodFrame: + if msg, ok := frame.Method.(messageWithContent); ok { + me.body = make([]byte, 0) + me.message = msg + return me.transition((*Channel).recvHeader) + } + + me.dispatch(frame.Method) // termination state + return me.transition((*Channel).recvMethod) + + case *headerFrame: + // drop + return me.transition((*Channel).recvMethod) + + case *bodyFrame: + // drop + return me.transition((*Channel).recvMethod) + + default: + panic("unexpected frame type") + } + + panic("unreachable") +} + +func (me *Channel) recvHeader(f frame) error { + switch frame := f.(type) { + case *methodFrame: + // interrupt content and 
handle method + return me.recvMethod(f) + + case *headerFrame: + // start collecting if we expect body frames + me.header = frame + + if frame.Size == 0 { + me.message.setContent(me.header.Properties, me.body) + me.dispatch(me.message) // termination state + return me.transition((*Channel).recvMethod) + } else { + return me.transition((*Channel).recvContent) + } + + case *bodyFrame: + // drop and reset + return me.transition((*Channel).recvMethod) + + default: + panic("unexpected frame type") + } + + panic("unreachable") +} + +// state after method + header and before the length +// defined by the header has been reached +func (me *Channel) recvContent(f frame) error { + switch frame := f.(type) { + case *methodFrame: + // interrupt content and handle method + return me.recvMethod(f) + + case *headerFrame: + // drop and reset + return me.transition((*Channel).recvMethod) + + case *bodyFrame: + me.body = append(me.body, frame.Body...) + + if uint64(len(me.body)) >= me.header.Size { + me.message.setContent(me.header.Properties, me.body) + me.dispatch(me.message) // termination state + return me.transition((*Channel).recvMethod) + } + + return me.transition((*Channel).recvContent) + + default: + panic("unexpected frame type") + } + + panic("unreachable") +} + +/* +Close initiate a clean channel closure by sending a close message with the error +code set to '200'. + +It is safe to call this method multiple times. + +*/ +func (me *Channel) Close() error { + defer me.connection.closeChannel(me, nil) + return me.call( + &channelClose{ReplyCode: replySuccess}, + &channelCloseOk{}, + ) +} + +/* +NotifyClose registers a listener for when the server sends a channel or +connection exception in the form of a Connection.Close or Channel.Close method. +Connection exceptions will be broadcast to all open channels and all channels +will be closed, where channel exceptions will only be broadcast to listeners to +this channel. 
+
+The chan provided will be closed when the Channel is closed and on a
+graceful close, no error will be sent.
+
+*/
+func (me *Channel) NotifyClose(c chan *Error) chan *Error {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	// If the channel has already shut down, hand back a closed chan so the
+	// listener never blocks waiting for a notification that cannot come.
+	if me.noNotify {
+		close(c)
+	} else {
+		me.closes = append(me.closes, c)
+	}
+
+	return c
+}
+
+/*
+NotifyFlow registers a listener for basic.flow methods sent by the server.
+When `true` is sent on one of the listener channels, all publishers should
+pause until a `false` is sent.
+
+The server may ask the producer to pause or restart the flow of Publishings
+sent by on a channel. This is a simple flow-control mechanism that a server can
+use to avoid overflowing its queues or otherwise finding itself receiving more
+messages than it can process. Note that this method is not intended for window
+control. It does not affect contents returned by basic.get-ok methods.
+
+When a new channel is opened, it is active (flow is active). Some
+applications assume that channels are inactive until started. To emulate
+this behavior a client MAY open the channel, then pause it.
+
+Publishers should respond to a flow messages as rapidly as possible and the
+server may disconnect over producing channels that do not respect these
+messages.
+
+basic.flow-ok methods will always be returned to the server regardless of
+the number of listeners there are.
+
+To control the flow of deliveries from the server. Use the Channel.Flow()
+method instead.
+
+Note: RabbitMQ will rather use TCP pushback on the network connection instead
+of sending basic.flow. This means that if a single channel is producing too
+much on the same connection, all channels using that connection will suffer,
+including acknowledgments from deliveries. Use different Connections if you
+desire to interleave consumers and producers in the same process to avoid your
+basic.ack messages from getting rate limited with your basic.publish messages.
+
+*/
+func (me *Channel) NotifyFlow(c chan bool) chan bool {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	// Closed chan for registrations after shutdown (see NotifyClose).
+	if me.noNotify {
+		close(c)
+	} else {
+		me.flows = append(me.flows, c)
+	}
+
+	return c
+}
+
+/*
+NotifyReturn registers a listener for basic.return methods. These can be sent
+from the server when a publish is undeliverable either from the mandatory or
+immediate flags.
+
+A return struct has a copy of the Publishing along with some error
+information about why the publishing failed.
+
+*/
+func (me *Channel) NotifyReturn(c chan Return) chan Return {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	// Closed chan for registrations after shutdown (see NotifyClose).
+	if me.noNotify {
+		close(c)
+	} else {
+		me.returns = append(me.returns, c)
+	}
+
+	return c
+}
+
+/*
+NotifyCancel registers a listener for basic.cancel methods. These can be sent
+from the server when a queue is deleted or when consuming from a mirrored queue
+where the master has just failed (and was moved to another node)
+
+The subscription tag is returned to the listener.
+
+*/
+func (me *Channel) NotifyCancel(c chan string) chan string {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	// Closed chan for registrations after shutdown (see NotifyClose).
+	if me.noNotify {
+		close(c)
+	} else {
+		me.cancels = append(me.cancels, c)
+	}
+
+	return c
+}
+
+/*
+NotifyConfirm calls NotifyPublish and starts a goroutine sending
+ordered Ack and Nack DeliveryTag to the respective channels.
+
+For strict ordering, use NotifyPublish instead.
+*/
+func (me *Channel) NotifyConfirm(ack, nack chan uint64) (chan uint64, chan uint64) {
+	confirms := me.NotifyPublish(make(chan Confirmation, len(ack)+len(nack)))
+
+	// Demultiplex the ordered Confirmation stream onto the ack/nack chans;
+	// both are closed once confirms closes with the Channel.
+	go func() {
+		for c := range confirms {
+			if c.Ack {
+				ack <- c.DeliveryTag
+			} else {
+				nack <- c.DeliveryTag
+			}
+		}
+		close(ack)
+		// The caller may pass the same chan for both roles; close it once.
+		if nack != ack {
+			close(nack)
+		}
+	}()
+
+	return ack, nack
+}
+
+/*
+NotifyPublish registers a listener for reliable publishing. Receives from this
+chan for every publish after Channel.Confirm will be in order starting with
+DeliveryTag 1.
+
+There will be one and only one Confirmation Publishing starting with the
+delivery tag of 1 and progressing sequentially until the total number of
+Publishings have been seen by the server.
+
+Acknowledgments will be received in the order of delivery from the
+NotifyPublish channels even if the server acknowledges them out of order.
+
+The listener chan will be closed when the Channel is closed.
+
+The capacity of the chan Confirmation must be at least as large as the
+number of outstanding publishings. Not having enough buffered chans will
+create a deadlock if you attempt to perform other operations on the Connection
+or Channel while confirms are in-flight.
+
+It's advisable to wait for all Confirmations to arrive before calling
+Channel.Close() or Connection.Close().
+
+*/
+func (me *Channel) NotifyPublish(confirm chan Confirmation) chan Confirmation {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	// Closed chan for registrations after shutdown (see NotifyClose).
+	if me.noNotify {
+		close(confirm)
+	} else {
+		me.confirms.Listen(confirm)
+	}
+
+	return confirm
+
+}
+
+/*
+Qos controls how many messages or how many bytes the server will try to keep on
+the network for consumers before receiving delivery acks. The intent of Qos is
+to make sure the network buffers stay full between the server and client.
+
+With a prefetch count greater than zero, the server will deliver that many
+messages to consumers before acknowledgments are received. The server ignores
+this option when consumers are started with noAck because no acknowledgments
+are expected or sent.
+
+With a prefetch size greater than zero, the server will try to keep at least
+that many bytes of deliveries flushed to the network before receiving
+acknowledgments from the consumers. This option is ignored when consumers are
+started with noAck.
+
+When global is true, these Qos settings apply to all existing and future
+consumers on all channels on the same connection. When false, the Channel.Qos
+settings will apply to all existing and future consumers on this channel.
+RabbitMQ does not implement the global flag.
+
+To get round-robin behavior between consumers consuming from the same queue on
+different connections, set the prefetch count to 1, and the next available
+message on the server will be delivered to the next available consumer.
+
+If your consumer work time is reasonably consistent and not much greater
+than two times your network round trip time, you will see significant
+throughput improvements starting with a prefetch count of 2 or slightly
+greater as described by benchmarks on RabbitMQ.
+
+http://www.rabbitmq.com/blog/2012/04/25/rabbitmq-performance-measurements-part-2/
+*/
+func (me *Channel) Qos(prefetchCount, prefetchSize int, global bool) error {
+	return me.call(
+		&basicQos{
+			PrefetchCount: uint16(prefetchCount),
+			PrefetchSize:  uint32(prefetchSize),
+			Global:        global,
+		},
+		&basicQosOk{},
+	)
+}
+
+/*
+Cancel stops deliveries to the consumer chan established in Channel.Consume and
+identified by consumer.
+
+Only use this method to cleanly stop receiving deliveries from the server and
+cleanly shut down the consumer chan identified by this tag. Using this method
+and waiting for remaining messages to flush from the consumer chan will ensure
+all messages received on the network will be delivered to the receiver of your
+consumer chan.
+
+Continue consuming from the chan Delivery provided by Channel.Consume until the
+chan closes.
+
+When noWait is true, do not wait for the server to acknowledge the cancel.
+Only use this when you are certain there are no deliveries requiring
+acknowledgment are in-flight otherwise they will arrive and be dropped in the
+client without an ack and will not be redelivered to other consumers.
+
+*/
+func (me *Channel) Cancel(consumer string, noWait bool) error {
+	req := &basicCancel{
+		ConsumerTag: consumer,
+		NoWait:      noWait,
+	}
+	res := &basicCancelOk{}
+
+	if err := me.call(req, res); err != nil {
+		return err
+	}
+
+	if req.wait() {
+		// We waited for cancel-ok: close using the server-confirmed tag.
+		me.consumers.close(res.ConsumerTag)
+	} else {
+		// Potentially could drop deliveries in flight
+		me.consumers.close(consumer)
+	}
+
+	return nil
+}
+
+/*
+QueueDeclare declares a queue to hold messages and deliver to consumers.
+Declaring creates a queue if it doesn't already exist, or ensures that an
+existing queue matches the same parameters.
+
+Every queue declared gets a default binding to the empty exchange "" which has
+the type "direct" with the routing key matching the queue's name. With this
+default binding, it is possible to publish messages that route directly to
+this queue by publishing to "" with the routing key of the queue name.
+
+	QueueDeclare("alerts", true, false, false, false, nil)
+	Publish("", "alerts", false, false, Publishing{Body: []byte("...")})
+
+	Delivery       Exchange  Key       Queue
+	-----------------------------------------------
+	key: alerts -> ""     -> alerts -> alerts
+
+The queue name may be empty, in which the server will generate a unique name
+which will be returned in the Name field of Queue struct.
+
+Durable and Non-Auto-Deleted queues will survive server restarts and remain
+when there are no remaining consumers or bindings. Persistent publishings will
+be restored in this queue on server restart. These queues are only able to be
+bound to durable exchanges.
+
+Non-Durable and Auto-Deleted queues will not be redeclared on server restart
+and will be deleted by the server after a short time when the last consumer is
+canceled or the last consumer's channel is closed. Queues with this lifetime
+can also be deleted normally with QueueDelete. These durable queues can only
+be bound to non-durable exchanges.
+
+Non-Durable and Non-Auto-Deleted queues will remain declared as long as the
+server is running regardless of how many consumers. This lifetime is useful
+for temporary topologies that may have long delays between consumer activity.
+These queues can only be bound to non-durable exchanges.
+
+Durable and Auto-Deleted queues will be restored on server restart, but without
+active consumers, will not survive and be removed. This Lifetime is unlikely
+to be useful.
+
+Exclusive queues are only accessible by the connection that declares them and
+will be deleted when the connection closes. Channels on other connections
+will receive an error when attempting declare, bind, consume, purge or delete a
+queue with the same name.
+
+When noWait is true, the queue will assume to be declared on the server. A
+channel exception will arrive if the conditions are met for existing queues
+or attempting to modify an existing queue from a different connection.
+
+When the error return value is not nil, you can assume the queue could not be
+declared with these parameters and the channel will be closed.
+
+*/
+func (me *Channel) QueueDeclare(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) {
+	if err := args.Validate(); err != nil {
+		return Queue{}, err
+	}
+
+	req := &queueDeclare{
+		Queue:      name,
+		Passive:    false,
+		Durable:    durable,
+		AutoDelete: autoDelete,
+		Exclusive:  exclusive,
+		NoWait:     noWait,
+		Arguments:  args,
+	}
+	res := &queueDeclareOk{}
+
+	if err := me.call(req, res); err != nil {
+		return Queue{}, err
+	}
+
+	// We waited for declare-ok: report the server's view of the queue,
+	// including a server-generated name and current counts.
+	if req.wait() {
+		return Queue{
+			Name:      res.Queue,
+			Messages:  int(res.MessageCount),
+			Consumers: int(res.ConsumerCount),
+		}, nil
+	}
+
+	// noWait: no declare-ok arrives, so only the requested name is known.
+	return Queue{
+		Name: name,
+	}, nil
+}
+
+/*
+
+QueueDeclarePassive is functionally and parametrically equivalent to
+QueueDeclare, except that it sets the "passive" attribute to true.
A passive
+queue is assumed by RabbitMQ to already exist, and attempting to connect to a
+non-existent queue will cause RabbitMQ to throw an exception. This function
+can be used to test for the existence of a queue.
+
+*/
+func (me *Channel) QueueDeclarePassive(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) {
+	if err := args.Validate(); err != nil {
+		return Queue{}, err
+	}
+
+	req := &queueDeclare{
+		Queue:      name,
+		Passive:    true,
+		Durable:    durable,
+		AutoDelete: autoDelete,
+		Exclusive:  exclusive,
+		NoWait:     noWait,
+		Arguments:  args,
+	}
+	res := &queueDeclareOk{}
+
+	if err := me.call(req, res); err != nil {
+		return Queue{}, err
+	}
+
+	// We waited for declare-ok: report the server's view of the queue.
+	if req.wait() {
+		return Queue{
+			Name:      res.Queue,
+			Messages:  int(res.MessageCount),
+			Consumers: int(res.ConsumerCount),
+		}, nil
+	}
+
+	// noWait: no declare-ok arrives, so only the requested name is known.
+	return Queue{
+		Name: name,
+	}, nil
+}
+
+/*
+QueueInspect passively declares a queue by name to inspect the current message
+count, consumer count.
+
+Use this method to check how many unacknowledged messages reside in the queue
+and how many consumers are receiving deliveries and whether a queue by this
+name already exists.
+
+If the queue by this name exists, use Channel.QueueDeclare check if it is
+declared with specific parameters.
+
+If a queue by this name does not exist, an error will be returned and the
+channel will be closed.
+
+*/
+func (me *Channel) QueueInspect(name string) (Queue, error) {
+	req := &queueDeclare{
+		Queue:   name,
+		Passive: true,
+	}
+	res := &queueDeclareOk{}
+
+	err := me.call(req, res)
+
+	state := Queue{
+		Name:      name,
+		Messages:  int(res.MessageCount),
+		Consumers: int(res.ConsumerCount),
+	}
+
+	return state, err
+}
+
+/*
+QueueBind binds an exchange to a queue so that publishings to the exchange will
+be routed to the queue when the publishing routing key matches the binding
+routing key.
+
+	QueueBind("pagers", "alert", "log", false, nil)
+	QueueBind("emails", "info", "log", false, nil)
+
+	Delivery       Exchange  Key       Queue
+	-----------------------------------------------
+	key: alert --> log ----> alert --> pagers
+	key: info ---> log ----> info ---> emails
+	key: debug --> log       (none)    (dropped)
+
+If a binding with the same key and arguments already exists between the
+exchange and queue, the attempt to rebind will be ignored and the existing
+binding will be retained.
+
+In the case that multiple bindings may cause the message to be routed to the
+same queue, the server will only route the publishing once. This is possible
+with topic exchanges.
+
+	QueueBind("pagers", "alert", "amq.topic", false, nil)
+	QueueBind("emails", "info", "amq.topic", false, nil)
+	QueueBind("emails", "#", "amq.topic", false, nil) // match everything
+
+	Delivery       Exchange        Key       Queue
+	-----------------------------------------------
+	key: alert --> amq.topic ----> alert --> pagers
+	key: info ---> amq.topic ----> # ------> emails
+	             \---> info ---/
+	key: debug --> amq.topic ----> # ------> emails
+
+It is only possible to bind a durable queue to a durable exchange regardless of
+whether the queue or exchange is auto-deleted. Bindings between durable queues
+and exchanges will also be restored on server restart.
+
+If the binding could not complete, an error will be returned and the channel
+will be closed.
+
+When noWait is true and the queue could not be bound, the channel will be
+closed with an error.
+
+*/
+func (me *Channel) QueueBind(name, key, exchange string, noWait bool, args Table) error {
+	if err := args.Validate(); err != nil {
+		return err
+	}
+
+	return me.call(
+		&queueBind{
+			Queue:      name,
+			Exchange:   exchange,
+			RoutingKey: key,
+			NoWait:     noWait,
+			Arguments:  args,
+		},
+		&queueBindOk{},
+	)
+}
+
+/*
+QueueUnbind removes a binding between an exchange and queue matching the key and
+arguments.
+
+It is possible to send an empty string for the exchange name which means to
+unbind the queue from the default exchange.
+
+*/
+func (me *Channel) QueueUnbind(name, key, exchange string, args Table) error {
+	if err := args.Validate(); err != nil {
+		return err
+	}
+
+	return me.call(
+		&queueUnbind{
+			Queue:      name,
+			Exchange:   exchange,
+			RoutingKey: key,
+			Arguments:  args,
+		},
+		&queueUnbindOk{},
+	)
+}
+
+/*
+QueuePurge removes all messages from the named queue which are not waiting to
+be acknowledged. Messages that have been delivered but have not yet been
+acknowledged will not be removed.
+
+When successful, returns the number of messages purged.
+
+If noWait is true, do not wait for the server response and the number of
+messages purged will not be meaningful.
+*/
+func (me *Channel) QueuePurge(name string, noWait bool) (int, error) {
+	req := &queuePurge{
+		Queue:  name,
+		NoWait: noWait,
+	}
+	res := &queuePurgeOk{}
+
+	err := me.call(req, res)
+
+	return int(res.MessageCount), err
+}
+
+/*
+QueueDelete removes the queue from the server including all bindings then
+purges the messages based on server configuration, returning the number of
+messages purged.
+
+When ifUnused is true, the queue will not be deleted if there are any
+consumers on the queue. If there are consumers, an error will be returned and
+the channel will be closed.
+
+When ifEmpty is true, the queue will not be deleted if there are any messages
+remaining on the queue. If there are messages, an error will be returned and
+the channel will be closed.
+
+When noWait is true, the queue will be deleted without waiting for a response
+from the server. The purged message count will not be meaningful. If the queue
+could not be deleted, a channel exception will be raised and the channel will
+be closed.
+
+*/
+func (me *Channel) QueueDelete(name string, ifUnused, ifEmpty, noWait bool) (int, error) {
+	req := &queueDelete{
+		Queue:    name,
+		IfUnused: ifUnused,
+		IfEmpty:  ifEmpty,
+		NoWait:   noWait,
+	}
+	res := &queueDeleteOk{}
+
+	err := me.call(req, res)
+
+	return int(res.MessageCount), err
+}
+
+/*
+Consume immediately starts delivering queued messages.
+
+Begin receiving on the returned chan Delivery before any other operation on the
+Connection or Channel.
+
+Continues deliveries to the returned chan Delivery until Channel.Cancel,
+Connection.Close, Channel.Close, or an AMQP exception occurs. Consumers must
+range over the chan to ensure all deliveries are received. Unreceived
+deliveries will block all methods on the same connection.
+
+All deliveries in AMQP must be acknowledged. It is expected of the consumer to
+call Delivery.Ack after it has successfully processed the delivery. If the
+consumer is cancelled or the channel or connection is closed any unacknowledged
+deliveries will be requeued at the end of the same queue.
+
+The consumer is identified by a string that is unique and scoped for all
+consumers on this channel. If you wish to eventually cancel the consumer, use
+the same non-empty identifier in Channel.Cancel. An empty string will cause
+the library to generate a unique identity. The consumer identity will be
+included in every Delivery in the ConsumerTag field
+
+When autoAck (also known as noAck) is true, the server will acknowledge
+deliveries to this consumer prior to writing the delivery to the network. When
+autoAck is true, the consumer should not call Delivery.Ack. Automatically
+acknowledging deliveries means that some deliveries may get lost if the
+consumer is unable to process them after the server delivers them.
+
+When exclusive is true, the server will ensure that this is the sole consumer
+from this queue. When exclusive is false, the server will fairly distribute
+deliveries across multiple consumers.
+
+When noLocal is true, the server will not deliver publishing sent from the same
+connection to this consumer. It's advisable to use separate connections for
+Channel.Publish and Channel.Consume so not to have TCP pushback on publishing
+affect the ability to consume messages, so this parameter is here mostly for
+completeness.
+
+When noWait is true, do not wait for the server to confirm the request and
+immediately begin deliveries. If it is not possible to consume, a channel
+exception will be raised and the channel will be closed.
+
+Optional arguments can be provided that have specific semantics for the queue
+or server.
+
+When the channel or connection closes, all delivery chans will also close.
+
+Deliveries on the returned chan will be buffered indefinitely. To limit memory
+of this buffer, use the Channel.Qos method to limit the amount of
+unacknowledged/buffered deliveries the server will deliver on this Channel.
+
+*/
+func (me *Channel) Consume(queue, consumer string, autoAck, exclusive, noLocal, noWait bool, args Table) (<-chan Delivery, error) {
+	// When we return from me.call, there may be a delivery already for the
+	// consumer that hasn't been added to the consumer hash yet. Because of
+	// this, we never rely on the server picking a consumer tag for us.
+
+	if err := args.Validate(); err != nil {
+		return nil, err
+	}
+
+	if consumer == "" {
+		consumer = uniqueConsumerTag()
+	}
+
+	req := &basicConsume{
+		Queue:       queue,
+		ConsumerTag: consumer,
+		NoLocal:     noLocal,
+		NoAck:       autoAck,
+		Exclusive:   exclusive,
+		NoWait:      noWait,
+		Arguments:   args,
+	}
+	res := &basicConsumeOk{}
+
+	deliveries := make(chan Delivery)
+
+	// Register before issuing the request so early deliveries are routed.
+	me.consumers.add(consumer, deliveries)
+
+	if err := me.call(req, res); err != nil {
+		// The consume request failed; undo the registration above.
+		me.consumers.close(consumer)
+		return nil, err
+	}
+
+	return (<-chan Delivery)(deliveries), nil
+}
+
+/*
+ExchangeDeclare declares an exchange on the server. If the exchange does not
+already exist, the server will create it.
If the exchange exists, the server
+verifies that it is of the provided type, durability and auto-delete flags.
+
+Errors returned from this method will close the channel.
+
+Exchange names starting with "amq." are reserved for pre-declared and
+standardized exchanges. The client MAY declare an exchange starting with
+"amq." if the passive option is set, or the exchange already exists. Names can
+consists of a non-empty sequence of letters, digits, hyphen, underscore,
+period, or colon.
+
+Each exchange belongs to one of a set of exchange kinds/types implemented by
+the server. The exchange types define the functionality of the exchange - i.e.
+how messages are routed through it. Once an exchange is declared, its type
+cannot be changed. The common types are "direct", "fanout", "topic" and
+"headers".
+
+Durable and Non-Auto-Deleted exchanges will survive server restarts and remain
+declared when there are no remaining bindings. This is the best lifetime for
+long-lived exchange configurations like stable routes and default exchanges.
+
+Non-Durable and Auto-Deleted exchanges will be deleted when there are no
+remaining bindings and not restored on server restart. This lifetime is
+useful for temporary topologies that should not pollute the virtual host on
+failure or after the consumers have completed.
+
+Non-Durable and Non-Auto-deleted exchanges will remain as long as the server is
+running including when there are no remaining bindings. This is useful for
+temporary topologies that may have long delays between bindings.
+
+Durable and Auto-Deleted exchanges will survive server restarts and will be
+removed before and after server restarts when there are no remaining bindings.
+These exchanges are useful for robust temporary topologies or when you require
+binding durable queues to auto-deleted exchanges.
+
+Note: RabbitMQ declares the default exchange types like 'amq.fanout' as
+durable, so queues that bind to these pre-declared exchanges must also be
+durable.
+
+Exchanges declared as `internal` do not accept publishings. Internal
+exchanges are useful for when you wish to implement inter-exchange topologies
+that should not be exposed to users of the broker.
+
+When noWait is true, declare without waiting for a confirmation from the server.
+The channel may be closed as a result of an error. Add a NotifyClose listener
+to respond to any exceptions.
+
+Optional amqp.Table of arguments that are specific to the server's implementation of
+the exchange can be sent for exchange types that require extra parameters.
+*/
+func (me *Channel) ExchangeDeclare(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error {
+	if err := args.Validate(); err != nil {
+		return err
+	}
+
+	return me.call(
+		&exchangeDeclare{
+			Exchange:   name,
+			Type:       kind,
+			Passive:    false,
+			Durable:    durable,
+			AutoDelete: autoDelete,
+			Internal:   internal,
+			NoWait:     noWait,
+			Arguments:  args,
+		},
+		&exchangeDeclareOk{},
+	)
+}
+
+/*
+
+ExchangeDeclarePassive is functionally and parametrically equivalent to
+ExchangeDeclare, except that it sets the "passive" attribute to true. A passive
+exchange is assumed by RabbitMQ to already exist, and attempting to connect to a
+non-existent exchange will cause RabbitMQ to throw an exception. This function
+can be used to detect the existence of an exchange.
+
+*/
+func (me *Channel) ExchangeDeclarePassive(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error {
+	if err := args.Validate(); err != nil {
+		return err
+	}
+
+	return me.call(
+		&exchangeDeclare{
+			Exchange:   name,
+			Type:       kind,
+			Passive:    true,
+			Durable:    durable,
+			AutoDelete: autoDelete,
+			Internal:   internal,
+			NoWait:     noWait,
+			Arguments:  args,
+		},
+		&exchangeDeclareOk{},
+	)
+}
+
+/*
+ExchangeDelete removes the named exchange from the server. When an exchange is
+deleted all queue bindings on the exchange are also deleted. If this exchange
+does not exist, the channel will be closed with an error.
+
+When ifUnused is true, the server will only delete the exchange if it has no queue
+bindings. If the exchange has queue bindings the server does not delete it
+but close the channel with an exception instead. Set this to true if you are
+not the sole owner of the exchange.
+
+When noWait is true, do not wait for a server confirmation that the exchange has
+been deleted. Failing to delete the channel could close the channel. Add a
+NotifyClose listener to respond to these channel exceptions.
+*/
+func (me *Channel) ExchangeDelete(name string, ifUnused, noWait bool) error {
+	return me.call(
+		&exchangeDelete{
+			Exchange: name,
+			IfUnused: ifUnused,
+			NoWait:   noWait,
+		},
+		&exchangeDeleteOk{},
+	)
+}
+
+/*
+ExchangeBind binds an exchange to another exchange to create inter-exchange
+routing topologies on the server. This can decouple the private topology and
+routing exchanges from exchanges intended solely for publishing endpoints.
+
+Binding two exchanges with identical arguments will not create duplicate
+bindings.
+
+Binding one exchange to another with multiple bindings will only deliver a
+message once. For example if you bind your exchange to `amq.fanout` with two
+different binding keys, only a single message will be delivered to your
+exchange even though multiple bindings will match.
+
+Given a message delivered to the source exchange, the message will be forwarded
+to the destination exchange when the routing key is matched.
+
+	ExchangeBind("sell", "MSFT", "trade", false, nil)
+	ExchangeBind("buy", "AAPL", "trade", false, nil)
+
+	Delivery       Source      Key      Destination
+	example        exchange             exchange
+	-----------------------------------------------
+	key: AAPL --> trade ----> MSFT     sell
+	          \---> AAPL --> buy
+
+When noWait is true, do not wait for the server to confirm the binding. If any
+error occurs the channel will be closed. Add a listener to NotifyClose to
+handle these errors.
+
+Optional arguments specific to the exchanges bound can also be specified.
+*/
+func (me *Channel) ExchangeBind(destination, key, source string, noWait bool, args Table) error {
+	if err := args.Validate(); err != nil {
+		return err
+	}
+
+	return me.call(
+		&exchangeBind{
+			Destination: destination,
+			Source:      source,
+			RoutingKey:  key,
+			NoWait:      noWait,
+			Arguments:   args,
+		},
+		&exchangeBindOk{},
+	)
+}
+
+/*
+ExchangeUnbind unbinds the destination exchange from the source exchange on the
+server by removing the routing key between them. This is the inverse of
+ExchangeBind. If the binding does not currently exist, an error will be
+returned.
+
+When noWait is true, do not wait for the server to confirm the deletion of the
+binding. If any error occurs the channel will be closed. Add a listener to
+NotifyClose to handle these errors.
+
+Optional arguments that are specific to the type of exchanges bound can also be
+provided. These must match the same arguments specified in ExchangeBind to
+identify the binding.
+*/
+func (me *Channel) ExchangeUnbind(destination, key, source string, noWait bool, args Table) error {
+	if err := args.Validate(); err != nil {
+		return err
+	}
+
+	return me.call(
+		&exchangeUnbind{
+			Destination: destination,
+			Source:      source,
+			RoutingKey:  key,
+			NoWait:      noWait,
+			Arguments:   args,
+		},
+		&exchangeUnbindOk{},
+	)
+}
+
+/*
+Publish sends a Publishing from the client to an exchange on the server.
+
+When you want a single message to be delivered to a single queue, you can
+publish to the default exchange with the routingKey of the queue name. This is
+because every declared queue gets an implicit route to the default exchange.
+
+Since publishings are asynchronous, any undeliverable message will get returned
+by the server. Add a listener with Channel.NotifyReturn to handle any
+undeliverable message when calling publish with either the mandatory or
+immediate parameters as true.
+ +Publishings can be undeliverable when the mandatory flag is true and no queue is +bound that matches the routing key, or when the immediate flag is true and no +consumer on the matched queue is ready to accept the delivery. + +This can return an error when the channel, connection or socket is closed. The +error or lack of an error does not indicate whether the server has received this +publishing. + +It is possible for publishing to not reach the broker if the underlying socket +is shutdown without pending publishing packets being flushed from the kernel +buffers. The easy way of making it probable that all publishings reach the +server is to always call Connection.Close before terminating your publishing +application. The way to ensure that all publishings reach the server is to add +a listener to Channel.NotifyPublish and put the channel in confirm mode with +Channel.Confirm. Publishing delivery tags and their corresponding +confirmations start at 1. Exit when all publishings are confirmed. + +When Publish does not return an error and the channel is in confirm mode, the +internal counter for DeliveryTags with the first confirmation starting at 1. 
+
+*/
+func (me *Channel) Publish(exchange, key string, mandatory, immediate bool, msg Publishing) error {
+	if err := msg.Headers.Validate(); err != nil {
+		return err
+	}
+
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	if err := me.send(me, &basicPublish{
+		Exchange:   exchange,
+		RoutingKey: key,
+		Mandatory:  mandatory,
+		Immediate:  immediate,
+		Body:       msg.Body,
+		Properties: properties{
+			Headers:         msg.Headers,
+			ContentType:     msg.ContentType,
+			ContentEncoding: msg.ContentEncoding,
+			DeliveryMode:    msg.DeliveryMode,
+			Priority:        msg.Priority,
+			CorrelationId:   msg.CorrelationId,
+			ReplyTo:         msg.ReplyTo,
+			Expiration:      msg.Expiration,
+			MessageId:       msg.MessageId,
+			Timestamp:       msg.Timestamp,
+			Type:            msg.Type,
+			UserId:          msg.UserId,
+			AppId:           msg.AppId,
+		},
+	}); err != nil {
+		return err
+	}
+
+	if me.confirming {
+		me.confirms.Publish()
+	}
+
+	return nil
+}
+
+/*
+Get synchronously receives a single Delivery from the head of a queue from the
+server to the client. In almost all cases, using Channel.Consume will be
+preferred.
+
+If there was a delivery waiting on the queue and that delivery was received the
+second return value will be true. If there was no delivery waiting or an error
+occurred, the ok bool will be false.
+
+All deliveries must be acknowledged including those from Channel.Get. Call
+Delivery.Ack on the returned delivery when you have fully processed this
+delivery.
+
+When autoAck is true, the server will automatically acknowledge this message so
+you don't have to. But if you are unable to fully process this message before
+the channel or connection is closed, the message will not get requeued.
+
+*/
+func (me *Channel) Get(queue string, autoAck bool) (msg Delivery, ok bool, err error) {
+	req := &basicGet{Queue: queue, NoAck: autoAck}
+	res := &basicGetOk{}
+	empty := &basicGetEmpty{}
+
+	if err := me.call(req, res, empty); err != nil {
+		return Delivery{}, false, err
+	}
+
+	if res.DeliveryTag > 0 {
+		return *(newDelivery(me, res)), true, nil
+	}
+
+	return Delivery{}, false, nil
+}
+
+/*
+Tx puts the channel into transaction mode on the server. All publishings and
+acknowledgments following this method will be atomically committed or rolled
+back for a single queue. Call either Channel.TxCommit or Channel.TxRollback to
+leave this transaction and immediately start a new transaction.
+
+The atomicity across multiple queues is not defined as queue declarations and
+bindings are not included in the transaction.
+
+The behavior of publishings that are delivered as mandatory or immediate while
+the channel is in a transaction is not defined.
+
+Once a channel has been put into transaction mode, it cannot be taken out of
+transaction mode. Use a different channel for non-transactional semantics.
+
+*/
+func (me *Channel) Tx() error {
+	return me.call(
+		&txSelect{},
+		&txSelectOk{},
+	)
+}
+
+/*
+TxCommit atomically commits all publishings and acknowledgments for a single
+queue and immediately start a new transaction.
+
+Calling this method without having called Channel.Tx is an error.
+
+*/
+func (me *Channel) TxCommit() error {
+	return me.call(
+		&txCommit{},
+		&txCommitOk{},
+	)
+}
+
+/*
+TxRollback atomically rolls back all publishings and acknowledgments for a
+single queue and immediately start a new transaction.
+
+Calling this method without having called Channel.Tx is an error.
+
+*/
+func (me *Channel) TxRollback() error {
+	return me.call(
+		&txRollback{},
+		&txRollbackOk{},
+	)
+}
+
+/*
+Flow pauses the delivery of messages to consumers on this channel.
Channels
+are opened with flow control not active, to open a channel with paused
+deliveries immediately call this method with true after calling
+Connection.Channel.
+
+When active is true, this method asks the server to temporarily pause deliveries
+until called again with active as false.
+
+Channel.Get methods will not be affected by flow control.
+
+This method is not intended to act as window control. Use Channel.Qos to limit
+the number of unacknowledged messages or bytes in flight instead.
+
+The server may also send us flow methods to throttle our publishings. A well
+behaving publishing client should add a listener with Channel.NotifyFlow and
+pause its publishings when true is sent on that channel.
+
+Note: RabbitMQ prefers to use TCP push back to control flow for all channels on
+a connection, so under high volume scenarios, it's wise to open separate
+Connections for publishings and deliveries.
+
+*/
+func (me *Channel) Flow(active bool) error {
+	return me.call(
+		&channelFlow{Active: active},
+		&channelFlowOk{},
+	)
+}
+
+/*
+Confirm puts this channel into confirm mode so that the client can ensure all
+publishings have successfully been received by the server. After entering this
+mode, the server will send a basic.ack or basic.nack message with the deliver
+tag set to a 1 based incrementing index corresponding to every publishing
+received after this method returns.
+
+Add a listener to Channel.NotifyPublish to respond to the Confirmations. If
+Channel.NotifyPublish is not called, the Confirmations will be silently
+ignored.
+
+The order of acknowledgments is not bound to the order of deliveries.
+
+Ack and Nack confirmations will arrive at some point in the future.
+
+Unroutable mandatory or immediate messages are acknowledged immediately after
+any Channel.NotifyReturn listeners have been notified.
Other messages are
+acknowledged when all queues that should have the message routed to them have
+either received acknowledgment of delivery or have enqueued the message,
+persisting the message if necessary.
+
+When noWait is true, the client will not wait for a response. A channel
+exception could occur if the server does not support this method.
+
+*/
+func (me *Channel) Confirm(noWait bool) error {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	if err := me.call(
+		&confirmSelect{Nowait: noWait},
+		&confirmSelectOk{},
+	); err != nil {
+		return err
+	}
+
+	me.confirming = true
+
+	return nil
+}
+
+/*
+Recover redelivers all unacknowledged deliveries on this channel.
+
+When requeue is false, messages will be redelivered to the original consumer.
+
+When requeue is true, messages will be redelivered to any available consumer,
+potentially including the original.
+
+If the deliveries cannot be recovered, an error will be returned and the channel
+will be closed.
+
+Note: this method is not implemented on RabbitMQ, use Delivery.Nack instead
+*/
+func (me *Channel) Recover(requeue bool) error {
+	return me.call(
+		&basicRecover{Requeue: requeue},
+		&basicRecoverOk{},
+	)
+}
+
+/*
+Ack acknowledges a delivery by its delivery tag when having been consumed with
+Channel.Consume or Channel.Get.
+
+Ack acknowledges all message received prior to the delivery tag when multiple
+is true.
+
+See also Delivery.Ack
+*/
+func (me *Channel) Ack(tag uint64, multiple bool) error {
+	return me.send(me, &basicAck{
+		DeliveryTag: tag,
+		Multiple:    multiple,
+	})
+}
+
+/*
+Nack negatively acknowledges a delivery by its delivery tag. Prefer this
+method to notify the server that you were not able to process this delivery and
+it must be redelivered or dropped.
+ +See also Delivery.Nack +*/ +func (me *Channel) Nack(tag uint64, multiple bool, requeue bool) error { + return me.send(me, &basicNack{ + DeliveryTag: tag, + Multiple: multiple, + Requeue: requeue, + }) +} + +/* +Reject negatively acknowledges a delivery by its delivery tag. Prefer Nack +over Reject when communicating with a RabbitMQ server because you can Nack +multiple messages, reducing the amount of protocol messages to exchange. + +See also Delivery.Reject +*/ +func (me *Channel) Reject(tag uint64, requeue bool) error { + return me.send(me, &basicReject{ + DeliveryTag: tag, + Requeue: requeue, + }) +} diff --git a/vendor/github.com/streadway/amqp/confirms.go b/vendor/github.com/streadway/amqp/confirms.go new file mode 100644 index 000000000..ebee9368b --- /dev/null +++ b/vendor/github.com/streadway/amqp/confirms.go @@ -0,0 +1,93 @@ +package amqp + +import "sync" + +// confirms resequences and notifies one or multiple publisher confirmation listeners +type confirms struct { + m sync.Mutex + listeners []chan Confirmation + sequencer map[uint64]Confirmation + published uint64 + expecting uint64 +} + +// newConfirms allocates a confirms +func newConfirms() *confirms { + return &confirms{ + sequencer: map[uint64]Confirmation{}, + published: 0, + expecting: 1, + } +} + +func (c *confirms) Listen(l chan Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + c.listeners = append(c.listeners, l) +} + +// publish increments the publishing counter +func (c *confirms) Publish() uint64 { + c.m.Lock() + defer c.m.Unlock() + + c.published++ + return c.published +} + +// confirm confirms one publishing, increments the expecting delivery tag, and +// removes bookkeeping for that delivery tag. 
+func (c *confirms) confirm(confirmation Confirmation) { + delete(c.sequencer, c.expecting) + c.expecting++ + for _, l := range c.listeners { + l <- confirmation + } +} + +// resequence confirms any out of order delivered confirmations +func (c *confirms) resequence() { + for c.expecting <= c.published { + sequenced, found := c.sequencer[c.expecting] + if !found { + return + } + c.confirm(sequenced) + } +} + +// one confirms one publishing and all following in the publishing sequence +func (c *confirms) One(confirmed Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + if c.expecting == confirmed.DeliveryTag { + c.confirm(confirmed) + } else { + c.sequencer[confirmed.DeliveryTag] = confirmed + } + c.resequence() +} + +// multiple confirms all publishings up until the delivery tag +func (c *confirms) Multiple(confirmed Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + for c.expecting <= confirmed.DeliveryTag { + c.confirm(Confirmation{c.expecting, confirmed.Ack}) + } +} + +// Close closes all listeners, discarding any out of sequence confirmations +func (c *confirms) Close() error { + c.m.Lock() + defer c.m.Unlock() + + for _, l := range c.listeners { + close(l) + } + c.listeners = nil + return nil +} diff --git a/vendor/github.com/streadway/amqp/connection.go b/vendor/github.com/streadway/amqp/connection.go new file mode 100644 index 000000000..ad4007978 --- /dev/null +++ b/vendor/github.com/streadway/amqp/connection.go @@ -0,0 +1,769 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp
+
+package amqp
+
+import (
+	"bufio"
+	"crypto/tls"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	maxChannelMax = (2 << 15) - 1
+
+	defaultHeartbeat         = 10 * time.Second
+	defaultConnectionTimeout = 30 * time.Second
+	defaultProduct           = "https://github.com/streadway/amqp"
+	defaultVersion           = "β"
+	defaultChannelMax        = maxChannelMax
+)
+
+// Config is used in DialConfig and Open to specify the desired tuning
+// parameters used during a connection open handshake. The negotiated tuning
+// will be stored in the returned connection's Config field.
+type Config struct {
+	// The SASL mechanisms to try in the client request, and the successful
+	// mechanism used on the Connection object.
+	// If SASL is nil, PlainAuth from the URL is used.
+	SASL []Authentication
+
+	// Vhost specifies the namespace of permissions, exchanges, queues and
+	// bindings on the server. Dial sets this to the path parsed from the URL.
+	Vhost string
+
+	ChannelMax int           // 0 max channels means 2^16 - 1
+	FrameSize  int           // 0 max bytes means unlimited
+	Heartbeat  time.Duration // less than 1s uses the server's interval
+
+	// TLSClientConfig specifies the client configuration of the TLS connection
+	// when establishing a tls transport.
+	// If the URL uses an amqps scheme, then an empty tls.Config with the
+	// ServerName from the URL is used.
+	TLSClientConfig *tls.Config
+
+	// Properties is a table of properties that the client advertises to the server.
+	// This is an optional setting - if the application does not set this,
+	// the underlying library will use a generic set of client properties.
+	Properties Table
+
+	// Dial returns a net.Conn prepared for a TLS handshake with TLSClientConfig,
+	// then an AMQP connection handshake.
+	// If Dial is nil, net.DialTimeout with a 30s connection and 30s read
+	// deadline is used.
+	Dial func(network, addr string) (net.Conn, error)
+}
+
+// Connection manages the serialization and deserialization of frames from IO
+// and dispatches the frames to the appropriate channel. All RPC methods and
+// asynchronous Publishing, Delivery, Ack, Nack and Return messages are
+// multiplexed on this channel. There must always be active receivers for
+// every asynchronous message on this connection.
+type Connection struct {
+	destructor sync.Once  // shutdown once
+	sendM      sync.Mutex // conn writer mutex
+	m          sync.Mutex // struct field mutex
+
+	conn io.ReadWriteCloser
+
+	rpc       chan message
+	writer    *writer
+	sends     chan time.Time     // timestamps of each frame sent
+	deadlines chan readDeadliner // heartbeater updates read deadlines
+
+	allocator *allocator // id generator valid after openTune
+	channels  map[uint16]*Channel
+
+	noNotify bool // true when we will never notify again
+	closes   []chan *Error
+	blocks   []chan Blocking
+
+	errors chan *Error
+
+	Config Config // The negotiated Config after connection.open
+
+	Major      int   // Server's major version
+	Minor      int   // Server's minor version
+	Properties Table // Server properties
+}
+
+type readDeadliner interface {
+	SetReadDeadline(time.Time) error
+}
+
+type localNetAddr interface {
+	LocalAddr() net.Addr
+}
+
+// defaultDial establishes a connection when config.Dial is not provided
+func defaultDial(network, addr string) (net.Conn, error) {
+	conn, err := net.DialTimeout(network, addr, defaultConnectionTimeout)
+	if err != nil {
+		return nil, err
+	}
+
+	// Heartbeating hasn't started yet, don't stall forever on a dead server.
+	if err := conn.SetReadDeadline(time.Now().Add(defaultConnectionTimeout)); err != nil {
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+// Dial accepts a string in the AMQP URI format and returns a new Connection
+// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10
+// seconds and sets the initial read deadline to 30 seconds.
+// +// Dial uses the zero value of tls.Config when it encounters an amqps:// +// scheme. It is equivalent to calling DialTLS(amqp, nil). +func Dial(url string) (*Connection, error) { + return DialConfig(url, Config{ + Heartbeat: defaultHeartbeat, + }) +} + +// DialTLS accepts a string in the AMQP URI format and returns a new Connection +// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10 +// seconds and sets the initial read deadline to 30 seconds. +// +// DialTLS uses the provided tls.Config when encountering an amqps:// scheme. +func DialTLS(url string, amqps *tls.Config) (*Connection, error) { + return DialConfig(url, Config{ + Heartbeat: defaultHeartbeat, + TLSClientConfig: amqps, + }) +} + +// DialConfig accepts a string in the AMQP URI format and a configuration for +// the transport and connection setup, returning a new Connection. Defaults to +// a server heartbeat interval of 10 seconds and sets the initial read deadline +// to 30 seconds. +func DialConfig(url string, config Config) (*Connection, error) { + var err error + var conn net.Conn + + uri, err := ParseURI(url) + if err != nil { + return nil, err + } + + if config.SASL == nil { + config.SASL = []Authentication{uri.PlainAuth()} + } + + if config.Vhost == "" { + config.Vhost = uri.Vhost + } + + if uri.Scheme == "amqps" && config.TLSClientConfig == nil { + config.TLSClientConfig = new(tls.Config) + } + + addr := net.JoinHostPort(uri.Host, strconv.FormatInt(int64(uri.Port), 10)) + + dialer := config.Dial + if dialer == nil { + dialer = defaultDial + } + + conn, err = dialer("tcp", addr) + if err != nil { + return nil, err + } + + if config.TLSClientConfig != nil { + // Use the URI's host for hostname validation unless otherwise set. Make a + // copy so not to modify the caller's reference when the caller reuses a + // tls.Config for a different URL. 
+		if config.TLSClientConfig.ServerName == "" {
+			c := *config.TLSClientConfig
+			c.ServerName = uri.Host
+			config.TLSClientConfig = &c
+		}
+
+		client := tls.Client(conn, config.TLSClientConfig)
+		if err := client.Handshake(); err != nil {
+			conn.Close()
+			return nil, err
+		}
+
+		conn = client
+	}
+
+	return Open(conn, config)
+}
+
+/*
+Open accepts an already established connection, or other io.ReadWriteCloser as
+a transport. Use this method if you have established a TLS connection or wish
+to use your own custom transport.
+
+*/
+func Open(conn io.ReadWriteCloser, config Config) (*Connection, error) {
+	me := &Connection{
+		conn:      conn,
+		writer:    &writer{bufio.NewWriter(conn)},
+		channels:  make(map[uint16]*Channel),
+		rpc:       make(chan message),
+		sends:     make(chan time.Time),
+		errors:    make(chan *Error, 1),
+		deadlines: make(chan readDeadliner, 1),
+	}
+	go me.reader(conn)
+	return me, me.open(config)
+}
+
+/*
+LocalAddr returns the local TCP peer address, or ":0" (the zero value of net.TCPAddr)
+as a fallback default value if the underlying transport does not support LocalAddr().
+*/
+func (me *Connection) LocalAddr() net.Addr {
+	if c, ok := me.conn.(localNetAddr); ok {
+		return c.LocalAddr()
+	}
+	return &net.TCPAddr{}
+}
+
+/*
+NotifyClose registers a listener for close events either initiated by an error
+accompanying a connection.close method or by a normal shutdown.
+
+On normal shutdowns, the chan will be closed.
+
+To reconnect after a transport or protocol error, register a listener here and
+re-run your setup process.
+
+*/
+func (me *Connection) NotifyClose(c chan *Error) chan *Error {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	if me.noNotify {
+		close(c)
+	} else {
+		me.closes = append(me.closes, c)
+	}
+
+	return c
+}
+
+/*
+NotifyBlocked registers a listener for RabbitMQ specific TCP flow control method
+extensions connection.blocked and connection.unblocked. Flow control is active
+with a reason when Blocking.Blocked is true.
When a Connection is blocked, all +methods will block across all connections until server resources become free +again. + +This optional extension is supported by the server when the +"connection.blocked" server capability key is true. + +*/ +func (me *Connection) NotifyBlocked(c chan Blocking) chan Blocking { + me.m.Lock() + defer me.m.Unlock() + + if me.noNotify { + close(c) + } else { + me.blocks = append(me.blocks, c) + } + + return c +} + +/* +Close requests and waits for the response to close the AMQP connection. + +It's advisable to use this message when publishing to ensure all kernel buffers +have been flushed on the server and client before exiting. + +An error indicates that server may not have received this request to close but +the connection should be treated as closed regardless. + +After returning from this call, all resources associated with this connection, +including the underlying io, Channels, Notify listeners and Channel consumers +will also be closed. +*/ +func (me *Connection) Close() error { + defer me.shutdown(nil) + return me.call( + &connectionClose{ + ReplyCode: replySuccess, + ReplyText: "kthxbai", + }, + &connectionCloseOk{}, + ) +} + +func (me *Connection) closeWith(err *Error) error { + defer me.shutdown(err) + return me.call( + &connectionClose{ + ReplyCode: uint16(err.Code), + ReplyText: err.Reason, + }, + &connectionCloseOk{}, + ) +} + +func (me *Connection) send(f frame) error { + me.sendM.Lock() + err := me.writer.WriteFrame(f) + me.sendM.Unlock() + + if err != nil { + // shutdown could be re-entrant from signaling notify chans + go me.shutdown(&Error{ + Code: FrameError, + Reason: err.Error(), + }) + } else { + // Broadcast we sent a frame, reducing heartbeats, only + // if there is something that can receive - like a non-reentrant + // call or if the heartbeater isn't running + select { + case me.sends <- time.Now(): + default: + } + } + + return err +} + +func (me *Connection) shutdown(err *Error) { + me.destructor.Do(func() 
{ + if err != nil { + for _, c := range me.closes { + c <- err + } + } + + for _, ch := range me.channels { + me.closeChannel(ch, err) + } + + if err != nil { + me.errors <- err + } + + me.conn.Close() + + for _, c := range me.closes { + close(c) + } + + for _, c := range me.blocks { + close(c) + } + + me.m.Lock() + me.noNotify = true + me.m.Unlock() + }) +} + +// All methods sent to the connection channel should be synchronous so we +// can handle them directly without a framing component +func (me *Connection) demux(f frame) { + if f.channel() == 0 { + me.dispatch0(f) + } else { + me.dispatchN(f) + } +} + +func (me *Connection) dispatch0(f frame) { + switch mf := f.(type) { + case *methodFrame: + switch m := mf.Method.(type) { + case *connectionClose: + // Send immediately as shutdown will close our side of the writer. + me.send(&methodFrame{ + ChannelId: 0, + Method: &connectionCloseOk{}, + }) + + me.shutdown(newError(m.ReplyCode, m.ReplyText)) + case *connectionBlocked: + for _, c := range me.blocks { + c <- Blocking{Active: true, Reason: m.Reason} + } + case *connectionUnblocked: + for _, c := range me.blocks { + c <- Blocking{Active: false} + } + default: + me.rpc <- m + } + case *heartbeatFrame: + // kthx - all reads reset our deadline. so we can drop this + default: + // lolwat - channel0 only responds to methods and heartbeats + me.closeWith(ErrUnexpectedFrame) + } +} + +func (me *Connection) dispatchN(f frame) { + me.m.Lock() + channel := me.channels[f.channel()] + me.m.Unlock() + + if channel != nil { + channel.recv(channel, f) + } else { + me.dispatchClosed(f) + } +} + +// section 2.3.7: "When a peer decides to close a channel or connection, it +// sends a Close method. The receiving peer MUST respond to a Close with a +// Close-Ok, and then both parties can close their channel or connection. Note +// that if peers ignore Close, deadlock can happen when both peers send Close +// at the same time." 
+// +// When we don't have a channel, so we must respond with close-ok on a close +// method. This can happen between a channel exception on an asynchronous +// method like basic.publish and a synchronous close with channel.close. +// In that case, we'll get both a channel.close and channel.close-ok in any +// order. +func (me *Connection) dispatchClosed(f frame) { + // Only consider method frames, drop content/header frames + if mf, ok := f.(*methodFrame); ok { + switch mf.Method.(type) { + case *channelClose: + me.send(&methodFrame{ + ChannelId: f.channel(), + Method: &channelCloseOk{}, + }) + case *channelCloseOk: + // we are already closed, so do nothing + default: + // unexpected method on closed channel + me.closeWith(ErrClosed) + } + } +} + +// Reads each frame off the IO and hand off to the connection object that +// will demux the streams and dispatch to one of the opened channels or +// handle on channel 0 (the connection channel). +func (me *Connection) reader(r io.Reader) { + buf := bufio.NewReader(r) + frames := &reader{buf} + conn, haveDeadliner := r.(readDeadliner) + + for { + frame, err := frames.ReadFrame() + + if err != nil { + me.shutdown(&Error{Code: FrameError, Reason: err.Error()}) + return + } + + me.demux(frame) + + if haveDeadliner { + me.deadlines <- conn + } + } +} + +// Ensures that at least one frame is being sent at the tuned interval with a +// jitter tolerance of 1s +func (me *Connection) heartbeater(interval time.Duration, done chan *Error) { + const maxServerHeartbeatsInFlight = 3 + + var sendTicks <-chan time.Time + if interval > 0 { + ticker := time.NewTicker(interval) + defer ticker.Stop() + sendTicks = ticker.C + } + + lastSent := time.Now() + + for { + select { + case at, stillSending := <-me.sends: + // When actively sending, depend on sent frames to reset server timer + if stillSending { + lastSent = at + } else { + return + } + + case at := <-sendTicks: + // When idle, fill the space with a heartbeat frame + if 
at.Sub(lastSent) > interval-time.Second { + if err := me.send(&heartbeatFrame{}); err != nil { + // send heartbeats even after close/closeOk so we + // tick until the connection starts erroring + return + } + } + + case conn := <-me.deadlines: + // When reading, reset our side of the deadline, if we've negotiated one with + // a deadline that covers at least 2 server heartbeats + if interval > 0 { + conn.SetReadDeadline(time.Now().Add(maxServerHeartbeatsInFlight * interval)) + } + + case <-done: + return + } + } +} + +// Convenience method to inspect the Connection.Properties["capabilities"] +// Table for server identified capabilities like "basic.ack" or +// "confirm.select". +func (me *Connection) isCapable(featureName string) bool { + capabilities, _ := me.Properties["capabilities"].(Table) + hasFeature, _ := capabilities[featureName].(bool) + return hasFeature +} + +// allocateChannel records but does not open a new channel with a unique id. +// This method is the initial part of the channel lifecycle and paired with +// releaseChannel +func (me *Connection) allocateChannel() (*Channel, error) { + me.m.Lock() + defer me.m.Unlock() + + id, ok := me.allocator.next() + if !ok { + return nil, ErrChannelMax + } + + ch := newChannel(me, uint16(id)) + me.channels[uint16(id)] = ch + + return ch, nil +} + +// releaseChannel removes a channel from the registry as the final part of the +// channel lifecycle +func (me *Connection) releaseChannel(id uint16) { + me.m.Lock() + defer me.m.Unlock() + + delete(me.channels, id) + me.allocator.release(int(id)) +} + +// openChannel allocates and opens a channel, must be paired with closeChannel +func (me *Connection) openChannel() (*Channel, error) { + ch, err := me.allocateChannel() + if err != nil { + return nil, err + } + + if err := ch.open(); err != nil { + return nil, err + } + return ch, nil +} + +// closeChannel releases and initiates a shutdown of the channel. 
All channel
+// closures should be initiated here for proper channel lifecycle management on
+// this connection.
+func (me *Connection) closeChannel(ch *Channel, e *Error) {
+	ch.shutdown(e)
+	me.releaseChannel(ch.id)
+}
+
+/*
+Channel opens a unique, concurrent server channel to process the bulk of AMQP
+messages. Any error from methods on this receiver will render the receiver
+invalid and a new Channel should be opened.
+
+*/
+func (me *Connection) Channel() (*Channel, error) {
+	return me.openChannel()
+}
+
+func (me *Connection) call(req message, res ...message) error {
+	// Special case for when the protocol header frame is sent instead of a
+	// request method
+	if req != nil {
+		if err := me.send(&methodFrame{ChannelId: 0, Method: req}); err != nil {
+			return err
+		}
+	}
+
+	select {
+	case err := <-me.errors:
+		return err
+
+	case msg := <-me.rpc:
+		// Try to match one of the result types
+		for _, try := range res {
+			if reflect.TypeOf(msg) == reflect.TypeOf(try) {
+				// *res = *msg
+				vres := reflect.ValueOf(try).Elem()
+				vmsg := reflect.ValueOf(msg).Elem()
+				vres.Set(vmsg)
+				return nil
+			}
+		}
+		return ErrCommandInvalid
+	}
+
+	panic("unreachable")
+}
+
+//    Connection          = open-Connection *use-Connection close-Connection
+//    open-Connection     = C:protocol-header
+//                          S:START C:START-OK
+//                          *challenge
+//                          S:TUNE C:TUNE-OK
+//                          C:OPEN S:OPEN-OK
+//    challenge           = S:SECURE C:SECURE-OK
+//    use-Connection      = *channel
+//    close-Connection    = C:CLOSE S:CLOSE-OK
+//                        / S:CLOSE C:CLOSE-OK
+func (me *Connection) open(config Config) error {
+	if err := me.send(&protocolHeader{}); err != nil {
+		return err
+	}
+
+	return me.openStart(config)
+}
+
+func (me *Connection) openStart(config Config) error {
+	start := &connectionStart{}
+
+	if err := me.call(nil, start); err != nil {
+		return err
+	}
+
+	me.Major = int(start.VersionMajor)
+	me.Minor = int(start.VersionMinor)
+	me.Properties = Table(start.ServerProperties)
+
+	// eventually support challenge/response here by also
responding to + // connectionSecure. + auth, ok := pickSASLMechanism(config.SASL, strings.Split(start.Mechanisms, " ")) + if !ok { + return ErrSASL + } + + // Save this mechanism off as the one we chose + me.Config.SASL = []Authentication{auth} + + return me.openTune(config, auth) +} + +func (me *Connection) openTune(config Config, auth Authentication) error { + if len(config.Properties) == 0 { + config.Properties = Table{ + "product": defaultProduct, + "version": defaultVersion, + } + } + + config.Properties["capabilities"] = Table{ + "connection.blocked": true, + "consumer_cancel_notify": true, + } + + ok := &connectionStartOk{ + Mechanism: auth.Mechanism(), + Response: auth.Response(), + ClientProperties: config.Properties, + } + tune := &connectionTune{} + + if err := me.call(ok, tune); err != nil { + // per spec, a connection can only be closed when it has been opened + // so at this point, we know it's an auth error, but the socket + // was closed instead. Return a meaningful error. + return ErrCredentials + } + + // When the server and client both use default 0, then the max channel is + // only limited by uint16. + me.Config.ChannelMax = pick(config.ChannelMax, int(tune.ChannelMax)) + if me.Config.ChannelMax == 0 { + me.Config.ChannelMax = defaultChannelMax + } + me.Config.ChannelMax = min(me.Config.ChannelMax, maxChannelMax) + + // Frame size includes headers and end byte (len(payload)+8), even if + // this is less than FrameMinSize, use what the server sends because the + // alternative is to stop the handshake here. 
+ me.Config.FrameSize = pick(config.FrameSize, int(tune.FrameMax)) + + // Save this off for resetDeadline() + me.Config.Heartbeat = time.Second * time.Duration(pick( + int(config.Heartbeat/time.Second), + int(tune.Heartbeat))) + + // "The client should start sending heartbeats after receiving a + // Connection.Tune method" + go me.heartbeater(me.Config.Heartbeat, me.NotifyClose(make(chan *Error, 1))) + + if err := me.send(&methodFrame{ + ChannelId: 0, + Method: &connectionTuneOk{ + ChannelMax: uint16(me.Config.ChannelMax), + FrameMax: uint32(me.Config.FrameSize), + Heartbeat: uint16(me.Config.Heartbeat / time.Second), + }, + }); err != nil { + return err + } + + return me.openVhost(config) +} + +func (me *Connection) openVhost(config Config) error { + req := &connectionOpen{VirtualHost: config.Vhost} + res := &connectionOpenOk{} + + if err := me.call(req, res); err != nil { + // Cannot be closed yet, but we know it's a vhost problem + return ErrVhost + } + + me.Config.Vhost = config.Vhost + + return me.openComplete() +} + +// openComplete performs any final Connection initialization dependent on the +// connection handshake. +func (me *Connection) openComplete() error { + me.allocator = newAllocator(1, me.Config.ChannelMax) + return nil +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func pick(client, server int) int { + if client == 0 || server == 0 { + return max(client, server) + } + return min(client, server) +} diff --git a/vendor/github.com/streadway/amqp/consumers.go b/vendor/github.com/streadway/amqp/consumers.go new file mode 100644 index 000000000..b6bd60575 --- /dev/null +++ b/vendor/github.com/streadway/amqp/consumers.go @@ -0,0 +1,118 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" + "os" + "sync" + "sync/atomic" +) + +var consumerSeq uint64 + +func uniqueConsumerTag() string { + return fmt.Sprintf("ctag-%s-%d", os.Args[0], atomic.AddUint64(&consumerSeq, 1)) +} + +type consumerBuffers map[string]chan *Delivery + +// Concurrent type that manages the consumerTag -> +// ingress consumerBuffer mapping +type consumers struct { + sync.Mutex + chans consumerBuffers +} + +func makeConsumers() *consumers { + return &consumers{chans: make(consumerBuffers)} +} + +func bufferDeliveries(in chan *Delivery, out chan Delivery) { + var queue []*Delivery + var queueIn = in + + for delivery := range in { + select { + case out <- *delivery: + // delivered immediately while the consumer chan can receive + default: + queue = append(queue, delivery) + } + + for len(queue) > 0 { + select { + case out <- *queue[0]: + queue = queue[1:] + case delivery, open := <-queueIn: + if open { + queue = append(queue, delivery) + } else { + // stop receiving to drain the queue + queueIn = nil + } + } + } + } + + close(out) +} + +// On key conflict, close the previous channel. +func (me *consumers) add(tag string, consumer chan Delivery) { + me.Lock() + defer me.Unlock() + + if prev, found := me.chans[tag]; found { + close(prev) + } + + in := make(chan *Delivery) + go bufferDeliveries(in, consumer) + + me.chans[tag] = in +} + +func (me *consumers) close(tag string) (found bool) { + me.Lock() + defer me.Unlock() + + ch, found := me.chans[tag] + + if found { + delete(me.chans, tag) + close(ch) + } + + return found +} + +func (me *consumers) closeAll() { + me.Lock() + defer me.Unlock() + + for _, ch := range me.chans { + close(ch) + } + + me.chans = make(consumerBuffers) +} + +// Sends a delivery to a the consumer identified by `tag`. 
+// If unbuffered channels are used for Consume this method +// could block all deliveries until the consumer +// receives on the other end of the channel. +func (me *consumers) send(tag string, msg *Delivery) bool { + me.Lock() + defer me.Unlock() + + buffer, found := me.chans[tag] + if found { + buffer <- msg + } + + return found +} diff --git a/vendor/github.com/streadway/amqp/delivery.go b/vendor/github.com/streadway/amqp/delivery.go new file mode 100644 index 000000000..f84ae4592 --- /dev/null +++ b/vendor/github.com/streadway/amqp/delivery.go @@ -0,0 +1,173 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "time" +) + +var errDeliveryNotInitialized = errors.New("delivery not initialized") + +// Acknowledger notifies the server of successful or failed consumption of +// delivieries via identifier found in the Delivery.DeliveryTag field. +// +// Applications can provide mock implementations in tests of Delivery handlers. +type Acknowledger interface { + Ack(tag uint64, multiple bool) error + Nack(tag uint64, multiple bool, requeue bool) error + Reject(tag uint64, requeue bool) error +} + +// Delivery captures the fields for a previously delivered message resident in +// a queue to be delivered by the server to a consumer from Channel.Consume or +// Channel.Get. 
+type Delivery struct { + Acknowledger Acknowledger // the channel from which this delivery arrived + + Headers Table // Application or header exchange table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // queue implemention use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user - should be authenticated user + AppId string // application use - creating application id + + // Valid only with Channel.Consume + ConsumerTag string + + // Valid only with Channel.Get + MessageCount uint32 + + DeliveryTag uint64 + Redelivered bool + Exchange string // basic.publish exhange + RoutingKey string // basic.publish routing key + + Body []byte +} + +func newDelivery(channel *Channel, msg messageWithContent) *Delivery { + props, body := msg.getContent() + + delivery := Delivery{ + Acknowledger: channel, + + Headers: props.Headers, + ContentType: props.ContentType, + ContentEncoding: props.ContentEncoding, + DeliveryMode: props.DeliveryMode, + Priority: props.Priority, + CorrelationId: props.CorrelationId, + ReplyTo: props.ReplyTo, + Expiration: props.Expiration, + MessageId: props.MessageId, + Timestamp: props.Timestamp, + Type: props.Type, + UserId: props.UserId, + AppId: props.AppId, + + Body: body, + } + + // Properties for the delivery types + switch m := msg.(type) { + case *basicDeliver: + delivery.ConsumerTag = m.ConsumerTag + delivery.DeliveryTag = m.DeliveryTag + delivery.Redelivered = m.Redelivered + delivery.Exchange = 
m.Exchange + delivery.RoutingKey = m.RoutingKey + + case *basicGetOk: + delivery.MessageCount = m.MessageCount + delivery.DeliveryTag = m.DeliveryTag + delivery.Redelivered = m.Redelivered + delivery.Exchange = m.Exchange + delivery.RoutingKey = m.RoutingKey + } + + return &delivery +} + +/* +Ack delegates an acknowledgement through the Acknowledger interface that the +client or server has finished work on a delivery. + +All deliveries in AMQP must be acknowledged. If you called Channel.Consume +with autoAck true then the server will be automatically ack each message and +this method should not be called. Otherwise, you must call Delivery.Ack after +you have successfully processed this delivery. + +When multiple is true, this delivery and all prior unacknowledged deliveries +on the same channel will be acknowledged. This is useful for batch processing +of deliveries. + +An error will indicate that the acknowledge could not be delivered to the +channel it was sent from. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. +*/ +func (me Delivery) Ack(multiple bool) error { + if me.Acknowledger == nil { + return errDeliveryNotInitialized + } + return me.Acknowledger.Ack(me.DeliveryTag, multiple) +} + +/* +Reject delegates a negatively acknowledgement through the Acknowledger interface. + +When requeue is true, queue this message to be delivered to a consumer on a +different channel. When requeue is false or the server is unable to queue this +message, it will be dropped. + +If you are batch processing deliveries, and your server supports it, prefer +Delivery.Nack. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. 
+*/ +func (me Delivery) Reject(requeue bool) error { + if me.Acknowledger == nil { + return errDeliveryNotInitialized + } + return me.Acknowledger.Reject(me.DeliveryTag, requeue) +} + +/* +Nack negatively acknowledge the delivery of message(s) identified by the +delivery tag from either the client or server. + +When multiple is true, nack messages up to and including delivered messages up +until the delivery tag delivered on the same channel. + +When requeue is true, request the server to deliver this message to a different +consumer. If it is not possible or requeue is false, the message will be +dropped or delivered to a server configured dead-letter queue. + +This method must not be used to select or requeue messages the client wishes +not to handle, rather it is to inform the server that the client is incapable +of handling this message at this time. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. +*/ +func (me Delivery) Nack(multiple, requeue bool) error { + if me.Acknowledger == nil { + return errDeliveryNotInitialized + } + return me.Acknowledger.Nack(me.DeliveryTag, multiple, requeue) +} diff --git a/vendor/github.com/streadway/amqp/doc.go b/vendor/github.com/streadway/amqp/doc.go new file mode 100644 index 000000000..94c29f825 --- /dev/null +++ b/vendor/github.com/streadway/amqp/doc.go @@ -0,0 +1,108 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* +AMQP 0.9.1 client with RabbitMQ extensions + +Understand the AMQP 0.9.1 messaging model by reviewing these links first. Much +of the terminology in this library directly relates to AMQP concepts. 
+ + Resources + + http://www.rabbitmq.com/tutorials/amqp-concepts.html + http://www.rabbitmq.com/getstarted.html + http://www.rabbitmq.com/amqp-0-9-1-reference.html + +Design + +Most other broker clients publish to queues, but in AMQP, clients publish +Exchanges instead. AMQP is programmable, meaning that both the producers and +consumers agree on the configuration of the broker, instead requiring an +operator or system configuration that declares the logical topology in the +broker. The routing between producers and consumer queues is via Bindings. +These bindings form the logical topology of the broker. + +In this library, a message sent from publisher is called a "Publishing" and a +message received to a consumer is called a "Delivery". The fields of +Publishings and Deliveries are close but not exact mappings to the underlying +wire format to maintain stronger types. Many other libraries will combine +message properties with message headers. In this library, the message well +known properties are strongly typed fields on the Publishings and Deliveries, +whereas the user defined headers are in the Headers field. + +The method naming closely matches the protocol's method name with positional +parameters mapping to named protocol message fields. The motivation here is to +present a comprehensive view over all possible interactions with the server. + +Generally, methods that map to protocol methods of the "basic" class will be +elided in this interface, and "select" methods of various channel mode selectors +will be elided for example Channel.Confirm and Channel.Tx. + +The library is intentionally designed to be synchronous, where responses for +each protocol message are required to be received in an RPC manner. Some +methods have a noWait parameter like Channel.QueueDeclare, and some methods are +asynchronous like Channel.Publish. 
The error values should still be checked for +these methods as they will indicate IO failures like when the underlying +connection closes. + +Asynchronous Events + +Clients of this library may be interested in receiving some of the protocol +messages other than Deliveries like basic.ack methods while a channel is in +confirm mode. + +The Notify* methods with Connection and Channel receivers model the pattern of +asynchronous events like closes due to exceptions, or messages that are sent out +of band from an RPC call like basic.ack or basic.flow. + +Any asynchronous events, including Deliveries and Publishings must always have +a receiver until the corresponding chans are closed. Without asynchronous +receivers, the sychronous methods will block. + +Use Case + +It's important as a client to an AMQP topology to ensure the state of the +broker matches your expectations. For both publish and consume use cases, +make sure you declare the queues, exchanges and bindings you expect to exist +prior to calling Channel.Publish or Channel.Consume. + + // Connections start with amqp.Dial() typically from a command line argument + // or environment variable. + connection, err := amqp.Dial(os.Getenv("AMQP_URL")) + + // To cleanly shutdown by flushing kernel buffers, make sure to close and + // wait for the response. + defer connection.Close() + + // Most operations happen on a channel. If any error is returned on a + // channel, the channel will no longer be valid, throw it away and try with + // a different channel. If you use many channels, it's useful for the + // server to + channel, err := connection.Channel() + + // Declare your topology here, if it doesn't exist, it will be created, if + // it existed already and is not what you expect, then that's considered an + // error. + + // Use your connection on this topology with either Publish or Consume, or + // inspect your queues with QueueInspect. It's unwise to mix Publish and + // Consume to let TCP do its job well. 
+ +SSL/TLS - Secure connections + +When Dial encounters an amqps:// scheme, it will use the zero value of a +tls.Config. This will only perform server certificate and host verification. + +Use DialTLS when you wish to provide a client certificate (recommended), +include a private certificate authority's certificate in the cert chain for +server validity, or run insecure by not verifying the server certificate dial +your own connection. DialTLS will use the provided tls.Config when it +encounters an amqps:// scheme and will dial a plain connection when it +encounters an amqp:// scheme. + +SSL/TLS in RabbitMQ is documented here: http://www.rabbitmq.com/ssl.html + +*/ +package amqp diff --git a/vendor/github.com/streadway/amqp/fuzz.go b/vendor/github.com/streadway/amqp/fuzz.go new file mode 100644 index 000000000..bf7c7689b --- /dev/null +++ b/vendor/github.com/streadway/amqp/fuzz.go @@ -0,0 +1,16 @@ +// +build gofuzz +package amqp + +import "bytes" + +func Fuzz(data []byte) int { + r := reader{bytes.NewReader(data)} + frame, err := r.ReadFrame() + if err != nil { + if frame != nil { + panic("frame is not nil") + } + return 0 + } + return 1 +} diff --git a/vendor/github.com/streadway/amqp/gen.sh b/vendor/github.com/streadway/amqp/gen.sh new file mode 100755 index 000000000..d46e19bd8 --- /dev/null +++ b/vendor/github.com/streadway/amqp/gen.sh @@ -0,0 +1,2 @@ +#!/bin/sh +go run spec/gen.go < spec/amqp0-9-1.stripped.extended.xml | gofmt > spec091.go diff --git a/vendor/github.com/streadway/amqp/read.go b/vendor/github.com/streadway/amqp/read.go new file mode 100644 index 000000000..74e90ef8f --- /dev/null +++ b/vendor/github.com/streadway/amqp/read.go @@ -0,0 +1,447 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "time" +) + +/* +Reads a frame from an input stream and returns an interface that can be cast into +one of the following: + + methodFrame + PropertiesFrame + bodyFrame + heartbeatFrame + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a +'frame-end' octet that detects malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size octets octet + +To read a frame, we: + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or + +“gathering reads” to avoid doing three separate system calls to read a frame. 
+*/ +func (me *reader) ReadFrame() (frame frame, err error) { + var scratch [7]byte + + if _, err = io.ReadFull(me.r, scratch[:7]); err != nil { + return + } + + typ := uint8(scratch[0]) + channel := binary.BigEndian.Uint16(scratch[1:3]) + size := binary.BigEndian.Uint32(scratch[3:7]) + + switch typ { + case frameMethod: + if frame, err = me.parseMethodFrame(channel, size); err != nil { + return + } + + case frameHeader: + if frame, err = me.parseHeaderFrame(channel, size); err != nil { + return + } + + case frameBody: + if frame, err = me.parseBodyFrame(channel, size); err != nil { + return nil, err + } + + case frameHeartbeat: + if frame, err = me.parseHeartbeatFrame(channel, size); err != nil { + return + } + + default: + return nil, ErrFrame + } + + if _, err = io.ReadFull(me.r, scratch[:1]); err != nil { + return nil, err + } + + if scratch[0] != frameEnd { + return nil, ErrFrame + } + + return +} + +func readShortstr(r io.Reader) (v string, err error) { + var length uint8 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readLongstr(r io.Reader) (v string, err error) { + var length uint32 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readDecimal(r io.Reader) (v Decimal, err error) { + if err = binary.Read(r, binary.BigEndian, &v.Scale); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &v.Value); err != nil { + return + } + return +} + +func readFloat32(r io.Reader) (v float32, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readFloat64(r io.Reader) (v float64, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + 
return +} + +func readTimestamp(r io.Reader) (v time.Time, err error) { + var sec int64 + if err = binary.Read(r, binary.BigEndian, &sec); err != nil { + return + } + return time.Unix(sec, 0), nil +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func readField(r io.Reader) (v interface{}, err error) { + var typ byte + if err = binary.Read(r, binary.BigEndian, &typ); err != nil { + return + } + + switch typ { + case 't': + var value uint8 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return (value != 0), nil + + case 'b': + var value [1]byte + if _, err = io.ReadFull(r, value[0:1]); err != nil { + return + } + return value[0], nil + + case 's': + var value int16 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'I': + var value int32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'l': + var value int64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'f': + var value float32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'd': + var value float64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'D': + return readDecimal(r) + + case 'S': + return readLongstr(r) + + case 'A': + return readArray(r) + + case 'T': + return readTimestamp(r) + + case 'F': + return readTable(r) + + case 'x': + var len int32 + if err = binary.Read(r, binary.BigEndian, &len); err != nil { + return nil, err + } + + value := make([]byte, len) + if _, err = io.ReadFull(r, value); err != nil { + return nil, err + } + return value, err + + case 'V': + return nil, nil + } + + return nil, ErrSyntax +} + +/* + Field tables are 
long strings that contain packed name-value pairs. The + name-value pairs are encoded as short string defining the name, and octet + defining the values type and then the value itself. The valid field types for + tables are an extension of the native integer, bit, string, and timestamp + types, and are shown in the grammar. Multi-octet integer fields are always + held in network byte order. +*/ +func readTable(r io.Reader) (table Table, err error) { + var nested bytes.Buffer + var str string + + if str, err = readLongstr(r); err != nil { + return + } + + nested.Write([]byte(str)) + + table = make(Table) + + for nested.Len() > 0 { + var key string + var value interface{} + + if key, err = readShortstr(&nested); err != nil { + return + } + + if value, err = readField(&nested); err != nil { + return + } + + table[key] = value + } + + return +} + +func readArray(r io.Reader) ([]interface{}, error) { + var size uint32 + var err error + + if err = binary.Read(r, binary.BigEndian, &size); err != nil { + return nil, err + } + + lim := &io.LimitedReader{R: r, N: int64(size)} + arr := make([]interface{}, 0) + var field interface{} + + for { + if field, err = readField(lim); err != nil { + if err == io.EOF { + break + } + return nil, err + } + arr = append(arr, field) + } + + return arr, nil +} + +// Checks if this bit mask matches the flags bitset +func hasProperty(mask uint16, prop int) bool { + return int(mask)&prop > 0 +} + +func (me *reader) parseHeaderFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &headerFrame{ + ChannelId: channel, + } + + if err = binary.Read(me.r, binary.BigEndian, &hf.ClassId); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &hf.weight); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &hf.Size); err != nil { + return + } + + var flags uint16 + + if err = binary.Read(me.r, binary.BigEndian, &flags); err != nil { + return + } + + if hasProperty(flags, flagContentType) { + if 
hf.Properties.ContentType, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagContentEncoding) { + if hf.Properties.ContentEncoding, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagHeaders) { + if hf.Properties.Headers, err = readTable(me.r); err != nil { + return + } + } + if hasProperty(flags, flagDeliveryMode) { + if err = binary.Read(me.r, binary.BigEndian, &hf.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(flags, flagPriority) { + if err = binary.Read(me.r, binary.BigEndian, &hf.Properties.Priority); err != nil { + return + } + } + if hasProperty(flags, flagCorrelationId) { + if hf.Properties.CorrelationId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagReplyTo) { + if hf.Properties.ReplyTo, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagExpiration) { + if hf.Properties.Expiration, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagMessageId) { + if hf.Properties.MessageId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagTimestamp) { + if hf.Properties.Timestamp, err = readTimestamp(me.r); err != nil { + return + } + } + if hasProperty(flags, flagType) { + if hf.Properties.Type, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagUserId) { + if hf.Properties.UserId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagAppId) { + if hf.Properties.AppId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagReserved1) { + if hf.Properties.reserved1, err = readShortstr(me.r); err != nil { + return + } + } + + return hf, nil +} + +func (me *reader) parseBodyFrame(channel uint16, size uint32) (frame frame, err error) { + bf := &bodyFrame{ + ChannelId: channel, + Body: make([]byte, size), + } + + if _, err = io.ReadFull(me.r, bf.Body); err 
!= nil { + return nil, err + } + + return bf, nil +} + +var errHeartbeatPayload = errors.New("Heartbeats should not have a payload") + +func (me *reader) parseHeartbeatFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &heartbeatFrame{ + ChannelId: channel, + } + + if size > 0 { + return nil, errHeartbeatPayload + } + + return hf, nil +} diff --git a/vendor/github.com/streadway/amqp/return.go b/vendor/github.com/streadway/amqp/return.go new file mode 100644 index 000000000..dfebd635d --- /dev/null +++ b/vendor/github.com/streadway/amqp/return.go @@ -0,0 +1,64 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "time" +) + +// Return captures a flattened struct of fields returned by the server when a +// Publishing is unable to be delivered either due to the `mandatory` flag set +// and no route found, or `immediate` flag set and no free consumer. 
+type Return struct { + ReplyCode uint16 // reason + ReplyText string // description + Exchange string // basic.publish exchange + RoutingKey string // basic.publish routing key + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implemention use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user id + AppId string // application use - creating application + + Body []byte +} + +func newReturn(msg basicReturn) *Return { + props, body := msg.getContent() + + return &Return{ + ReplyCode: msg.ReplyCode, + ReplyText: msg.ReplyText, + Exchange: msg.Exchange, + RoutingKey: msg.RoutingKey, + + Headers: props.Headers, + ContentType: props.ContentType, + ContentEncoding: props.ContentEncoding, + DeliveryMode: props.DeliveryMode, + Priority: props.Priority, + CorrelationId: props.CorrelationId, + ReplyTo: props.ReplyTo, + Expiration: props.Expiration, + MessageId: props.MessageId, + Timestamp: props.Timestamp, + Type: props.Type, + UserId: props.UserId, + AppId: props.AppId, + + Body: body, + } +} diff --git a/vendor/github.com/streadway/amqp/spec091.go b/vendor/github.com/streadway/amqp/spec091.go new file mode 100644 index 000000000..a95380303 --- /dev/null +++ b/vendor/github.com/streadway/amqp/spec091.go @@ -0,0 +1,3306 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* GENERATED FILE - DO NOT EDIT */ +/* Rebuild from the spec/gen.go tool */ + +package amqp + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Error codes that can be sent from the server during a connection or +// channel exception or used by the client to indicate a class of error like +// ErrCredentials. The text of the error is likely more interesting than +// these constants. +const ( + frameMethod = 1 + frameHeader = 2 + frameBody = 3 + frameHeartbeat = 8 + frameMinSize = 4096 + frameEnd = 206 + replySuccess = 200 + ContentTooLarge = 311 + NoRoute = 312 + NoConsumers = 313 + ConnectionForced = 320 + InvalidPath = 402 + AccessRefused = 403 + NotFound = 404 + ResourceLocked = 405 + PreconditionFailed = 406 + FrameError = 501 + SyntaxError = 502 + CommandInvalid = 503 + ChannelError = 504 + UnexpectedFrame = 505 + ResourceError = 506 + NotAllowed = 530 + NotImplemented = 540 + InternalError = 541 +) + +func isSoftExceptionCode(code int) bool { + switch code { + case 311: + return true + case 312: + return true + case 313: + return true + case 403: + return true + case 404: + return true + case 405: + return true + case 406: + return true + + } + return false +} + +type connectionStart struct { + VersionMajor byte + VersionMinor byte + ServerProperties Table + Mechanisms string + Locales string +} + +func (me *connectionStart) id() (uint16, uint16) { + return 10, 10 +} + +func (me *connectionStart) wait() bool { + return true +} + +func (me *connectionStart) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.VersionMajor); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, me.VersionMinor); err != nil { + return + } + + if err = writeTable(w, me.ServerProperties); err != nil { + return + } + + if err = writeLongstr(w, 
me.Mechanisms); err != nil { + return + } + if err = writeLongstr(w, me.Locales); err != nil { + return + } + + return +} + +func (me *connectionStart) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.VersionMajor); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &me.VersionMinor); err != nil { + return + } + + if me.ServerProperties, err = readTable(r); err != nil { + return + } + + if me.Mechanisms, err = readLongstr(r); err != nil { + return + } + if me.Locales, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionStartOk struct { + ClientProperties Table + Mechanism string + Response string + Locale string +} + +func (me *connectionStartOk) id() (uint16, uint16) { + return 10, 11 +} + +func (me *connectionStartOk) wait() bool { + return true +} + +func (me *connectionStartOk) write(w io.Writer) (err error) { + + if err = writeTable(w, me.ClientProperties); err != nil { + return + } + + if err = writeShortstr(w, me.Mechanism); err != nil { + return + } + + if err = writeLongstr(w, me.Response); err != nil { + return + } + + if err = writeShortstr(w, me.Locale); err != nil { + return + } + + return +} + +func (me *connectionStartOk) read(r io.Reader) (err error) { + + if me.ClientProperties, err = readTable(r); err != nil { + return + } + + if me.Mechanism, err = readShortstr(r); err != nil { + return + } + + if me.Response, err = readLongstr(r); err != nil { + return + } + + if me.Locale, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionSecure struct { + Challenge string +} + +func (me *connectionSecure) id() (uint16, uint16) { + return 10, 20 +} + +func (me *connectionSecure) wait() bool { + return true +} + +func (me *connectionSecure) write(w io.Writer) (err error) { + + if err = writeLongstr(w, me.Challenge); err != nil { + return + } + + return +} + +func (me *connectionSecure) read(r io.Reader) (err error) { + + if me.Challenge, err = 
readLongstr(r); err != nil { + return + } + + return +} + +type connectionSecureOk struct { + Response string +} + +func (me *connectionSecureOk) id() (uint16, uint16) { + return 10, 21 +} + +func (me *connectionSecureOk) wait() bool { + return true +} + +func (me *connectionSecureOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, me.Response); err != nil { + return + } + + return +} + +func (me *connectionSecureOk) read(r io.Reader) (err error) { + + if me.Response, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionTune struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (me *connectionTune) id() (uint16, uint16) { + return 10, 30 +} + +func (me *connectionTune) wait() bool { + return true +} + +func (me *connectionTune) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.Heartbeat); err != nil { + return + } + + return +} + +func (me *connectionTune) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.Heartbeat); err != nil { + return + } + + return +} + +type connectionTuneOk struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (me *connectionTuneOk) id() (uint16, uint16) { + return 10, 31 +} + +func (me *connectionTuneOk) wait() bool { + return true +} + +func (me *connectionTuneOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.Heartbeat); err != nil { + 
		return
	}

	return
}

// NOTE(review): this file is generated-style AMQP 0-9-1 marshalling code.
// Field order in each write/read pair IS the wire format — keep edits mechanical.

// read decodes connection.tune-ok: three big-endian integers in wire order
// (channel-max, frame-max, heartbeat).
func (me *connectionTuneOk) read(r io.Reader) (err error) {

	if err = binary.Read(r, binary.BigEndian, &me.ChannelMax); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &me.FrameMax); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &me.Heartbeat); err != nil {
		return
	}

	return
}

// connectionOpen carries the fields of protocol method (10, 40).
// reserved1/reserved2 are transmitted on the wire but not exposed to callers.
type connectionOpen struct {
	VirtualHost string
	reserved1   string
	reserved2   bool
}

// id returns the (classId, methodId) wire identifier of this method.
func (me *connectionOpen) id() (uint16, uint16) {
	return 10, 40
}

// wait reports whether the sender expects a synchronous reply.
func (me *connectionOpen) wait() bool {
	return true
}

// write encodes the method arguments in wire order; reserved2 travels as
// bit 0 of a single flag octet.
func (me *connectionOpen) write(w io.Writer) (err error) {
	var bits byte

	if err = writeShortstr(w, me.VirtualHost); err != nil {
		return
	}
	if err = writeShortstr(w, me.reserved1); err != nil {
		return
	}

	if me.reserved2 {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

// read is the exact mirror of write.
func (me *connectionOpen) read(r io.Reader) (err error) {
	var bits byte

	if me.VirtualHost, err = readShortstr(r); err != nil {
		return
	}
	if me.reserved1, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.reserved2 = (bits&(1<<0) > 0)

	return
}

// connectionOpenOk is the reply method (10, 41); its only field is reserved.
type connectionOpenOk struct {
	reserved1 string
}

func (me *connectionOpenOk) id() (uint16, uint16) {
	return 10, 41
}

func (me *connectionOpenOk) wait() bool {
	return true
}

func (me *connectionOpenOk) write(w io.Writer) (err error) {

	if err = writeShortstr(w, me.reserved1); err != nil {
		return
	}

	return
}

func (me *connectionOpenOk) read(r io.Reader) (err error) {

	if me.reserved1, err = readShortstr(r); err != nil {
		return
	}

	return
}

// connectionClose is method (10, 50): reply code/text plus the class and
// method ids of the method that caused the close.
type connectionClose struct {
	ReplyCode uint16
	ReplyText string
	ClassId   uint16
	MethodId  uint16
}

func (me *connectionClose) id() (uint16, uint16) {
	return 10, 50
}

func (me *connectionClose) wait() bool {
	return true
}

func (me *connectionClose) write(w io.Writer) (err error) {

	if err = binary.Write(w, binary.BigEndian, me.ReplyCode); err != nil {
		return
	}

	if err = writeShortstr(w, me.ReplyText); err != nil {
		return
	}

	if err = binary.Write(w, binary.BigEndian, me.ClassId); err != nil {
		return
	}
	if err = binary.Write(w, binary.BigEndian, me.MethodId); err != nil {
		return
	}

	return
}

func (me *connectionClose) read(r io.Reader) (err error) {

	if err = binary.Read(r, binary.BigEndian, &me.ReplyCode); err != nil {
		return
	}

	if me.ReplyText, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &me.ClassId); err != nil {
		return
	}
	if err = binary.Read(r, binary.BigEndian, &me.MethodId); err != nil {
		return
	}

	return
}

// connectionCloseOk is method (10, 51); it carries no arguments.
type connectionCloseOk struct {
}

func (me *connectionCloseOk) id() (uint16, uint16) {
	return 10, 51
}

func (me *connectionCloseOk) wait() bool {
	return true
}

func (me *connectionCloseOk) write(w io.Writer) (err error) {

	return
}

func (me *connectionCloseOk) read(r io.Reader) (err error) {

	return
}

// connectionBlocked is method (10, 60); asynchronous (wait is false).
type connectionBlocked struct {
	Reason string
}

func (me *connectionBlocked) id() (uint16, uint16) {
	return 10, 60
}

func (me *connectionBlocked) wait() bool {
	return false
}

func (me *connectionBlocked) write(w io.Writer) (err error) {

	if err = writeShortstr(w, me.Reason); err != nil {
		return
	}

	return
}

func (me *connectionBlocked) read(r io.Reader) (err error) {

	if me.Reason, err = readShortstr(r); err != nil {
		return
	}

	return
}

// connectionUnblocked is method (10, 61); no arguments, asynchronous.
type connectionUnblocked struct {
}

func (me *connectionUnblocked) id() (uint16, uint16) {
	return 10, 61
}

func (me *connectionUnblocked) wait() bool {
	return false
}

func (me *connectionUnblocked) write(w io.Writer) (err error) {

	return
}

func (me *connectionUnblocked) read(r io.Reader) (err error) {

	return
}

// channelOpen is method (20, 10); its only field is reserved.
type channelOpen struct {
	reserved1 string
}

// id returns the (classId, methodId) wire identifier of this method.
func (me *channelOpen) id() (uint16, uint16) {
	return 20, 10
}

// wait reports whether the sender expects a synchronous reply.
func (me *channelOpen) wait() bool {
	return true
}

func (me *channelOpen) write(w io.Writer) (err error) {

	if err = writeShortstr(w, me.reserved1); err != nil {
		return
	}

	return
}

func (me *channelOpen) read(r io.Reader) (err error) {

	if me.reserved1, err = readShortstr(r); err != nil {
		return
	}

	return
}

// channelOpenOk is method (20, 11); note its reserved field is a longstr,
// unlike the shortstr used elsewhere.
type channelOpenOk struct {
	reserved1 string
}

func (me *channelOpenOk) id() (uint16, uint16) {
	return 20, 11
}

func (me *channelOpenOk) wait() bool {
	return true
}

func (me *channelOpenOk) write(w io.Writer) (err error) {

	if err = writeLongstr(w, me.reserved1); err != nil {
		return
	}

	return
}

func (me *channelOpenOk) read(r io.Reader) (err error) {

	if me.reserved1, err = readLongstr(r); err != nil {
		return
	}

	return
}

// channelFlow is method (20, 20); Active travels as bit 0 of a flag octet.
type channelFlow struct {
	Active bool
}

func (me *channelFlow) id() (uint16, uint16) {
	return 20, 20
}

func (me *channelFlow) wait() bool {
	return true
}

func (me *channelFlow) write(w io.Writer) (err error) {
	var bits byte

	if me.Active {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *channelFlow) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Active = (bits&(1<<0) > 0)

	return
}

// channelFlowOk is method (20, 21); asynchronous (wait is false).
type channelFlowOk struct {
	Active bool
}

func (me *channelFlowOk) id() (uint16, uint16) {
	return 20, 21
}

func (me *channelFlowOk) wait() bool {
	return false
}

func (me *channelFlowOk) write(w io.Writer) (err error) {
	var bits byte

	if me.Active {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *channelFlowOk) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Active = (bits&(1<<0) > 0)

	return
}

// channelClose is method (20, 40); same field layout as connectionClose.
type channelClose struct {
	ReplyCode uint16
	ReplyText string
	ClassId   uint16
	MethodId  uint16
}

func (me *channelClose) id() (uint16, uint16) {
	return 20, 40
}

func (me *channelClose) wait() bool {
	return true
}

func (me *channelClose) write(w io.Writer) (err error) {

	if err = binary.Write(w, binary.BigEndian, me.ReplyCode); err != nil {
		return
	}

	if err = writeShortstr(w, me.ReplyText); err != nil {
		return
	}

	if err = binary.Write(w, binary.BigEndian, me.ClassId); err != nil {
		return
	}
	if err = binary.Write(w, binary.BigEndian, me.MethodId); err != nil {
		return
	}

	return
}

func (me *channelClose) read(r io.Reader) (err error) {

	if err = binary.Read(r, binary.BigEndian, &me.ReplyCode); err != nil {
		return
	}

	if me.ReplyText, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &me.ClassId); err != nil {
		return
	}
	if err = binary.Read(r, binary.BigEndian, &me.MethodId); err != nil {
		return
	}

	return
}

// channelCloseOk is method (20, 41); no arguments.
type channelCloseOk struct {
}

func (me *channelCloseOk) id() (uint16, uint16) {
	return 20, 41
}

func (me *channelCloseOk) wait() bool {
	return true
}

func (me *channelCloseOk) write(w io.Writer) (err error) {

	return
}

func (me *channelCloseOk) read(r io.Reader) (err error) {

	return
}

// exchangeDeclare is method (40, 10). The five booleans are packed into one
// flag octet, bits 0-4 in declaration order; Arguments is a field table.
type exchangeDeclare struct {
	reserved1  uint16
	Exchange   string
	Type       string
	Passive    bool
	Durable    bool
	AutoDelete bool
	Internal   bool
	NoWait     bool
	Arguments  Table
}

func (me *exchangeDeclare) id() (uint16, uint16) {
	return 40, 10
}

// wait: no reply is expected when the NoWait flag is set.
func (me *exchangeDeclare) wait() bool {
	return true && !me.NoWait
}

func (me *exchangeDeclare) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Exchange); err != nil {
		return
	}
	if err = writeShortstr(w, me.Type); err != nil {
		return
	}

	// Pack the flags into a single octet, bits 0-4.
	if me.Passive {
		bits |= 1 << 0
	}

	if me.Durable {
		bits |= 1 << 1
	}

	if me.AutoDelete {
		bits |= 1 << 2
	}

	if me.Internal {
		bits |= 1 << 3
	}

	if me.NoWait {
		bits |= 1 << 4
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	if err = writeTable(w, me.Arguments); err != nil {
		return
	}

	return
}

func (me *exchangeDeclare) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Exchange, err = readShortstr(r); err != nil {
		return
	}
	if me.Type, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Passive = (bits&(1<<0) > 0)
	me.Durable = (bits&(1<<1) > 0)
	me.AutoDelete = (bits&(1<<2) > 0)
	me.Internal = (bits&(1<<3) > 0)
	me.NoWait = (bits&(1<<4) > 0)

	if me.Arguments, err = readTable(r); err != nil {
		return
	}

	return
}

// exchangeDeclareOk is method (40, 11); no arguments.
type exchangeDeclareOk struct {
}

func (me *exchangeDeclareOk) id() (uint16, uint16) {
	return 40, 11
}

func (me *exchangeDeclareOk) wait() bool {
	return true
}

func (me *exchangeDeclareOk) write(w io.Writer) (err error) {

	return
}

func (me *exchangeDeclareOk) read(r io.Reader) (err error) {

	return
}

// exchangeDelete is method (40, 20); IfUnused/NoWait share one flag octet.
type exchangeDelete struct {
	reserved1 uint16
	Exchange  string
	IfUnused  bool
	NoWait    bool
}

func (me *exchangeDelete) id() (uint16, uint16) {
	return 40, 20
}

func (me *exchangeDelete) wait() bool {
	return true && !me.NoWait
}

func (me *exchangeDelete) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Exchange); err != nil {
		return
	}

	if me.IfUnused {
		bits |= 1 << 0
	}

	if me.NoWait {
		bits |= 1 << 1
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *exchangeDelete) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Exchange, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.IfUnused = (bits&(1<<0) > 0)
	me.NoWait = (bits&(1<<1) > 0)

	return
}

// exchangeDeleteOk is method (40, 21); no arguments.
type exchangeDeleteOk struct {
}

func (me *exchangeDeleteOk) id() (uint16, uint16) {
	return 40, 21
}

func (me *exchangeDeleteOk) wait() bool {
	return true
}

func (me *exchangeDeleteOk) write(w io.Writer) (err error) {

	return
}

func (me *exchangeDeleteOk) read(r io.Reader) (err error) {

	return
}

// exchangeBind is method (40, 30).
type exchangeBind struct {
	reserved1   uint16
	Destination string
	Source      string
	RoutingKey  string
	NoWait      bool
	Arguments   Table
}

func (me *exchangeBind) id() (uint16, uint16) {
	return 40, 30
}

func (me *exchangeBind) wait() bool {
	return true && !me.NoWait
}

func (me *exchangeBind) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Destination); err != nil {
		return
	}
	if err = writeShortstr(w, me.Source); err != nil {
		return
	}
	if err = writeShortstr(w, me.RoutingKey); err != nil {
		return
	}

	if me.NoWait {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	if err = writeTable(w, me.Arguments); err != nil {
		return
	}

	return
}

func (me *exchangeBind) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Destination, err = readShortstr(r); err != nil {
		return
	}
	if me.Source, err = readShortstr(r); err != nil {
		return
	}
	if me.RoutingKey, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.NoWait = (bits&(1<<0) > 0)

	if me.Arguments, err = readTable(r); err != nil {
		return
	}

	return
}

// exchangeBindOk is method (40, 31); no arguments.
type exchangeBindOk struct {
}

func (me *exchangeBindOk) id() (uint16, uint16) {
	return 40, 31
}

func (me *exchangeBindOk) wait() bool {
	return true
}

func (me *exchangeBindOk) write(w io.Writer) (err error) {

	return
}

func (me *exchangeBindOk) read(r io.Reader) (err error) {

	return
}

// exchangeUnbind is method (40, 40); same field layout as exchangeBind.
type exchangeUnbind struct {
	reserved1   uint16
	Destination string
	Source      string
	RoutingKey  string
	NoWait      bool
	Arguments   Table
}

func (me *exchangeUnbind) id() (uint16, uint16) {
	return 40, 40
}

func (me *exchangeUnbind) wait() bool {
	return true && !me.NoWait
}

func (me *exchangeUnbind) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Destination); err != nil {
		return
	}
	if err = writeShortstr(w, me.Source); err != nil {
		return
	}
	if err = writeShortstr(w, me.RoutingKey); err != nil {
		return
	}

	if me.NoWait {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	if err = writeTable(w, me.Arguments); err != nil {
		return
	}

	return
}

func (me *exchangeUnbind) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Destination, err = readShortstr(r); err != nil {
		return
	}
	if me.Source, err = readShortstr(r); err != nil {
		return
	}
	if me.RoutingKey, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.NoWait = (bits&(1<<0) > 0)

	if me.Arguments, err = readTable(r); err != nil {
		return
	}

	return
}

// exchangeUnbindOk is method (40, 51) — the id gap (not 41) matches the
// published protocol numbering; do not "fix" it.
type exchangeUnbindOk struct {
}

func (me *exchangeUnbindOk) id() (uint16, uint16) {
	return 40, 51
}

func (me *exchangeUnbindOk) wait() bool {
	return true
}

func (me *exchangeUnbindOk) write(w io.Writer) (err error) {

	return
}

func (me *exchangeUnbindOk) read(r io.Reader) (err error) {

	return
}

// queueDeclare is method (50, 10); five flags packed into bits 0-4.
type queueDeclare struct {
	reserved1  uint16
	Queue      string
	Passive    bool
	Durable    bool
	Exclusive  bool
	AutoDelete bool
	NoWait     bool
	Arguments  Table
}

func (me *queueDeclare) id() (uint16, uint16) {
	return 50, 10
}

func (me *queueDeclare) wait() bool {
	return true && !me.NoWait
}

func (me *queueDeclare) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Queue); err != nil {
		return
	}

	// Pack the flags into a single octet, bits 0-4.
	if me.Passive {
		bits |= 1 << 0
	}

	if me.Durable {
		bits |= 1 << 1
	}

	if me.Exclusive {
		bits |= 1 << 2
	}

	if me.AutoDelete {
		bits |= 1 << 3
	}

	if me.NoWait {
		bits |= 1 << 4
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	if err = writeTable(w, me.Arguments); err != nil {
		return
	}

	return
}

func (me *queueDeclare) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Queue, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Passive = (bits&(1<<0) > 0)
	me.Durable = (bits&(1<<1) > 0)
	me.Exclusive = (bits&(1<<2) > 0)
	me.AutoDelete = (bits&(1<<3) > 0)
	me.NoWait = (bits&(1<<4) > 0)

	if me.Arguments, err = readTable(r); err != nil {
		return
	}

	return
}

// queueDeclareOk is method (50, 11): queue name plus two big-endian counters.
type queueDeclareOk struct {
	Queue         string
	MessageCount  uint32
	ConsumerCount uint32
}

func (me *queueDeclareOk) id() (uint16, uint16) {
	return 50, 11
}

func (me *queueDeclareOk) wait() bool {
	return true
}

func (me *queueDeclareOk) write(w io.Writer) (err error) {

	if err = writeShortstr(w, me.Queue); err != nil {
		return
	}

	if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil {
		return
	}
	if err = binary.Write(w, binary.BigEndian, me.ConsumerCount); err != nil {
		return
	}

	return
}

func (me *queueDeclareOk) read(r io.Reader) (err error) {

	if me.Queue, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil {
		return
	}
	if err = binary.Read(r, binary.BigEndian, &me.ConsumerCount); err != nil {
		return
	}

	return
}

// queueBind is method (50, 20).
type queueBind struct {
	reserved1  uint16
	Queue      string
	Exchange   string
	RoutingKey string
	NoWait     bool
	Arguments  Table
}

func (me *queueBind) id() (uint16, uint16) {
	return 50, 20
}

func (me *queueBind) wait() bool {
	return true && !me.NoWait
}

func (me *queueBind) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Queue); err != nil {
		return
	}
	if err = writeShortstr(w, me.Exchange); err != nil {
		return
	}
	if err = writeShortstr(w, me.RoutingKey); err != nil {
		return
	}

	if me.NoWait {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	if err = writeTable(w, me.Arguments); err != nil {
		return
	}

	return
}

func (me *queueBind) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Queue, err = readShortstr(r); err != nil {
		return
	}
	if me.Exchange, err = readShortstr(r); err != nil {
		return
	}
	if me.RoutingKey, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.NoWait = (bits&(1<<0) > 0)

	if me.Arguments, err = readTable(r); err != nil {
		return
	}

	return
}

// queueBindOk is method (50, 21); no arguments.
type queueBindOk struct {
}

func (me *queueBindOk) id() (uint16, uint16) {
	return 50, 21
}

func (me *queueBindOk) wait() bool {
	return true
}

func (me *queueBindOk) write(w io.Writer) (err error) {

	return
}

func (me *queueBindOk) read(r io.Reader) (err error) {

	return
}

// queueUnbind is method (50, 50); unlike queueBind it has no NoWait flag,
// so wait() is unconditionally true and no flag octet is written.
type queueUnbind struct {
	reserved1  uint16
	Queue      string
	Exchange   string
	RoutingKey string
	Arguments  Table
}

func (me *queueUnbind) id() (uint16, uint16) {
	return 50, 50
}

func (me *queueUnbind) wait() bool {
	return true
}

func (me *queueUnbind) write(w io.Writer) (err error) {

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Queue); err != nil {
		return
	}
	if err = writeShortstr(w, me.Exchange); err != nil {
		return
	}
	if err = writeShortstr(w, me.RoutingKey); err != nil {
		return
	}

	if err = writeTable(w, me.Arguments); err != nil {
		return
	}

	return
}

func (me *queueUnbind) read(r io.Reader) (err error) {

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Queue, err = readShortstr(r); err != nil {
		return
	}
	if me.Exchange, err = readShortstr(r); err != nil {
		return
	}
	if me.RoutingKey, err = readShortstr(r); err != nil {
		return
	}

	if me.Arguments, err = readTable(r); err != nil {
		return
	}

	return
}

// queueUnbindOk is method (50, 51); no arguments.
type queueUnbindOk struct {
}

func (me *queueUnbindOk) id() (uint16, uint16) {
	return 50, 51
}

func (me *queueUnbindOk) wait() bool {
	return true
}

func (me *queueUnbindOk) write(w io.Writer) (err error) {

	return
}

func (me *queueUnbindOk) read(r io.Reader) (err error) {

	return
}

// queuePurge is method (50, 30).
type queuePurge struct {
	reserved1 uint16
	Queue     string
	NoWait    bool
}

func (me *queuePurge) id() (uint16, uint16) {
	return 50, 30
}

func (me *queuePurge) wait() bool {
	return true && !me.NoWait
}

// write encodes queue.purge: reserved short, queue shortstr, NoWait flag octet.
func (me *queuePurge) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Queue); err != nil {
		return
	}

	if me.NoWait {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

// read is the exact mirror of write.
func (me *queuePurge) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Queue, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.NoWait = (bits&(1<<0) > 0)

	return
}

// queuePurgeOk is method (50, 31): a single big-endian message counter.
type queuePurgeOk struct {
	MessageCount uint32
}

func (me *queuePurgeOk) id() (uint16, uint16) {
	return 50, 31
}

func (me *queuePurgeOk) wait() bool {
	return true
}

func (me *queuePurgeOk) write(w io.Writer) (err error) {

	if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil {
		return
	}

	return
}

func (me *queuePurgeOk) read(r io.Reader) (err error) {

	if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil {
		return
	}

	return
}

// queueDelete is method (50, 40); IfUnused/IfEmpty/NoWait share one flag octet.
type queueDelete struct {
	reserved1 uint16
	Queue     string
	IfUnused  bool
	IfEmpty   bool
	NoWait    bool
}

func (me *queueDelete) id() (uint16, uint16) {
	return 50, 40
}

func (me *queueDelete) wait() bool {
	return true && !me.NoWait
}

func (me *queueDelete) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Queue); err != nil {
		return
	}

	if me.IfUnused {
		bits |= 1 << 0
	}

	if me.IfEmpty {
		bits |= 1 << 1
	}

	if me.NoWait {
		bits |= 1 << 2
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *queueDelete) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Queue, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.IfUnused = (bits&(1<<0) > 0)
	me.IfEmpty = (bits&(1<<1) > 0)
	me.NoWait = (bits&(1<<2) > 0)

	return
}

// queueDeleteOk is method (50, 41): a single big-endian message counter.
type queueDeleteOk struct {
	MessageCount uint32
}

func (me *queueDeleteOk) id() (uint16, uint16) {
	return 50, 41
}

func (me *queueDeleteOk) wait() bool {
	return true
}

func (me *queueDeleteOk) write(w io.Writer) (err error) {

	if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil {
		return
	}

	return
}

func (me *queueDeleteOk) read(r io.Reader) (err error) {

	if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil {
		return
	}

	return
}

// basicQos is method (60, 10): prefetch window plus a Global flag bit.
type basicQos struct {
	PrefetchSize  uint32
	PrefetchCount uint16
	Global        bool
}

func (me *basicQos) id() (uint16, uint16) {
	return 60, 10
}

func (me *basicQos) wait() bool {
	return true
}

func (me *basicQos) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.PrefetchSize); err != nil {
		return
	}

	if err = binary.Write(w, binary.BigEndian, me.PrefetchCount); err != nil {
		return
	}

	if me.Global {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *basicQos) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.PrefetchSize); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &me.PrefetchCount); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Global = (bits&(1<<0) > 0)

	return
}

// basicQosOk is method (60, 11); no arguments.
type basicQosOk struct {
}

func (me *basicQosOk) id() (uint16, uint16) {
	return 60, 11
}

func (me *basicQosOk) wait() bool {
	return true
}

func (me *basicQosOk) write(w io.Writer) (err error) {

	return
}

func (me *basicQosOk) read(r io.Reader) (err error) {

	return
}

// basicConsume is method (60, 20); four flags packed into bits 0-3.
type basicConsume struct {
	reserved1   uint16
	Queue       string
	ConsumerTag string
	NoLocal     bool
	NoAck       bool
	Exclusive   bool
	NoWait      bool
	Arguments   Table
}

func (me *basicConsume) id() (uint16, uint16) {
	return 60, 20
}

// wait: no reply is expected when the NoWait flag is set.
func (me *basicConsume) wait() bool {
	return true && !me.NoWait
}

func (me *basicConsume) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Queue); err != nil {
		return
	}
	if err = writeShortstr(w, me.ConsumerTag); err != nil {
		return
	}

	// Pack the flags into a single octet, bits 0-3.
	if me.NoLocal {
		bits |= 1 << 0
	}

	if me.NoAck {
		bits |= 1 << 1
	}

	if me.Exclusive {
		bits |= 1 << 2
	}

	if me.NoWait {
		bits |= 1 << 3
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	if err = writeTable(w, me.Arguments); err != nil {
		return
	}

	return
}

func (me *basicConsume) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Queue, err = readShortstr(r); err != nil {
		return
	}
	if me.ConsumerTag, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.NoLocal = (bits&(1<<0) > 0)
	me.NoAck = (bits&(1<<1) > 0)
	me.Exclusive = (bits&(1<<2) > 0)
	me.NoWait = (bits&(1<<3) > 0)

	if me.Arguments, err = readTable(r); err != nil {
		return
	}

	return
}

// basicConsumeOk is method (60, 21): the server-confirmed consumer tag.
type basicConsumeOk struct {
	ConsumerTag string
}

func (me *basicConsumeOk) id() (uint16, uint16) {
	return 60, 21
}

func (me *basicConsumeOk) wait() bool {
	return true
}

func (me *basicConsumeOk) write(w io.Writer) (err error) {

	if err = writeShortstr(w, me.ConsumerTag); err != nil {
		return
	}

	return
}

func (me *basicConsumeOk) read(r io.Reader) (err error) {

	if me.ConsumerTag, err = readShortstr(r); err != nil {
		return
	}

	return
}

// basicCancel is method (60, 30).
type basicCancel struct {
	ConsumerTag string
	NoWait      bool
}

func (me *basicCancel) id() (uint16, uint16) {
	return 60, 30
}

func (me *basicCancel) wait() bool {
	return true && !me.NoWait
}

func (me *basicCancel) write(w io.Writer) (err error) {
	var bits byte

	if err = writeShortstr(w, me.ConsumerTag); err != nil {
		return
	}

	if me.NoWait {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *basicCancel) read(r io.Reader) (err error) {
	var bits byte

	if me.ConsumerTag, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.NoWait = (bits&(1<<0) > 0)

	return
}

// basicCancelOk is method (60, 31).
type basicCancelOk struct {
	ConsumerTag string
}

func (me *basicCancelOk) id() (uint16, uint16) {
	return 60, 31
}

func (me *basicCancelOk) wait() bool {
	return true
}

func (me *basicCancelOk) write(w io.Writer) (err error) {

	if err = writeShortstr(w, me.ConsumerTag); err != nil {
		return
	}

	return
}

func (me *basicCancelOk) read(r io.Reader) (err error) {

	if me.ConsumerTag, err = readShortstr(r); err != nil {
		return
	}

	return
}

// basicPublish is method (60, 40). It is a content-carrying method:
// Properties/Body are NOT part of write/read (which handle only the method
// arguments) — they travel in separate frames via getContent/setContent.
type basicPublish struct {
	reserved1  uint16
	Exchange   string
	RoutingKey string
	Mandatory  bool
	Immediate  bool
	Properties properties
	Body       []byte
}

func (me *basicPublish) id() (uint16, uint16) {
	return 60, 40
}

func (me *basicPublish) wait() bool {
	return false
}

// getContent returns the out-of-band content frames' payload.
func (me *basicPublish) getContent() (properties, []byte) {
	return me.Properties, me.Body
}

// setContent stores the payload decoded from the content frames.
func (me *basicPublish) setContent(props properties, body []byte) {
	me.Properties, me.Body = props, body
}

func (me *basicPublish) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Exchange); err != nil {
		return
	}
	if err = writeShortstr(w, me.RoutingKey); err != nil {
		return
	}

	if me.Mandatory {
		bits |= 1 << 0
	}

	if me.Immediate {
		bits |= 1 << 1
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *basicPublish) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Exchange, err = readShortstr(r); err != nil {
		return
	}
	if me.RoutingKey, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Mandatory = (bits&(1<<0) > 0)
	me.Immediate = (bits&(1<<1) > 0)

	return
}

// basicReturn is method (60, 50); content-carrying, asynchronous.
type basicReturn struct {
	ReplyCode  uint16
	ReplyText  string
	Exchange   string
	RoutingKey string
	Properties properties
	Body       []byte
}

func (me *basicReturn) id() (uint16, uint16) {
	return 60, 50
}

func (me *basicReturn) wait() bool {
	return false
}

func (me *basicReturn) getContent() (properties, []byte) {
	return me.Properties, me.Body
}

func (me *basicReturn) setContent(props properties, body []byte) {
	me.Properties, me.Body = props, body
}

func (me *basicReturn) write(w io.Writer) (err error) {

	if err = binary.Write(w, binary.BigEndian, me.ReplyCode); err != nil {
		return
	}

	if err = writeShortstr(w, me.ReplyText); err != nil {
		return
	}
	if err = writeShortstr(w, me.Exchange); err != nil {
		return
	}
	if err = writeShortstr(w, me.RoutingKey); err != nil {
		return
	}

	return
}

func (me *basicReturn) read(r io.Reader) (err error) {

	if err = binary.Read(r, binary.BigEndian, &me.ReplyCode); err != nil {
		return
	}

	if me.ReplyText, err = readShortstr(r); err != nil {
		return
	}
	if me.Exchange, err = readShortstr(r); err != nil {
		return
	}
	if me.RoutingKey, err = readShortstr(r); err != nil {
		return
	}

	return
}

// basicDeliver is method (60, 60); content-carrying, asynchronous.
type basicDeliver struct {
	ConsumerTag string
	DeliveryTag uint64
	Redelivered bool
	Exchange    string
	RoutingKey  string
	Properties  properties
	Body        []byte
}

func (me *basicDeliver) id() (uint16, uint16) {
	return 60, 60
}

func (me *basicDeliver) wait() bool {
	return false
}

func (me *basicDeliver) getContent() (properties, []byte) {
	return me.Properties, me.Body
}

func (me *basicDeliver) setContent(props properties, body []byte) {
	me.Properties, me.Body = props, body
}

func (me *basicDeliver) write(w io.Writer) (err error) {
	var bits byte

	if err = writeShortstr(w, me.ConsumerTag); err != nil {
		return
	}

	if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil {
		return
	}

	if me.Redelivered {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	if err = writeShortstr(w, me.Exchange); err != nil {
		return
	}
	if err = writeShortstr(w, me.RoutingKey); err != nil {
		return
	}

	return
}

func (me *basicDeliver) read(r io.Reader) (err error) {
	var bits byte

	if me.ConsumerTag, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Redelivered = (bits&(1<<0) > 0)

	if me.Exchange, err = readShortstr(r); err != nil {
		return
	}
	if me.RoutingKey, err = readShortstr(r); err != nil {
		return
	}

	return
}

// basicGet is method (60, 70).
type basicGet struct {
	reserved1 uint16
	Queue     string
	NoAck     bool
}

func (me *basicGet) id() (uint16, uint16) {
	return 60, 70
}

func (me *basicGet) wait() bool {
	return true
}

func (me *basicGet) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil {
		return
	}

	if err = writeShortstr(w, me.Queue); err != nil {
		return
	}

	if me.NoAck {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *basicGet) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil {
		return
	}

	if me.Queue, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.NoAck = (bits&(1<<0) > 0)

	return
}

// basicGetOk is method (60, 71); content-carrying, synchronous.
type basicGetOk struct {
	DeliveryTag  uint64
	Redelivered  bool
	Exchange     string
	RoutingKey   string
	MessageCount uint32
	Properties   properties
	Body         []byte
}

func (me *basicGetOk) id() (uint16, uint16) {
	return 60, 71
}

func (me *basicGetOk) wait() bool {
	return true
}

func (me *basicGetOk) getContent() (properties, []byte) {
	return me.Properties, me.Body
}

func (me *basicGetOk) setContent(props properties, body []byte) {
	me.Properties, me.Body = props, body
}

func (me *basicGetOk) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil {
		return
	}

	if me.Redelivered {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	if err = writeShortstr(w, me.Exchange); err != nil {
		return
	}
	if err = writeShortstr(w, me.RoutingKey); err != nil {
		return
	}

	if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil {
		return
	}

	return
}

func (me *basicGetOk) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Redelivered = (bits&(1<<0) > 0)

	if me.Exchange, err = readShortstr(r); err != nil {
		return
	}
	if me.RoutingKey, err = readShortstr(r); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil {
		return
	}

	return
}

// basicGetEmpty is method (60, 72); its only field is reserved.
type basicGetEmpty struct {
	reserved1 string
}

func (me *basicGetEmpty) id() (uint16, uint16) {
	return 60, 72
}

func (me *basicGetEmpty) wait() bool {
	return true
}

func (me *basicGetEmpty) write(w io.Writer) (err error) {

	if err = writeShortstr(w, me.reserved1); err != nil {
		return
	}

	return
}

func (me *basicGetEmpty) read(r io.Reader) (err error) {

	if me.reserved1, err = readShortstr(r); err != nil {
		return
	}

	return
}

// basicAck is method (60, 80); asynchronous.
type basicAck struct {
	DeliveryTag uint64
	Multiple    bool
}

func (me *basicAck) id() (uint16, uint16) {
	return 60, 80
}

func (me *basicAck) wait() bool {
	return false
}

func (me *basicAck) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil {
		return
	}

	if me.Multiple {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *basicAck) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Multiple = (bits&(1<<0) > 0)

	return
}

// basicReject is method (60, 90); asynchronous.
type basicReject struct {
	DeliveryTag uint64
	Requeue     bool
}

func (me *basicReject) id() (uint16, uint16) {
	return 60, 90
}

func (me *basicReject) wait() bool {
	return false
}

func (me *basicReject) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil {
		return
	}

	if me.Requeue {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *basicReject) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Requeue = (bits&(1<<0) > 0)

	return
}

// basicRecoverAsync is method (60, 100); asynchronous variant of recover.
type basicRecoverAsync struct {
	Requeue bool
}

func (me *basicRecoverAsync) id() (uint16, uint16) {
	return 60, 100
}

func (me *basicRecoverAsync) wait() bool {
	return false
}

func (me *basicRecoverAsync) write(w io.Writer) (err error) {
	var bits byte

	if me.Requeue {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *basicRecoverAsync) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Requeue = (bits&(1<<0) > 0)

	return
}

// basicRecover is method (60, 110); synchronous variant (expects recover-ok).
type basicRecover struct {
	Requeue bool
}

func (me *basicRecover) id() (uint16, uint16) {
	return 60, 110
}

func (me *basicRecover) wait() bool {
	return true
}

func (me *basicRecover) write(w io.Writer) (err error) {
	var bits byte

	if me.Requeue {
		bits |= 1 << 0
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *basicRecover) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Requeue = (bits&(1<<0) > 0)

	return
}

// basicRecoverOk is method (60, 111); no arguments.
type basicRecoverOk struct {
}

func (me *basicRecoverOk) id() (uint16, uint16) {
	return 60, 111
}

func (me *basicRecoverOk) wait() bool {
	return true
}

func (me *basicRecoverOk) write(w io.Writer) (err error) {

	return
}

func (me *basicRecoverOk) read(r io.Reader) (err error) {

	return
}

// basicNack is method (60, 120); Multiple/Requeue occupy bits 0 and 1.
type basicNack struct {
	DeliveryTag uint64
	Multiple    bool
	Requeue     bool
}

func (me *basicNack) id() (uint16, uint16) {
	return 60, 120
}

func (me *basicNack) wait() bool {
	return false
}

func (me *basicNack) write(w io.Writer) (err error) {
	var bits byte

	if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil {
		return
	}

	if me.Multiple {
		bits |= 1 << 0
	}

	if me.Requeue {
		bits |= 1 << 1
	}

	if err = binary.Write(w, binary.BigEndian, bits); err != nil {
		return
	}

	return
}

func (me *basicNack) read(r io.Reader) (err error) {
	var bits byte

	if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil {
		return
	}

	if err = binary.Read(r, binary.BigEndian, &bits); err != nil {
		return
	}
	me.Multiple = (bits&(1<<0) > 0)
	me.Requeue = (bits&(1<<1) > 0)

	return
}

// txSelect is method (90, 10); no arguments.
type txSelect struct {
}

func (me *txSelect) id() (uint16, uint16) {
	return 90, 10
}

func (me *txSelect) wait() bool {
	return true
}

func (me *txSelect) write(w io.Writer) (err error) {

	return
}

func (me *txSelect) read(r io.Reader) (err error) {

	return
}

// txSelectOk is method (90, 11); no arguments.
type txSelectOk struct {
}

func (me *txSelectOk) id() (uint16, uint16) {
	return 90, 11
}

func (me *txSelectOk) wait() bool {
	return true
}

func (me *txSelectOk) write(w io.Writer) (err error) {

	return
}

func (me *txSelectOk) read(r io.Reader) (err error) {

	return
}

// txCommit is method (90, 20); no arguments.
type txCommit struct {
}

func (me *txCommit) id() (uint16, uint16) {
	return 90, 20
}

func (me *txCommit) wait() bool {
	return true
}

func (me *txCommit) write(w io.Writer) (err error) {

	return
}

func (me *txCommit) read(r io.Reader) (err error) {

	return
}

// txCommitOk is method (90, 21); no arguments.
type txCommitOk struct {
}

func (me *txCommitOk) id() (uint16, uint16) {
	return 90, 21
}

func (me *txCommitOk) wait() bool {
	return true
}

func (me *txCommitOk) write(w io.Writer) (err error) {

	return
}

func (me *txCommitOk) read(r io.Reader) (err error) {

	return
}

// txRollback is method (90, 30); no arguments.
type txRollback struct {
}

func (me *txRollback) id() (uint16, uint16) {
	return 90, 30
}

func (me *txRollback) wait() bool {
	return true
}

func (me *txRollback) write(w io.Writer) (err error) {

	return
}

func (me *txRollback) read(r io.Reader) (err error) {

	return
}

type 
txRollbackOk struct { +} + +func (me *txRollbackOk) id() (uint16, uint16) { + return 90, 31 +} + +func (me *txRollbackOk) wait() bool { + return true +} + +func (me *txRollbackOk) write(w io.Writer) (err error) { + + return +} + +func (me *txRollbackOk) read(r io.Reader) (err error) { + + return +} + +type confirmSelect struct { + Nowait bool +} + +func (me *confirmSelect) id() (uint16, uint16) { + return 85, 10 +} + +func (me *confirmSelect) wait() bool { + return true +} + +func (me *confirmSelect) write(w io.Writer) (err error) { + var bits byte + + if me.Nowait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *confirmSelect) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Nowait = (bits&(1<<0) > 0) + + return +} + +type confirmSelectOk struct { +} + +func (me *confirmSelectOk) id() (uint16, uint16) { + return 85, 11 +} + +func (me *confirmSelectOk) wait() bool { + return true +} + +func (me *confirmSelectOk) write(w io.Writer) (err error) { + + return +} + +func (me *confirmSelectOk) read(r io.Reader) (err error) { + + return +} + +func (me *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err error) { + mf := &methodFrame{ + ChannelId: channel, + } + + if err = binary.Read(me.r, binary.BigEndian, &mf.ClassId); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &mf.MethodId); err != nil { + return + } + + switch mf.ClassId { + + case 10: // connection + switch mf.MethodId { + + case 10: // connection start + //fmt.Println("NextMethod: class:10 method:10") + method := &connectionStart{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // connection start-ok + //fmt.Println("NextMethod: class:10 method:11") + method := &connectionStartOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + 
case 20: // connection secure + //fmt.Println("NextMethod: class:10 method:20") + method := &connectionSecure{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // connection secure-ok + //fmt.Println("NextMethod: class:10 method:21") + method := &connectionSecureOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // connection tune + //fmt.Println("NextMethod: class:10 method:30") + method := &connectionTune{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // connection tune-ok + //fmt.Println("NextMethod: class:10 method:31") + method := &connectionTuneOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // connection open + //fmt.Println("NextMethod: class:10 method:40") + method := &connectionOpen{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 41: // connection open-ok + //fmt.Println("NextMethod: class:10 method:41") + method := &connectionOpenOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 50: // connection close + //fmt.Println("NextMethod: class:10 method:50") + method := &connectionClose{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 51: // connection close-ok + //fmt.Println("NextMethod: class:10 method:51") + method := &connectionCloseOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 60: // connection blocked + //fmt.Println("NextMethod: class:10 method:60") + method := &connectionBlocked{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 61: // connection unblocked + //fmt.Println("NextMethod: class:10 method:61") + method := &connectionUnblocked{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown 
method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 20: // channel + switch mf.MethodId { + + case 10: // channel open + //fmt.Println("NextMethod: class:20 method:10") + method := &channelOpen{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // channel open-ok + //fmt.Println("NextMethod: class:20 method:11") + method := &channelOpenOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // channel flow + //fmt.Println("NextMethod: class:20 method:20") + method := &channelFlow{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // channel flow-ok + //fmt.Println("NextMethod: class:20 method:21") + method := &channelFlowOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // channel close + //fmt.Println("NextMethod: class:20 method:40") + method := &channelClose{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 41: // channel close-ok + //fmt.Println("NextMethod: class:20 method:41") + method := &channelCloseOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 40: // exchange + switch mf.MethodId { + + case 10: // exchange declare + //fmt.Println("NextMethod: class:40 method:10") + method := &exchangeDeclare{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // exchange declare-ok + //fmt.Println("NextMethod: class:40 method:11") + method := &exchangeDeclareOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // exchange delete + //fmt.Println("NextMethod: class:40 method:20") + method := &exchangeDelete{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // exchange delete-ok + 
//fmt.Println("NextMethod: class:40 method:21") + method := &exchangeDeleteOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // exchange bind + //fmt.Println("NextMethod: class:40 method:30") + method := &exchangeBind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // exchange bind-ok + //fmt.Println("NextMethod: class:40 method:31") + method := &exchangeBindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // exchange unbind + //fmt.Println("NextMethod: class:40 method:40") + method := &exchangeUnbind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 51: // exchange unbind-ok + //fmt.Println("NextMethod: class:40 method:51") + method := &exchangeUnbindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 50: // queue + switch mf.MethodId { + + case 10: // queue declare + //fmt.Println("NextMethod: class:50 method:10") + method := &queueDeclare{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // queue declare-ok + //fmt.Println("NextMethod: class:50 method:11") + method := &queueDeclareOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // queue bind + //fmt.Println("NextMethod: class:50 method:20") + method := &queueBind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // queue bind-ok + //fmt.Println("NextMethod: class:50 method:21") + method := &queueBindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 50: // queue unbind + //fmt.Println("NextMethod: class:50 method:50") + method := &queueUnbind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method 
+ + case 51: // queue unbind-ok + //fmt.Println("NextMethod: class:50 method:51") + method := &queueUnbindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // queue purge + //fmt.Println("NextMethod: class:50 method:30") + method := &queuePurge{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // queue purge-ok + //fmt.Println("NextMethod: class:50 method:31") + method := &queuePurgeOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // queue delete + //fmt.Println("NextMethod: class:50 method:40") + method := &queueDelete{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 41: // queue delete-ok + //fmt.Println("NextMethod: class:50 method:41") + method := &queueDeleteOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 60: // basic + switch mf.MethodId { + + case 10: // basic qos + //fmt.Println("NextMethod: class:60 method:10") + method := &basicQos{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // basic qos-ok + //fmt.Println("NextMethod: class:60 method:11") + method := &basicQosOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // basic consume + //fmt.Println("NextMethod: class:60 method:20") + method := &basicConsume{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // basic consume-ok + //fmt.Println("NextMethod: class:60 method:21") + method := &basicConsumeOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // basic cancel + //fmt.Println("NextMethod: class:60 method:30") + method := &basicCancel{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = 
method + + case 31: // basic cancel-ok + //fmt.Println("NextMethod: class:60 method:31") + method := &basicCancelOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // basic publish + //fmt.Println("NextMethod: class:60 method:40") + method := &basicPublish{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 50: // basic return + //fmt.Println("NextMethod: class:60 method:50") + method := &basicReturn{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 60: // basic deliver + //fmt.Println("NextMethod: class:60 method:60") + method := &basicDeliver{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 70: // basic get + //fmt.Println("NextMethod: class:60 method:70") + method := &basicGet{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 71: // basic get-ok + //fmt.Println("NextMethod: class:60 method:71") + method := &basicGetOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 72: // basic get-empty + //fmt.Println("NextMethod: class:60 method:72") + method := &basicGetEmpty{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 80: // basic ack + //fmt.Println("NextMethod: class:60 method:80") + method := &basicAck{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 90: // basic reject + //fmt.Println("NextMethod: class:60 method:90") + method := &basicReject{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 100: // basic recover-async + //fmt.Println("NextMethod: class:60 method:100") + method := &basicRecoverAsync{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 110: // basic recover + //fmt.Println("NextMethod: class:60 method:110") + method := &basicRecover{} + if err = method.read(me.r); err != 
nil { + return + } + mf.Method = method + + case 111: // basic recover-ok + //fmt.Println("NextMethod: class:60 method:111") + method := &basicRecoverOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 120: // basic nack + //fmt.Println("NextMethod: class:60 method:120") + method := &basicNack{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 90: // tx + switch mf.MethodId { + + case 10: // tx select + //fmt.Println("NextMethod: class:90 method:10") + method := &txSelect{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // tx select-ok + //fmt.Println("NextMethod: class:90 method:11") + method := &txSelectOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // tx commit + //fmt.Println("NextMethod: class:90 method:20") + method := &txCommit{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // tx commit-ok + //fmt.Println("NextMethod: class:90 method:21") + method := &txCommitOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // tx rollback + //fmt.Println("NextMethod: class:90 method:30") + method := &txRollback{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // tx rollback-ok + //fmt.Println("NextMethod: class:90 method:31") + method := &txRollbackOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 85: // confirm + switch mf.MethodId { + + case 10: // confirm select + //fmt.Println("NextMethod: class:85 method:10") + method := &confirmSelect{} + if err = method.read(me.r); err != nil { + return + } + 
mf.Method = method + + case 11: // confirm select-ok + //fmt.Println("NextMethod: class:85 method:11") + method := &confirmSelectOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + default: + return nil, fmt.Errorf("Bad method frame, unknown class %d", mf.ClassId) + } + + return mf, nil +} diff --git a/vendor/github.com/streadway/amqp/types.go b/vendor/github.com/streadway/amqp/types.go new file mode 100644 index 000000000..8071bf7cd --- /dev/null +++ b/vendor/github.com/streadway/amqp/types.go @@ -0,0 +1,390 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" + "io" + "time" +) + +var ( + // Errors that this library could return/emit from a channel or connection + ErrClosed = &Error{Code: ChannelError, Reason: "channel/connection is not open"} + ErrChannelMax = &Error{Code: ChannelError, Reason: "channel id space exhausted"} + ErrSASL = &Error{Code: AccessRefused, Reason: "SASL could not negotiate a shared mechanism"} + ErrCredentials = &Error{Code: AccessRefused, Reason: "username or password not allowed"} + ErrVhost = &Error{Code: AccessRefused, Reason: "no access to this vhost"} + ErrSyntax = &Error{Code: SyntaxError, Reason: "invalid field or value inside of a frame"} + ErrFrame = &Error{Code: FrameError, Reason: "frame could not be parsed"} + ErrCommandInvalid = &Error{Code: CommandInvalid, Reason: "unexpected command received"} + ErrUnexpectedFrame = &Error{Code: UnexpectedFrame, Reason: "unexpected frame received"} + ErrFieldType = &Error{Code: SyntaxError, Reason: "unsupported table field type"} +) + +// Error captures the code and reason a channel or connection has been closed +// by the 
server. +type Error struct { + Code int // constant code from the specification + Reason string // description of the error + Server bool // true when initiated from the server, false when from this library + Recover bool // true when this error can be recovered by retrying later or with differnet parameters +} + +func newError(code uint16, text string) *Error { + return &Error{ + Code: int(code), + Reason: text, + Recover: isSoftExceptionCode(int(code)), + Server: true, + } +} + +func (me Error) Error() string { + return fmt.Sprintf("Exception (%d) Reason: %q", me.Code, me.Reason) +} + +// Used by header frames to capture routing and header information +type properties struct { + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implemention use - Transient (1) or Persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user id + AppId string // application use - creating application + reserved1 string // was cluster-id - process for buffer consumption +} + +// DeliveryMode. Transient means higher throughput but messages will not be +// restored on broker restart. The delivery mode of publishings is unrelated +// to the durability of the queues they reside on. Transient messages will +// not be restored to durable queues, persistent messages will be restored to +// durable queues and lost on non-durable queues during server restart. +// +// This remains typed as uint8 to match Publishing.DeliveryMode. 
Other +// delivery modes specific to custom queue implementations are not enumerated +// here. +const ( + Transient uint8 = 1 + Persistent uint8 = 2 +) + +// The property flags are an array of bits that indicate the presence or +// absence of each property value in sequence. The bits are ordered from most +// high to low - bit 15 indicates the first property. +const ( + flagContentType = 0x8000 + flagContentEncoding = 0x4000 + flagHeaders = 0x2000 + flagDeliveryMode = 0x1000 + flagPriority = 0x0800 + flagCorrelationId = 0x0400 + flagReplyTo = 0x0200 + flagExpiration = 0x0100 + flagMessageId = 0x0080 + flagTimestamp = 0x0040 + flagType = 0x0020 + flagUserId = 0x0010 + flagAppId = 0x0008 + flagReserved1 = 0x0004 +) + +// Queue captures the current server state of the queue on the server returned +// from Channel.QueueDeclare or Channel.QueueInspect. +type Queue struct { + Name string // server confirmed or generated name + Messages int // count of messages not awaiting acknowledgment + Consumers int // number of consumers receiving deliveries +} + +// Publishing captures the client message sent to the server. The fields +// outside of the Headers table included in this struct mirror the underlying +// fields in the content frame. They use native types for convenience and +// efficiency. +type Publishing struct { + // Application or exchange specific fields, + // the headers exchange will inspect this field. 
+ Headers Table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // Transient (0 or 1) or Persistent (2) + Priority uint8 // 0 to 9 + CorrelationId string // correlation identifier + ReplyTo string // address to to reply to (ex: RPC) + Expiration string // message expiration spec + MessageId string // message identifier + Timestamp time.Time // message timestamp + Type string // message type name + UserId string // creating user id - ex: "guest" + AppId string // creating application id + + // The application specific payload of the message + Body []byte +} + +// Blocking notifies the server's TCP flow control of the Connection. When a +// server hits a memory or disk alarm it will block all connections until the +// resources are reclaimed. Use NotifyBlock on the Connection to receive these +// events. +type Blocking struct { + Active bool // TCP pushback active/inactive on server + Reason string // Server reason for activation +} + +// Confirmation notifies the acknowledgment or negative acknowledgement of a +// publishing identified by its delivery tag. Use NotifyPublish on the Channel +// to consume these events. +type Confirmation struct { + DeliveryTag uint64 // A 1 based counter of publishings from when the channel was put in Confirm mode + Ack bool // True when the server succesfully received the publishing +} + +// Decimal matches the AMQP decimal type. Scale is the number of decimal +// digits Scale == 2, Value == 12345, Decimal == 123.45 +type Decimal struct { + Scale uint8 + Value int32 +} + +// Table stores user supplied fields of the following types: +// +// bool +// byte +// float32 +// float64 +// int16 +// int32 +// int64 +// nil +// string +// time.Time +// amqp.Decimal +// amqp.Table +// []byte +// []interface{} - containing above types +// +// Functions taking a table will immediately fail when the table contains a +// value of an unsupported type. 
+// +// The caller must be specific in which precision of integer it wishes to +// encode. +// +// Use a type assertion when reading values from a table for type converstion. +// +// RabbitMQ expects int32 for integer values. +// +type Table map[string]interface{} + +func validateField(f interface{}) error { + switch fv := f.(type) { + case nil, bool, byte, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time: + return nil + + case []interface{}: + for _, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("in array %s", err) + } + } + return nil + + case Table: + for k, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("table field %q %s", k, err) + } + } + return nil + } + + return fmt.Errorf("value %t not supported", f) +} + +func (t Table) Validate() error { + return validateField(t) +} + +// Heap interface for maintaining delivery tags +type tagSet []uint64 + +func (me tagSet) Len() int { return len(me) } +func (me tagSet) Less(i, j int) bool { return (me)[i] < (me)[j] } +func (me tagSet) Swap(i, j int) { (me)[i], (me)[j] = (me)[j], (me)[i] } +func (me *tagSet) Push(tag interface{}) { *me = append(*me, tag.(uint64)) } +func (me *tagSet) Pop() interface{} { + val := (*me)[len(*me)-1] + *me = (*me)[:len(*me)-1] + return val +} + +type message interface { + id() (uint16, uint16) + wait() bool + read(io.Reader) error + write(io.Writer) error +} + +type messageWithContent interface { + message + getContent() (properties, []byte) + setContent(properties, []byte) +} + +/* +The base interface implemented as: + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a 'frame-end' octet that detects +malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size 
octets octet + +To read a frame, we: + + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or “gathering reads” to avoid doing three separate +system calls to read a frame. + +*/ +type frame interface { + write(io.Writer) error + channel() uint16 +} + +type reader struct { + r io.Reader +} + +type writer struct { + w io.Writer +} + +// Implements the frame interface for Connection RPC +type protocolHeader struct{} + +func (protocolHeader) write(w io.Writer) error { + _, err := w.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1}) + return err +} + +func (protocolHeader) channel() uint16 { + panic("only valid as initial handshake") +} + +/* +Method frames carry the high-level protocol commands (which we call "methods"). +One method frame carries one command. The method frame payload has this format: + + 0 2 4 + +----------+-----------+-------------- - - + | class-id | method-id | arguments... + +----------+-----------+-------------- - - + short short ... + +To process a method frame, we: + 1. Read the method frame payload. + 2. Unpack it into a structure. A given method always has the same structure, + so we can unpack the method rapidly. 3. Check that the method is allowed in + the current context. + 4. Check that the method arguments are valid. + 5. Execute the method. + +Method frame bodies are constructed as a list of AMQP data fields (bits, +integers, strings and string tables). The marshalling code is trivially +generated directly from the protocol specifications, and can be very rapid. 
+*/ +type methodFrame struct { + ChannelId uint16 + ClassId uint16 + MethodId uint16 + Method message +} + +func (me *methodFrame) channel() uint16 { return me.ChannelId } + +/* +Heartbeating is a technique designed to undo one of TCP/IP's features, namely +its ability to recover from a broken physical connection by closing only after +a quite long time-out. In some scenarios we need to know very rapidly if a +peer is disconnected or not responding for other reasons (e.g. it is looping). +Since heartbeating can be done at a low level, we implement this as a special +type of frame that peers exchange at the transport level, rather than as a +class method. +*/ +type heartbeatFrame struct { + ChannelId uint16 +} + +func (me *heartbeatFrame) channel() uint16 { return me.ChannelId } + +/* +Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally +defined as carrying content. When a peer sends such a method frame, it always +follows it with a content header and zero or more content body frames. + +A content header frame has this format: + + 0 2 4 12 14 + +----------+--------+-----------+----------------+------------- - - + | class-id | weight | body size | property flags | property list... + +----------+--------+-----------+----------------+------------- - - + short short long long short remainder... + +We place content body in distinct frames (rather than including it in the +method) so that AMQP may support "zero copy" techniques in which content is +never marshalled or encoded. We place the content properties in their own +frame so that recipients can selectively discard contents they do not want to +process +*/ +type headerFrame struct { + ChannelId uint16 + ClassId uint16 + weight uint16 + Size uint64 + Properties properties +} + +func (me *headerFrame) channel() uint16 { return me.ChannelId } + +/* +Content is the application data we carry from client-to-client via the AMQP +server. 
Content is, roughly speaking, a set of properties plus a binary data +part. The set of allowed properties are defined by the Basic class, and these +form the "content header frame". The data can be any size, and MAY be broken +into several (or many) chunks, each forming a "content body frame". + +Looking at the frames for a specific channel, as they pass on the wire, we +might see something like this: + + [method] + [method] [header] [body] [body] + [method] + ... +*/ +type bodyFrame struct { + ChannelId uint16 + Body []byte +} + +func (me *bodyFrame) channel() uint16 { return me.ChannelId } diff --git a/vendor/github.com/streadway/amqp/uri.go b/vendor/github.com/streadway/amqp/uri.go new file mode 100644 index 000000000..582464db5 --- /dev/null +++ b/vendor/github.com/streadway/amqp/uri.go @@ -0,0 +1,170 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "fmt" + "net/url" + "strconv" + "strings" +) + +var errURIScheme = errors.New("AMQP scheme must be either 'amqp://' or 'amqps://'") + +var schemePorts = map[string]int{ + "amqp": 5672, + "amqps": 5671, +} + +var defaultURI = URI{ + Scheme: "amqp", + Host: "localhost", + Port: 5672, + Username: "guest", + Password: "guest", + Vhost: "/", +} + +// URI represents a parsed AMQP URI string. +type URI struct { + Scheme string + Host string + Port int + Username string + Password string + Vhost string +} + +// ParseURI attempts to parse the given AMQP URI according to the spec. +// See http://www.rabbitmq.com/uri-spec.html. 
+// +// Default values for the fields are: +// +// Scheme: amqp +// Host: localhost +// Port: 5672 +// Username: guest +// Password: guest +// Vhost: / +// +func ParseURI(uri string) (URI, error) { + me := defaultURI + + u, err := url.Parse(uri) + if err != nil { + return me, err + } + + defaultPort, okScheme := schemePorts[u.Scheme] + + if okScheme { + me.Scheme = u.Scheme + } else { + return me, errURIScheme + } + + host, port := splitHostPort(u.Host) + + if host != "" { + me.Host = host + } + + if port != "" { + port32, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return me, err + } + me.Port = int(port32) + } else { + me.Port = defaultPort + } + + if u.User != nil { + me.Username = u.User.Username() + if password, ok := u.User.Password(); ok { + me.Password = password + } + } + + if u.Path != "" { + if strings.HasPrefix(u.Path, "/") { + if u.Host == "" && strings.HasPrefix(u.Path, "///") { + // net/url doesn't handle local context authorities and leaves that up + // to the scheme handler. In our case, we translate amqp:/// into the + // default host and whatever the vhost should be + if len(u.Path) > 3 { + me.Vhost = u.Path[3:] + } + } else if len(u.Path) > 1 { + me.Vhost = u.Path[1:] + } + } else { + me.Vhost = u.Path + } + } + + return me, nil +} + +// Splits host:port, host, [ho:st]:port, or [ho:st]. Unlike net.SplitHostPort +// which splits :port, host:port or [host]:port +// +// Handles hosts that have colons that are in brackets like [::1]:http +func splitHostPort(addr string) (host, port string) { + i := strings.LastIndex(addr, ":") + + if i >= 0 { + host, port = addr[:i], addr[i+1:] + + if len(port) > 0 && port[len(port)-1] == ']' && addr[0] == '[' { + // we've split on an inner colon, the port was missing outside of the + // brackets so use the full addr. 
We could assert that host should not + // contain any colons here + host, port = addr, "" + } + } else { + host = addr + } + + return +} + +// PlainAuth returns a PlainAuth structure based on the parsed URI's +// Username and Password fields. +func (me URI) PlainAuth() *PlainAuth { + return &PlainAuth{ + Username: me.Username, + Password: me.Password, + } +} + +func (me URI) String() string { + var authority string + + if me.Username != defaultURI.Username || me.Password != defaultURI.Password { + authority += me.Username + + if me.Password != defaultURI.Password { + authority += ":" + me.Password + } + + authority += "@" + } + + authority += me.Host + + if defaultPort, found := schemePorts[me.Scheme]; !found || defaultPort != me.Port { + authority += ":" + strconv.FormatInt(int64(me.Port), 10) + } + + var vhost string + if me.Vhost != defaultURI.Vhost { + vhost = me.Vhost + } + + return fmt.Sprintf("%s://%s/%s", me.Scheme, authority, url.QueryEscape(vhost)) +} diff --git a/vendor/github.com/streadway/amqp/write.go b/vendor/github.com/streadway/amqp/write.go new file mode 100644 index 000000000..d392ca237 --- /dev/null +++ b/vendor/github.com/streadway/amqp/write.go @@ -0,0 +1,411 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "io" + "math" + "time" +) + +func (me *writer) WriteFrame(frame frame) (err error) { + if err = frame.write(me.w); err != nil { + return + } + + if buf, ok := me.w.(*bufio.Writer); ok { + err = buf.Flush() + } + + return +} + +func (me *methodFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + + if me.Method == nil { + return errors.New("malformed frame: missing method") + } + + class, method := me.Method.id() + + if err = binary.Write(&payload, binary.BigEndian, class); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, method); err != nil { + return + } + + if err = me.Method.write(&payload); err != nil { + return + } + + return writeFrame(w, frameMethod, me.ChannelId, payload.Bytes()) +} + +// Heartbeat +// +// Payload is empty +func (me *heartbeatFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameHeartbeat, me.ChannelId, []byte{}) +} + +// CONTENT HEADER +// 0 2 4 12 14 +// +----------+--------+-----------+----------------+------------- - - +// | class-id | weight | body size | property flags | property list... +// +----------+--------+-----------+----------------+------------- - - +// short short long long short remainder... +// +func (me *headerFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + var zeroTime time.Time + + if err = binary.Write(&payload, binary.BigEndian, me.ClassId); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, me.weight); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, me.Size); err != nil { + return + } + + // First pass will build the mask to be serialized, second pass will serialize + // each of the fields that appear in the mask. 
+ + var mask uint16 + + if len(me.Properties.ContentType) > 0 { + mask = mask | flagContentType + } + if len(me.Properties.ContentEncoding) > 0 { + mask = mask | flagContentEncoding + } + if me.Properties.Headers != nil && len(me.Properties.Headers) > 0 { + mask = mask | flagHeaders + } + if me.Properties.DeliveryMode > 0 { + mask = mask | flagDeliveryMode + } + if me.Properties.Priority > 0 { + mask = mask | flagPriority + } + if len(me.Properties.CorrelationId) > 0 { + mask = mask | flagCorrelationId + } + if len(me.Properties.ReplyTo) > 0 { + mask = mask | flagReplyTo + } + if len(me.Properties.Expiration) > 0 { + mask = mask | flagExpiration + } + if len(me.Properties.MessageId) > 0 { + mask = mask | flagMessageId + } + if me.Properties.Timestamp != zeroTime { + mask = mask | flagTimestamp + } + if len(me.Properties.Type) > 0 { + mask = mask | flagType + } + if len(me.Properties.UserId) > 0 { + mask = mask | flagUserId + } + if len(me.Properties.AppId) > 0 { + mask = mask | flagAppId + } + + if err = binary.Write(&payload, binary.BigEndian, mask); err != nil { + return + } + + if hasProperty(mask, flagContentType) { + if err = writeShortstr(&payload, me.Properties.ContentType); err != nil { + return + } + } + if hasProperty(mask, flagContentEncoding) { + if err = writeShortstr(&payload, me.Properties.ContentEncoding); err != nil { + return + } + } + if hasProperty(mask, flagHeaders) { + if err = writeTable(&payload, me.Properties.Headers); err != nil { + return + } + } + if hasProperty(mask, flagDeliveryMode) { + if err = binary.Write(&payload, binary.BigEndian, me.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(mask, flagPriority) { + if err = binary.Write(&payload, binary.BigEndian, me.Properties.Priority); err != nil { + return + } + } + if hasProperty(mask, flagCorrelationId) { + if err = writeShortstr(&payload, me.Properties.CorrelationId); err != nil { + return + } + } + if hasProperty(mask, flagReplyTo) { + if err = 
writeShortstr(&payload, me.Properties.ReplyTo); err != nil { + return + } + } + if hasProperty(mask, flagExpiration) { + if err = writeShortstr(&payload, me.Properties.Expiration); err != nil { + return + } + } + if hasProperty(mask, flagMessageId) { + if err = writeShortstr(&payload, me.Properties.MessageId); err != nil { + return + } + } + if hasProperty(mask, flagTimestamp) { + if err = binary.Write(&payload, binary.BigEndian, uint64(me.Properties.Timestamp.Unix())); err != nil { + return + } + } + if hasProperty(mask, flagType) { + if err = writeShortstr(&payload, me.Properties.Type); err != nil { + return + } + } + if hasProperty(mask, flagUserId) { + if err = writeShortstr(&payload, me.Properties.UserId); err != nil { + return + } + } + if hasProperty(mask, flagAppId) { + if err = writeShortstr(&payload, me.Properties.AppId); err != nil { + return + } + } + + return writeFrame(w, frameHeader, me.ChannelId, payload.Bytes()) +} + +// Body +// +// Payload is one byterange from the full body who's size is declared in the +// Header frame +func (me *bodyFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameBody, me.ChannelId, me.Body) +} + +func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err error) { + end := []byte{frameEnd} + size := uint(len(payload)) + + _, err = w.Write([]byte{ + byte(typ), + byte((channel & 0xff00) >> 8), + byte((channel & 0x00ff) >> 0), + byte((size & 0xff000000) >> 24), + byte((size & 0x00ff0000) >> 16), + byte((size & 0x0000ff00) >> 8), + byte((size & 0x000000ff) >> 0), + }) + + if err != nil { + return + } + + if _, err = w.Write(payload); err != nil { + return + } + + if _, err = w.Write(end); err != nil { + return + } + + return +} + +func writeShortstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length uint8 = uint8(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return 
+} + +func writeLongstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length uint32 = uint32(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func writeField(w io.Writer, value interface{}) (err error) { + var buf [9]byte + var enc []byte + + switch v := value.(type) { + case bool: + buf[0] = 't' + if v { + buf[1] = byte(1) + } else { + buf[1] = byte(0) + } + enc = buf[:2] + + case byte: + buf[0] = 'b' + buf[1] = byte(v) + enc = buf[:2] + + case int16: + buf[0] = 's' + binary.BigEndian.PutUint16(buf[1:3], uint16(v)) + enc = buf[:3] + + case int32: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int64: + buf[0] = 'l' + binary.BigEndian.PutUint64(buf[1:9], uint64(v)) + enc = buf[:9] + + case float32: + buf[0] = 'f' + binary.BigEndian.PutUint32(buf[1:5], math.Float32bits(v)) + enc = buf[:5] + + case float64: + buf[0] = 'd' + binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(v)) + enc = buf[:9] + + case Decimal: + buf[0] = 'D' + buf[1] = byte(v.Scale) + binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value)) + enc = buf[:6] + + case string: + buf[0] = 'S' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + enc = append(buf[:5], []byte(v)...) 
+ + case []interface{}: // field-array + buf[0] = 'A' + + sec := new(bytes.Buffer) + for _, val := range v { + if err = writeField(sec, val); err != nil { + return + } + } + + binary.BigEndian.PutUint32(buf[1:5], uint32(sec.Len())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + if _, err = w.Write(sec.Bytes()); err != nil { + return + } + + return + + case time.Time: + buf[0] = 'T' + binary.BigEndian.PutUint64(buf[1:9], uint64(v.Unix())) + enc = buf[:9] + + case Table: + if _, err = w.Write([]byte{'F'}); err != nil { + return + } + return writeTable(w, v) + + case []byte: + buf[0] = 'x' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + if _, err = w.Write(buf[0:5]); err != nil { + return + } + if _, err = w.Write(v); err != nil { + return + } + return + + case nil: + buf[0] = 'V' + enc = buf[:1] + + default: + return ErrFieldType + } + + _, err = w.Write(enc) + + return +} + +func writeTable(w io.Writer, table Table) (err error) { + var buf bytes.Buffer + + for key, val := range table { + if err = writeShortstr(&buf, key); err != nil { + return + } + if err = writeField(&buf, val); err != nil { + return + } + } + + return writeLongstr(w, string(buf.Bytes())) +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/CHANGELOG-3.0.md b/vendor/gopkg.in/olivere/elastic.v3/CHANGELOG-3.0.md new file mode 100644 index 000000000..07f3e66bf --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/CHANGELOG-3.0.md @@ -0,0 +1,363 @@ +# Elastic 3.0 + +Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes. + +We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft. + +So, to summarize: + +1. 
Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained. +2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch. + +The rest of the document is a list of all changes in Elastic 3.0. + +## Pointer types + +All types have changed to be pointer types, not value types. This not only is cleaner but also simplifies the API as illustrated by the following example: + +Example for Elastic 2.0 (old): + +```go +q := elastic.NewMatchAllQuery() +res, err := elastic.Search("one").Query(&q).Do() // notice the & here +``` + +Example for Elastic 3.0 (new): + +```go +q := elastic.NewMatchAllQuery() +res, err := elastic.Search("one").Query(q).Do() // no more & +// ... which can be simplified as: +res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do() +``` + +It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046). + +## Query/filter merge + +One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`). + +The practical aspect of the merge is that you can now basically use queries where once you had to use filters instead. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay! + +Notice that some methods still come by "filter", e.g. `PostFilter`. However, they accept a `Query` now when they used to accept a `Filter` before. + +Example for Elastic 2.0 (old): + +```go +q := elastic.NewMatchAllQuery() +f := elastic.NewTermFilter("tag", "important") +res, err := elastic.Search().Index("one").Query(&q).PostFilter(f) +``` + +Example for Elastic 3.0 (new): + +```go +q := elastic.NewMatchAllQuery() +f := elastic.NewTermQuery("tag", "important") // it's a query now! 
+res, err := elastic.Search().Index("one").Query(q).PostFilter(f) +``` + +## Facets are removed + +[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now. + +## Errors + +Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible by the consumer. + +Errors and all its details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59). + +### HTTP Status 404 (Not Found) + +When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0. + +Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to e.g. check for existence of indices or documents. All other responses will return an error. + +To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below). + +The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0. 
+ +Example for Elastic 2.0 (old): + +```go +res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do() +if err != nil { + // Something else went wrong (but 404 is NOT an error in Elastic 2.0) +} +if !res.Found { + // Document has not been found +} +``` + +Example for Elastic 3.0 (new): + +```go +res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do() +if err != nil { + if elastic.IsNotFound(err) { + // Document has not been found + } else { + // Something else went wrong + } +} +``` + +### HTTP Status 408 (Timeouts) + +Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is raised. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API. + +To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper. + +Example for Elastic 2.0 (old): + +```go +health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do() +if err != nil { + // ... +} +if health.TimedOut { + // We have a timeout +} +``` + +Example for Elastic 3.0 (new): + +```go +health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do() +if elastic.IsTimeout(err) { + // We have a timeout +} +``` + +### Bulk Errors + +The error response of a bulk operation used to be a simple string in Elasticsearch 1.x. +In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error. +These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206). 
+ +### Removed specific Elastic errors + +The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message. + +## Numeric types + +Elastic 3.0 has settled to use `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`. + +## Pluralization + +Some services accept zero, one or more indices or types to operate on. +E.g. the `SearchService` accepts a list of zero, one, or more indices to +search and therefore had a func called `Index(index string)` and a func +called `Indices(indices ...string)`. + +Elastic 3.0 now only uses the singular form that, when applicable, accepts a +variadic type. E.g. in the case of the `SearchService`, you now only have +one func with the following signature: `Index(indices ...string)`. + +Notice this is only limited to `Index(...)` and `Type(...)`. There are other +services with variadic functions. These have not been changed. + +## Multiple calls to variadic functions + +Some services with variadic functions have cleared the underlying slice when +called while other services just add to the existing slice. This has now been +normalized to always add to the underlying slice. + +Example for Elastic 2.0 (old): + +```go +// Would only clear scroll id "two" +// because ScrollId cleared the values when called multiple times +client.ClearScroll().ScrollId("one").ScrollId("two").Do() +``` + +Example for Elastic 3.0 (new): + +```go +// Now (correctly) clears both scroll id "one" and "two" +// because ScrollId no longer clears the values when called multiple times +client.ClearScroll().ScrollId("one").ScrollId("two").Do() +``` + +## Ping service requires URL + +The `Ping` service raised some issues because it is different from all +other services. 
If not explicitly given a URL, it always pings `127.0.0.1:9200`. + +Users expected to ping the cluster, but that is not possible as the cluster +can be a set of many nodes: So which node do we ping then? + +To make it more clear, the `Ping` function on the client now requires users +to explicitly set the URL of the node to ping. + +## Meta fields + +Many of the meta fields e.g. `_parent` or `_routing` are now +[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields) +and are no longer returned as parts of the `fields` object. We had to change +larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0. + +Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default). + +## HasParentQuery / HasChildQuery + +`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. It is now in line with the Java API. + +Example for Elastic 2.0 (old): + +```go +allQ := elastic.NewMatchAllQuery() +q := elastic.NewHasChildFilter("tweet").Query(&allQ) +``` + +Example for Elastic 3.0 (new): + +```go +q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery()) +``` + +## SetBasicAuth client option + +You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html). 
+ +Example: + +```go +client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret")) +if err != nil { + log.Fatal(err) +} +``` + +## Delete-by-Query API + +The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html). + +Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService` you will most probably get a 404. + +An older version of this document stated the following: + +> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed. +> +> Example for Elastic 3.0 (new): +> +> ```go +> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do() +> if err == elastic.ErrPluginNotFound { +> // Delete By Query API is not available +> } +> ``` + +I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check if the plugin is missing in case of an error. This is not only slow, but it also puts logic into a service where it should really be just opaque and return the response of Elasticsearch. + +If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play. + +## HasPlugin and SetRequiredPlugins + +Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html). + +You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. 
You can test this now with the `HasPlugin(name string)` helper in the client. + +Example for Elastic 3.0 (new): + +```go +err, found := client.HasPlugin("delete-by-query") +if err == nil && found { + // ... Delete By Query API is available +} +``` + +To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If the plugin is not installed, the client wouldn't be created in the first place. + +```go +// Will raise an error if the "delete-by-query" plugin is NOT installed +client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query")) +if err != nil { + log.Fatal(err) +} +``` + +Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file. + +## Common Query has been renamed to Common Terms Query + +The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring). + +## Remove `MoreLikeThis` and `MoreLikeThisField` + +The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`. + +## Remove Filtered Query + +With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite many of your application code, it might be a good chance to get rid of things that are deprecated as well. 
So you might simply change your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). + +## Remove FuzzyLikeThis and FuzzyLikeThisField + +Both have been removed from Elasticsearch 2.0 as well. + +## Remove LimitFilter + +The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects. + +## Remove `_cache` and `_cache_key` from filters + +Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching). + +## Partial fields are gone + +Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html). + +## Scripting + +A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. aggregations) where you could just add the script as a string, specify the scripting language, add parameters etc. With Elastic 3.0, you should now always use the `Script` type. + +Example for Elastic 2.0 (old): + +```go +update, err := client.Update().Index("twitter").Type("tweet").Id("1"). + Script("ctx._source.retweets += num"). + ScriptParams(map[string]interface{}{"num": 1}). + Upsert(map[string]interface{}{"retweets": 0}). 
+ Do() +``` + +Example for Elastic 3.0 (new): + +```go +update, err := client.Update().Index("twitter").Type("tweet").Id("1"). + Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)). + Upsert(map[string]interface{}{"retweets": 0}). + Do() +``` + +## Cluster State + +The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`. + +## Unexported structs in response + +Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0 however, we changed that (most) sub-structs are now unexported, meaning: You can only pass around the whole response, not sub-structures of it. This makes it easier for restructuring responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example. + +## Add offset to Histogram aggregation + +Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option. + +## Services + +### REST API specification + +As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure. + +Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process. + +This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes. + +At the same time, the file names of the services are renamed to match the REST API specification naming. + +### REST API Test Suite + +The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. 
Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well. + +This process is not completed though. + + diff --git a/vendor/gopkg.in/olivere/elastic.v3/CONTRIBUTING.md b/vendor/gopkg.in/olivere/elastic.v3/CONTRIBUTING.md new file mode 100644 index 000000000..4fbc79dd0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# How to contribute + +Elastic is an open-source project and we are looking forward to each +contribution. + +Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level +overview of the features of Elasticsearch. However, Elastic tries to resemble +the Java API of Elasticsearch which you can find [on GitHub](https://github.com/elastic/elasticsearch). + +This explains why you might think that some options are strange or missing +in Elastic, while often they're just different. Please check the Java API first. + +Having said that: Elasticsearch is moving fast and it might be very likely +that we missed some features or changes. Feel free to change that. + +## Your Pull Request + +To make it easy to review and understand your changes, please keep the +following things in mind before submitting your pull request: + +* You compared the existing implementation with the Java API, did you? +* Please work on the latest possible state of `olivere/elastic`. + Use `release-branch.v2` for targeting Elasticsearch 1.x and + `release-branch.v3` for targeting 2.x. +* Create a branch dedicated to your change. +* If possible, write a test case which confirms your change. +* Make sure your changes and your tests work with all recent versions of + Elasticsearch. We currently support Elasticsearch 1.7.x in the + release-branch.v2 and Elasticsearch 2.x in the release-branch.v3. 
+* Test your changes before creating a pull request (`go test ./...`). +* Don't mix several features or bug fixes in one pull request. +* Create a meaningful commit message. +* Explain your change, e.g. provide a link to the issue you are fixing and + probably a link to the Elasticsearch documentation and/or source code. +* Format your source with `go fmt`. + +## Additional Resources + +* [GitHub documentation](http://help.github.com/) +* [GitHub pull request documentation](http://help.github.com/send-pull-requests/) diff --git a/vendor/gopkg.in/olivere/elastic.v3/CONTRIBUTORS b/vendor/gopkg.in/olivere/elastic.v3/CONTRIBUTORS new file mode 100644 index 000000000..dff2a14d7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/CONTRIBUTORS @@ -0,0 +1,60 @@ +# This is a list of people who have contributed code +# to the Elastic repository. +# +# It is just my small "thank you" to all those that helped +# making Elastic what it is. +# +# Please keep this list sorted. + +Adam Alix [@adamalix](https://github.com/adamalix) +Adam Weiner [@adamweiner](https://github.com/adamweiner) +Alexey Sharov [@nizsheanez](https://github.com/nizsheanez) +Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux) +Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va) +Brady Love [@bradylove](https://github.com/bradylove) +Bruce Zhou [@brucez-isell](https://github.com/brucez-isell) +Chris M [@tebriel](https://github.com/tebriel) +Christophe Courtaut [@kri5](https://github.com/kri5) +Conrad Pankoff [@deoxxa](https://github.com/deoxxa) +Corey Scott [@corsc](https://github.com/corsc) +Daniel Barrett [@shendaras](https://github.com/shendaras) +Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath) +Daniel Imfeld [@dimfeld](https://github.com/dimfeld) +Dwayne Schultz [@myshkin5](https://github.com/myshkin5) +Faolan C-P [@fcheslack](https://github.com/fcheslack) +Gerhard Häring [@ghaering](https://github.com/ghaering) +Guilherme Silveira 
[@guilherme-santos](https://github.com/guilherme-santos) +Guillaume J. Charmes [@creack](https://github.com/creack) +Han Yu [@MoonighT](https://github.com/MoonighT) +Harrison Wright [@wright8191](https://github.com/wright8191) +Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy) +Isaac Saldana [@isaldana](https://github.com/isaldana) +Jack Lindamood [@cep21](https://github.com/cep21) +Joe Buck [@four2five](https://github.com/four2five) +John Barker [@j16r](https://github.com/j16r) +John Goodall [@jgoodall](https://github.com/jgoodall) +Junpei Tsuji [@jun06t](https://github.com/jun06t) +Kenta SUZUKI [@suzuken](https://github.com/suzuken) +Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh) +Mara Kim [@autochthe](https://github.com/autochthe) +Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato) +Medhi Bechina [@mdzor](https://github.com/mdzor) +naimulhaider [@naimulhaider](https://github.com/naimulhaider) +navins [@ishare](https://github.com/ishare) +Naoya Tsutsumi [@tutuming](https://github.com/tutuming) +Nicholas Wolff [@nwolff](https://github.com/nwolff) +Nick Whyte [@nickw444](https://github.com/nickw444) +Orne Brocaar [@brocaar](https://github.com/brocaar) +Radoslaw Wesolowski [r--w](https://github.com/r--w) +Ryan Schmukler [@rschmukler](https://github.com/rschmukler) +Sacheendra talluri [@sacheendra](https://github.com/sacheendra) +Sean DuBois [@Sean-Der](https://github.com/Sean-Der) +Shalin LK [@shalinlk](https://github.com/shalinlk) +Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic) +Stuart Warren [@Woz](https://github.com/stuart-warren) +Sundar [@sundarv85](https://github.com/sundarv85) +Tetsuya Morimoto [@t2y](https://github.com/t2y) +TimeEmit [@TimeEmit](https://github.com/timeemit) +TusharM [@tusharm](https://github.com/tusharm) +wolfkdy [@wolfkdy](https://github.com/wolfkdy) +zakthomas [@zakthomas](https://github.com/zakthomas) diff --git a/vendor/gopkg.in/olivere/elastic.v3/ISSUE_TEMPLATE.md 
b/vendor/gopkg.in/olivere/elastic.v3/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..558cd6711 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/ISSUE_TEMPLATE.md @@ -0,0 +1,16 @@ +Please use the following questions as a guideline to help me answer +your issue/question without further inquiry. Thank you. + +### Which version of Elastic are you using? + +[ ] elastic.v2 (for Elasticsearch 1.x) +[ ] elastic.v3 (for Elasticsearch 2.x) + +### Please describe the expected behavior + + +### Please describe the actual behavior + + +### Any steps to reproduce the behavior? + diff --git a/vendor/github.com/facebookgo/clock/LICENSE b/vendor/gopkg.in/olivere/elastic.v3/LICENSE similarity index 66% rename from vendor/github.com/facebookgo/clock/LICENSE rename to vendor/gopkg.in/olivere/elastic.v3/LICENSE index ce212cb1c..8b22cdb60 100644 --- a/vendor/github.com/facebookgo/clock/LICENSE +++ b/vendor/gopkg.in/olivere/elastic.v3/LICENSE @@ -1,21 +1,20 @@ The MIT License (MIT) - -Copyright (c) 2014 Ben Johnson +Copyright © 2012-2015 Oliver Eilhard Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal +of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/vendor/gopkg.in/olivere/elastic.v3/README.md b/vendor/gopkg.in/olivere/elastic.v3/README.md new file mode 100644 index 000000000..cc4416eec --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/README.md @@ -0,0 +1,421 @@ +# Elastic + +Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the +[Go](http://www.golang.org/) programming language. + +[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v3)](https://travis-ci.org/olivere/elastic) +[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v3) +[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE) + +See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic. + + +## Releases + +**The release branches (e.g. [`release-branch.v3`](https://github.com/olivere/elastic/tree/release-branch.v3)) are actively being worked on and can break at any time. 
If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).** + +Here's the version matrix: + +Elasticsearch version | Elastic version -| Package URL +----------------------|------------------|------------ +2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3)) +1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2)) +0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1)) + +**Example:** + +You have installed Elasticsearch 2.1.1 and want to use Elastic. As listed above, you should use Elastic 3.0. So you first install the stable release of Elastic 3.0 from gopkg.in. + +```sh +$ go get gopkg.in/olivere/elastic.v3 +``` + +You then import it with this import path: + +```go +import "gopkg.in/olivere/elastic.v3" +``` + +### Elastic 3.0 + +Elastic 3.0 targets Elasticsearch 2.0 and later. Elasticsearch 2.0.0 was [released on 28th October 2015](https://www.elastic.co/blog/elasticsearch-2-0-0-released). + +Notice that there are a lot of [breaking changes in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html) and we used this as an opportunity to [clean up and refactor Elastic as well](https://github.com/olivere/elastic/blob/release-branch.v3/CHANGELOG-3.0.md). + +### Elastic 2.0 + +Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2). + +### Elastic 1.0 + +Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic +to a recent version. 
+ +However, if you cannot update for some reason, don't worry. Version 1.0 is +still available. All you need to do is go-get it and change your import path +as described above. + + +## Status + +We use Elastic in production since 2012. Elastic is stable but the API changes +now and then. We strive for API compatibility. +However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html) +and we sometimes have to adapt. + +Having said that, there have been no big API changes that required you +to rewrite your application big time. More often than not it's renaming APIs +and adding/removing features so that Elastic is in sync with Elasticsearch. + +Elastic has been used in production with the following Elasticsearch versions: +0.90, 1.0-1.7. Furthermore, we use [Travis CI](https://travis-ci.org/) +to test Elastic with the most recent versions of Elasticsearch and Go. +See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml) +file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic) +for the results. + +Elasticsearch has quite a few features. Most of them are implemented +by Elastic. I add features and APIs as required. It's straightforward +to implement missing pieces. I'm accepting pull requests :-) + +Having said that, I hope you find the project useful. + + +## Getting Started + +The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go). The client connects to Elasticsearch on `http://127.0.0.1:9200` by default. + +You typically create one client for your app. Here's a complete example of +creating a client, creating an index, adding a document, executing a search etc. 
+ +```go +// Create a client +client, err := elastic.NewClient() +if err != nil { + // Handle error +} + +// Create an index +_, err = client.CreateIndex("twitter").Do() +if err != nil { + // Handle error + panic(err) +} + +// Add a document to the index +tweet := Tweet{User: "olivere", Message: "Take Five"} +_, err = client.Index(). + Index("twitter"). + Type("tweet"). + Id("1"). + BodyJson(tweet). + Refresh(true). + Do() +if err != nil { + // Handle error + panic(err) +} + +// Search with a term query +termQuery := elastic.NewTermQuery("user", "olivere") +searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do() // execute +if err != nil { + // Handle error + panic(err) +} + +// searchResult is of type SearchResult and returns hits, suggestions, +// and all kinds of other information from Elasticsearch. +fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + +// Each is a convenience function that iterates over hits in a search result. +// It makes sure you don't need to check for nil values in the response. +// However, it ignores errors in serialization. If you want full control +// over iterating the hits, see below. +var ttyp Tweet +for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) { + if t, ok := item.(Tweet); ok { + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } +} +// TotalHits is another convenience function that works even when something goes wrong. +fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits()) + +// Here's how you iterate through results with full control over each step. 
+if searchResult.Hits.TotalHits > 0 { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } +} else { + // No hits + fmt.Print("Found no tweets\n") +} + +// Delete the index again +_, err = client.DeleteIndex("twitter").Do() +if err != nil { + // Handle error + panic(err) +} +``` + +Here's a [link to a complete working example](https://gist.github.com/olivere/114347ff9d9cfdca7bdc0ecea8b82263). + +See the [wiki](https://github.com/olivere/elastic/wiki) for more details. + + +## API Status + +### Document APIs + +- [x] Index API +- [x] Get API +- [x] Delete API +- [x] Delete By Query API +- [x] Update API +- [x] Update By Query API +- [x] Multi Get API +- [x] Bulk API +- [x] Reindex API +- [x] Term Vectors +- [x] Multi termvectors API + +### Search APIs + +- [x] Search +- [x] Search Template +- [ ] Search Shards API +- [x] Suggesters + - [x] Term Suggester + - [x] Phrase Suggester + - [x] Completion Suggester + - [x] Context Suggester +- [x] Multi Search API +- [x] Count API +- [ ] Search Exists API +- [ ] Validate API +- [x] Explain API +- [x] Percolator API +- [x] Field Stats API + +### Aggregations + +- Metrics Aggregations + - [x] Avg + - [x] Cardinality + - [x] Extended Stats + - [x] Geo Bounds + - [x] Max + - [x] Min + - [x] Percentiles + - [x] Percentile Ranks + - [ ] Scripted Metric + - [x] Stats + - [x] Sum + - [x] Top Hits + - [x] Value Count +- Bucket Aggregations + - [x] Children + - [x] Date Histogram + - [x] Date Range + - [x] Filter + - [x] Filters + - [x] Geo Distance + - [ ] GeoHash Grid + - [x] Global + - [x] Histogram + - [x] IPv4 Range 
+ - [x] Missing + - [x] Nested + - [x] Range + - [x] Reverse Nested + - [x] Sampler + - [x] Significant Terms + - [x] Terms +- Pipeline Aggregations + - [x] Avg Bucket + - [x] Derivative + - [x] Max Bucket + - [x] Min Bucket + - [x] Sum Bucket + - [x] Moving Average + - [x] Cumulative Sum + - [x] Bucket Script + - [x] Bucket Selector + - [x] Serial Differencing +- [x] Aggregation Metadata + +### Indices APIs + +- [x] Create Index +- [x] Delete Index +- [x] Get Index +- [x] Indices Exists +- [x] Open / Close Index +- [x] Put Mapping +- [x] Get Mapping +- [ ] Get Field Mapping +- [ ] Types Exists +- [x] Index Aliases +- [x] Update Indices Settings +- [x] Get Settings +- [ ] Analyze +- [x] Index Templates +- [x] Warmers +- [x] Indices Stats +- [ ] Indices Segments +- [ ] Indices Recovery +- [ ] Clear Cache +- [x] Flush +- [x] Refresh +- [x] Optimize +- [ ] Shadow Replica Indices +- [ ] Upgrade + +### cat APIs + +The cat APIs are not implemented as of now. We think they are better suited for operating with Elasticsearch on the command line. 
+ +- [ ] cat aliases +- [ ] cat allocation +- [ ] cat count +- [ ] cat fielddata +- [ ] cat health +- [ ] cat indices +- [ ] cat master +- [ ] cat nodes +- [ ] cat pending tasks +- [ ] cat plugins +- [ ] cat recovery +- [ ] cat thread pool +- [ ] cat shards +- [ ] cat segments + +### Cluster APIs + +- [x] Cluster Health +- [x] Cluster State +- [x] Cluster Stats +- [ ] Pending Cluster Tasks +- [ ] Cluster Reroute +- [ ] Cluster Update Settings +- [ ] Nodes Stats +- [x] Nodes Info +- [x] Task Management API +- [ ] Nodes hot_threads + +### Query DSL + +- [x] Match All Query +- [x] Inner hits +- Full text queries + - [x] Match Query + - [x] Multi Match Query + - [x] Common Terms Query + - [x] Query String Query + - [x] Simple Query String Query +- Term level queries + - [x] Term Query + - [x] Terms Query + - [x] Range Query + - [x] Exists Query + - [x] Missing Query + - [x] Prefix Query + - [x] Wildcard Query + - [x] Regexp Query + - [x] Fuzzy Query + - [x] Type Query + - [x] Ids Query +- Compound queries + - [x] Constant Score Query + - [x] Bool Query + - [x] Dis Max Query + - [x] Function Score Query + - [x] Boosting Query + - [x] Indices Query + - [x] And Query (deprecated) + - [x] Not Query + - [x] Or Query (deprecated) + - [ ] Filtered Query (deprecated) + - [ ] Limit Query (deprecated) +- Joining queries + - [x] Nested Query + - [x] Has Child Query + - [x] Has Parent Query +- Geo queries + - [ ] GeoShape Query + - [x] Geo Bounding Box Query + - [x] Geo Distance Query + - [ ] Geo Distance Range Query + - [x] Geo Polygon Query + - [ ] Geohash Cell Query +- Specialized queries + - [x] More Like This Query + - [x] Template Query + - [x] Script Query +- Span queries + - [ ] Span Term Query + - [ ] Span Multi Term Query + - [ ] Span First Query + - [ ] Span Near Query + - [ ] Span Or Query + - [ ] Span Not Query + - [ ] Span Containing Query + - [ ] Span Within Query + +### Modules + +- [ ] Snapshot and Restore + +### Sorting + +- [x] Sort by score +- [x] Sort by field 
+- [x] Sort by geo distance +- [x] Sort by script + +### Scan + +Scrolling through documents (e.g. `search_type=scan`) are implemented via +the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well. + + +## How to contribute + +Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md). + +## Credits + +Thanks a lot for the great folks working hard on +[Elasticsearch](http://www.elasticsearch.org/) +and +[Go](http://www.golang.org/). + +Elastic uses portions of the +[uritemplates](https://github.com/jtacoma/uritemplates) library +by Joshua Tacoma and +[backoff](https://github.com/cenkalti/backoff) by Cenk Altı. + +## LICENSE + +MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/) +or the LICENSE file provided in the repository for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/backoff/LICENSE b/vendor/gopkg.in/olivere/elastic.v3/backoff/LICENSE new file mode 100644 index 000000000..f6f2dcc97 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/backoff/LICENSE @@ -0,0 +1,22 @@ +Portions of this code rely on this LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff.go b/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff.go new file mode 100644 index 000000000..f6d7ad9a0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff.go @@ -0,0 +1,159 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package backoff + +import ( + "math" + "math/rand" + "sync" + "sync/atomic" + "time" +) + +// Backoff is an interface for different types of backoff algorithms. +type Backoff interface { + Next() time.Duration + Reset() +} + +// Stop is used as a signal to indicate that no more retries should be made. +const Stop time.Duration = -1 + +// -- Simple Backoff -- + +// SimpleBackoff takes a list of fixed values for backoff intervals. +// Each call to Next returns the next value from that fixed list. +// After each value is returned, subsequent calls to Next will only return +// the last element. The caller may specify if the values are "jittered". +type SimpleBackoff struct { + sync.Mutex + ticks []int + index int + jitter bool + stop bool +} + +// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified +// list of fixed intervals in milliseconds. +func NewSimpleBackoff(ticks ...int) *SimpleBackoff { + return &SimpleBackoff{ + ticks: ticks, + index: 0, + jitter: false, + stop: false, + } +} + +// Jitter, when set, randomizes to return a value of [0.5*value .. 1.5*value]. 
+func (b *SimpleBackoff) Jitter(doJitter bool) *SimpleBackoff { + b.Lock() + defer b.Unlock() + b.jitter = doJitter + return b +} + +// SendStop, when enables, makes Next to return Stop once +// the list of values is exhausted. +func (b *SimpleBackoff) SendStop(doStop bool) *SimpleBackoff { + b.Lock() + defer b.Unlock() + b.stop = doStop + return b +} + +// Next returns the next wait interval. +func (b *SimpleBackoff) Next() time.Duration { + b.Lock() + defer b.Unlock() + + i := b.index + if i >= len(b.ticks) { + if b.stop { + return Stop + } + i = len(b.ticks) - 1 + b.index = i + } else { + b.index++ + } + + ms := b.ticks[i] + if b.jitter { + ms = jitter(ms) + } + return time.Duration(ms) * time.Millisecond +} + +// Reset resets SimpleBackoff. +func (b *SimpleBackoff) Reset() { + b.Lock() + b.index = 0 + b.Unlock() +} + +// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis]. +func jitter(millis int) int { + if millis <= 0 { + return 0 + } + return millis/2 + rand.Intn(millis) +} + +// -- Exponential -- + +// ExponentialBackoff implements the simple exponential backoff described by +// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html. +type ExponentialBackoff struct { + sync.Mutex + t float64 // initial timeout (in msec) + f float64 // exponential factor (e.g. 2) + m float64 // maximum timeout (in msec) + n int64 // number of retries + stop bool // indicates whether Next should send "Stop" whan max timeout is reached +} + +// NewExponentialBackoff returns a ExponentialBackoff backoff policy. +// Use initialTimeout to set the first/minimal interval +// and maxTimeout to set the maximum wait interval. 
+func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff { + return &ExponentialBackoff{ + t: float64(int64(initialTimeout / time.Millisecond)), + f: 2.0, + m: float64(int64(maxTimeout / time.Millisecond)), + n: 0, + stop: false, + } +} + +// SendStop, when enables, makes Next to return Stop once +// the maximum timeout is reached. +func (b *ExponentialBackoff) SendStop(doStop bool) *ExponentialBackoff { + b.Lock() + defer b.Unlock() + b.stop = doStop + return b +} + +// Next returns the next wait interval. +func (t *ExponentialBackoff) Next() time.Duration { + t.Lock() + defer t.Unlock() + + n := float64(atomic.AddInt64(&t.n, 1)) + r := 1.0 + rand.Float64() // random number in [1..2] + m := math.Min(r*t.t*math.Pow(t.f, n), t.m) + if t.stop && m >= t.m { + return Stop + } + d := time.Duration(int64(m)) * time.Millisecond + return d +} + +// Reset resets the backoff policy so that it can be reused. +func (t *ExponentialBackoff) Reset() { + t.Lock() + t.n = 0 + t.Unlock() +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/backoff/retry.go b/vendor/gopkg.in/olivere/elastic.v3/backoff/retry.go new file mode 100644 index 000000000..701e03ccc --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/backoff/retry.go @@ -0,0 +1,53 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +// This file is (c) 2014 Cenk Altı and governed by the MIT license. +// See https://github.com/cenkalti/backoff for original source. + +package backoff + +import "time" + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). 
+// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the function f until it does not return error or BackOff stops. +// f is guaranteed to be run at least once. +// It is the caller's responsibility to reset b after Retry returns. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) } + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b Backoff, notify Notify) error { + var err error + var next time.Duration + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + if next = b.Next(); next == Stop { + return err + } + + if notify != nil { + notify(err, next) + } + + time.Sleep(next) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk.go b/vendor/gopkg.in/olivere/elastic.v3/bulk.go new file mode 100644 index 000000000..2003c2743 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk.go @@ -0,0 +1,353 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "errors" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// BulkService allows for batching bulk requests and sending them to +// Elasticsearch in one roundtrip. Use the Add method with BulkIndexRequest, +// BulkUpdateRequest, and BulkDeleteRequest to add bulk requests to a batch, +// then use Do to send them to Elasticsearch. +// +// BulkService will be reset after each Do call. In other words, you can +// reuse BulkService to send many batches. You do not have to create a new +// BulkService for each batch. 
+// +// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-bulk.html +// for more details. +type BulkService struct { + client *Client + + index string + typ string + requests []BulkableRequest + timeout string + refresh *bool + pretty bool + + sizeInBytes int64 +} + +// NewBulkService initializes a new BulkService. +func NewBulkService(client *Client) *BulkService { + builder := &BulkService{ + client: client, + requests: make([]BulkableRequest, 0), + } + return builder +} + +func (s *BulkService) reset() { + s.requests = make([]BulkableRequest, 0) + s.sizeInBytes = 0 +} + +// Index specifies the index to use for all batches. You may also leave +// this blank and specify the index in the individual bulk requests. +func (s *BulkService) Index(index string) *BulkService { + s.index = index + return s +} + +// Type specifies the type to use for all batches. You may also leave +// this blank and specify the type in the individual bulk requests. +func (s *BulkService) Type(typ string) *BulkService { + s.typ = typ + return s +} + +// Timeout is a global timeout for processing bulk requests. This is a +// server-side timeout, i.e. it tells Elasticsearch the time after which +// it should stop processing. +func (s *BulkService) Timeout(timeout string) *BulkService { + s.timeout = timeout + return s +} + +// Refresh, when set to true, tells Elasticsearch to make the bulk requests +// available to search immediately after being processed. Normally, this +// only happens after a specified refresh interval. +func (s *BulkService) Refresh(refresh bool) *BulkService { + s.refresh = &refresh + return s +} + +// Pretty tells Elasticsearch whether to return a formatted JSON response. +func (s *BulkService) Pretty(pretty bool) *BulkService { + s.pretty = pretty + return s +} + +// Add adds bulkable requests, i.e. BulkIndexRequest, BulkUpdateRequest, +// and/or BulkDeleteRequest. 
+func (s *BulkService) Add(requests ...BulkableRequest) *BulkService { + for _, r := range requests { + s.requests = append(s.requests, r) + s.sizeInBytes += s.estimateSizeInBytes(r) + } + return s +} + +// EstimatedSizeInBytes returns the estimated size of all bulkable +// requests added via Add. +func (s *BulkService) EstimatedSizeInBytes() int64 { + return s.sizeInBytes +} + +// estimateSizeInBytes returns the estimates size of the given +// bulkable request, i.e. BulkIndexRequest, BulkUpdateRequest, and +// BulkDeleteRequest. +func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 { + lines, _ := r.Source() + size := 0 + for _, line := range lines { + // +1 for the \n + size += len(line) + 1 + } + return int64(size) +} + +// NumberOfActions returns the number of bulkable requests that need to +// be sent to Elasticsearch on the next batch. +func (s *BulkService) NumberOfActions() int { + return len(s.requests) +} + +func (s *BulkService) bodyAsString() (string, error) { + buf := bytes.NewBufferString("") + + for _, req := range s.requests { + source, err := req.Source() + if err != nil { + return "", err + } + for _, line := range source { + _, err := buf.WriteString(fmt.Sprintf("%s\n", line)) + if err != nil { + return "", nil + } + } + } + + return buf.String(), nil +} + +// Do sends the batched requests to Elasticsearch. Note that, when successful, +// you can reuse the BulkService for the next batch as the list of bulk +// requests is cleared on success. +func (s *BulkService) Do() (*BulkResponse, error) { + // No actions? 
+ if s.NumberOfActions() == 0 { + return nil, errors.New("elastic: No bulk actions to commit") + } + + // Get body + body, err := s.bodyAsString() + if err != nil { + return nil, err + } + + // Build url + path := "/" + if s.index != "" { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": s.index, + }) + if err != nil { + return nil, err + } + path += index + "/" + } + if s.typ != "" { + typ, err := uritemplates.Expand("{type}", map[string]string{ + "type": s.typ, + }) + if err != nil { + return nil, err + } + path += typ + "/" + } + path += "_bulk" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return results + ret := new(BulkResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + + // Reset so the request can be reused + s.reset() + + return ret, nil +} + +// BulkResponse is a response to a bulk execution. 
+// +// Example: +// { +// "took":3, +// "errors":false, +// "items":[{ +// "index":{ +// "_index":"index1", +// "_type":"tweet", +// "_id":"1", +// "_version":3, +// "status":201 +// } +// },{ +// "index":{ +// "_index":"index2", +// "_type":"tweet", +// "_id":"2", +// "_version":3, +// "status":200 +// } +// },{ +// "delete":{ +// "_index":"index1", +// "_type":"tweet", +// "_id":"1", +// "_version":4, +// "status":200, +// "found":true +// } +// },{ +// "update":{ +// "_index":"index2", +// "_type":"tweet", +// "_id":"2", +// "_version":4, +// "status":200 +// } +// }] +// } +type BulkResponse struct { + Took int `json:"took,omitempty"` + Errors bool `json:"errors,omitempty"` + Items []map[string]*BulkResponseItem `json:"items,omitempty"` +} + +// BulkResponseItem is the result of a single bulk request. +type BulkResponseItem struct { + Index string `json:"_index,omitempty"` + Type string `json:"_type,omitempty"` + Id string `json:"_id,omitempty"` + Version int `json:"_version,omitempty"` + Status int `json:"status,omitempty"` + Found bool `json:"found,omitempty"` + Error *ErrorDetails `json:"error,omitempty"` +} + +// Indexed returns all bulk request results of "index" actions. +func (r *BulkResponse) Indexed() []*BulkResponseItem { + return r.ByAction("index") +} + +// Created returns all bulk request results of "create" actions. +func (r *BulkResponse) Created() []*BulkResponseItem { + return r.ByAction("create") +} + +// Updated returns all bulk request results of "update" actions. +func (r *BulkResponse) Updated() []*BulkResponseItem { + return r.ByAction("update") +} + +// Deleted returns all bulk request results of "delete" actions. +func (r *BulkResponse) Deleted() []*BulkResponseItem { + return r.ByAction("delete") +} + +// ByAction returns all bulk request results of a certain action, +// e.g. "index" or "delete". 
+func (r *BulkResponse) ByAction(action string) []*BulkResponseItem { + if r.Items == nil { + return nil + } + items := make([]*BulkResponseItem, 0) + for _, item := range r.Items { + if result, found := item[action]; found { + items = append(items, result) + } + } + return items +} + +// ById returns all bulk request results of a given document id, +// regardless of the action ("index", "delete" etc.). +func (r *BulkResponse) ById(id string) []*BulkResponseItem { + if r.Items == nil { + return nil + } + items := make([]*BulkResponseItem, 0) + for _, item := range r.Items { + for _, result := range item { + if result.Id == id { + items = append(items, result) + } + } + } + return items +} + +// Failed returns those items of a bulk response that have errors, +// i.e. those that don't have a status code between 200 and 299. +func (r *BulkResponse) Failed() []*BulkResponseItem { + if r.Items == nil { + return nil + } + errors := make([]*BulkResponseItem, 0) + for _, item := range r.Items { + for _, result := range item { + if !(result.Status >= 200 && result.Status <= 299) { + errors = append(errors, result) + } + } + } + return errors +} + +// Succeeded returns those items of a bulk response that have no errors, +// i.e. those have a status code between 200 and 299. +func (r *BulkResponse) Succeeded() []*BulkResponseItem { + if r.Items == nil { + return nil + } + succeeded := make([]*BulkResponseItem, 0) + for _, item := range r.Items { + for _, result := range item { + if result.Status >= 200 && result.Status <= 299 { + succeeded = append(succeeded, result) + } + } + } + return succeeded +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_delete_request.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_delete_request.go new file mode 100644 index 000000000..5a9a74ee0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_delete_request.go @@ -0,0 +1,158 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// -- Bulk delete request -- + +// Bulk request to remove a document from Elasticsearch. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// for details. +type BulkDeleteRequest struct { + BulkableRequest + index string + typ string + id string + parent string + routing string + refresh *bool + version int64 // default is MATCH_ANY + versionType string // default is "internal" + + source []string +} + +// NewBulkDeleteRequest returns a new BulkDeleteRequest. +func NewBulkDeleteRequest() *BulkDeleteRequest { + return &BulkDeleteRequest{} +} + +// Index specifies the Elasticsearch index to use for this delete request. +// If unspecified, the index set on the BulkService will be used. +func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest { + r.index = index + r.source = nil + return r +} + +// Type specifies the Elasticsearch type to use for this delete request. +// If unspecified, the type set on the BulkService will be used. +func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest { + r.typ = typ + r.source = nil + return r +} + +// Id specifies the identifier of the document to delete. +func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest { + r.id = id + r.source = nil + return r +} + +// Parent specifies the parent of the request, which is used in parent/child +// mappings. +func (r *BulkDeleteRequest) Parent(parent string) *BulkDeleteRequest { + r.parent = parent + r.source = nil + return r +} + +// Routing specifies a routing value for the request. +func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest { + r.routing = routing + r.source = nil + return r +} + +// Refresh indicates whether to update the shards immediately after +// the delete has been processed. 
Deleted documents will disappear +// in search immediately at the cost of slower bulk performance. +func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest { + r.refresh = &refresh + r.source = nil + return r +} + +// Version indicates the version to be deleted as part of an optimistic +// concurrency model. +func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest { + r.version = version + r.source = nil + return r +} + +// VersionType can be "internal" (default), "external", "external_gte", +// "external_gt", or "force". +func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest { + r.versionType = versionType + r.source = nil + return r +} + +// String returns the on-wire representation of the delete request, +// concatenated as a single string. +func (r *BulkDeleteRequest) String() string { + lines, err := r.Source() + if err != nil { + return fmt.Sprintf("error: %v", err) + } + return strings.Join(lines, "\n") +} + +// Source returns the on-wire representation of the delete request, +// split into an action-and-meta-data line and an (optional) source line. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// for details. 
+func (r *BulkDeleteRequest) Source() ([]string, error) { + if r.source != nil { + return r.source, nil + } + lines := make([]string, 1) + + source := make(map[string]interface{}) + deleteCommand := make(map[string]interface{}) + if r.index != "" { + deleteCommand["_index"] = r.index + } + if r.typ != "" { + deleteCommand["_type"] = r.typ + } + if r.id != "" { + deleteCommand["_id"] = r.id + } + if r.parent != "" { + deleteCommand["_parent"] = r.parent + } + if r.routing != "" { + deleteCommand["_routing"] = r.routing + } + if r.version > 0 { + deleteCommand["_version"] = r.version + } + if r.versionType != "" { + deleteCommand["_version_type"] = r.versionType + } + if r.refresh != nil { + deleteCommand["refresh"] = *r.refresh + } + source["delete"] = deleteCommand + + body, err := json.Marshal(source) + if err != nil { + return nil, err + } + + lines[0] = string(body) + r.source = lines + + return lines, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request.go new file mode 100644 index 000000000..bab1b8cc4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request.go @@ -0,0 +1,232 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Bulk request to add a document to Elasticsearch. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// for details. +type BulkIndexRequest struct { + BulkableRequest + index string + typ string + id string + opType string + routing string + parent string + timestamp string + ttl int64 + refresh *bool + version int64 // default is MATCH_ANY + versionType string // default is "internal" + doc interface{} + + source []string +} + +// NewBulkIndexRequest returns a new BulkIndexRequest. 
+// The operation type is "index" by default. +func NewBulkIndexRequest() *BulkIndexRequest { + return &BulkIndexRequest{ + opType: "index", + } +} + +// Index specifies the Elasticsearch index to use for this index request. +// If unspecified, the index set on the BulkService will be used. +func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest { + r.index = index + r.source = nil + return r +} + +// Type specifies the Elasticsearch type to use for this index request. +// If unspecified, the type set on the BulkService will be used. +func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest { + r.typ = typ + r.source = nil + return r +} + +// Id specifies the identifier of the document to index. +func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest { + r.id = id + r.source = nil + return r +} + +// OpType specifies if this request should follow create-only or upsert +// behavior. This follows the OpType of the standard document index API. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#operation-type +// for details. +func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest { + r.opType = opType + r.source = nil + return r +} + +// Routing specifies a routing value for the request. +func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest { + r.routing = routing + r.source = nil + return r +} + +// Parent specifies the identifier of the parent document (if available). +func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest { + r.parent = parent + r.source = nil + return r +} + +// Timestamp can be used to index a document with a timestamp. +// This is deprecated as of 2.0.0-beta2; you should use a normal date field +// and set its value explicitly. +func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest { + r.timestamp = timestamp + r.source = nil + return r +} + +// Ttl (time to live) sets an expiration date for the document. 
Expired +// documents will be expunged automatically. +// This is deprecated as of 2.0.0-beta2 and will be replaced by a different +// implementation in a future version. +func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest { + r.ttl = ttl + r.source = nil + return r +} + +// Refresh indicates whether to update the shards immediately after +// the request has been processed. Newly added documents will appear +// in search immediately at the cost of slower bulk performance. +func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest { + r.refresh = &refresh + r.source = nil + return r +} + +// Version indicates the version of the document as part of an optimistic +// concurrency model. +func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest { + r.version = version + r.source = nil + return r +} + +// VersionType specifies how versions are created. It can be e.g. internal, +// external, external_gte, or force. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning +// for details. +func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest { + r.versionType = versionType + r.source = nil + return r +} + +// Doc specifies the document to index. +func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest { + r.doc = doc + r.source = nil + return r +} + +// String returns the on-wire representation of the index request, +// concatenated as a single string. +func (r *BulkIndexRequest) String() string { + lines, err := r.Source() + if err != nil { + return fmt.Sprintf("error: %v", err) + } + return strings.Join(lines, "\n") +} + +// Source returns the on-wire representation of the index request, +// split into an action-and-meta-data line and an (optional) source line. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// for details. 
+func (r *BulkIndexRequest) Source() ([]string, error) { + // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } } + // { "field1" : "value1" } + + if r.source != nil { + return r.source, nil + } + + lines := make([]string, 2) + + // "index" ... + command := make(map[string]interface{}) + indexCommand := make(map[string]interface{}) + if r.index != "" { + indexCommand["_index"] = r.index + } + if r.typ != "" { + indexCommand["_type"] = r.typ + } + if r.id != "" { + indexCommand["_id"] = r.id + } + if r.routing != "" { + indexCommand["_routing"] = r.routing + } + if r.parent != "" { + indexCommand["_parent"] = r.parent + } + if r.timestamp != "" { + indexCommand["_timestamp"] = r.timestamp + } + if r.ttl > 0 { + indexCommand["_ttl"] = r.ttl + } + if r.version > 0 { + indexCommand["_version"] = r.version + } + if r.versionType != "" { + indexCommand["_version_type"] = r.versionType + } + if r.refresh != nil { + indexCommand["refresh"] = *r.refresh + } + command[r.opType] = indexCommand + line, err := json.Marshal(command) + if err != nil { + return nil, err + } + lines[0] = string(line) + + // "field1" ... + if r.doc != nil { + switch t := r.doc.(type) { + default: + body, err := json.Marshal(r.doc) + if err != nil { + return nil, err + } + lines[1] = string(body) + case json.RawMessage: + lines[1] = string(t) + case *json.RawMessage: + lines[1] = string(*t) + case string: + lines[1] = t + case *string: + lines[1] = *t + } + } else { + lines[1] = "{}" + } + + r.source = lines + return lines, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_processor.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_processor.go new file mode 100644 index 000000000..c833e9a15 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_processor.go @@ -0,0 +1,541 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "sync" + "sync/atomic" + "time" + + "gopkg.in/olivere/elastic.v3/backoff" +) + +// BulkProcessorService allows to easily process bulk requests. It allows setting +// policies when to flush new bulk requests, e.g. based on a number of actions, +// on the size of the actions, and/or to flush periodically. It also allows +// to control the number of concurrent bulk requests allowed to be executed +// in parallel. +// +// BulkProcessorService, by default, commits either every 1000 requests or when the +// (estimated) size of the bulk requests exceeds 5 MB. However, it does not +// commit periodically. BulkProcessorService also does retry by default, using +// an exponential backoff algorithm. +// +// The caller is responsible for setting the index and type on every +// bulk request added to BulkProcessorService. +// +// BulkProcessorService takes ideas from the BulkProcessor of the +// Elasticsearch Java API as documented in +// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html. +type BulkProcessorService struct { + c *Client + beforeFn BulkBeforeFunc + afterFn BulkAfterFunc + name string // name of processor + numWorkers int // # of workers (>= 1) + bulkActions int // # of requests after which to commit + bulkSize int // # of bytes after which to commit + flushInterval time.Duration // periodic flush interval + wantStats bool // indicates whether to gather statistics + initialTimeout time.Duration // initial wait time before retry on errors + maxTimeout time.Duration // max time to wait for retry on errors +} + +// NewBulkProcessorService creates a new BulkProcessorService. 
func NewBulkProcessorService(client *Client) *BulkProcessorService {
	return &BulkProcessorService{
		c:          client,
		numWorkers: 1,
		// Defaults follow the Elasticsearch Java BulkProcessor:
		// commit every 1000 actions or 5 MB, whichever comes first.
		bulkActions: 1000,
		bulkSize:    5 << 20, // 5 MB
		// Retry backoff window: first retry after 200ms, capped at 10s.
		initialTimeout: time.Duration(200) * time.Millisecond,
		maxTimeout:     time.Duration(10000) * time.Millisecond,
	}
}

// BulkBeforeFunc defines the signature of callbacks that are executed
// before a commit to Elasticsearch.
type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)

// BulkAfterFunc defines the signature of callbacks that are executed
// after a commit to Elasticsearch. The err parameter signals an error.
type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)

// Before specifies a function to be executed before bulk requests get committed
// to Elasticsearch.
func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService {
	s.beforeFn = fn
	return s
}

// After specifies a function to be executed when bulk requests have been
// committed to Elasticsearch. The After callback executes both when the
// commit was successful as well as on failures.
func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService {
	s.afterFn = fn
	return s
}

// Name is an optional name to identify this bulk processor.
func (s *BulkProcessorService) Name(name string) *BulkProcessorService {
	s.name = name
	return s
}

// Workers is the number of concurrent workers allowed to be
// executed. Defaults to 1 and must be greater or equal to 1.
func (s *BulkProcessorService) Workers(num int) *BulkProcessorService {
	s.numWorkers = num
	return s
}

// BulkActions specifies when to flush based on the number of actions
// currently added. Defaults to 1000 and can be set to -1 to be disabled.
+func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService { + s.bulkActions = bulkActions + return s +} + +// BulkSize specifies when to flush based on the size (in bytes) of the actions +// currently added. Defaults to 5 MB and can be set to -1 to be disabled. +func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService { + s.bulkSize = bulkSize + return s +} + +// FlushInterval specifies when to flush at the end of the given interval. +// This is disabled by default. If you want the bulk processor to +// operate completely asynchronously, set both BulkActions and BulkSize to +// -1 and set the FlushInterval to a meaningful interval. +func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService { + s.flushInterval = interval + return s +} + +// Stats tells bulk processor to gather stats while running. +// Use Stats to return the stats. This is disabled by default. +func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService { + s.wantStats = wantStats + return s +} + +// Do creates a new BulkProcessor and starts it. +// Consider the BulkProcessor as a running instance that accepts bulk requests +// and commits them to Elasticsearch, spreading the work across one or more +// workers. +// +// You can interoperate with the BulkProcessor returned by Do, e.g. Start and +// Stop (or Close) it. +// +// Calling Do several times returns new BulkProcessors. You probably don't +// want to do this. BulkProcessorService implements just a builder pattern. 
func (s *BulkProcessorService) Do() (*BulkProcessor, error) {
	// Snapshot every configured setting into a concrete processor;
	// further changes to the service do not affect this instance.
	p := newBulkProcessor(
		s.c,
		s.beforeFn,
		s.afterFn,
		s.name,
		s.numWorkers,
		s.bulkActions,
		s.bulkSize,
		s.flushInterval,
		s.wantStats,
		s.initialTimeout,
		s.maxTimeout)

	// Spin up the workers (and the periodic flusher, if an interval was
	// set) right away so the processor is ready to accept requests.
	err := p.Start()
	if err != nil {
		return nil, err
	}
	return p, nil
}

// -- Bulk Processor Statistics --

// BulkProcessorStats contains various statistics of a bulk processor
// while it is running. Use the Stats func to return it while running.
type BulkProcessorStats struct {
	Flushed   int64 // number of times the flush interval has been invoked
	Committed int64 // # of times workers committed bulk requests
	Indexed   int64 // # of requests indexed
	Created   int64 // # of requests that ES reported as creates (201)
	Updated   int64 // # of requests that ES reported as updates
	Deleted   int64 // # of requests that ES reported as deletes
	Succeeded int64 // # of requests that ES reported as successful
	Failed    int64 // # of requests that ES reported as failed

	Workers []*BulkProcessorWorkerStats // stats for each worker
}

// BulkProcessorWorkerStats represents per-worker statistics.
type BulkProcessorWorkerStats struct {
	Queued       int64         // # of requests queued in this worker
	LastDuration time.Duration // duration of last commit
}

// newBulkProcessorStats initializes and returns a BulkProcessorStats struct.
+func newBulkProcessorStats(workers int) *BulkProcessorStats { + stats := &BulkProcessorStats{ + Workers: make([]*BulkProcessorWorkerStats, workers), + } + for i := 0; i < workers; i++ { + stats.Workers[i] = &BulkProcessorWorkerStats{} + } + return stats +} + +func (st *BulkProcessorStats) dup() *BulkProcessorStats { + dst := new(BulkProcessorStats) + dst.Flushed = st.Flushed + dst.Committed = st.Committed + dst.Indexed = st.Indexed + dst.Created = st.Created + dst.Updated = st.Updated + dst.Deleted = st.Deleted + dst.Succeeded = st.Succeeded + dst.Failed = st.Failed + for _, src := range st.Workers { + dst.Workers = append(dst.Workers, src.dup()) + } + return dst +} + +func (st *BulkProcessorWorkerStats) dup() *BulkProcessorWorkerStats { + dst := new(BulkProcessorWorkerStats) + dst.Queued = st.Queued + dst.LastDuration = st.LastDuration + return dst +} + +// -- Bulk Processor -- + +// BulkProcessor encapsulates a task that accepts bulk requests and +// orchestrates committing them to Elasticsearch via one or more workers. +// +// BulkProcessor is returned by setting up a BulkProcessorService and +// calling the Do method. 
+type BulkProcessor struct { + c *Client + beforeFn BulkBeforeFunc + afterFn BulkAfterFunc + name string + bulkActions int + bulkSize int + numWorkers int + executionId int64 + requestsC chan BulkableRequest + workerWg sync.WaitGroup + workers []*bulkWorker + flushInterval time.Duration + flusherStopC chan struct{} + wantStats bool + initialTimeout time.Duration // initial wait time before retry on errors + maxTimeout time.Duration // max time to wait for retry on errors + + startedMu sync.Mutex // guards the following block + started bool + + statsMu sync.Mutex // guards the following block + stats *BulkProcessorStats +} + +func newBulkProcessor( + client *Client, + beforeFn BulkBeforeFunc, + afterFn BulkAfterFunc, + name string, + numWorkers int, + bulkActions int, + bulkSize int, + flushInterval time.Duration, + wantStats bool, + initialTimeout time.Duration, + maxTimeout time.Duration) *BulkProcessor { + return &BulkProcessor{ + c: client, + beforeFn: beforeFn, + afterFn: afterFn, + name: name, + numWorkers: numWorkers, + bulkActions: bulkActions, + bulkSize: bulkSize, + flushInterval: flushInterval, + wantStats: wantStats, + initialTimeout: initialTimeout, + maxTimeout: maxTimeout, + } +} + +// Start starts the bulk processor. If the processor is already started, +// nil is returned. +func (p *BulkProcessor) Start() error { + p.startedMu.Lock() + defer p.startedMu.Unlock() + + if p.started { + return nil + } + + // We must have at least one worker. + if p.numWorkers < 1 { + p.numWorkers = 1 + } + + p.requestsC = make(chan BulkableRequest) + p.executionId = 0 + p.stats = newBulkProcessorStats(p.numWorkers) + + // Create and start up workers. 
+ p.workers = make([]*bulkWorker, p.numWorkers) + for i := 0; i < p.numWorkers; i++ { + p.workerWg.Add(1) + p.workers[i] = newBulkWorker(p, i) + go p.workers[i].work() + } + + // Start the ticker for flush (if enabled) + if int64(p.flushInterval) > 0 { + p.flusherStopC = make(chan struct{}) + go p.flusher(p.flushInterval) + } + + p.started = true + + return nil +} + +// Stop is an alias for Close. +func (p *BulkProcessor) Stop() error { + return p.Close() +} + +// Close stops the bulk processor previously started with Do. +// If it is already stopped, this is a no-op and nil is returned. +// +// By implementing Close, BulkProcessor implements the io.Closer interface. +func (p *BulkProcessor) Close() error { + p.startedMu.Lock() + defer p.startedMu.Unlock() + + // Already stopped? Do nothing. + if !p.started { + return nil + } + + // Stop flusher (if enabled) + if p.flusherStopC != nil { + p.flusherStopC <- struct{}{} + <-p.flusherStopC + close(p.flusherStopC) + p.flusherStopC = nil + } + + // Stop all workers. + close(p.requestsC) + p.workerWg.Wait() + + p.started = false + + return nil +} + +// Stats returns the latest bulk processor statistics. +// Collecting stats must be enabled first by calling Stats(true) on +// the service that created this processor. +func (p *BulkProcessor) Stats() BulkProcessorStats { + p.statsMu.Lock() + defer p.statsMu.Unlock() + return *p.stats.dup() +} + +// Add adds a single request to commit by the BulkProcessorService. +// +// The caller is responsible for setting the index and type on the request. +func (p *BulkProcessor) Add(request BulkableRequest) { + p.requestsC <- request +} + +// Flush manually asks all workers to commit their outstanding requests. +// It returns only when all workers acknowledge completion. 
+func (p *BulkProcessor) Flush() error { + p.statsMu.Lock() + p.stats.Flushed++ + p.statsMu.Unlock() + + for _, w := range p.workers { + w.flushC <- struct{}{} + <-w.flushAckC // wait for completion + } + return nil +} + +// flusher is a single goroutine that periodically asks all workers to +// commit their outstanding bulk requests. It is only started if +// FlushInterval is greater than 0. +func (p *BulkProcessor) flusher(interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: // Periodic flush + p.Flush() // TODO swallow errors here? + + case <-p.flusherStopC: + p.flusherStopC <- struct{}{} + return + } + } +} + +// -- Bulk Worker -- + +// bulkWorker encapsulates a single worker, running in a goroutine, +// receiving bulk requests and eventually committing them to Elasticsearch. +// It is strongly bound to a BulkProcessor. +type bulkWorker struct { + p *BulkProcessor + i int + bulkActions int + bulkSize int + service *BulkService + flushC chan struct{} + flushAckC chan struct{} +} + +// newBulkWorker creates a new bulkWorker instance. +func newBulkWorker(p *BulkProcessor, i int) *bulkWorker { + return &bulkWorker{ + p: p, + i: i, + bulkActions: p.bulkActions, + bulkSize: p.bulkSize, + service: NewBulkService(p.c), + flushC: make(chan struct{}), + flushAckC: make(chan struct{}), + } +} + +// work waits for bulk requests and manual flush calls on the respective +// channels and is invoked as a goroutine when the bulk processor is started. +func (w *bulkWorker) work() { + defer func() { + w.p.workerWg.Done() + close(w.flushAckC) + close(w.flushC) + }() + + var stop bool + for !stop { + select { + case req, open := <-w.p.requestsC: + if open { + // Received a new request + w.service.Add(req) + if w.commitRequired() { + w.commit() // TODO swallow errors here? + } + } else { + // Channel closed: Stop. + stop = true + if w.service.NumberOfActions() > 0 { + w.commit() // TODO swallow errors here? 
+ } + } + + case <-w.flushC: + // Commit outstanding requests + if w.service.NumberOfActions() > 0 { + w.commit() // TODO swallow errors here? + } + w.flushAckC <- struct{}{} + } + } +} + +// commit commits the bulk requests in the given service, +// invoking callbacks as specified. +func (w *bulkWorker) commit() error { + var res *BulkResponse + + // commitFunc will commit bulk requests and, on failure, be retried + // via exponential backoff + commitFunc := func() error { + var err error + res, err = w.service.Do() + return err + } + // notifyFunc will be called if retry fails + notifyFunc := func(err error, d time.Duration) { + w.p.c.errorf("elastic: bulk processor %q failed but will retry in %v: %v", w.p.name, d, err) + } + + id := atomic.AddInt64(&w.p.executionId, 1) + + // Update # documents in queue before eventual retries + w.p.statsMu.Lock() + if w.p.wantStats { + w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) + } + w.p.statsMu.Unlock() + + // Save requests because they will be reset in commitFunc + reqs := w.service.requests + + // Invoke before callback + if w.p.beforeFn != nil { + w.p.beforeFn(id, reqs) + } + + // Commit bulk requests + policy := backoff.NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout).SendStop(true) + err := backoff.RetryNotify(commitFunc, policy, notifyFunc) + w.updateStats(res) + if err != nil { + w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err) + } + + // Invoke after callback + if w.p.afterFn != nil { + w.p.afterFn(id, reqs, res, err) + } + + return err +} + +func (w *bulkWorker) updateStats(res *BulkResponse) { + // Update stats + if res != nil { + w.p.statsMu.Lock() + if w.p.wantStats { + w.p.stats.Committed++ + if res != nil { + w.p.stats.Indexed += int64(len(res.Indexed())) + w.p.stats.Created += int64(len(res.Created())) + w.p.stats.Updated += int64(len(res.Updated())) + w.p.stats.Deleted += int64(len(res.Deleted())) + w.p.stats.Succeeded += int64(len(res.Succeeded())) + 
w.p.stats.Failed += int64(len(res.Failed())) + } + w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) + w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond + } + w.p.statsMu.Unlock() + } +} + +// commitRequired returns true if the service has to commit its +// bulk requests. This can be either because the number of actions +// or the estimated size in bytes is larger than specified in the +// BulkProcessorService. +func (w *bulkWorker) commitRequired() bool { + if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions { + return true + } + if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) { + return true + } + return false +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_request.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_request.go new file mode 100644 index 000000000..315b535ca --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_request.go @@ -0,0 +1,17 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// -- Bulkable request (index/update/delete) -- + +// Generic interface to bulkable requests. +type BulkableRequest interface { + fmt.Stringer + Source() ([]string, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request.go new file mode 100644 index 000000000..8c899c9d8 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request.go @@ -0,0 +1,280 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Bulk request to update a document in Elasticsearch. 
+// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// for details. +type BulkUpdateRequest struct { + BulkableRequest + index string + typ string + id string + + routing string + parent string + script *Script + version int64 // default is MATCH_ANY + versionType string // default is "internal" + retryOnConflict *int + refresh *bool + upsert interface{} + docAsUpsert *bool + doc interface{} + ttl int64 + timestamp string + + source []string +} + +// NewBulkUpdateRequest returns a new BulkUpdateRequest. +func NewBulkUpdateRequest() *BulkUpdateRequest { + return &BulkUpdateRequest{} +} + +// Index specifies the Elasticsearch index to use for this update request. +// If unspecified, the index set on the BulkService will be used. +func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest { + r.index = index + r.source = nil + return r +} + +// Type specifies the Elasticsearch type to use for this update request. +// If unspecified, the type set on the BulkService will be used. +func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest { + r.typ = typ + r.source = nil + return r +} + +// Id specifies the identifier of the document to update. +func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest { + r.id = id + r.source = nil + return r +} + +// Routing specifies a routing value for the request. +func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest { + r.routing = routing + r.source = nil + return r +} + +// Parent specifies the identifier of the parent document (if available). +func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest { + r.parent = parent + r.source = nil + return r +} + +// Script specifies an update script. +// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-bulk.html#bulk-update +// and https://www.elastic.co/guide/en/elasticsearch/reference/2.x/modules-scripting.html +// for details. 
+func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest { + r.script = script + r.source = nil + return r +} + +// RetryOnConflict specifies how often to retry in case of a version conflict. +func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest { + r.retryOnConflict = &retryOnConflict + r.source = nil + return r +} + +// Version indicates the version of the document as part of an optimistic +// concurrency model. +func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest { + r.version = version + r.source = nil + return r +} + +// VersionType can be "internal" (default), "external", "external_gte", +// "external_gt", or "force". +func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest { + r.versionType = versionType + r.source = nil + return r +} + +// Refresh indicates whether to update the shards immediately after +// the request has been processed. Updated documents will appear +// in search immediately at the cost of slower bulk performance. +func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest { + r.refresh = &refresh + r.source = nil + return r +} + +// Doc specifies the updated document. +func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest { + r.doc = doc + r.source = nil + return r +} + +// DocAsUpsert indicates whether the contents of Doc should be used as +// the Upsert value. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-update.html#_literal_doc_as_upsert_literal +// for details. +func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest { + r.docAsUpsert = &docAsUpsert + r.source = nil + return r +} + +// Upsert specifies the document to use for upserts. It will be used for +// create if the original document does not exist. 
+func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest { + r.upsert = doc + r.source = nil + return r +} + +// Ttl specifies the time to live, and optional expiry time. +// This is deprecated as of 2.0.0-beta2. +func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest { + r.ttl = ttl + r.source = nil + return r +} + +// Timestamp specifies a timestamp for the document. +// This is deprecated as of 2.0.0-beta2. +func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest { + r.timestamp = timestamp + r.source = nil + return r +} + +// String returns the on-wire representation of the update request, +// concatenated as a single string. +func (r *BulkUpdateRequest) String() string { + lines, err := r.Source() + if err != nil { + return fmt.Sprintf("error: %v", err) + } + return strings.Join(lines, "\n") +} + +func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) { + switch t := data.(type) { + default: + body, err := json.Marshal(data) + if err != nil { + return "", err + } + return string(body), nil + case json.RawMessage: + return string(t), nil + case *json.RawMessage: + return string(*t), nil + case string: + return t, nil + case *string: + return *t, nil + } +} + +// Source returns the on-wire representation of the update request, +// split into an action-and-meta-data line and an (optional) source line. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// for details. +func (r BulkUpdateRequest) Source() ([]string, error) { + // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } + // { "doc" : { "field1" : "value1", ... } } + // or + // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } + // { "script" : { ... } } + + if r.source != nil { + return r.source, nil + } + + lines := make([]string, 2) + + // "update" ... 
+ command := make(map[string]interface{}) + updateCommand := make(map[string]interface{}) + if r.index != "" { + updateCommand["_index"] = r.index + } + if r.typ != "" { + updateCommand["_type"] = r.typ + } + if r.id != "" { + updateCommand["_id"] = r.id + } + if r.routing != "" { + updateCommand["_routing"] = r.routing + } + if r.parent != "" { + updateCommand["_parent"] = r.parent + } + if r.timestamp != "" { + updateCommand["_timestamp"] = r.timestamp + } + if r.ttl > 0 { + updateCommand["_ttl"] = r.ttl + } + if r.version > 0 { + updateCommand["_version"] = r.version + } + if r.versionType != "" { + updateCommand["_version_type"] = r.versionType + } + if r.refresh != nil { + updateCommand["refresh"] = *r.refresh + } + if r.retryOnConflict != nil { + updateCommand["_retry_on_conflict"] = *r.retryOnConflict + } + command["update"] = updateCommand + line, err := json.Marshal(command) + if err != nil { + return nil, err + } + lines[0] = string(line) + + // 2nd line: {"doc" : { ... }} or {"script": {...}} + source := make(map[string]interface{}) + if r.docAsUpsert != nil { + source["doc_as_upsert"] = *r.docAsUpsert + } + if r.upsert != nil { + source["upsert"] = r.upsert + } + if r.doc != nil { + // {"doc":{...}} + source["doc"] = r.doc + } else if r.script != nil { + // {"script":...} + src, err := r.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + lines[1], err = r.getSourceAsString(source) + if err != nil { + return nil, err + } + + r.source = lines + return lines, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/canonicalize.go b/vendor/gopkg.in/olivere/elastic.v3/canonicalize.go new file mode 100644 index 000000000..1473f1466 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/canonicalize.go @@ -0,0 +1,38 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import "net/url" + +// canonicalize takes a list of URLs and returns its canonicalized form, i.e. +// remove anything but scheme, userinfo, host, path, and port. +// It also removes all trailing slashes. It also skips invalid URLs or +// URLs that do not use protocol http or https. +// +// Example: +// http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200 +// http://127.0.0.1:9200/db1/ -> http://127.0.0.1:9200/db1 +// 127.0.0.1:9200 -> http://127.0.0.1:9200 +func canonicalize(rawurls ...string) []string { + var canonicalized []string + for _, rawurl := range rawurls { + u, err := url.Parse(rawurl) + if err == nil { + if len(u.Scheme) == 0 { + u.Scheme = DefaultScheme + } + if u.Scheme == "http" || u.Scheme == "https" { + // Trim trailing slashes + for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' { + u.Path = u.Path[0 : len(u.Path)-1] + } + u.Fragment = "" + u.RawQuery = "" + canonicalized = append(canonicalized, u.String()) + } + } + } + return canonicalized +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/clear_scroll.go b/vendor/gopkg.in/olivere/elastic.v3/clear_scroll.go new file mode 100644 index 000000000..2ad9ae6ff --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/clear_scroll.go @@ -0,0 +1,101 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" +) + +// ClearScrollService clears one or more scroll contexts by their ids. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api +// for details. +type ClearScrollService struct { + client *Client + pretty bool + scrollId []string +} + +// NewClearScrollService creates a new ClearScrollService. 
+func NewClearScrollService(client *Client) *ClearScrollService { + return &ClearScrollService{ + client: client, + scrollId: make([]string, 0), + } +} + +// ScrollId is a list of scroll IDs to clear. +// Use _all to clear all search contexts. +func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService { + s.scrollId = append(s.scrollId, scrollIds...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ClearScrollService) buildURL() (string, url.Values, error) { + // Build URL + path := "/_search/scroll/" + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClearScrollService) Validate() error { + var invalid []string + if len(s.scrollId) == 0 { + invalid = append(invalid, "ScrollId") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ClearScrollService) Do() (*ClearScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body := strings.Join(s.scrollId, ",") + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClearScrollResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClearScrollResponse is the response of ClearScrollService.Do. 
+type ClearScrollResponse struct {
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/client.go b/vendor/gopkg.in/olivere/elastic.v3/client.go
new file mode 100644
index 000000000..fe0fa2254
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/client.go
@@ -0,0 +1,1603 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	// Version is the current version of Elastic.
+	Version = "3.0.43"
+
+	// DefaultURL is the default endpoint of Elasticsearch on the local machine.
+	// It is used e.g. when initializing a new Client without a specific URL.
+	DefaultURL = "http://127.0.0.1:9200"
+
+	// DefaultScheme is the default protocol scheme to use when sniffing
+	// the Elasticsearch cluster.
+	DefaultScheme = "http"
+
+	// DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
+	DefaultHealthcheckEnabled = true
+
+	// DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
+	// for a response from Elasticsearch on startup, i.e. when creating a
+	// client. After the client is started, a shorter timeout is commonly used
+	// (its default is specified in DefaultHealthcheckTimeout).
+	DefaultHealthcheckTimeoutStartup = 5 * time.Second
+
+	// DefaultHealthcheckTimeout specifies the time a running client waits for
+	// a response from Elasticsearch. Notice that the healthcheck timeout
+	// when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
+	DefaultHealthcheckTimeout = 1 * time.Second
+
+	// DefaultHealthcheckInterval is the default interval between
+	// two health checks of the nodes in the cluster.
+ DefaultHealthcheckInterval = 60 * time.Second + + // DefaultSnifferEnabled specifies if the sniffer is enabled by default. + DefaultSnifferEnabled = true + + // DefaultSnifferInterval is the interval between two sniffing procedures, + // i.e. the lookup of all nodes in the cluster and their addition/removal + // from the list of actual connections. + DefaultSnifferInterval = 15 * time.Minute + + // DefaultSnifferTimeoutStartup is the default timeout for the sniffing + // process that is initiated while creating a new client. For subsequent + // sniffing processes, DefaultSnifferTimeout is used (by default). + DefaultSnifferTimeoutStartup = 5 * time.Second + + // DefaultSnifferTimeout is the default timeout after which the + // sniffing process times out. Notice that for the initial sniffing + // process, DefaultSnifferTimeoutStartup is used. + DefaultSnifferTimeout = 2 * time.Second + + // DefaultMaxRetries is the number of retries for a single request after + // Elastic will give up and return an error. It is zero by default, so + // retry is disabled by default. + DefaultMaxRetries = 0 + + // DefaultSendGetBodyAs is the HTTP method to use when elastic is sending + // a GET request with a body. + DefaultSendGetBodyAs = "GET" + + // DefaultGzipEnabled specifies if gzip compression is enabled by default. + DefaultGzipEnabled = false + + // off is used to disable timeouts. + off = -1 * time.Second +) + +var ( + // ErrNoClient is raised when no Elasticsearch node is available. + ErrNoClient = errors.New("no Elasticsearch node available") + + // ErrRetry is raised when a request cannot be executed after the configured + // number of retries. + ErrRetry = errors.New("cannot connect after several retries") + + // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus + // didn't return in time. + ErrTimeout = errors.New("timeout") +) + +// ClientOptionFunc is a function that configures a Client. +// It is used in NewClient. 
+type ClientOptionFunc func(*Client) error + +// Client is an Elasticsearch client. Create one by calling NewClient. +type Client struct { + c *http.Client // net/http Client to use for requests + + connsMu sync.RWMutex // connsMu guards the next block + conns []*conn // all connections + cindex int // index into conns + + mu sync.RWMutex // guards the next block + urls []string // set of URLs passed initially to the client + running bool // true if the client's background processes are running + errorlog Logger // error log for critical messages + infolog Logger // information log for e.g. response times + tracelog Logger // trace log for debugging + maxRetries int // max. number of retries + scheme string // http or https + healthcheckEnabled bool // healthchecks enabled or disabled + healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup + healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch + healthcheckInterval time.Duration // interval between healthchecks + healthcheckStop chan bool // notify healthchecker to stop, and notify back + snifferEnabled bool // sniffer enabled or disabled + snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup + snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API + snifferInterval time.Duration // interval between sniffing + snifferStop chan bool // notify sniffer to stop, and notify back + decoder Decoder // used to decode data sent from Elasticsearch + basicAuth bool // indicates whether to send HTTP Basic Auth credentials + basicAuthUsername string // username for HTTP Basic Auth + basicAuthPassword string // password for HTTP Basic Auth + sendGetBodyAs string // override for when sending a GET with a body + requiredPlugins []string // list of required plugins + gzipEnabled bool // gzip compression enabled or disabled (default) +} + 
+// NewClient creates a new client to work with Elasticsearch.
+//
+// NewClient, by default, is meant to be long-lived and shared across
+// your application. If you need a short-lived client, e.g. for request-scope,
+// consider using NewSimpleClient instead.
+//
+// The caller can configure the new client by passing configuration options
+// to the func.
+//
+// Example:
+//
+//   client, err := elastic.NewClient(
+//     elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"),
+//     elastic.SetMaxRetries(10),
+//     elastic.SetBasicAuth("user", "secret"))
+//
+// If no URL is configured, Elastic uses DefaultURL by default.
+//
+// If the sniffer is enabled (the default), the new client then sniffs
+// the cluster via the Nodes Info API
+// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info).
+// It uses the URLs specified by the caller. The caller is responsible
+// to only pass a list of URLs of nodes that belong to the same cluster.
+// This sniffing process is run on startup and periodically.
+// Use SnifferInterval to set the interval between two sniffs (default is
+// 15 minutes). In other words: By default, the client will find new nodes
+// in the cluster and remove those that are no longer available every
+// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
+//
+// The list of nodes found in the sniffing process will be used to make
+// connections to the REST API of Elasticsearch. These nodes are also
+// periodically checked in a shorter time frame. This process is called
+// a health check. By default, a health check is done every 60 seconds.
+// You can set a shorter or longer interval by SetHealthcheckInterval.
+// Disabling health checks is not recommended, but can be done by
+// SetHealthcheck(false).
+//
+// Connections are automatically marked as dead or healthy while
+// making requests to Elasticsearch. 
When a request fails, Elastic will +// retry up to a maximum number of retries configured with SetMaxRetries. +// Retries are disabled by default. +// +// If no HttpClient is configured, then http.DefaultClient is used. +// You can use your own http.Client with some http.Transport for +// advanced scenarios. +// +// An error is also returned when some configuration option is invalid or +// the new client cannot sniff the cluster (if enabled). +func NewClient(options ...ClientOptionFunc) (*Client, error) { + // Set up the client + c := &Client{ + c: http.DefaultClient, + conns: make([]*conn, 0), + cindex: -1, + scheme: DefaultScheme, + decoder: &DefaultDecoder{}, + maxRetries: DefaultMaxRetries, + healthcheckEnabled: DefaultHealthcheckEnabled, + healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup, + healthcheckTimeout: DefaultHealthcheckTimeout, + healthcheckInterval: DefaultHealthcheckInterval, + healthcheckStop: make(chan bool), + snifferEnabled: DefaultSnifferEnabled, + snifferTimeoutStartup: DefaultSnifferTimeoutStartup, + snifferTimeout: DefaultSnifferTimeout, + snifferInterval: DefaultSnifferInterval, + snifferStop: make(chan bool), + sendGetBodyAs: DefaultSendGetBodyAs, + gzipEnabled: DefaultGzipEnabled, + } + + // Run the options on it + for _, option := range options { + if err := option(c); err != nil { + return nil, err + } + } + + if len(c.urls) == 0 { + c.urls = []string{DefaultURL} + } + c.urls = canonicalize(c.urls...) + + // Check if we can make a request to any of the specified URLs + if c.healthcheckEnabled { + if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil { + return nil, err + } + } + + if c.snifferEnabled { + // Sniff the cluster initially + if err := c.sniff(c.snifferTimeoutStartup); err != nil { + return nil, err + } + } else { + // Do not sniff the cluster initially. Use the provided URLs instead. 
+ for _, url := range c.urls { + c.conns = append(c.conns, newConn(url, url)) + } + } + + if c.healthcheckEnabled { + // Perform an initial health check + c.healthcheck(c.healthcheckTimeoutStartup, true) + } + // Ensure that we have at least one connection available + if err := c.mustActiveConn(); err != nil { + return nil, err + } + + // Check the required plugins + for _, plugin := range c.requiredPlugins { + found, err := c.HasPlugin(plugin) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("elastic: plugin %s not found", plugin) + } + } + + if c.snifferEnabled { + go c.sniffer() // periodically update cluster information + } + if c.healthcheckEnabled { + go c.healthchecker() // start goroutine periodically ping all nodes of the cluster + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + return c, nil +} + +// NewSimpleClient creates a new short-lived Client that can be used in +// use cases where you need e.g. one client per request. +// +// While NewClient by default sets up e.g. periodic health checks +// and sniffing for new nodes in separate goroutines, NewSimpleClient does +// not and is meant as a simple replacement where you don't need all the +// heavy lifting of NewClient. +// +// NewSimpleClient does the following by default: First, all health checks +// are disabled, including timeouts and periodic checks. Second, sniffing +// is disabled, including timeouts and periodic checks. The number of retries +// is set to 1. NewSimpleClient also does not start any goroutines. +// +// Notice that you can still override settings by passing additional options, +// just like with NewClient. 
+func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) { + c := &Client{ + c: http.DefaultClient, + conns: make([]*conn, 0), + cindex: -1, + scheme: DefaultScheme, + decoder: &DefaultDecoder{}, + maxRetries: 1, + healthcheckEnabled: false, + healthcheckTimeoutStartup: off, + healthcheckTimeout: off, + healthcheckInterval: off, + healthcheckStop: make(chan bool), + snifferEnabled: false, + snifferTimeoutStartup: off, + snifferTimeout: off, + snifferInterval: off, + snifferStop: make(chan bool), + sendGetBodyAs: DefaultSendGetBodyAs, + gzipEnabled: DefaultGzipEnabled, + } + + // Run the options on it + for _, option := range options { + if err := option(c); err != nil { + return nil, err + } + } + + if len(c.urls) == 0 { + c.urls = []string{DefaultURL} + } + c.urls = canonicalize(c.urls...) + + for _, url := range c.urls { + c.conns = append(c.conns, newConn(url, url)) + } + + // Ensure that we have at least one connection available + if err := c.mustActiveConn(); err != nil { + return nil, err + } + + // Check the required plugins + for _, plugin := range c.requiredPlugins { + found, err := c.HasPlugin(plugin) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("elastic: plugin %s not found", plugin) + } + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + return c, nil +} + +// SetHttpClient can be used to specify the http.Client to use when making +// HTTP requests to Elasticsearch. +func SetHttpClient(httpClient *http.Client) ClientOptionFunc { + return func(c *Client) error { + if httpClient != nil { + c.c = httpClient + } else { + c.c = http.DefaultClient + } + return nil + } +} + +// SetBasicAuth can be used to specify the HTTP Basic Auth credentials to +// use when making HTTP requests to Elasticsearch. 
+func SetBasicAuth(username, password string) ClientOptionFunc { + return func(c *Client) error { + c.basicAuthUsername = username + c.basicAuthPassword = password + c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != "" + return nil + } +} + +// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that +// when sniffing is enabled, these URLs are used to initially sniff the +// cluster on startup. +func SetURL(urls ...string) ClientOptionFunc { + return func(c *Client) error { + switch len(urls) { + case 0: + c.urls = []string{DefaultURL} + default: + c.urls = urls + } + return nil + } +} + +// SetScheme sets the HTTP scheme to look for when sniffing (http or https). +// This is http by default. +func SetScheme(scheme string) ClientOptionFunc { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// SetSniff enables or disables the sniffer (enabled by default). +func SetSniff(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.snifferEnabled = enabled + return nil + } +} + +// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used +// when creating a new client. The default is 5 seconds. Notice that the +// timeout being used for subsequent sniffing processes is set with +// SetSnifferTimeout. +func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferTimeoutStartup = timeout + return nil + } +} + +// SetSnifferTimeout sets the timeout for the sniffer that finds the +// nodes in a cluster. The default is 2 seconds. Notice that the timeout +// used when creating a new client on startup is usually greater and can +// be set with SetSnifferTimeoutStartup. +func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferTimeout = timeout + return nil + } +} + +// SetSnifferInterval sets the interval between two sniffing processes. +// The default interval is 15 minutes. 
+func SetSnifferInterval(interval time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferInterval = interval + return nil + } +} + +// SetHealthcheck enables or disables healthchecks (enabled by default). +func SetHealthcheck(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckEnabled = enabled + return nil + } +} + +// SetHealthcheckTimeoutStartup sets the timeout for the initial health check. +// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup). +// Notice that timeouts for subsequent health checks can be modified with +// SetHealthcheckTimeout. +func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckTimeoutStartup = timeout + return nil + } +} + +// SetHealthcheckTimeout sets the timeout for periodic health checks. +// The default timeout is 1 second (see DefaultHealthcheckTimeout). +// Notice that a different (usually larger) timeout is used for the initial +// healthcheck, which is initiated while creating a new client. +// The startup timeout can be modified with SetHealthcheckTimeoutStartup. +func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckTimeout = timeout + return nil + } +} + +// SetHealthcheckInterval sets the interval between two health checks. +// The default interval is 60 seconds. +func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckInterval = interval + return nil + } +} + +// SetMaxRetries sets the maximum number of retries before giving up when +// performing a HTTP request to Elasticsearch. 
+func SetMaxRetries(maxRetries int) ClientOptionFunc { + return func(c *Client) error { + if maxRetries < 0 { + return errors.New("MaxRetries must be greater than or equal to 0") + } + c.maxRetries = maxRetries + return nil + } +} + +// SetGzip enables or disables gzip compression (disabled by default). +func SetGzip(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.gzipEnabled = enabled + return nil + } +} + +// SetDecoder sets the Decoder to use when decoding data from Elasticsearch. +// DefaultDecoder is used by default. +func SetDecoder(decoder Decoder) ClientOptionFunc { + return func(c *Client) error { + if decoder != nil { + c.decoder = decoder + } else { + c.decoder = &DefaultDecoder{} + } + return nil + } +} + +// SetRequiredPlugins can be used to indicate that some plugins are required +// before a Client will be created. +func SetRequiredPlugins(plugins ...string) ClientOptionFunc { + return func(c *Client) error { + if c.requiredPlugins == nil { + c.requiredPlugins = make([]string, 0) + } + c.requiredPlugins = append(c.requiredPlugins, plugins...) + return nil + } +} + +// SetErrorLog sets the logger for critical messages like nodes joining +// or leaving the cluster or failing requests. It is nil by default. +func SetErrorLog(logger Logger) ClientOptionFunc { + return func(c *Client) error { + c.errorlog = logger + return nil + } +} + +// SetInfoLog sets the logger for informational messages, e.g. requests +// and their response times. It is nil by default. +func SetInfoLog(logger Logger) ClientOptionFunc { + return func(c *Client) error { + c.infolog = logger + return nil + } +} + +// SetTraceLog specifies the log.Logger to use for output of HTTP requests +// and responses which is helpful during debugging. It is nil by default. 
+func SetTraceLog(logger Logger) ClientOptionFunc {
+	return func(c *Client) error {
+		c.tracelog = logger
+		return nil
+	}
+}
+
+// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request
+// with a body. It is GET by default.
+func SetSendGetBodyAs(httpMethod string) ClientOptionFunc {
+	return func(c *Client) error {
+		c.sendGetBodyAs = httpMethod
+		return nil
+	}
+}
+
+// String returns a string representation of the client status.
+func (c *Client) String() string {
+	c.connsMu.Lock()
+	conns := c.conns
+	c.connsMu.Unlock()
+
+	var buf bytes.Buffer
+	for i, conn := range conns {
+		if i > 0 {
+			buf.WriteString(", ")
+		}
+		buf.WriteString(conn.String())
+	}
+	return buf.String()
+}
+
+// IsRunning returns true if the background processes of the client are
+// running, false otherwise.
+func (c *Client) IsRunning() bool {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.running
+}
+
+// Start starts the background processes like sniffing the cluster and
+// periodic health checks. You don't need to run Start when creating a
+// client with NewClient; the background processes are run by default.
+//
+// If the background processes are already running, this is a no-op.
+func (c *Client) Start() {
+	c.mu.RLock()
+	if c.running {
+		c.mu.RUnlock()
+		return
+	}
+	c.mu.RUnlock()
+
+	if c.snifferEnabled {
+		go c.sniffer()
+	}
+	if c.healthcheckEnabled {
+		go c.healthchecker()
+	}
+
+	c.mu.Lock()
+	c.running = true
+	c.mu.Unlock()
+
+	c.infof("elastic: client started")
+}
+
+// Stop stops the background processes that the client is running,
+// i.e. sniffing the cluster periodically and running health checks
+// on the nodes.
+//
+// If the background processes are not running, this is a no-op.
+func (c *Client) Stop() { + c.mu.RLock() + if !c.running { + c.mu.RUnlock() + return + } + c.mu.RUnlock() + + if c.healthcheckEnabled { + c.healthcheckStop <- true + <-c.healthcheckStop + } + + if c.snifferEnabled { + c.snifferStop <- true + <-c.snifferStop + } + + c.mu.Lock() + c.running = false + c.mu.Unlock() + + c.infof("elastic: client stopped") +} + +// errorf logs to the error log. +func (c *Client) errorf(format string, args ...interface{}) { + if c.errorlog != nil { + c.errorlog.Printf(format, args...) + } +} + +// infof logs informational messages. +func (c *Client) infof(format string, args ...interface{}) { + if c.infolog != nil { + c.infolog.Printf(format, args...) + } +} + +// tracef logs to the trace log. +func (c *Client) tracef(format string, args ...interface{}) { + if c.tracelog != nil { + c.tracelog.Printf(format, args...) + } +} + +// dumpRequest dumps the given HTTP request to the trace log. +func (c *Client) dumpRequest(r *http.Request) { + if c.tracelog != nil { + out, err := httputil.DumpRequestOut(r, true) + if err == nil { + c.tracef("%s\n", string(out)) + } + } +} + +// dumpResponse dumps the given HTTP response to the trace log. +func (c *Client) dumpResponse(resp *http.Response) { + if c.tracelog != nil { + out, err := httputil.DumpResponse(resp, true) + if err == nil { + c.tracef("%s\n", string(out)) + } + } +} + +// sniffer periodically runs sniff. +func (c *Client) sniffer() { + c.mu.RLock() + timeout := c.snifferTimeout + c.mu.RUnlock() + + ticker := time.NewTicker(timeout) + defer ticker.Stop() + + for { + select { + case <-c.snifferStop: + // we are asked to stop, so we signal back that we're stopping now + c.snifferStop <- true + return + case <-ticker.C: + c.sniff(timeout) + } + } +} + +// sniff uses the Node Info API to return the list of nodes in the cluster. +// It uses the list of URLs passed on startup plus the list of URLs found +// by the preceding sniffing process (if sniffing is enabled). 
+// +// If sniffing is disabled, this is a no-op. +func (c *Client) sniff(timeout time.Duration) error { + c.mu.RLock() + if !c.snifferEnabled { + c.mu.RUnlock() + return nil + } + + // Use all available URLs provided to sniff the cluster. + urlsMap := make(map[string]bool) + urls := make([]string, 0) + + // Add all URLs provided on startup + for _, url := range c.urls { + urlsMap[url] = true + urls = append(urls, url) + } + c.mu.RUnlock() + + // Add all URLs found by sniffing + c.connsMu.RLock() + for _, conn := range c.conns { + if !conn.IsDead() { + url := conn.URL() + if _, found := urlsMap[url]; !found { + urls = append(urls, url) + } + } + } + c.connsMu.RUnlock() + + if len(urls) == 0 { + return ErrNoClient + } + + // Start sniffing on all found URLs + ch := make(chan []*conn, len(urls)) + for _, url := range urls { + go func(url string) { ch <- c.sniffNode(url) }(url) + } + + // Wait for the results to come back, or the process times out. + for { + select { + case conns := <-ch: + if len(conns) > 0 { + c.updateConns(conns) + return nil + } + case <-time.After(timeout): + // We get here if no cluster responds in time + return ErrNoClient + } + } +} + +// sniffNode sniffs a single node. This method is run as a goroutine +// in sniff. If successful, it returns the list of node URLs extracted +// from the result of calling Nodes Info API. Otherwise, an empty array +// is returned. 
+func (c *Client) sniffNode(url string) []*conn {
+	nodes := make([]*conn, 0)
+
+	// Call the Nodes Info API at /_nodes/http
+	req, err := NewRequest("GET", url+"/_nodes/http")
+	if err != nil {
+		return nodes
+	}
+
+	c.mu.RLock()
+	if c.basicAuth {
+		req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword)
+	}
+	c.mu.RUnlock()
+
+	res, err := c.c.Do((*http.Request)(req))
+	if err != nil {
+		return nodes
+	}
+	if res == nil {
+		return nodes
+	}
+
+	if res.Body != nil {
+		defer res.Body.Close()
+	}
+
+	var info NodesInfoResponse
+	if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
+		if len(info.Nodes) > 0 {
+			switch c.scheme {
+			case "https":
+				for nodeID, node := range info.Nodes {
+					url := c.extractHostname("https", node.HTTPSAddress)
+					if url != "" {
+						nodes = append(nodes, newConn(nodeID, url))
+					}
+				}
+			default:
+				for nodeID, node := range info.Nodes {
+					url := c.extractHostname("http", node.HTTPAddress)
+					if url != "" {
+						nodes = append(nodes, newConn(nodeID, url))
+					}
+				}
+			}
+		}
+	}
+	return nodes
+}
+
+// reSniffHostAndPort is used to extract hostname and port from a result
+// from a Nodes Info API (example: "inet[/127.0.0.1:9200]").
+var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)
+
+func (c *Client) extractHostname(scheme, address string) string {
+	if strings.HasPrefix(address, "inet") {
+		m := reSniffHostAndPort.FindStringSubmatch(address)
+		if len(m) == 3 {
+			return fmt.Sprintf("%s://%s:%s", scheme, m[1], m[2])
+		}
+	}
+	s := address
+	if idx := strings.Index(s, "/"); idx >= 0 {
+		s = s[idx+1:]
+	}
+	if strings.Index(s, ":") < 0 {
+		return ""
+	}
+	return fmt.Sprintf("%s://%s", scheme, s)
+}
+
+// updateConns updates the client's connections with new information
+// gathered by a sniff operation.
+func (c *Client) updateConns(conns []*conn) {
+	c.connsMu.Lock()
+
+	newConns := make([]*conn, 0)
+
+	// Build up new connections:
+	// If we find an existing connection, use that (including no. of failures etc.).
+ // If we find a new connection, add it. + for _, conn := range conns { + var found bool + for _, oldConn := range c.conns { + if oldConn.NodeID() == conn.NodeID() { + // Take over the old connection + newConns = append(newConns, oldConn) + found = true + break + } + } + if !found { + // New connection didn't exist, so add it to our list of new conns. + c.infof("elastic: %s joined the cluster", conn.URL()) + newConns = append(newConns, conn) + } + } + + c.conns = newConns + c.cindex = -1 + c.connsMu.Unlock() +} + +// healthchecker periodically runs healthcheck. +func (c *Client) healthchecker() { + c.mu.RLock() + timeout := c.healthcheckTimeout + c.mu.RUnlock() + + ticker := time.NewTicker(timeout) + defer ticker.Stop() + + for { + select { + case <-c.healthcheckStop: + // we are asked to stop, so we signal back that we're stopping now + c.healthcheckStop <- true + return + case <-ticker.C: + c.healthcheck(timeout, false) + } + } +} + +// healthcheck does a health check on all nodes in the cluster. Depending on +// the node state, it marks connections as dead, sets them alive etc. +// If healthchecks are disabled and force is false, this is a no-op. +// The timeout specifies how long to wait for a response from Elasticsearch. 
+func (c *Client) healthcheck(timeout time.Duration, force bool) {
+	c.mu.RLock()
+	if !c.healthcheckEnabled && !force {
+		c.mu.RUnlock()
+		return
+	}
+	basicAuth := c.basicAuth
+	basicAuthUsername := c.basicAuthUsername
+	basicAuthPassword := c.basicAuthPassword
+	c.mu.RUnlock()
+
+	c.connsMu.RLock()
+	conns := c.conns
+	c.connsMu.RUnlock()
+
+	timeoutInMillis := int64(timeout / time.Millisecond)
+
+	for _, conn := range conns {
+		params := make(url.Values)
+		params.Set("timeout", fmt.Sprintf("%dms", timeoutInMillis))
+		req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode())
+		if err == nil {
+			if basicAuth {
+				req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+			}
+			res, err := c.c.Do((*http.Request)(req))
+			if err == nil {
+				status := res.StatusCode
+				// Close the body right away: a defer here would keep
+				// every response open until the whole sweep returns.
+				if res.Body != nil {
+					res.Body.Close()
+				}
+				if status >= 200 && status < 300 {
+					conn.MarkAsAlive()
+				} else {
+					conn.MarkAsDead()
+					c.errorf("elastic: %s is dead [status=%d]", conn.URL(), status)
+				}
+			} else {
+				c.errorf("elastic: %s is dead", conn.URL())
+				conn.MarkAsDead()
+			}
+		} else {
+			c.errorf("elastic: %s is dead", conn.URL())
+			conn.MarkAsDead()
+		}
+	}
+}
+
+// startupHealthcheck is used at startup to check if the server is available
+// at all.
+func (c *Client) startupHealthcheck(timeout time.Duration) error {
+	c.mu.Lock()
+	urls := c.urls
+	basicAuth := c.basicAuth
+	basicAuthUsername := c.basicAuthUsername
+	basicAuthPassword := c.basicAuthPassword
+	c.mu.Unlock()
+
+	// If we don't get a connection after "timeout", we bail.
+	start := time.Now()
+	for {
+		// Make a copy of the HTTP client provided via options to respect
+		// settings like Basic Auth or a user-specified http.Transport.
+		cl := new(http.Client)
+		*cl = *c.c
+		cl.Timeout = timeout
+		for _, url := range urls {
+			req, err := http.NewRequest("HEAD", url, nil)
+			if err != nil {
+				return err
+			}
+			if basicAuth {
+				req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+			}
+			res, err := cl.Do(req)
+			if err == nil && res != nil {
+				status := res.StatusCode
+				// Always close the body so the underlying TCP connection
+				// can be reused instead of leaking one per probe.
+				if res.Body != nil {
+					res.Body.Close()
+				}
+				if status >= 200 && status < 300 {
+					return nil
+				}
+			}
+		}
+		time.Sleep(1 * time.Second)
+		if time.Since(start) > timeout {
+			break
+		}
+	}
+	return ErrNoClient
+}
+
+// next returns the next available connection, or ErrNoClient.
+func (c *Client) next() (*conn, error) {
+	// We do round-robin here.
+	// TODO(oe) This should be a pluggable strategy, like the Selector in the official clients.
+	c.connsMu.Lock()
+	defer c.connsMu.Unlock()
+
+	i := 0
+	numConns := len(c.conns)
+	for {
+		i++
+		if i > numConns {
+			break // we visited all conns: they all seem to be dead
+		}
+		c.cindex++
+		if c.cindex >= numConns {
+			c.cindex = 0
+		}
+		conn := c.conns[c.cindex]
+		if !conn.IsDead() {
+			return conn, nil
+		}
+	}
+
+	// We have a deadlock here: All nodes are marked as dead.
+	// If sniffing is disabled, connections will never be marked alive again.
+	// So we are marking them as alive--if sniffing is disabled.
+	// They'll then be picked up in the next call to PerformRequest.
+	if !c.snifferEnabled {
+		c.errorf("elastic: all %d nodes marked as dead; resurrecting them to prevent deadlock", len(c.conns))
+		for _, conn := range c.conns {
+			conn.MarkAsAlive()
+		}
+	}
+
+	// We tried hard, but there is no node available
+	return nil, ErrNoClient
+}
+
+// mustActiveConn returns nil if there is an active connection,
+// otherwise ErrNoClient is returned.
+func (c *Client) mustActiveConn() error {
+	c.connsMu.Lock()
+	defer c.connsMu.Unlock()
+
+	// Note: the loop variable must not shadow the receiver c.
+	for _, conn := range c.conns {
+		if !conn.IsDead() {
+			return nil
+		}
+	}
+	return ErrNoClient
+}
+
+// PerformRequest does a HTTP request to Elasticsearch.
+// It returns a response and an error on failure.
+// +// Optionally, a list of HTTP error codes to ignore can be passed. +// This is necessary for services that expect e.g. HTTP status 404 as a +// valid outcome (Exists, IndicesExists, IndicesTypeExists). +func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) { + start := time.Now().UTC() + + c.mu.RLock() + timeout := c.healthcheckTimeout + retries := c.maxRetries + basicAuth := c.basicAuth + basicAuthUsername := c.basicAuthUsername + basicAuthPassword := c.basicAuthPassword + sendGetBodyAs := c.sendGetBodyAs + gzipEnabled := c.gzipEnabled + c.mu.RUnlock() + + var err error + var conn *conn + var req *Request + var resp *Response + var retried bool + + // We wait between retries, using simple exponential back-off. + // TODO: Make this configurable, including the jitter. + retryWaitMsec := int64(100 + (rand.Intn(20) - 10)) + + // Change method if sendGetBodyAs is specified. + if method == "GET" && body != nil && sendGetBodyAs != "GET" { + method = sendGetBodyAs + } + + for { + pathWithParams := path + if len(params) > 0 { + pathWithParams += "?" + params.Encode() + } + + // Get a connection + conn, err = c.next() + if err == ErrNoClient { + if !retried { + // Force a healtcheck as all connections seem to be dead. 
+ c.healthcheck(timeout, false) + } + retries -= 1 + if retries <= 0 { + return nil, err + } + retried = true + time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond) + retryWaitMsec += retryWaitMsec + continue // try again + } + if err != nil { + c.errorf("elastic: cannot get connection from pool") + return nil, err + } + + req, err = NewRequest(method, conn.URL()+pathWithParams) + if err != nil { + c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err) + return nil, err + } + + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + + // Set body + if body != nil { + err = req.SetBody(body, gzipEnabled) + if err != nil { + c.errorf("elastic: couldn't set body %+v for request: %v", body, err) + return nil, err + } + } + + // Tracing + c.dumpRequest((*http.Request)(req)) + + // Get response + res, err := c.c.Do((*http.Request)(req)) + if err != nil { + retries -= 1 + if retries <= 0 { + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + return nil, err + } + retried = true + time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond) + retryWaitMsec += retryWaitMsec + continue // try again + } + if res.Body != nil { + defer res.Body.Close() + } + + // Check for errors + if err := checkResponse((*http.Request)(req), res, ignoreErrors...); err != nil { + // No retry if request succeeded + return nil, err + } + + // Tracing + c.dumpResponse(res) + + // We successfully made a request with this connection + conn.MarkAsHealthy() + + resp, err = c.newResponse(res) + if err != nil { + return nil, err + } + + break + } + + duration := time.Now().UTC().Sub(start) + c.infof("%s %s [status:%d, request:%.3fs]", + strings.ToUpper(method), + req.URL, + resp.StatusCode, + float64(int64(duration/time.Millisecond))/1000) + + return resp, nil +} + +// -- Document APIs -- + +// Index a document. 
+func (c *Client) Index() *IndexService { + return NewIndexService(c) +} + +// Get a document. +func (c *Client) Get() *GetService { + return NewGetService(c) +} + +// MultiGet retrieves multiple documents in one roundtrip. +func (c *Client) MultiGet() *MgetService { + return NewMgetService(c) +} + +// Mget retrieves multiple documents in one roundtrip. +func (c *Client) Mget() *MgetService { + return NewMgetService(c) +} + +// Delete a document. +func (c *Client) Delete() *DeleteService { + return NewDeleteService(c) +} + +// DeleteByQuery deletes documents as found by a query. +func (c *Client) DeleteByQuery(indices ...string) *DeleteByQueryService { + return NewDeleteByQueryService(c).Index(indices...) +} + +// Update a document. +func (c *Client) Update() *UpdateService { + return NewUpdateService(c) +} + +// UpdateByQuery performs an update on a set of documents. +func (c *Client) UpdateByQuery(indices ...string) *UpdateByQueryService { + return NewUpdateByQueryService(c).Index(indices...) +} + +// Bulk is the entry point to mass insert/update/delete documents. +func (c *Client) Bulk() *BulkService { + return NewBulkService(c) +} + +// BulkProcessor allows setting up a concurrent processor of bulk requests. +func (c *Client) BulkProcessor() *BulkProcessorService { + return NewBulkProcessorService(c) +} + +// Reindex returns a service that will reindex documents from a source +// index into a target index. +// +// Notice that this Reindexer is an Elastic-specific solution that pre-dated +// the Reindex API introduced in Elasticsearch 2.3.0 (see ReindexTask). +// +// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html +// for more information about reindexing. +func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer { + return NewReindexer(c, sourceIndex, CopyToTargetIndex(targetIndex)) +} + +// ReindexTask copies data from a source index into a destination index. 
+// +// The Reindex API has been introduced in Elasticsearch 2.3.0. Notice that +// there is a Elastic-specific Reindexer that pre-dates the Reindex API from +// Elasticsearch. If you rely on that, use the ReindexerService via +// Client.Reindex. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html +// for details on the Reindex API. +func (c *Client) ReindexTask() *ReindexService { + return NewReindexService(c) +} + +// TermVectors returns information and statistics on terms in the fields +// of a particular document. +func (c *Client) TermVectors(index, typ string) *TermvectorsService { + builder := NewTermvectorsService(c) + builder = builder.Index(index).Type(typ) + return builder +} + +// MultiTermVectors returns information and statistics on terms in the fields +// of multiple documents. +func (c *Client) MultiTermVectors() *MultiTermvectorService { + return NewMultiTermvectorService(c) +} + +// -- Search APIs -- + +// Search is the entry point for searches. +func (c *Client) Search(indices ...string) *SearchService { + return NewSearchService(c).Index(indices...) +} + +// Suggest returns a service to return suggestions. +func (c *Client) Suggest(indices ...string) *SuggestService { + return NewSuggestService(c).Index(indices...) +} + +// MultiSearch is the entry point for multi searches. +func (c *Client) MultiSearch() *MultiSearchService { + return NewMultiSearchService(c) +} + +// Count documents. +func (c *Client) Count(indices ...string) *CountService { + return NewCountService(c).Index(indices...) +} + +// Explain computes a score explanation for a query and a specific document. +func (c *Client) Explain(index, typ, id string) *ExplainService { + return NewExplainService(c).Index(index).Type(typ).Id(id) +} + +// Percolate allows to send a document and return matching queries. +// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html. 
+func (c *Client) Percolate() *PercolateService { + return NewPercolateService(c) +} + +// TODO Search Template +// TODO Search Shards API +// TODO Search Exists API +// TODO Validate API + +// FieldStats returns statistical information about fields in indices. +func (c *Client) FieldStats(indices ...string) *FieldStatsService { + return NewFieldStatsService(c).Index(indices...) +} + +// Exists checks if a document exists. +func (c *Client) Exists() *ExistsService { + return NewExistsService(c) +} + +// Scan through documents. Use this to iterate inside a server process +// where the results will be processed without returning them to a client. +func (c *Client) Scan(indices ...string) *ScanService { + return NewScanService(c).Index(indices...) +} + +// Scroll through documents. Use this to efficiently scroll through results +// while returning the results to a client. Use Scan when you don't need +// to return requests to a client (i.e. not paginating via request/response). +func (c *Client) Scroll(indices ...string) *ScrollService { + return NewScrollService(c).Index(indices...) +} + +// ClearScroll can be used to clear search contexts manually. +func (c *Client) ClearScroll(scrollIds ...string) *ClearScrollService { + return NewClearScrollService(c).ScrollId(scrollIds...) +} + +// -- Indices APIs -- + +// CreateIndex returns a service to create a new index. +func (c *Client) CreateIndex(name string) *IndicesCreateService { + return NewIndicesCreateService(c).Index(name) +} + +// DeleteIndex returns a service to delete an index. +func (c *Client) DeleteIndex(indices ...string) *IndicesDeleteService { + return NewIndicesDeleteService(c).Index(indices) +} + +// IndexExists allows to check if an index exists. +func (c *Client) IndexExists(indices ...string) *IndicesExistsService { + return NewIndicesExistsService(c).Index(indices) +} + +// TypeExists allows to check if one or more types exist in one or more indices. 
+func (c *Client) TypeExists() *IndicesExistsTypeService { + return NewIndicesExistsTypeService(c) +} + +// IndexStats provides statistics on different operations happining +// in one or more indices. +func (c *Client) IndexStats(indices ...string) *IndicesStatsService { + return NewIndicesStatsService(c).Index(indices...) +} + +// OpenIndex opens an index. +func (c *Client) OpenIndex(name string) *IndicesOpenService { + return NewIndicesOpenService(c).Index(name) +} + +// CloseIndex closes an index. +func (c *Client) CloseIndex(name string) *IndicesCloseService { + return NewIndicesCloseService(c).Index(name) +} + +// IndexGet retrieves information about one or more indices. +// IndexGet is only available for Elasticsearch 1.4 or later. +func (c *Client) IndexGet(indices ...string) *IndicesGetService { + return NewIndicesGetService(c).Index(indices...) +} + +// IndexGetSettings retrieves settings of all, one or more indices. +func (c *Client) IndexGetSettings(indices ...string) *IndicesGetSettingsService { + return NewIndicesGetSettingsService(c).Index(indices...) +} + +// IndexPutSettings sets settings for all, one or more indices. +func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService { + return NewIndicesPutSettingsService(c).Index(indices...) +} + +// Optimize asks Elasticsearch to optimize one or more indices. +// Optimize is deprecated as of Elasticsearch 2.1 and replaced by Forcemerge. +func (c *Client) Optimize(indices ...string) *OptimizeService { + return NewOptimizeService(c).Index(indices...) +} + +// Forcemerge optimizes one or more indices. +// It replaces the deprecated Optimize API. +func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService { + return NewIndicesForcemergeService(c).Index(indices...) +} + +// Refresh asks Elasticsearch to refresh one or more indices. +func (c *Client) Refresh(indices ...string) *RefreshService { + return NewRefreshService(c).Index(indices...) 
+} + +// Flush asks Elasticsearch to free memory from the index and +// flush data to disk. +func (c *Client) Flush(indices ...string) *IndicesFlushService { + return NewIndicesFlushService(c).Index(indices...) +} + +// Alias enables the caller to add and/or remove aliases. +func (c *Client) Alias() *AliasService { + return NewAliasService(c) +} + +// Aliases returns aliases by index name(s). +func (c *Client) Aliases() *AliasesService { + return NewAliasesService(c) +} + +// GetTemplate gets a search template. +// Use IndexXXXTemplate funcs to manage index templates. +func (c *Client) GetTemplate() *GetTemplateService { + return NewGetTemplateService(c) +} + +// PutTemplate creates or updates a search template. +// Use IndexXXXTemplate funcs to manage index templates. +func (c *Client) PutTemplate() *PutTemplateService { + return NewPutTemplateService(c) +} + +// DeleteTemplate deletes a search template. +// Use IndexXXXTemplate funcs to manage index templates. +func (c *Client) DeleteTemplate() *DeleteTemplateService { + return NewDeleteTemplateService(c) +} + +// IndexGetTemplate gets an index template. +// Use XXXTemplate funcs to manage search templates. +func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService { + return NewIndicesGetTemplateService(c).Name(names...) +} + +// IndexTemplateExists gets check if an index template exists. +// Use XXXTemplate funcs to manage search templates. +func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService { + return NewIndicesExistsTemplateService(c).Name(name) +} + +// IndexPutTemplate creates or updates an index template. +// Use XXXTemplate funcs to manage search templates. +func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService { + return NewIndicesPutTemplateService(c).Name(name) +} + +// IndexDeleteTemplate deletes an index template. +// Use XXXTemplate funcs to manage search templates. 
+func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService { + return NewIndicesDeleteTemplateService(c).Name(name) +} + +// GetMapping gets a mapping. +func (c *Client) GetMapping() *IndicesGetMappingService { + return NewIndicesGetMappingService(c) +} + +// PutMapping registers a mapping. +func (c *Client) PutMapping() *IndicesPutMappingService { + return NewIndicesPutMappingService(c) +} + +// GetWarmer gets one or more warmers by name. +func (c *Client) GetWarmer() *IndicesGetWarmerService { + return NewIndicesGetWarmerService(c) +} + +// PutWarmer registers a warmer. +func (c *Client) PutWarmer() *IndicesPutWarmerService { + return NewIndicesPutWarmerService(c) +} + +// DeleteWarmer deletes one or more warmers. +func (c *Client) DeleteWarmer() *IndicesDeleteWarmerService { + return NewIndicesDeleteWarmerService(c) +} + +// -- cat APIs -- + +// TODO cat aliases +// TODO cat allocation +// TODO cat count +// TODO cat fielddata +// TODO cat health +// TODO cat indices +// TODO cat master +// TODO cat nodes +// TODO cat pending tasks +// TODO cat plugins +// TODO cat recovery +// TODO cat thread pool +// TODO cat shards +// TODO cat segments + +// -- Cluster APIs -- + +// ClusterHealth retrieves the health of the cluster. +func (c *Client) ClusterHealth() *ClusterHealthService { + return NewClusterHealthService(c) +} + +// ClusterState retrieves the state of the cluster. +func (c *Client) ClusterState() *ClusterStateService { + return NewClusterStateService(c) +} + +// ClusterStats retrieves cluster statistics. +func (c *Client) ClusterStats() *ClusterStatsService { + return NewClusterStatsService(c) +} + +// NodesInfo retrieves one or more or all of the cluster nodes information. +func (c *Client) NodesInfo() *NodesInfoService { + return NewNodesInfoService(c) +} + +// TasksCancel cancels tasks running on the specified nodes. 
+func (c *Client) TasksCancel() *TasksCancelService { + return NewTasksCancelService(c) +} + +// TasksList retrieves the list of tasks running on the specified nodes. +func (c *Client) TasksList() *TasksListService { + return NewTasksListService(c) +} + +// TODO Pending cluster tasks +// TODO Cluster Reroute +// TODO Cluster Update Settings +// TODO Nodes Stats +// TODO Nodes hot_threads + +// -- Snapshot and Restore -- + +// TODO Snapshot Create +// TODO Snapshot Create Repository +// TODO Snapshot Delete +// TODO Snapshot Delete Repository +// TODO Snapshot Get +// TODO Snapshot Get Repository +// TODO Snapshot Restore +// TODO Snapshot Status +// TODO Snapshot Verify Repository + +// -- Helpers and shortcuts -- + +// ElasticsearchVersion returns the version number of Elasticsearch +// running on the given URL. +func (c *Client) ElasticsearchVersion(url string) (string, error) { + res, _, err := c.Ping(url).Do() + if err != nil { + return "", err + } + return res.Version.Number, nil +} + +// IndexNames returns the names of all indices in the cluster. +func (c *Client) IndexNames() ([]string, error) { + res, err := c.IndexGetSettings().Index("_all").Do() + if err != nil { + return nil, err + } + var names []string + for name, _ := range res { + names = append(names, name) + } + return names, nil +} + +// Ping checks if a given node in a cluster exists and (optionally) +// returns some basic information about the Elasticsearch server, +// e.g. the Elasticsearch version number. +// +// Notice that you need to specify a URL here explicitly. +func (c *Client) Ping(url string) *PingService { + return NewPingService(c).URL(url) +} + +// WaitForStatus waits for the cluster to have the given status. +// This is a shortcut method for the ClusterHealth service. +// +// WaitForStatus waits for the specified timeout, e.g. "10s". +// If the cluster will have the given state within the timeout, nil is returned. +// If the request timed out, ErrTimeout is returned. 
+func (c *Client) WaitForStatus(status string, timeout string) error { + health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do() + if err != nil { + return err + } + if health.TimedOut { + return ErrTimeout + } + return nil +} + +// WaitForGreenStatus waits for the cluster to have the "green" status. +// See WaitForStatus for more details. +func (c *Client) WaitForGreenStatus(timeout string) error { + return c.WaitForStatus("green", timeout) +} + +// WaitForYellowStatus waits for the cluster to have the "yellow" status. +// See WaitForStatus for more details. +func (c *Client) WaitForYellowStatus(timeout string) error { + return c.WaitForStatus("yellow", timeout) +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/cluster_health.go b/vendor/gopkg.in/olivere/elastic.v3/cluster_health.go new file mode 100644 index 000000000..fae7c4d91 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/cluster_health.go @@ -0,0 +1,243 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ClusterHealthService allows to get a very simple status on the health of the cluster. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html +// for details. +type ClusterHealthService struct { + client *Client + pretty bool + indices []string + level string + local *bool + masterTimeout string + timeout string + waitForActiveShards *int + waitForNodes string + waitForRelocatingShards *int + waitForStatus string +} + +// NewClusterHealthService creates a new ClusterHealthService. 
+func NewClusterHealthService(client *Client) *ClusterHealthService { + return &ClusterHealthService{ + client: client, + indices: make([]string, 0), + } +} + +// Index limits the information returned to specific indices. +func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService { + s.indices = append(s.indices, indices...) + return s +} + +// Level specifies the level of detail for returned information. +func (s *ClusterHealthService) Level(level string) *ClusterHealthService { + s.level = level + return s +} + +// Local indicates whether to return local information. If it is true, +// we do not retrieve the state from master node (default: false). +func (s *ClusterHealthService) Local(local bool) *ClusterHealthService { + s.local = &local + return s +} + +// MasterTimeout specifies an explicit operation timeout for connection to master node. +func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout specifies an explicit operation timeout. +func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService { + s.timeout = timeout + return s +} + +// WaitForActiveShards can be used to wait until the specified number of shards are active. +func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService { + s.waitForActiveShards = &waitForActiveShards + return s +} + +// WaitForNodes can be used to wait until the specified number of nodes are available. +// Example: "12" to wait for exact values, ">12" and "<12" for ranges. +func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService { + s.waitForNodes = waitForNodes + return s +} + +// WaitForRelocatingShards can be used to wait until the specified number of relocating shards is finished. 
+func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService { + s.waitForRelocatingShards = &waitForRelocatingShards + return s +} + +// WaitForStatus can be used to wait until the cluster is in a specific state. +// Valid values are: green, yellow, or red. +func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService { + s.waitForStatus = waitForStatus + return s +} + +// WaitForGreenStatus will wait for the "green" state. +func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService { + return s.WaitForStatus("green") +} + +// WaitForYellowStatus will wait for the "yellow" state. +func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService { + return s.WaitForStatus("yellow") +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.indices) > 0 { + path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{ + "index": strings.Join(s.indices, ","), + }) + } else { + path = "/_cluster/health" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.level != "" { + params.Set("level", s.level) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForActiveShards != nil { + params.Set("wait_for_active_shards", fmt.Sprintf("%v", s.waitForActiveShards)) + } + if s.waitForNodes != "" { + params.Set("wait_for_nodes", s.waitForNodes) + } + if s.waitForRelocatingShards != nil { + params.Set("wait_for_relocating_shards", fmt.Sprintf("%v", s.waitForRelocatingShards)) + } + if s.waitForStatus != "" { + params.Set("wait_for_status", s.waitForStatus) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterHealthService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterHealthResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterHealthResponse is the response of ClusterHealthService.Do. 
+type ClusterHealthResponse struct { + ClusterName string `json:"cluster_name"` + Status string `json:"status"` + TimedOut bool `json:"timed_out"` + NumberOfNodes int `json:"number_of_nodes"` + NumberOfDataNodes int `json:"number_of_data_nodes"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` + DelayedUnassignedShards int `json:"delayed_unassigned_shards"` + NumberOfPendingTasks int `json:"number_of_pending_tasks"` + NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` + TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"` + ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"` + + // Validation failures -> index name -> array of validation failures + ValidationFailures []map[string][]string `json:"validation_failures"` + + // Index name -> index health + Indices map[string]*ClusterIndexHealth `json:"indices"` +} + +// ClusterIndexHealth will be returned as part of ClusterHealthResponse. +type ClusterIndexHealth struct { + Status string `json:"status"` + NumberOfShards int `json:"number_of_shards"` + NumberOfReplicas int `json:"number_of_replicas"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` + // Validation failures + ValidationFailures []string `json:"validation_failures"` + // Shards by id, e.g. "0" or "1" + Shards map[string]*ClusterShardHealth `json:"shards"` +} + +// ClusterShardHealth will be returned as part of ClusterHealthResponse. 
+type ClusterShardHealth struct { + Status string `json:"status"` + PrimaryActive bool `json:"primary_active"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/cluster_state.go b/vendor/gopkg.in/olivere/elastic.v3/cluster_state.go new file mode 100644 index 000000000..e59c835a5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/cluster_state.go @@ -0,0 +1,283 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ClusterStateService allows to get a comprehensive state information of the whole cluster. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html +// for details. +type ClusterStateService struct { + client *Client + pretty bool + indices []string + metrics []string + allowNoIndices *bool + expandWildcards string + flatSettings *bool + ignoreUnavailable *bool + local *bool + masterTimeout string +} + +// NewClusterStateService creates a new ClusterStateService. +func NewClusterStateService(client *Client) *ClusterStateService { + return &ClusterStateService{ + client: client, + indices: make([]string, 0), + metrics: make([]string, 0), + } +} + +// Index is a list of index names. Use _all or an empty string to +// perform the operation on all indices. +func (s *ClusterStateService) Index(indices ...string) *ClusterStateService { + s.indices = append(s.indices, indices...) + return s +} + +// Metric limits the information returned to the specified metric. +// It can be one of: version, master_node, nodes, routing_table, metadata, +// blocks, or customs. 
+func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService { + s.metrics = append(s.metrics, metrics...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService { + s.expandWildcards = expandWildcards + return s +} + +// FlatSettings, when set, returns settings in flat format (default: false). +func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService { + s.flatSettings = &flatSettings + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Local indicates whether to return local information. When set, it does not +// retrieve the state from master node (default: false). +func (s *ClusterStateService) Local(local bool) *ClusterStateService { + s.local = &local + return s +} + +// MasterTimeout specifies timeout for connection to master. +func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *ClusterStateService) buildURL() (string, url.Values, error) { + // Build URL + metrics := strings.Join(s.metrics, ",") + if metrics == "" { + metrics = "_all" + } + indices := strings.Join(s.indices, ",") + if indices == "" { + indices = "_all" + } + path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{ + "metrics": metrics, + "indices": indices, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterStateService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *ClusterStateService) Do() (*ClusterStateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterStateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterStateResponse is the response of ClusterStateService.Do. 
type ClusterStateResponse struct {
	ClusterName  string                               `json:"cluster_name"`
	Version      int64                                `json:"version"`
	StateUUID    string                               `json:"state_uuid"`
	MasterNode   string                               `json:"master_node"`
	Blocks       map[string]*clusterBlocks            `json:"blocks"`
	Nodes        map[string]*discoveryNode            `json:"nodes"`
	Metadata     *clusterStateMetadata                `json:"metadata"`
	RoutingTable map[string]*clusterStateRoutingTable `json:"routing_table"`
	RoutingNodes *clusterStateRoutingNode             `json:"routing_nodes"`
	Customs      map[string]interface{}               `json:"customs"`
}

// clusterBlocks holds the cluster-wide and per-index blocks of a cluster state.
type clusterBlocks struct {
	Global  map[string]*clusterBlock `json:"global"`  // id -> cluster block
	Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block
}

// clusterBlock describes a single block (e.g. a read-only block) on the cluster or an index.
type clusterBlock struct {
	Description             string   `json:"description"`
	Retryable               bool     `json:"retryable"`
	DisableStatePersistence bool     `json:"disable_state_persistence"`
	Levels                  []string `json:"levels"`
}

// clusterStateMetadata is the "metadata" section of a cluster state response.
type clusterStateMetadata struct {
	ClusterUUID  string                            `json:"cluster_uuid"`
	Templates    map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata
	Indices      map[string]*indexMetaData         `json:"indices"`   // index name -> meta data
	RoutingTable struct {
		Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table
	} `json:"routing_table"`
	RoutingNodes struct {
		Unassigned []*shardRouting `json:"unassigned"`
		Nodes      []*shardRouting `json:"nodes"`
	} `json:"routing_nodes"`
	Customs map[string]interface{} `json:"customs"`
}

// discoveryNode describes a single node as seen in the cluster state.
type discoveryNode struct {
	Name             string                 `json:"name"`              // server name, e.g. "es1"
	TransportAddress string                 `json:"transport_address"` // e.g. inet[/1.2.3.4:9300]
	Attributes       map[string]interface{} `json:"attributes"`        // e.g. { "data": true, "master": true }
}

// clusterStateRoutingTable is the "routing_table" section of a cluster state response.
type clusterStateRoutingTable struct {
	Indices map[string]interface{} `json:"indices"`
}

// clusterStateRoutingNode is the "routing_nodes" section of a cluster state response.
type clusterStateRoutingNode struct {
	Unassigned []*shardRouting `json:"unassigned"`
	// Node Id -> shardRouting
	Nodes map[string][]*shardRouting `json:"nodes"`
}

// indexTemplateMetaData describes an index template in the cluster metadata.
type indexTemplateMetaData struct {
	Template string                 `json:"template"` // e.g. "store-*"
	Order    int                    `json:"order"`
	Settings map[string]interface{} `json:"settings"` // index settings
	Mappings map[string]interface{} `json:"mappings"` // type name -> mapping
}

// indexMetaData describes a single index in the cluster metadata.
type indexMetaData struct {
	State    string                 `json:"state"`
	Settings map[string]interface{} `json:"settings"`
	Mappings map[string]interface{} `json:"mappings"`
	Aliases  []string               `json:"aliases"` // e.g. [ "alias1", "alias2" ]
}

// indexRoutingTable maps shard ids to their routing entries for one index.
type indexRoutingTable struct {
	Shards map[string]*shardRouting `json:"shards"`
}

// shardRouting describes the routing state of a single shard copy.
type shardRouting struct {
	State          string `json:"state"`
	Primary        bool   `json:"primary"`
	Node           string `json:"node"`
	RelocatingNode string `json:"relocating_node"`
	Shard          int    `json:"shard"`
	Index          string `json:"index"`
	// BUGFIX: this was tagged `json:"state"`, which collided with the State
	// field above; encoding/json drops both fields on a tag conflict, so
	// neither State nor Version was ever decoded. ES emits "version" here.
	Version        int64           `json:"version"`
	RestoreSource  *RestoreSource  `json:"restore_source"`
	AllocationId   *allocationId   `json:"allocation_id"`
	UnassignedInfo *unassignedInfo `json:"unassigned_info"`
}

// RestoreSource describes the snapshot a shard is being restored from, if any.
type RestoreSource struct {
	Repository string `json:"repository"`
	Snapshot   string `json:"snapshot"`
	Version    string `json:"version"`
	Index      string `json:"index"`
}

// allocationId identifies a particular shard allocation.
type allocationId struct {
	Id           string `json:"id"`
	RelocationId string `json:"relocation_id"`
}

// unassignedInfo explains why a shard is unassigned.
type unassignedInfo struct {
	Reason  string `json:"reason"`
	At      string `json:"at"`
	Details string `json:"details"`
}

// NOTE(review): the following diff metadata for the next vendored file is
// preserved as comments; the surrounding artifact is a flattened patch.
// diff --git a/vendor/gopkg.in/olivere/elastic.v3/cluster_stats.go b/vendor/gopkg.in/olivere/elastic.v3/cluster_stats.go
// new file mode 100644
// index 000000000..09c1d8ca0
// --- /dev/null
// +++ b/vendor/gopkg.in/olivere/elastic.v3/cluster_stats.go
// @@ -0,0 +1,348 @@
// Copyright
2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ClusterStatsService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-stats.html. +type ClusterStatsService struct { + client *Client + pretty bool + nodeId []string + flatSettings *bool + human *bool +} + +// NewClusterStatsService creates a new ClusterStatsService. +func NewClusterStatsService(client *Client) *ClusterStatsService { + return &ClusterStatsService{ + client: client, + nodeId: make([]string, 0), + } +} + +// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. +func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService { + s.nodeId = nodeId + return s +} + +// FlatSettings is documented as: Return settings in flat format (default: false). +func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService { + s.flatSettings = &flatSettings + return s +} + +// Human is documented as: Whether to return time and byte values in human-readable format.. +func (s *ClusterStatsService) Human(human bool) *ClusterStatsService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *ClusterStatsService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if len(s.nodeId) > 0 { + path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{ + "node_id": strings.Join(s.nodeId, ","), + }) + if err != nil { + return "", url.Values{}, err + } + } else { + path, err = uritemplates.Expand("/_cluster/stats", map[string]string{}) + if err != nil { + return "", url.Values{}, err + } + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterStatsService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *ClusterStatsService) Do() (*ClusterStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterStatsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterStatsResponse is the response of ClusterStatsService.Do. 
+type ClusterStatsResponse struct { + Timestamp int64 `json:"timestamp"` + ClusterName string `json:"cluster_name"` + ClusterUUID string `json:"uuid"` + Status string `json:"status"` + Indices *ClusterStatsIndices `json:"indices"` + Nodes *ClusterStatsNodes `json:"nodes"` +} + +type ClusterStatsIndices struct { + Count int `json:"count"` + Shards *ClusterStatsIndicesShards `json:"shards"` + Docs *ClusterStatsIndicesDocs `json:"docs"` + Store *ClusterStatsIndicesStore `json:"store"` + FieldData *ClusterStatsIndicesFieldData `json:"fielddata"` + FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"` + IdCache *ClusterStatsIndicesIdCache `json:"id_cache"` + Completion *ClusterStatsIndicesCompletion `json:"completion"` + Segments *ClusterStatsIndicesSegments `json:"segments"` + Percolate *ClusterStatsIndicesPercolate `json:"percolate"` +} + +type ClusterStatsIndicesShards struct { + Total int `json:"total"` + Primaries int `json:"primaries"` + Replication float64 `json:"replication"` + Index *ClusterStatsIndicesShardsIndex `json:"index"` +} + +type ClusterStatsIndicesShardsIndex struct { + Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"` + Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"` + Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"` +} + +type ClusterStatsIndicesShardsIndexIntMinMax struct { + Min int `json:"min"` + Max int `json:"max"` + Avg float64 `json:"avg"` +} + +type ClusterStatsIndicesShardsIndexFloat64MinMax struct { + Min float64 `json:"min"` + Max float64 `json:"max"` + Avg float64 `json:"avg"` +} + +type ClusterStatsIndicesDocs struct { + Count int `json:"count"` + Deleted int `json:"deleted"` +} + +type ClusterStatsIndicesStore struct { + Size string `json:"size"` // e.g. "5.3gb" + SizeInBytes int64 `json:"size_in_bytes"` + ThrottleTime string `json:"throttle_time"` // e.g. 
"0s" + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` +} + +type ClusterStatsIndicesFieldData struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` + Fields map[string]struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + } `json:"fields"` +} + +type ClusterStatsIndicesFilterCache struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` +} + +type ClusterStatsIndicesIdCache struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` +} + +type ClusterStatsIndicesCompletion struct { + Size string `json:"size"` // e.g. "61.3kb" + SizeInBytes int64 `json:"size_in_bytes"` + Fields map[string]struct { + Size string `json:"size"` // e.g. "61.3kb" + SizeInBytes int64 `json:"size_in_bytes"` + } `json:"fields"` +} + +type ClusterStatsIndicesSegments struct { + Count int64 `json:"count"` + Memory string `json:"memory"` // e.g. "61.3kb" + MemoryInBytes int64 `json:"memory_in_bytes"` + IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb" + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` + IndexWriterMaxMemory string `json:"index_writer_max_memory"` // e.g. "61.3kb" + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"` + VersionMapMemory string `json:"version_map_memory"` // e.g. "61.3kb" + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` + FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb" + FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` +} + +type ClusterStatsIndicesPercolate struct { + Total int64 `json:"total"` + // TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems + Time string `json:"get_time"` // e.g. 
"1s" + TimeInBytes int64 `json:"time_in_millis"` + Current int64 `json:"current"` + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_sitze_in_bytes"` + Queries int64 `json:"queries"` +} + +// --- + +type ClusterStatsNodes struct { + Count *ClusterStatsNodesCount `json:"count"` + Versions []string `json:"versions"` + OS *ClusterStatsNodesOsStats `json:"os"` + Process *ClusterStatsNodesProcessStats `json:"process"` + JVM *ClusterStatsNodesJvmStats `json:"jvm"` + FS *ClusterStatsNodesFsStats `json:"fs"` + Plugins []*ClusterStatsNodesPlugin `json:"plugins"` +} + +type ClusterStatsNodesCount struct { + Total int `json:"total"` + MasterOnly int `json:"master_only"` + DataOnly int `json:"data_only"` + MasterData int `json:"master_data"` + Client int `json:"client"` +} + +type ClusterStatsNodesOsStats struct { + AvailableProcessors int `json:"available_processors"` + Mem *ClusterStatsNodesOsStatsMem `json:"mem"` + CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"` +} + +type ClusterStatsNodesOsStatsMem struct { + Total string `json:"total"` // e.g. "16gb" + TotalInBytes int64 `json:"total_in_bytes"` +} + +type ClusterStatsNodesOsStatsCPU struct { + Vendor string `json:"vendor"` + Model string `json:"model"` + MHz int `json:"mhz"` + TotalCores int `json:"total_cores"` + TotalSockets int `json:"total_sockets"` + CoresPerSocket int `json:"cores_per_socket"` + CacheSize string `json:"cache_size"` // e.g. 
"256b" + CacheSizeInBytes int64 `json:"cache_size_in_bytes"` + Count int `json:"count"` +} + +type ClusterStatsNodesProcessStats struct { + CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"` + OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"` +} + +type ClusterStatsNodesProcessStatsCPU struct { + Percent float64 `json:"percent"` +} + +type ClusterStatsNodesProcessStatsOpenFileDescriptors struct { + Min int64 `json:"min"` + Max int64 `json:"max"` + Avg int64 `json:"avg"` +} + +type ClusterStatsNodesJvmStats struct { + MaxUptime string `json:"max_uptime"` // e.g. "5h" + MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` + Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"` + Mem *ClusterStatsNodesJvmStatsMem `json:"mem"` + Threads int64 `json:"threads"` +} + +type ClusterStatsNodesJvmStatsVersion struct { + Version string `json:"version"` // e.g. "1.8.0_45" + VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" + VMVersion string `json:"vm_version"` // e.g. "25.45-b02" + VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation" + Count int `json:"count"` +} + +type ClusterStatsNodesJvmStatsMem struct { + HeapUsed string `json:"heap_used"` + HeapUsedInBytes int64 `json:"heap_used_in_bytes"` + HeapMax string `json:"heap_max"` + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` +} + +type ClusterStatsNodesFsStats struct { + Path string `json:"path"` + Mount string `json:"mount"` + Dev string `json:"dev"` + Total string `json:"total"` // e.g. "930.7gb"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` // e.g. "930.7gb"` + FreeInBytes int64 `json:"free_in_bytes"` + Available string `json:"available"` // e.g. "930.7gb"` + AvailableInBytes int64 `json:"available_in_bytes"` + DiskReads int64 `json:"disk_reads"` + DiskWrites int64 `json:"disk_writes"` + DiskIOOp int64 `json:"disk_io_op"` + DiskReadSize string `json:"disk_read_size"` // e.g. 
"0b"` + DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"` + DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"` + DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"` + DiskIOSize string `json:"disk_io_size"` // e.g. "0b"` + DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"` + DiskQueue string `json:"disk_queue"` + DiskServiceTime string `json:"disk_service_time"` +} + +type ClusterStatsNodesPlugin struct { + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + URL string `json:"url"` + JVM bool `json:"jvm"` + Site bool `json:"site"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/connection.go b/vendor/gopkg.in/olivere/elastic.v3/connection.go new file mode 100644 index 000000000..b8b5bf8aa --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/connection.go @@ -0,0 +1,90 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "sync" + "time" +) + +// conn represents a single connection to a node in a cluster. +type conn struct { + sync.RWMutex + nodeID string // node ID + url string + failures int + dead bool + deadSince *time.Time +} + +// newConn creates a new connection to the given URL. +func newConn(nodeID, url string) *conn { + c := &conn{ + nodeID: nodeID, + url: url, + } + return c +} + +// String returns a representation of the connection status. +func (c *conn) String() string { + c.RLock() + defer c.RUnlock() + return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince) +} + +// NodeID returns the ID of the node of this connection. +func (c *conn) NodeID() string { + c.RLock() + defer c.RUnlock() + return c.nodeID +} + +// URL returns the URL of this connection. 
+func (c *conn) URL() string { + c.RLock() + defer c.RUnlock() + return c.url +} + +// IsDead returns true if this connection is marked as dead, i.e. a previous +// request to the URL has been unsuccessful. +func (c *conn) IsDead() bool { + c.RLock() + defer c.RUnlock() + return c.dead +} + +// MarkAsDead marks this connection as dead, increments the failures +// counter and stores the current time in dead since. +func (c *conn) MarkAsDead() { + c.Lock() + c.dead = true + if c.deadSince == nil { + utcNow := time.Now().UTC() + c.deadSince = &utcNow + } + c.failures += 1 + c.Unlock() +} + +// MarkAsAlive marks this connection as eligible to be returned from the +// pool of connections by the selector. +func (c *conn) MarkAsAlive() { + c.Lock() + c.dead = false + c.Unlock() +} + +// MarkAsHealthy marks this connection as healthy, i.e. a request has been +// successfully performed with it. +func (c *conn) MarkAsHealthy() { + c.Lock() + c.dead = false + c.deadSince = nil + c.failures = 0 + c.Unlock() +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/count.go b/vendor/gopkg.in/olivere/elastic.v3/count.go new file mode 100644 index 000000000..804458274 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/count.go @@ -0,0 +1,309 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// CountService is a convenient service for determining the +// number of documents in an index. Use SearchService with +// a SearchType of count for counting with queries etc. 
+type CountService struct { + client *Client + pretty bool + index []string + typ []string + allowNoIndices *bool + analyzeWildcard *bool + analyzer string + defaultOperator string + df string + expandWildcards string + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + minScore interface{} + preference string + q string + query Query + routing string + bodyJson interface{} + bodyString string +} + +// NewCountService creates a new CountService. +func NewCountService(client *Client) *CountService { + return &CountService{ + client: client, + } +} + +// Index sets the names of the indices to restrict the results. +func (s *CountService) Index(index ...string) *CountService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) + return s +} + +// Type sets the types to use to restrict the results. +func (s *CountService) Type(typ ...string) *CountService { + if s.typ == nil { + s.typ = make([]string, 0) + } + s.typ = append(s.typ, typ...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes "_all" string +// or when no indices have been specified). +func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService { + s.allowNoIndices = &allowNoIndices + return s +} + +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer specifies the analyzer to use for the query string. +func (s *CountService) Analyzer(analyzer string) *CountService { + s.analyzer = analyzer + return s +} + +// DefaultOperator specifies the default operator for query string query (AND or OR). 
+func (s *CountService) DefaultOperator(defaultOperator string) *CountService { + s.defaultOperator = defaultOperator + return s +} + +// Df specifies the field to use as default where no field prefix is given +// in the query string. +func (s *CountService) Df(df string) *CountService { + s.df = df + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *CountService) ExpandWildcards(expandWildcards string) *CountService { + s.expandWildcards = expandWildcards + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Lenient specifies whether format-based query failures (such as +// providing text to a numeric field) should be ignored. +func (s *CountService) Lenient(lenient bool) *CountService { + s.lenient = &lenient + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// MinScore indicates to include only documents with a specific `_score` +// value in the result. +func (s *CountService) MinScore(minScore interface{}) *CountService { + s.minScore = minScore + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: random). +func (s *CountService) Preference(preference string) *CountService { + s.preference = preference + return s +} + +// Q in the Lucene query string syntax. You can also use Query to pass +// a Query struct. +func (s *CountService) Q(q string) *CountService { + s.q = q + return s +} + +// Query specifies the query to pass. You can also pass a query string with Q. 
+func (s *CountService) Query(query Query) *CountService { + s.query = query + return s +} + +// Routing specifies the routing value. +func (s *CountService) Routing(routing string) *CountService { + s.routing = routing + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *CountService) Pretty(pretty bool) *CountService { + s.pretty = pretty + return s +} + +// BodyJson specifies the query to restrict the results specified with the +// Query DSL (optional). The interface{} will be serialized to a JSON document, +// so use a map[string]interface{}. +func (s *CountService) BodyJson(body interface{}) *CountService { + s.bodyJson = body + return s +} + +// Body specifies a query to restrict the results specified with +// the Query DSL (optional). +func (s *CountService) BodyString(body string) *CountService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *CountService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_count", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else { + path = "/_all/_count" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + 
params.Set("analyzer", s.analyzer) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.minScore != nil { + params.Set("min_score", fmt.Sprintf("%v", s.minScore)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *CountService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *CountService) Do() (int64, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return 0, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return 0, err + } + + // Setup HTTP request body + var body interface{} + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return 0, err + } + query := make(map[string]interface{}) + query["query"] = src + body = query + } else if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return 0, err + } + + // Return result + ret := new(CountResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return 0, err + } + if ret != nil { + return ret.Count, nil + } + + return int64(0), nil +} + +// CountResponse is the response of using the Count API. 
+type CountResponse struct { + Count int64 `json:"count"` + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/decoder.go b/vendor/gopkg.in/olivere/elastic.v3/decoder.go new file mode 100644 index 000000000..765a5be30 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/decoder.go @@ -0,0 +1,26 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" +) + +// Decoder is used to decode responses from Elasticsearch. +// Users of elastic can implement their own marshaler for advanced purposes +// and set them per Client (see SetDecoder). If none is specified, +// DefaultDecoder is used. +type Decoder interface { + Decode(data []byte, v interface{}) error +} + +// DefaultDecoder uses json.Unmarshal from the Go standard library +// to decode JSON data. +type DefaultDecoder struct{} + +// Decode decodes with json.Unmarshal from the Go standard library. +func (u *DefaultDecoder) Decode(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/delete.go b/vendor/gopkg.in/olivere/elastic.v3/delete.go new file mode 100644 index 000000000..bf78ce8ff --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/delete.go @@ -0,0 +1,213 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// DeleteService allows to delete a typed JSON document from a specified +// index based on its id. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html +// for details. 
+type DeleteService struct { + client *Client + pretty bool + id string + index string + typ string + routing string + timeout string + version interface{} + versionType string + consistency string + parent string + refresh *bool + replication string +} + +// NewDeleteService creates a new DeleteService. +func NewDeleteService(client *Client) *DeleteService { + return &DeleteService{ + client: client, + } +} + +// Type is the type of the document. +func (s *DeleteService) Type(typ string) *DeleteService { + s.typ = typ + return s +} + +// Id is the document ID. +func (s *DeleteService) Id(id string) *DeleteService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *DeleteService) Index(index string) *DeleteService { + s.index = index + return s +} + +// Replication specifies a replication type. +func (s *DeleteService) Replication(replication string) *DeleteService { + s.replication = replication + return s +} + +// Routing is a specific routing value. +func (s *DeleteService) Routing(routing string) *DeleteService { + s.routing = routing + return s +} + +// Timeout is an explicit operation timeout. +func (s *DeleteService) Timeout(timeout string) *DeleteService { + s.timeout = timeout + return s +} + +// Version is an explicit version number for concurrency control. +func (s *DeleteService) Version(version interface{}) *DeleteService { + s.version = version + return s +} + +// VersionType is a specific version type. +func (s *DeleteService) VersionType(versionType string) *DeleteService { + s.versionType = versionType + return s +} + +// Consistency defines a specific write consistency setting for the operation. +func (s *DeleteService) Consistency(consistency string) *DeleteService { + s.consistency = consistency + return s +} + +// Parent is the ID of parent document. +func (s *DeleteService) Parent(parent string) *DeleteService { + s.parent = parent + return s +} + +// Refresh the index after performing the operation. 
+func (s *DeleteService) Refresh(refresh bool) *DeleteService { + s.refresh = &refresh + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *DeleteService) Pretty(pretty bool) *DeleteService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "index": s.index, + "type": s.typ, + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.replication != "" { + params.Set("replication", s.replication) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.consistency != "" { + params.Set("consistency", s.consistency) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteService) Validate() error { + var invalid []string + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *DeleteService) Do() (*DeleteResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(DeleteResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a delete request. + +// DeleteResponse is the outcome of running DeleteService.Do. +type DeleteResponse struct { + // TODO _shards { total, failed, successful } + Found bool `json:"found"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int64 `json:"_version"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/delete_by_query.go b/vendor/gopkg.in/olivere/elastic.v3/delete_by_query.go new file mode 100644 index 000000000..ba531b6f4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/delete_by_query.go @@ -0,0 +1,301 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// DeleteByQueryService deletes documents that match a query. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html. 
+type DeleteByQueryService struct { + client *Client + indices []string + types []string + analyzer string + consistency string + defaultOper string + df string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + replication string + routing string + timeout string + pretty bool + q string + query Query +} + +// NewDeleteByQueryService creates a new DeleteByQueryService. +// You typically use the client's DeleteByQuery to get a reference to +// the service. +func NewDeleteByQueryService(client *Client) *DeleteByQueryService { + builder := &DeleteByQueryService{ + client: client, + } + return builder +} + +// Index sets the indices on which to perform the delete operation. +func (s *DeleteByQueryService) Index(indices ...string) *DeleteByQueryService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +// Type limits the delete operation to the given types. +func (s *DeleteByQueryService) Type(types ...string) *DeleteByQueryService { + if s.types == nil { + s.types = make([]string, 0) + } + s.types = append(s.types, types...) + return s +} + +// Analyzer to use for the query string. +func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService { + s.analyzer = analyzer + return s +} + +// Consistency represents the specific write consistency setting for the operation. +// It can be one, quorum, or all. +func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService { + s.consistency = consistency + return s +} + +// DefaultOperator for query string query (AND or OR). +func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService { + s.defaultOper = defaultOperator + return s +} + +// DF is the field to use as default where no field prefix is given in the query string. 
+func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService { + s.df = defaultField + return s +} + +// DefaultField is the field to use as default where no field prefix is given in the query string. +// It is an alias to the DF func. +func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService { + s.df = defaultField + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService { + s.ignoreUnavailable = &ignore + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices (including the _all string +// or when no indices have been specified). +func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService { + s.allowNoIndices = &allow + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. It can be "open" or "closed". +func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService { + s.expandWildcards = expand + return s +} + +// Replication sets a specific replication type (sync or async). +func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService { + s.replication = replication + return s +} + +// Q specifies the query in Lucene query string syntax. You can also use +// Query to programmatically specify the query. +func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService { + s.q = query + return s +} + +// QueryString is an alias to Q. Notice that you can also use Query to +// programmatically set the query. +func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService { + s.q = query + return s +} + +// Routing sets a specific routing value. 
+func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService { + s.routing = routing + return s +} + +// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms". +func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService { + s.timeout = timeout + return s +} + +// Pretty indents the JSON output from Elasticsearch. +func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService { + s.pretty = pretty + return s +} + +// Query sets the query programmatically. +func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService { + s.query = query + return s +} + +// Do executes the delete-by-query operation. +func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) { + var err error + + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err = uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + // Types part + typesPart := make([]string, 0) + for _, typ := range s.types { + typ, err = uritemplates.Expand("{type}", map[string]string{ + "type": typ, + }) + if err != nil { + return nil, err + } + typesPart = append(typesPart, typ) + } + if len(typesPart) > 0 { + path += "/" + strings.Join(typesPart, ",") + } + + // Search + path += "/_query" + + // Parameters + params := make(url.Values) + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.consistency != "" { + params.Set("consistency", s.consistency) + } + if s.defaultOper != "" { + params.Set("default_operator", s.defaultOper) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", 
*s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.replication != "" { + params.Set("replication", s.replication) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.q != "" { + params.Set("q", s.q) + } + + // Set body if there is a query set + var body interface{} + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + query := make(map[string]interface{}) + query["query"] = src + body = query + } + + // Get response + res, err := s.client.PerformRequest("DELETE", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(DeleteByQueryResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService. +type DeleteByQueryResult struct { + Took int64 `json:"took"` + TimedOut bool `json:"timed_out"` + Indices map[string]IndexDeleteByQueryResult `json:"_indices"` + Failures []shardOperationFailure `json:"failures"` +} + +// IndexNames returns the names of the indices the DeleteByQuery touched. +func (res DeleteByQueryResult) IndexNames() []string { + var indices []string + for index, _ := range res.Indices { + indices = append(indices, index) + } + return indices +} + +// All returns the index delete-by-query result of all indices. +func (res DeleteByQueryResult) All() IndexDeleteByQueryResult { + all, _ := res.Indices["_all"] + return all +} + +// IndexDeleteByQueryResult is the result of a delete-by-query for a specific +// index. +type IndexDeleteByQueryResult struct { + // Found documents, matching the query. + Found int `json:"found"` + // Deleted documents, successfully, from the given index. 
+ Deleted int `json:"deleted"` + // Missing documents when trying to delete them. + Missing int `json:"missing"` + // Failed documents to be deleted for the given index. + Failed int `json:"failed"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/delete_template.go b/vendor/gopkg.in/olivere/elastic.v3/delete_template.go new file mode 100644 index 000000000..971637f59 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/delete_template.go @@ -0,0 +1,117 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// DeleteTemplateService deletes a search template. More information can +// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. +type DeleteTemplateService struct { + client *Client + pretty bool + id string + version *int + versionType string +} + +// NewDeleteTemplateService creates a new DeleteTemplateService. +func NewDeleteTemplateService(client *Client) *DeleteTemplateService { + return &DeleteTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService { + s.id = id + return s +} + +// Version an explicit version number for concurrency control. +func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService { + s.version = &version + return s +} + +// VersionType specifies a version type. +func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService { + s.versionType = versionType + return s +} + +// buildURL builds the URL for the operation. 
+func (s *DeleteTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%d", *s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(DeleteTemplateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// DeleteTemplateResponse is the response of DeleteTemplateService.Do. +type DeleteTemplateResponse struct { + Found bool `json:"found"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/doc.go b/vendor/gopkg.in/olivere/elastic.v3/doc.go new file mode 100644 index 000000000..336a734de --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/doc.go @@ -0,0 +1,51 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +/* +Package elastic provides an interface to the Elasticsearch server +(http://www.elasticsearch.org/). + +The first thing you do is to create a Client. If you have Elasticsearch +installed and running with its default settings +(i.e. available at http://127.0.0.1:9200), all you need to do is: + + client, err := elastic.NewClient() + if err != nil { + // Handle error + } + +If your Elasticsearch server is running on a different IP and/or port, +just provide a URL to NewClient: + + // Create a client and connect to http://192.168.2.10:9201 + client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201")) + if err != nil { + // Handle error + } + +You can pass many more configuration parameters to NewClient. Review the +documentation of NewClient for more information. + +If no Elasticsearch server is available, services will fail when creating +a new request and will return ErrNoClient. + +A Client provides services. The services usually come with a variety of +methods to prepare the query and a Do function to execute it against the +Elasticsearch REST interface and return a response. Here is an example +of the IndexExists service that checks if a given index already exists. + + exists, err := client.IndexExists("twitter").Do() + if err != nil { + // Handle error + } + if !exists { + // Index does not exist yet. + } + +Look up the documentation for Client to get an idea of the services provided +and what kinds of responses you get when executing the Do function of a service. +Also see the wiki on Github for more details. + +*/ +package elastic diff --git a/vendor/gopkg.in/olivere/elastic.v3/errors.go b/vendor/gopkg.in/olivere/elastic.v3/errors.go new file mode 100644 index 000000000..93c2c6de5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/errors.go @@ -0,0 +1,141 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" +) + +// checkResponse will return an error if the request/response indicates +// an error returned from Elasticsearch. +// +// HTTP status codes between in the range [200..299] are considered successful. +// All other errors are considered errors except they are specified in +// ignoreErrors. This is necessary because for some services, HTTP status 404 +// is a valid response from Elasticsearch (e.g. the Exists service). +// +// The func tries to parse error details as returned from Elasticsearch +// and encapsulates them in type elastic.Error. +func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error { + // 200-299 are valid status codes + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + // Ignore certain errors? + for _, code := range ignoreErrors { + if code == res.StatusCode { + return nil + } + } + return createResponseError(res) +} + +// createResponseError creates an Error structure from the HTTP response, +// its status code and the error information sent by Elasticsearch. +func createResponseError(res *http.Response) error { + if res.Body == nil { + return &Error{Status: res.StatusCode} + } + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return &Error{Status: res.StatusCode} + } + errReply := new(Error) + err = json.Unmarshal(data, errReply) + if err != nil { + return &Error{Status: res.StatusCode} + } + if errReply != nil { + if errReply.Status == 0 { + errReply.Status = res.StatusCode + } + return errReply + } + return &Error{Status: res.StatusCode} +} + +// Error encapsulates error details as returned from Elasticsearch. +type Error struct { + Status int `json:"status"` + Details *ErrorDetails `json:"error,omitempty"` +} + +// ErrorDetails encapsulate error details from Elasticsearch. 
+// It is used in e.g. elastic.Error and elastic.BulkResponseItem. +type ErrorDetails struct { + Type string `json:"type"` + Reason string `json:"reason"` + ResourceType string `json:"resource.type,omitempty"` + ResourceId string `json:"resource.id,omitempty"` + Index string `json:"index,omitempty"` + Phase string `json:"phase,omitempty"` + Grouped bool `json:"grouped,omitempty"` + CausedBy map[string]interface{} `json:"caused_by,omitempty"` + RootCause []*ErrorDetails `json:"root_cause,omitempty"` + FailedShards []map[string]interface{} `json:"failed_shards,omitempty"` +} + +// Error returns a string representation of the error. +func (e *Error) Error() string { + if e.Details != nil && e.Details.Reason != "" { + return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type) + } else { + return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status)) + } +} + +// IsNotFound returns true if the given error indicates that Elasticsearch +// returned HTTP status 404. The err parameter can be of type *elastic.Error, +// elastic.Error, *http.Response or int (indicating the HTTP status code). +func IsNotFound(err interface{}) bool { + switch e := err.(type) { + case *http.Response: + return e.StatusCode == http.StatusNotFound + case *Error: + return e.Status == http.StatusNotFound + case Error: + return e.Status == http.StatusNotFound + case int: + return e == http.StatusNotFound + } + return false +} + +// IsTimeout returns true if the given error indicates that Elasticsearch +// returned HTTP status 408. The err parameter can be of type *elastic.Error, +// elastic.Error, *http.Response or int (indicating the HTTP status code). 
+func IsTimeout(err interface{}) bool { + switch e := err.(type) { + case *http.Response: + return e.StatusCode == http.StatusRequestTimeout + case *Error: + return e.Status == http.StatusRequestTimeout + case Error: + return e.Status == http.StatusRequestTimeout + case int: + return e == http.StatusRequestTimeout + } + return false +} + +// -- General errors -- + +// shardsInfo represents information from a shard. +type shardsInfo struct { + Total int `json:"total"` + Successful int `json:"successful"` + Failed int `json:"failed"` +} + +// shardOperationFailure represents a shard failure. +type shardOperationFailure struct { + Shard int `json:"shard"` + Index string `json:"index"` + Status string `json:"status"` + // "reason" +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/exists.go b/vendor/gopkg.in/olivere/elastic.v3/exists.go new file mode 100644 index 000000000..7a42d53c9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/exists.go @@ -0,0 +1,175 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ExistsService checks for the existence of a document using HEAD. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// for details. +type ExistsService struct { + client *Client + pretty bool + id string + index string + typ string + preference string + realtime *bool + refresh *bool + routing string + parent string +} + +// NewExistsService creates a new ExistsService. +func NewExistsService(client *Client) *ExistsService { + return &ExistsService{ + client: client, + } +} + +// Id is the document ID. +func (s *ExistsService) Id(id string) *ExistsService { + s.id = id + return s +} + +// Index is the name of the index. 
+func (s *ExistsService) Index(index string) *ExistsService { + s.index = index + return s +} + +// Type is the type of the document (use `_all` to fetch the first document +// matching the ID across all types). +func (s *ExistsService) Type(typ string) *ExistsService { + s.typ = typ + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *ExistsService) Preference(preference string) *ExistsService { + s.preference = preference + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *ExistsService) Realtime(realtime bool) *ExistsService { + s.realtime = &realtime + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *ExistsService) Refresh(refresh bool) *ExistsService { + s.refresh = &refresh + return s +} + +// Routing is a specific routing value. +func (s *ExistsService) Routing(routing string) *ExistsService { + s.routing = routing + return s +} + +// Parent is the ID of the parent document. +func (s *ExistsService) Parent(parent string) *ExistsService { + s.parent = parent + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ExistsService) Pretty(pretty bool) *ExistsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *ExistsService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExistsService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *ExistsService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/explain.go b/vendor/gopkg.in/olivere/elastic.v3/explain.go new file mode 100644 index 000000000..617e8462e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/explain.go @@ -0,0 +1,329 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "log" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +var ( + _ = fmt.Print + _ = log.Print + _ = strings.Index + _ = uritemplates.Expand + _ = url.Parse +) + +// ExplainService computes a score explanation for a query and +// a specific document. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html. +type ExplainService struct { + client *Client + pretty bool + id string + index string + typ string + q string + routing string + lenient *bool + analyzer string + df string + fields []string + lowercaseExpandedTerms *bool + xSourceInclude []string + analyzeWildcard *bool + parent string + preference string + xSource []string + defaultOperator string + xSourceExclude []string + source string + bodyJson interface{} + bodyString string +} + +// NewExplainService creates a new ExplainService. 
+func NewExplainService(client *Client) *ExplainService { + return &ExplainService{ + client: client, + xSource: make([]string, 0), + xSourceExclude: make([]string, 0), + fields: make([]string, 0), + xSourceInclude: make([]string, 0), + } +} + +// Id is the document ID. +func (s *ExplainService) Id(id string) *ExplainService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *ExplainService) Index(index string) *ExplainService { + s.index = index + return s +} + +// Type is the type of the document. +func (s *ExplainService) Type(typ string) *ExplainService { + s.typ = typ + return s +} + +// Source is the URL-encoded query definition (instead of using the request body). +func (s *ExplainService) Source(source string) *ExplainService { + s.source = source + return s +} + +// XSourceExclude is a list of fields to exclude from the returned _source field. +func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored. +func (s *ExplainService) Lenient(lenient bool) *ExplainService { + s.lenient = &lenient + return s +} + +// Query in the Lucene query string syntax. +func (s *ExplainService) Q(q string) *ExplainService { + s.q = q + return s +} + +// Routing sets a specific routing value. +func (s *ExplainService) Routing(routing string) *ExplainService { + s.routing = routing + return s +} + +// AnalyzeWildcard specifies whether wildcards and prefix queries +// in the query string query should be analyzed (default: false). +func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer is the analyzer for the query string query. 
+func (s *ExplainService) Analyzer(analyzer string) *ExplainService { + s.analyzer = analyzer + return s +} + +// Df is the default field for query string query (default: _all). +func (s *ExplainService) Df(df string) *ExplainService { + s.df = df + return s +} + +// Fields is a list of fields to return in the response. +func (s *ExplainService) Fields(fields ...string) *ExplainService { + s.fields = append(s.fields, fields...) + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// XSourceInclude is a list of fields to extract and return from the _source field. +func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) + return s +} + +// DefaultOperator is the default operator for query string query (AND or OR). +func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService { + s.defaultOperator = defaultOperator + return s +} + +// Parent is the ID of the parent document. +func (s *ExplainService) Parent(parent string) *ExplainService { + s.parent = parent + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *ExplainService) Preference(preference string) *ExplainService { + s.preference = preference + return s +} + +// XSource is true or false to return the _source field or not, or a list of fields to return. +func (s *ExplainService) XSource(xSource ...string) *ExplainService { + s.xSource = append(s.xSource, xSource...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ExplainService) Pretty(pretty bool) *ExplainService { + s.pretty = pretty + return s +} + +// Query sets a query definition using the Query DSL. 
+func (s *ExplainService) Query(query Query) *ExplainService { + src, err := query.Source() + if err != nil { + // Do nothing in case of an error + return s + } + body := make(map[string]interface{}) + body["query"] = src + s.bodyJson = body + return s +} + +// BodyJson sets the query definition using the Query DSL. +func (s *ExplainService) BodyJson(body interface{}) *ExplainService { + s.bodyJson = body + return s +} + +// BodyString sets the query definition using the Query DSL as a string. +func (s *ExplainService) BodyString(body string) *ExplainService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *ExplainService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.source != "" { + params.Set("source", s.source) + } + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, 
",")) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.df != "" { + params.Set("df", s.df) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExplainService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ExplainService) Do() (*ExplainResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ExplainResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ExplainResponse is the response of ExplainService.Do. +type ExplainResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Matched bool `json:"matched"` + Explanation map[string]interface{} `json:"explanation"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context.go b/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context.go new file mode 100644 index 000000000..e13c9eb47 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context.go @@ -0,0 +1,74 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "net/url" + "strings" +) + +type FetchSourceContext struct { + fetchSource bool + transformSource bool + includes []string + excludes []string +} + +func NewFetchSourceContext(fetchSource bool) *FetchSourceContext { + return &FetchSourceContext{ + fetchSource: fetchSource, + includes: make([]string, 0), + excludes: make([]string, 0), + } +} + +func (fsc *FetchSourceContext) FetchSource() bool { + return fsc.fetchSource +} + +func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) { + fsc.fetchSource = fetchSource +} + +func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext { + fsc.includes = append(fsc.includes, includes...) + return fsc +} + +func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext { + fsc.excludes = append(fsc.excludes, excludes...) + return fsc +} + +func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext { + fsc.transformSource = transformSource + return fsc +} + +func (fsc *FetchSourceContext) Source() (interface{}, error) { + if !fsc.fetchSource { + return false, nil + } + return map[string]interface{}{ + "includes": fsc.includes, + "excludes": fsc.excludes, + }, nil +} + +// Query returns the parameters in a form suitable for a URL query string. 
+func (fsc *FetchSourceContext) Query() url.Values { + params := url.Values{} + if !fsc.fetchSource { + params.Add("_source", "false") + return params + } + if len(fsc.includes) > 0 { + params.Add("_source_include", strings.Join(fsc.includes, ",")) + } + if len(fsc.excludes) > 0 { + params.Add("_source_exclude", strings.Join(fsc.excludes, ",")) + } + return params +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/field_stats.go b/vendor/gopkg.in/olivere/elastic.v3/field_stats.go new file mode 100644 index 000000000..fc8d084cd --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/field_stats.go @@ -0,0 +1,255 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +const ( + FieldStatsClusterLevel = "cluster" + FieldStatsIndicesLevel = "indices" +) + +// FieldStatsService allows finding statistical properties of a field without executing a search, +// but looking up measurements that are natively available in the Lucene index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-stats.html +// for details +type FieldStatsService struct { + client *Client + pretty bool + level string + index []string + allowNoIndices *bool + expandWildcards string + fields []string + ignoreUnavailable *bool + bodyJson interface{} + bodyString string +} + +// NewFieldStatsService creates a new FieldStatsService +func NewFieldStatsService(client *Client) *FieldStatsService { + return &FieldStatsService{ + client: client, + index: make([]string, 0), + fields: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string to perform +// the operation on all indices. 
+func (s *FieldStatsService) Index(index ...string) *FieldStatsService { + s.index = append(s.index, index...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *FieldStatsService) AllowNoIndices(allowNoIndices bool) *FieldStatsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *FieldStatsService) ExpandWildcards(expandWildcards string) *FieldStatsService { + s.expandWildcards = expandWildcards + return s +} + +// Fields is a list of fields for to get field statistics +// for (min value, max value, and more). +func (s *FieldStatsService) Fields(fields ...string) *FieldStatsService { + s.fields = append(s.fields, fields...) + return s +} + +// IgnoreUnavailable is documented as: Whether specified concrete indices should be ignored when unavailable (missing or closed). +func (s *FieldStatsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldStatsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Level sets if stats should be returned on a per index level or on a cluster wide level; +// should be one of 'cluster' or 'indices'; defaults to former +func (s *FieldStatsService) Level(level string) *FieldStatsService { + s.level = level + return s +} + +// ClusterLevel is a helper that sets Level to "cluster". +func (s *FieldStatsService) ClusterLevel() *FieldStatsService { + s.level = FieldStatsClusterLevel + return s +} + +// IndicesLevel is a helper that sets Level to "indices". +func (s *FieldStatsService) IndicesLevel() *FieldStatsService { + s.level = FieldStatsIndicesLevel + return s +} + +// Pretty indicates that the JSON response be indented and human readable. 
+func (s *FieldStatsService) Pretty(pretty bool) *FieldStatsService { + s.pretty = pretty + return s +} + +// BodyJson is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds. +func (s *FieldStatsService) BodyJson(body interface{}) *FieldStatsService { + s.bodyJson = body + return s +} + +// BodyString is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds. +func (s *FieldStatsService) BodyString(body string) *FieldStatsService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *FieldStatsService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_field_stats", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_field_stats" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.level != "" { + params.Set("level", s.level) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *FieldStatsService) Validate() error { + var invalid []string + if s.level != "" && (s.level != FieldStatsIndicesLevel && s.level != FieldStatsClusterLevel) { + invalid = append(invalid, "Level") + } + if len(invalid) != 0 { + return fmt.Errorf("missing or invalid required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *FieldStatsService) Do() (*FieldStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, body, http.StatusNotFound) + if err != nil { + return nil, err + } + + // TODO(oe): Is 404 really a valid response here? + if res.StatusCode == http.StatusNotFound { + return &FieldStatsResponse{make(map[string]IndexFieldStats)}, nil + } + + // Return operation response + ret := new(FieldStatsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Request -- + +// FieldStatsRequest can be used to set up the body to be used in the +// Field Stats API. +type FieldStatsRequest struct { + Fields []string `json:"fields"` + IndexConstraints map[string]*FieldStatsConstraints `json:"index_constraints,omitempty"` +} + +// FieldStatsConstraints is a constraint on a field. +type FieldStatsConstraints struct { + Min *FieldStatsComparison `json:"min_value,omitempty"` + Max *FieldStatsComparison `json:"max_value,omitempty"` +} + +// FieldStatsComparison contain all comparison operations that can be used +// in FieldStatsConstraints. 
+type FieldStatsComparison struct { + Lte interface{} `json:"lte,omitempty"` + Lt interface{} `json:"lt,omitempty"` + Gte interface{} `json:"gte,omitempty"` + Gt interface{} `json:"gt,omitempty"` +} + +// -- Response -- + +// FieldStatsResponse is the response body content +type FieldStatsResponse struct { + Indices map[string]IndexFieldStats `json:"indices,omitempty"` +} + +// IndexFieldStats contains field stats for an index +type IndexFieldStats struct { + Fields map[string]FieldStats `json:"fields,omitempty"` +} + +// FieldStats contains stats of an individual field +type FieldStats struct { + MaxDoc int64 `json:"max_doc"` + DocCount int64 `json:"doc_count"` + Density int64 `json:"density"` + SumDocFrequeny int64 `json:"sum_doc_freq"` + SumTotalTermFrequency int64 `json:"sum_total_term_freq"` + MinValue interface{} `json:"min_value"` + MinValueAsString string `json:"min_value_as_string"` + MaxValue interface{} `json:"max_value"` + MaxValueAsString string `json:"max_value_as_string"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/geo_point.go b/vendor/gopkg.in/olivere/elastic.v3/geo_point.go new file mode 100644 index 000000000..a09351ca2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/geo_point.go @@ -0,0 +1,48 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strconv" + "strings" +) + +// GeoPoint is a geographic position described via latitude and longitude. +type GeoPoint struct { + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` +} + +// Source returns the object to be serialized in Elasticsearch DSL. +func (pt *GeoPoint) Source() map[string]float64 { + return map[string]float64{ + "lat": pt.Lat, + "lon": pt.Lon, + } +} + +// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude. 
+func GeoPointFromLatLon(lat, lon float64) *GeoPoint { + return &GeoPoint{Lat: lat, Lon: lon} +} + +// GeoPointFromString initializes a new GeoPoint by a string that is +// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091". +func GeoPointFromString(latLon string) (*GeoPoint, error) { + latlon := strings.SplitN(latLon, ",", 2) + if len(latlon) != 2 { + return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon) + } + lat, err := strconv.ParseFloat(latlon[0], 64) + if err != nil { + return nil, err + } + lon, err := strconv.ParseFloat(latlon[1], 64) + if err != nil { + return nil, err + } + return &GeoPoint{Lat: lat, Lon: lon}, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/get.go b/vendor/gopkg.in/olivere/elastic.v3/get.go new file mode 100644 index 000000000..4e255ada6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/get.go @@ -0,0 +1,271 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// GetService allows to get a typed JSON document from the index based +// on its id. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// for details. +type GetService struct { + client *Client + pretty bool + index string + typ string + id string + routing string + preference string + fields []string + refresh *bool + realtime *bool + fsc *FetchSourceContext + version interface{} + versionType string + parent string + ignoreErrorsOnGeneratedFields *bool +} + +// NewGetService creates a new GetService. +func NewGetService(client *Client) *GetService { + return &GetService{ + client: client, + typ: "_all", + } +} + +/* +// String returns a string representation of the GetService request. 
+func (s *GetService) String() string { + return fmt.Sprintf("[%v][%v][%v]: routing [%v]", + s.index, + s.typ, + s.id, + s.routing) +} +*/ + +// Index is the name of the index. +func (s *GetService) Index(index string) *GetService { + s.index = index + return s +} + +// Type is the type of the document (use `_all` to fetch the first document +// matching the ID across all types). +func (s *GetService) Type(typ string) *GetService { + s.typ = typ + return s +} + +// Id is the document ID. +func (s *GetService) Id(id string) *GetService { + s.id = id + return s +} + +// Parent is the ID of the parent document. +func (s *GetService) Parent(parent string) *GetService { + s.parent = parent + return s +} + +// Routing is the specific routing value. +func (s *GetService) Routing(routing string) *GetService { + s.routing = routing + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *GetService) Preference(preference string) *GetService { + s.preference = preference + return s +} + +// Fields is a list of fields to return in the response. +func (s *GetService) Fields(fields ...string) *GetService { + if s.fields == nil { + s.fields = make([]string, 0) + } + s.fields = append(s.fields, fields...) + return s +} + +func (s *GetService) FetchSource(fetchSource bool) *GetService { + if s.fsc == nil { + s.fsc = NewFetchSourceContext(fetchSource) + } else { + s.fsc.SetFetchSource(fetchSource) + } + return s +} + +func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService { + s.fsc = fetchSourceContext + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *GetService) Refresh(refresh bool) *GetService { + s.refresh = &refresh + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. 
+func (s *GetService) Realtime(realtime bool) *GetService { + s.realtime = &realtime + return s +} + +// VersionType is the specific version type. +func (s *GetService) VersionType(versionType string) *GetService { + s.versionType = versionType + return s +} + +// Version is an explicit version number for concurrency control. +func (s *GetService) Version(version interface{}) *GetService { + s.version = version + return s +} + +// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that +// are generated if the transaction log is accessed. +func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService { + s.ignoreErrorsOnGeneratedFields = &ignore + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *GetService) Pretty(pretty bool) *GetService { + s.pretty = pretty + return s +} + +// Validate checks if the operation is valid. +func (s *GetService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// buildURL builds the URL for the operation. 
+func (s *GetService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.ignoreErrorsOnGeneratedFields != nil { + params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields)) + } + if s.fsc != nil { + for k, values := range s.fsc.Query() { + params.Add(k, strings.Join(values, ",")) + } + } + return path, params, nil +} + +// Do executes the operation. +func (s *GetService) Do() (*GetResult, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(GetResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a get request. + +// GetResult is the outcome of GetService.Do. 
+type GetResult struct { + Index string `json:"_index"` // index meta field + Type string `json:"_type"` // type meta field + Id string `json:"_id"` // id meta field + Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) + Timestamp int64 `json:"_timestamp"` // timestamp meta field + TTL int64 `json:"_ttl"` // ttl meta field + Routing string `json:"_routing"` // routing meta field + Parent string `json:"_parent"` // parent meta field + Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService + Source *json.RawMessage `json:"_source,omitempty"` + Found bool `json:"found,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + //Error string `json:"error,omitempty"` // used only in MultiGet + // TODO double-check that MultiGet now returns details error information + Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/get_template.go b/vendor/gopkg.in/olivere/elastic.v3/get_template.go new file mode 100644 index 000000000..e5c11f029 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/get_template.go @@ -0,0 +1,112 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// GetTemplateService reads a search template. +// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. +type GetTemplateService struct { + client *Client + pretty bool + id string + version interface{} + versionType string +} + +// NewGetTemplateService creates a new GetTemplateService. +func NewGetTemplateService(client *Client) *GetTemplateService { + return &GetTemplateService{ + client: client, + } +} + +// Id is the template ID. 
+func (s *GetTemplateService) Id(id string) *GetTemplateService { + s.id = id + return s +} + +// Version is an explicit version number for concurrency control. +func (s *GetTemplateService) Version(version interface{}) *GetTemplateService { + s.version = version + return s +} + +// VersionType is a specific version type. +func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService { + s.versionType = versionType + return s +} + +// buildURL builds the URL for the operation. +func (s *GetTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *GetTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation and returns the template. 
+func (s *GetTemplateService) Do() (*GetTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(GetTemplateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +type GetTemplateResponse struct { + Template string `json:"template"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/highlight.go b/vendor/gopkg.in/olivere/elastic.v3/highlight.go new file mode 100644 index 000000000..44501a731 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/highlight.go @@ -0,0 +1,455 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Highlight allows highlighting search results on one or more fields. 
+// For details, see: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html +type Highlight struct { + fields []*HighlighterField + tagsSchema *string + highlightFilter *bool + fragmentSize *int + numOfFragments *int + preTags []string + postTags []string + order *string + encoder *string + requireFieldMatch *bool + boundaryMaxScan *int + boundaryChars []rune + highlighterType *string + fragmenter *string + highlightQuery Query + noMatchSize *int + phraseLimit *int + options map[string]interface{} + forceSource *bool + useExplicitFieldOrder bool +} + +func NewHighlight() *Highlight { + hl := &Highlight{ + fields: make([]*HighlighterField, 0), + preTags: make([]string, 0), + postTags: make([]string, 0), + boundaryChars: make([]rune, 0), + options: make(map[string]interface{}), + } + return hl +} + +func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight { + hl.fields = append(hl.fields, fields...) + return hl +} + +func (hl *Highlight) Field(name string) *Highlight { + field := NewHighlighterField(name) + hl.fields = append(hl.fields, field) + return hl +} + +func (hl *Highlight) TagsSchema(schemaName string) *Highlight { + hl.tagsSchema = &schemaName + return hl +} + +func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight { + hl.highlightFilter = &highlightFilter + return hl +} + +func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight { + hl.fragmentSize = &fragmentSize + return hl +} + +func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight { + hl.numOfFragments = &numOfFragments + return hl +} + +func (hl *Highlight) Encoder(encoder string) *Highlight { + hl.encoder = &encoder + return hl +} + +func (hl *Highlight) PreTags(preTags ...string) *Highlight { + hl.preTags = append(hl.preTags, preTags...) + return hl +} + +func (hl *Highlight) PostTags(postTags ...string) *Highlight { + hl.postTags = append(hl.postTags, postTags...) 
+ return hl +} + +func (hl *Highlight) Order(order string) *Highlight { + hl.order = &order + return hl +} + +func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight { + hl.requireFieldMatch = &requireFieldMatch + return hl +} + +func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight { + hl.boundaryMaxScan = &boundaryMaxScan + return hl +} + +func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight { + hl.boundaryChars = append(hl.boundaryChars, boundaryChars...) + return hl +} + +func (hl *Highlight) HighlighterType(highlighterType string) *Highlight { + hl.highlighterType = &highlighterType + return hl +} + +func (hl *Highlight) Fragmenter(fragmenter string) *Highlight { + hl.fragmenter = &fragmenter + return hl +} + +func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight { + hl.highlightQuery = highlightQuery + return hl +} + +func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight { + hl.noMatchSize = &noMatchSize + return hl +} + +func (hl *Highlight) Options(options map[string]interface{}) *Highlight { + hl.options = options + return hl +} + +func (hl *Highlight) ForceSource(forceSource bool) *Highlight { + hl.forceSource = &forceSource + return hl +} + +func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight { + hl.useExplicitFieldOrder = useExplicitFieldOrder + return hl +} + +// Creates the query source for the bool query. +func (hl *Highlight) Source() (interface{}, error) { + // Returns the map inside of "highlight": + // "highlight":{ + // ... this ... 
+ // } + source := make(map[string]interface{}) + if hl.tagsSchema != nil { + source["tags_schema"] = *hl.tagsSchema + } + if hl.preTags != nil && len(hl.preTags) > 0 { + source["pre_tags"] = hl.preTags + } + if hl.postTags != nil && len(hl.postTags) > 0 { + source["post_tags"] = hl.postTags + } + if hl.order != nil { + source["order"] = *hl.order + } + if hl.highlightFilter != nil { + source["highlight_filter"] = *hl.highlightFilter + } + if hl.fragmentSize != nil { + source["fragment_size"] = *hl.fragmentSize + } + if hl.numOfFragments != nil { + source["number_of_fragments"] = *hl.numOfFragments + } + if hl.encoder != nil { + source["encoder"] = *hl.encoder + } + if hl.requireFieldMatch != nil { + source["require_field_match"] = *hl.requireFieldMatch + } + if hl.boundaryMaxScan != nil { + source["boundary_max_scan"] = *hl.boundaryMaxScan + } + if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 { + source["boundary_chars"] = hl.boundaryChars + } + if hl.highlighterType != nil { + source["type"] = *hl.highlighterType + } + if hl.fragmenter != nil { + source["fragmenter"] = *hl.fragmenter + } + if hl.highlightQuery != nil { + src, err := hl.highlightQuery.Source() + if err != nil { + return nil, err + } + source["highlight_query"] = src + } + if hl.noMatchSize != nil { + source["no_match_size"] = *hl.noMatchSize + } + if hl.phraseLimit != nil { + source["phrase_limit"] = *hl.phraseLimit + } + if hl.options != nil && len(hl.options) > 0 { + source["options"] = hl.options + } + if hl.forceSource != nil { + source["force_source"] = *hl.forceSource + } + + if hl.fields != nil && len(hl.fields) > 0 { + if hl.useExplicitFieldOrder { + // Use a slice for the fields + fields := make([]map[string]interface{}, 0) + for _, field := range hl.fields { + src, err := field.Source() + if err != nil { + return nil, err + } + fmap := make(map[string]interface{}) + fmap[field.Name] = src + fields = append(fields, fmap) + } + source["fields"] = fields + } else { + // Use a map 
for the fields + fields := make(map[string]interface{}, 0) + for _, field := range hl.fields { + src, err := field.Source() + if err != nil { + return nil, err + } + fields[field.Name] = src + } + source["fields"] = fields + } + } + + return source, nil +} + +// HighlighterField specifies a highlighted field. +type HighlighterField struct { + Name string + + preTags []string + postTags []string + fragmentSize int + fragmentOffset int + numOfFragments int + highlightFilter *bool + order *string + requireFieldMatch *bool + boundaryMaxScan int + boundaryChars []rune + highlighterType *string + fragmenter *string + highlightQuery Query + noMatchSize *int + matchedFields []string + phraseLimit *int + options map[string]interface{} + forceSource *bool + + /* + Name string + preTags []string + postTags []string + fragmentSize int + numOfFragments int + fragmentOffset int + highlightFilter *bool + order string + requireFieldMatch *bool + boundaryMaxScan int + boundaryChars []rune + highlighterType string + fragmenter string + highlightQuery Query + noMatchSize *int + matchedFields []string + options map[string]interface{} + forceSource *bool + */ +} + +func NewHighlighterField(name string) *HighlighterField { + return &HighlighterField{ + Name: name, + preTags: make([]string, 0), + postTags: make([]string, 0), + fragmentSize: -1, + fragmentOffset: -1, + numOfFragments: -1, + boundaryMaxScan: -1, + boundaryChars: make([]rune, 0), + matchedFields: make([]string, 0), + options: make(map[string]interface{}), + } +} + +func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField { + f.preTags = append(f.preTags, preTags...) + return f +} + +func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField { + f.postTags = append(f.postTags, postTags...) 
+ return f +} + +func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField { + f.fragmentSize = fragmentSize + return f +} + +func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField { + f.fragmentOffset = fragmentOffset + return f +} + +func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField { + f.numOfFragments = numOfFragments + return f +} + +func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField { + f.highlightFilter = &highlightFilter + return f +} + +func (f *HighlighterField) Order(order string) *HighlighterField { + f.order = &order + return f +} + +func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField { + f.requireFieldMatch = &requireFieldMatch + return f +} + +func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField { + f.boundaryMaxScan = boundaryMaxScan + return f +} + +func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField { + f.boundaryChars = append(f.boundaryChars, boundaryChars...) + return f +} + +func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField { + f.highlighterType = &highlighterType + return f +} + +func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField { + f.fragmenter = &fragmenter + return f +} + +func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField { + f.highlightQuery = highlightQuery + return f +} + +func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField { + f.noMatchSize = &noMatchSize + return f +} + +func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField { + f.options = options + return f +} + +func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField { + f.matchedFields = append(f.matchedFields, matchedFields...) 
+ return f +} + +func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField { + f.phraseLimit = &phraseLimit + return f +} + +func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField { + f.forceSource = &forceSource + return f +} + +func (f *HighlighterField) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if f.preTags != nil && len(f.preTags) > 0 { + source["pre_tags"] = f.preTags + } + if f.postTags != nil && len(f.postTags) > 0 { + source["post_tags"] = f.postTags + } + if f.fragmentSize != -1 { + source["fragment_size"] = f.fragmentSize + } + if f.numOfFragments != -1 { + source["number_of_fragments"] = f.numOfFragments + } + if f.fragmentOffset != -1 { + source["fragment_offset"] = f.fragmentOffset + } + if f.highlightFilter != nil { + source["highlight_filter"] = *f.highlightFilter + } + if f.order != nil { + source["order"] = *f.order + } + if f.requireFieldMatch != nil { + source["require_field_match"] = *f.requireFieldMatch + } + if f.boundaryMaxScan != -1 { + source["boundary_max_scan"] = f.boundaryMaxScan + } + if f.boundaryChars != nil && len(f.boundaryChars) > 0 { + source["boundary_chars"] = f.boundaryChars + } + if f.highlighterType != nil { + source["type"] = *f.highlighterType + } + if f.fragmenter != nil { + source["fragmenter"] = *f.fragmenter + } + if f.highlightQuery != nil { + src, err := f.highlightQuery.Source() + if err != nil { + return nil, err + } + source["highlight_query"] = src + } + if f.noMatchSize != nil { + source["no_match_size"] = *f.noMatchSize + } + if f.matchedFields != nil && len(f.matchedFields) > 0 { + source["matched_fields"] = f.matchedFields + } + if f.phraseLimit != nil { + source["phrase_limit"] = *f.phraseLimit + } + if f.options != nil && len(f.options) > 0 { + source["options"] = f.options + } + if f.forceSource != nil { + source["force_source"] = *f.forceSource + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/index.go 
b/vendor/gopkg.in/olivere/elastic.v3/index.go new file mode 100644 index 000000000..f31e7b010 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/index.go @@ -0,0 +1,283 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndexService adds or updates a typed JSON document in a specified index, +// making it searchable. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html +// for details. +type IndexService struct { + client *Client + pretty bool + id string + index string + typ string + parent string + replication string + routing string + timeout string + timestamp string + ttl string + version interface{} + opType string + versionType string + refresh *bool + consistency string + bodyJson interface{} + bodyString string +} + +// NewIndexService creates a new IndexService. +func NewIndexService(client *Client) *IndexService { + return &IndexService{ + client: client, + } +} + +// Id is the document ID. +func (s *IndexService) Id(id string) *IndexService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *IndexService) Index(index string) *IndexService { + s.index = index + return s +} + +// Type is the type of the document. +func (s *IndexService) Type(typ string) *IndexService { + s.typ = typ + return s +} + +// Consistency is an explicit write consistency setting for the operation. +func (s *IndexService) Consistency(consistency string) *IndexService { + s.consistency = consistency + return s +} + +// Refresh the index after performing the operation. +func (s *IndexService) Refresh(refresh bool) *IndexService { + s.refresh = &refresh + return s +} + +// Ttl is an expiration time for the document. 
+func (s *IndexService) Ttl(ttl string) *IndexService { + s.ttl = ttl + return s +} + +// TTL is an expiration time for the document (alias for Ttl). +func (s *IndexService) TTL(ttl string) *IndexService { + s.ttl = ttl + return s +} + +// Version is an explicit version number for concurrency control. +func (s *IndexService) Version(version interface{}) *IndexService { + s.version = version + return s +} + +// OpType is an explicit operation type, i.e. "create" or "index" (default). +func (s *IndexService) OpType(opType string) *IndexService { + s.opType = opType + return s +} + +// Parent is the ID of the parent document. +func (s *IndexService) Parent(parent string) *IndexService { + s.parent = parent + return s +} + +// Replication is a specific replication type. +func (s *IndexService) Replication(replication string) *IndexService { + s.replication = replication + return s +} + +// Routing is a specific routing value. +func (s *IndexService) Routing(routing string) *IndexService { + s.routing = routing + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndexService) Timeout(timeout string) *IndexService { + s.timeout = timeout + return s +} + +// Timestamp is an explicit timestamp for the document. +func (s *IndexService) Timestamp(timestamp string) *IndexService { + s.timestamp = timestamp + return s +} + +// VersionType is a specific version type. +func (s *IndexService) VersionType(versionType string) *IndexService { + s.versionType = versionType + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndexService) Pretty(pretty bool) *IndexService { + s.pretty = pretty + return s +} + +// BodyJson is the document as a serializable JSON interface. +func (s *IndexService) BodyJson(body interface{}) *IndexService { + s.bodyJson = body + return s +} + +// BodyString is the document encoded as a string. 
+func (s *IndexService) BodyString(body string) *IndexService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IndexService) buildURL() (string, string, url.Values, error) { + var err error + var method, path string + + if s.id != "" { + // Create document with manual id + method = "PUT" + path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + } else { + // Automatic ID generation + // See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation + method = "POST" + path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } + if err != nil { + return "", "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.consistency != "" { + params.Set("consistency", s.consistency) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.opType != "" { + params.Set("op_type", s.opType) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.replication != "" { + params.Set("replication", s.replication) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.timestamp != "" { + params.Set("timestamp", s.timestamp) + } + if s.ttl != "" { + params.Set("ttl", s.ttl) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + return method, path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *IndexService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndexService) Do() (*IndexResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + method, path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(method, path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndexResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndexResponse is the result of indexing a document in Elasticsearch. +type IndexResponse struct { + // TODO _shards { total, failed, successful } + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_close.go b/vendor/gopkg.in/olivere/elastic.v3/indices_close.go new file mode 100644 index 000000000..e515c7e0e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_close.go @@ -0,0 +1,152 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesCloseService closes an index. 
+// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html +// for details. +type IndicesCloseService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewIndicesCloseService creates and initializes a new IndicesCloseService. +func NewIndicesCloseService(client *Client) *IndicesCloseService { + return &IndicesCloseService{client: client} +} + +// Index is the name of the index to close. +func (s *IndicesCloseService) Index(index string) *IndicesCloseService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified). +func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. 
+func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesCloseService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_close", map[string]string{ + "index": s.index, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesCloseService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesCloseService) Do() (*IndicesCloseResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesCloseResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesCloseResponse is the response of IndicesCloseService.Do. 
+type IndicesCloseResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_create.go b/vendor/gopkg.in/olivere/elastic.v3/indices_create.go new file mode 100644 index 000000000..80ff33b02 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_create.go @@ -0,0 +1,128 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "errors" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesCreateService creates a new index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html +// for details. +type IndicesCreateService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + bodyJson interface{} + bodyString string +} + +// NewIndicesCreateService returns a new IndicesCreateService. +func NewIndicesCreateService(client *Client) *IndicesCreateService { + return &IndicesCreateService{client: client} +} + +// Index is the name of the index to create. +func (b *IndicesCreateService) Index(index string) *IndicesCreateService { + b.index = index + return b +} + +// Timeout the explicit operation timeout, e.g. "5s". +func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService { + s.masterTimeout = masterTimeout + return s +} + +// Body specifies the configuration of the index as a string. +// It is an alias for BodyString. +func (b *IndicesCreateService) Body(body string) *IndicesCreateService { + b.bodyString = body + return b +} + +// BodyString specifies the configuration of the index as a string. 
+func (b *IndicesCreateService) BodyString(body string) *IndicesCreateService { + b.bodyString = body + return b +} + +// BodyJson specifies the configuration of the index. The interface{} will +// be serializes as a JSON document, so use a map[string]interface{}. +func (b *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService { + b.bodyJson = body + return b +} + +// Pretty indicates that the JSON response be indented and human readable. +func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService { + b.pretty = pretty + return b +} + +// Do executes the operation. +func (b *IndicesCreateService) Do() (*IndicesCreateResult, error) { + if b.index == "" { + return nil, errors.New("missing index name") + } + + // Build url + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": b.index, + }) + if err != nil { + return nil, err + } + + params := make(url.Values) + if b.pretty { + params.Set("pretty", "1") + } + if b.masterTimeout != "" { + params.Set("master_timeout", b.masterTimeout) + } + if b.timeout != "" { + params.Set("timeout", b.timeout) + } + + // Setup HTTP request body + var body interface{} + if b.bodyJson != nil { + body = b.bodyJson + } else { + body = b.bodyString + } + + // Get response + res, err := b.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + ret := new(IndicesCreateResult) + if err := b.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a create index request. + +// IndicesCreateResult is the outcome of creating a new index. 
+type IndicesCreateResult struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_delete.go b/vendor/gopkg.in/olivere/elastic.v3/indices_delete.go new file mode 100644 index 000000000..e93f1fb81 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_delete.go @@ -0,0 +1,128 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesDeleteService allows to delete existing indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html +// for details. +type IndicesDeleteService struct { + client *Client + pretty bool + index []string + timeout string + masterTimeout string +} + +// NewIndicesDeleteService creates and initializes a new IndicesDeleteService. +func NewIndicesDeleteService(client *Client) *IndicesDeleteService { + return &IndicesDeleteService{ + client: client, + index: make([]string, 0), + } +} + +// Index adds the list of indices to delete. +// Use `_all` or `*` string to delete all indices. +func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesDeleteService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(s.index, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteService) Do() (*IndicesDeleteResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesDeleteResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a delete index request. + +// IndicesDeleteResponse is the response of IndicesDeleteService.Do. +type IndicesDeleteResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_delete_template.go b/vendor/gopkg.in/olivere/elastic.v3/indices_delete_template.go new file mode 100644 index 000000000..76bedf706 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_delete_template.go @@ -0,0 +1,121 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesDeleteTemplateService deletes index templates. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html. +type IndicesDeleteTemplateService struct { + client *Client + pretty bool + name string + timeout string + masterTimeout string +} + +// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService. +func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService { + return &IndicesDeleteTemplateService{ + client: client, + } +} + +// Name is the name of the template. +func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService { + s.name = name + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesDeleteTemplateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do. +type IndicesDeleteTemplateResponse struct { + Acknowledged bool `json:"acknowledged,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go b/vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go new file mode 100644 index 000000000..ab3dbfec2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go @@ -0,0 +1,130 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesDeleteWarmerService allows to delete a warmer. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html. +type IndicesDeleteWarmerService struct { + client *Client + pretty bool + index []string + name []string + masterTimeout string +} + +// NewIndicesDeleteWarmerService creates a new IndicesDeleteWarmerService. +func NewIndicesDeleteWarmerService(client *Client) *IndicesDeleteWarmerService { + return &IndicesDeleteWarmerService{ + client: client, + index: make([]string, 0), + name: make([]string, 0), + } +} + +// Index is a list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +func (s *IndicesDeleteWarmerService) Index(indices ...string) *IndicesDeleteWarmerService { + s.index = append(s.index, indices...) + return s +} + +// Name is a list of warmer names to delete (supports wildcards); +// use `_all` to delete all warmers in the specified indices. +func (s *IndicesDeleteWarmerService) Name(name ...string) *IndicesDeleteWarmerService { + s.name = append(s.name, name...) + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteWarmerService) MasterTimeout(masterTimeout string) *IndicesDeleteWarmerService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesDeleteWarmerService) Pretty(pretty bool) *IndicesDeleteWarmerService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesDeleteWarmerService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "name": strings.Join(s.name, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if len(s.name) > 0 { + params.Set("name", strings.Join(s.name, ",")) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteWarmerService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(s.name) == 0 { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteWarmerService) Do() (*DeleteWarmerResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(DeleteWarmerResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// DeleteWarmerResponse is the response of IndicesDeleteWarmerService.Do. 
+type DeleteWarmerResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_exists.go b/vendor/gopkg.in/olivere/elastic.v3/indices_exists.go new file mode 100644 index 000000000..92f9974f2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_exists.go @@ -0,0 +1,149 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesExistsService checks if an index or indices exist or not. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html +// for details. +type IndicesExistsService struct { + client *Client + pretty bool + index []string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + local *bool +} + +// NewIndicesExistsService creates and initializes a new IndicesExistsService. +func NewIndicesExistsService(client *Client) *IndicesExistsService { + return &IndicesExistsService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of one or more indices to check. +func (s *IndicesExistsService) Index(index []string) *IndicesExistsService { + s.index = index + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. (This includes `_all` string or +// when no indices have been specified). +func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. 
+func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService { + s.expandWildcards = expandWildcards + return s +} + +// Local, when set, returns local information and does not retrieve the state +// from master node (default: false). +func (s *IndicesExistsService) Local(local bool) *IndicesExistsService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(s.index, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *IndicesExistsService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template.go b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template.go new file mode 100644 index 000000000..7587a8786 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template.go @@ -0,0 +1,112 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesExistsTemplateService checks if a given template exists. +// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists +// for documentation. +type IndicesExistsTemplateService struct { + client *Client + pretty bool + name string + local *bool +} + +// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService. +func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService { + return &IndicesExistsTemplateService{ + client: client, + } +} + +// Name is the name of the template. 
+func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService { + s.name = name + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *IndicesExistsTemplateService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type.go b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type.go new file mode 100644 index 000000000..631f773fe --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type.go @@ -0,0 +1,161 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesExistsTypeService checks if one or more types exist in one or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-types-exists.html +// for details. +type IndicesExistsTypeService struct { + client *Client + pretty bool + typ []string + index []string + expandWildcards string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool +} + +// NewIndicesExistsTypeService creates a new IndicesExistsTypeService. 
+func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService { + return &IndicesExistsTypeService{ + client: client, + index: make([]string, 0), + typ: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` to check the types across all indices. +func (s *IndicesExistsTypeService) Index(indices ...string) *IndicesExistsTypeService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types to check. +func (s *IndicesExistsTypeService) Type(types ...string) *IndicesExistsTypeService { + s.typ = append(s.typ, types...) + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService { + s.expandWildcards = expandWildcards + return s +} + +// Local specifies whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. 
+func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsTypeService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(s.typ) == 0 { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *IndicesExistsTypeService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_flush.go b/vendor/gopkg.in/olivere/elastic.v3/indices_flush.go new file mode 100644 index 000000000..282c4de55 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_flush.go @@ -0,0 +1,168 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// Flush allows to flush one or more indices. The flush process of an index +// basically frees memory from the index by flushing data to the index +// storage and clearing the internal transaction log. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html +// for details. +type IndicesFlushService struct { + client *Client + pretty bool + index []string + force *bool + waitIfOngoing *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewIndicesFlushService creates a new IndicesFlushService. 
+func NewIndicesFlushService(client *Client) *IndicesFlushService { + return &IndicesFlushService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string for all indices. +func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService { + s.index = append(s.index, indices...) + return s +} + +// Force indicates whether a flush should be forced even if it is not +// necessarily needed ie. if no changes will be committed to the index. +// This is useful if transaction log IDs should be incremented even if +// no uncommitted changes are present. (This setting can be considered as internal). +func (s *IndicesFlushService) Force(force bool) *IndicesFlushService { + s.force = &force + return s +} + +// WaitIfOngoing, if set to true, indicates that the flush operation will +// block until the flush can be executed if another flush operation is +// already executing. The default is false and will cause an exception +// to be thrown on the shard level if another flush operation is already running.. +func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService { + s.waitIfOngoing = &waitIfOngoing + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. (This includes `_all` string or when +// no indices have been specified). +func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards specifies whether to expand wildcard expression to +// concrete indices that are open, closed or both.. 
+func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesFlushService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_flush", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_flush" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.force != nil { + params.Set("force", fmt.Sprintf("%v", *s.force)) + } + if s.waitIfOngoing != nil { + params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesFlushService) Validate() error { + return nil +} + +// Do executes the service. 
+func (s *IndicesFlushService) Do() (*IndicesFlushResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesFlushResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a flush request. + +type IndicesFlushResponse struct { + Shards shardsInfo `json:"_shards"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge.go b/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge.go new file mode 100644 index 000000000..69c155471 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge.go @@ -0,0 +1,199 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesForcemergeService allows to force merging of one or more indices. +// The merge relates to the number of segments a Lucene index holds +// within each shard. The force merge operation allows to reduce the number +// of segments by merging them. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/2.1/indices-forcemerge.html +// for more information. +type IndicesForcemergeService struct { + client *Client + pretty bool + index []string + allowNoIndices *bool + expandWildcards string + flush *bool + ignoreUnavailable *bool + maxNumSegments interface{} + onlyExpungeDeletes *bool + operationThreading interface{} + waitForMerge *bool +} + +// NewIndicesForcemergeService creates a new IndicesForcemergeService. 
+func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService { + return &IndicesForcemergeService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService { + s.expandWildcards = expandWildcards + return s +} + +// Flush specifies whether the index should be flushed after performing +// the operation (default: true). +func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService { + s.flush = &flush + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// MaxNumSegments specifies the number of segments the index should be +// merged into (default: dynamic). +func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService { + s.maxNumSegments = maxNumSegments + return s +} + +// OnlyExpungeDeletes specifies whether the operation should only expunge +// deleted documents. 
+func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService { + s.onlyExpungeDeletes = &onlyExpungeDeletes + return s +} + +func (s *IndicesForcemergeService) OperationThreading(operationThreading interface{}) *IndicesForcemergeService { + s.operationThreading = operationThreading + return s +} + +// WaitForMerge specifies whether the request should block until the +// merge process is finished (default: true). +func (s *IndicesForcemergeService) WaitForMerge(waitForMerge bool) *IndicesForcemergeService { + s.waitForMerge = &waitForMerge + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_forcemerge" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flush != nil { + params.Set("flush", fmt.Sprintf("%v", *s.flush)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.maxNumSegments != nil { + params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments)) + } + if s.onlyExpungeDeletes != nil { + params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes)) + } + if s.operationThreading != nil { + params.Set("operation_threading", 
fmt.Sprintf("%v", s.operationThreading)) + } + if s.waitForMerge != nil { + params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesForcemergeService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesForcemergeService) Do() (*IndicesForcemergeResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesForcemergeResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do. +type IndicesForcemergeResponse struct { + Shards shardsInfo `json:"_shards"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get.go new file mode 100644 index 000000000..3d3a8a911 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get.go @@ -0,0 +1,201 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesGetService retrieves information about one or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html +// for more details. 
+type IndicesGetService struct { + client *Client + pretty bool + index []string + feature []string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + flatSettings *bool + human *bool +} + +// NewIndicesGetService creates a new IndicesGetService. +func NewIndicesGetService(client *Client) *IndicesGetService { + return &IndicesGetService{ + client: client, + index: make([]string, 0), + feature: make([]string, 0), + } +} + +// Index is a list of index names. +func (s *IndicesGetService) Index(indices ...string) *IndicesGetService { + s.index = append(s.index, indices...) + return s +} + +// Feature is a list of features. +func (s *IndicesGetService) Feature(features ...string) *IndicesGetService { + s.feature = append(s.feature, features...) + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesGetService) Local(local bool) *IndicesGetService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false). +func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard expression +// resolves to no concrete indices (default: false). +func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether wildcard expressions should get +// expanded to open or closed indices (default: open). +func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService { + s.expandWildcards = expandWildcards + return s +} + +/* Disabled because serialization would fail in that case. */ +/* +// FlatSettings make the service return settings in flat format (default: false). 
+func (s *IndicesGetService) FlatSettings(flatSettings bool) *IndicesGetService { + s.flatSettings = &flatSettings + return s +} +*/ + +// Human indicates whether to return version and creation date values +// in human-readable format (default: false). +func (s *IndicesGetService) Human(human bool) *IndicesGetService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetService) buildURL() (string, url.Values, error) { + var err error + var path string + var index []string + + if len(s.index) > 0 { + index = s.index + } else { + index = []string{"_all"} + } + + if len(s.feature) > 0 { + // Build URL + path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{ + "index": strings.Join(index, ","), + "feature": strings.Join(s.feature, ","), + }) + } else { + // Build URL + path, err = uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *IndicesGetService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]*IndicesGetResponse + if err := s.client.decoder.Decode(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesGetResponse is part of the response of IndicesGetService.Do. +type IndicesGetResponse struct { + Aliases map[string]interface{} `json:"aliases"` + Mappings map[string]interface{} `json:"mappings"` + Settings map[string]interface{} `json:"settings"` + Warmers map[string]interface{} `json:"warmers"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases.go new file mode 100644 index 000000000..42755f9f2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases.go @@ -0,0 +1,154 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +type AliasesService struct { + client *Client + indices []string + pretty bool +} + +func NewAliasesService(client *Client) *AliasesService { + builder := &AliasesService{ + client: client, + indices: make([]string, 0), + } + return builder +} + +func (s *AliasesService) Pretty(pretty bool) *AliasesService { + s.pretty = pretty + return s +} + +func (s *AliasesService) Index(indices ...string) *AliasesService { + s.indices = append(s.indices, indices...) + return s +} + +func (s *AliasesService) Do() (*AliasesResult, error) { + var err error + + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err = uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + path += strings.Join(indexPart, ",") + + // TODO Add types here + + // Search + path += "/_aliases" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Get response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // { + // "indexName" : { + // "aliases" : { + // "alias1" : { }, + // "alias2" : { } + // } + // }, + // "indexName2" : { + // ... + // }, + // } + indexMap := make(map[string]interface{}) + if err := s.client.decoder.Decode(res.Body, &indexMap); err != nil { + return nil, err + } + + // Each (indexName, _) + ret := &AliasesResult{ + Indices: make(map[string]indexResult), + } + for indexName, indexData := range indexMap { + indexOut, found := ret.Indices[indexName] + if !found { + indexOut = indexResult{Aliases: make([]aliasResult, 0)} + } + + // { "aliases" : { ... 
} } + indexDataMap, ok := indexData.(map[string]interface{}) + if ok { + aliasesData, ok := indexDataMap["aliases"].(map[string]interface{}) + if ok { + for aliasName, _ := range aliasesData { + aliasRes := aliasResult{AliasName: aliasName} + indexOut.Aliases = append(indexOut.Aliases, aliasRes) + } + } + } + + ret.Indices[indexName] = indexOut + } + + return ret, nil +} + +// -- Result of an alias request. + +type AliasesResult struct { + Indices map[string]indexResult +} + +type indexResult struct { + Aliases []aliasResult +} + +type aliasResult struct { + AliasName string +} + +func (ar AliasesResult) IndicesByAlias(aliasName string) []string { + indices := make([]string, 0) + + for indexName, indexInfo := range ar.Indices { + for _, aliasInfo := range indexInfo.Aliases { + if aliasInfo.AliasName == aliasName { + indices = append(indices, indexName) + } + } + } + + return indices +} + +func (ir indexResult) HasAlias(aliasName string) bool { + for _, alias := range ir.Aliases { + if alias.AliasName == aliasName { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping.go new file mode 100644 index 000000000..d1e633351 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping.go @@ -0,0 +1,169 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesGetMappingService retrieves the mapping definitions for an index or +// index/type. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html +// for details. 
+type IndicesGetMappingService struct { + client *Client + pretty bool + index []string + typ []string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewGetMappingService is an alias for NewIndicesGetMappingService. +// Use NewIndicesGetMappingService. +func NewGetMappingService(client *Client) *IndicesGetMappingService { + return NewIndicesGetMappingService(client) +} + +// NewIndicesGetMappingService creates a new IndicesGetMappingService. +func NewIndicesGetMappingService(client *Client) *IndicesGetMappingService { + return &IndicesGetMappingService{ + client: client, + index: make([]string, 0), + typ: make([]string, 0), + } +} + +// Index is a list of index names. +func (s *IndicesGetMappingService) Index(indices ...string) *IndicesGetMappingService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types. +func (s *IndicesGetMappingService) Type(types ...string) *IndicesGetMappingService { + s.typ = append(s.typ, types...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// This includes `_all` string or when no indices have been specified. +func (s *IndicesGetMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetMappingService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *IndicesGetMappingService) ExpandWildcards(expandWildcards string) *IndicesGetMappingService { + s.expandWildcards = expandWildcards + return s +} + +// Local indicates whether to return local information, do not retrieve +// the state from master node (default: false). 
+func (s *IndicesGetMappingService) Local(local bool) *IndicesGetMappingService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesGetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetMappingService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetMappingService) Pretty(pretty bool) *IndicesGetMappingService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) { + var index, typ []string + + if len(s.index) > 0 { + index = s.index + } else { + index = []string{"_all"} + } + + if len(s.typ) > 0 { + typ = s.typ + } else { + typ = []string{"_all"} + } + + // Build URL + path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ + "index": strings.Join(index, ","), + "type": strings.Join(typ, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetMappingService) Validate() error { + return nil +} + +// Do executes the operation. It returns mapping definitions for an index +// or index/type. 
+func (s *IndicesGetMappingService) Do() (map[string]interface{}, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]interface{} + if err := s.client.decoder.Decode(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings.go new file mode 100644 index 000000000..84311323c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings.go @@ -0,0 +1,182 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesGetSettingsService allows to retrieve settings of one +// or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html +// for more details. +type IndicesGetSettingsService struct { + client *Client + pretty bool + index []string + name []string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + flatSettings *bool + local *bool +} + +// NewIndicesGetSettingsService creates a new IndicesGetSettingsService. +func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService { + return &IndicesGetSettingsService{ + client: client, + index: make([]string, 0), + name: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string to perform +// the operation on all indices. 
func (s *IndicesGetSettingsService) Index(indices ...string) *IndicesGetSettingsService {
	s.index = append(s.index, indices...)
	return s
}

// Name are the names of the settings that should be included.
func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
	s.name = append(s.name, name...)
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should
// be ignored when unavailable (missing or closed).
func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// (This includes `_all` string or when no indices have been specified).
func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression
// to concrete indices that are open, closed or both.
// Options: open, closed, none, all. Default: open,closed.
func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
	s.expandWildcards = expandWildcards
	return s
}

// FlatSettings indicates whether to return settings in flat format (default: false).
func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
	s.flatSettings = &flatSettings
	return s
}

// Local indicates whether to return local information, do not retrieve
// the state from master node (default: false).
func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
	s.local = &local
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
	s.pretty = pretty
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
	var err error
	var path string
	var index []string

	if len(s.index) > 0 {
		index = s.index
	} else {
		// Default to all indices when none were specified.
		index = []string{"_all"}
	}

	if len(s.name) > 0 {
		// Build URL
		path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
			"index": strings.Join(index, ","),
			"name":  strings.Join(s.name, ","),
		})
	} else {
		// Build URL
		path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
			"index": strings.Join(index, ","),
		})
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.flatSettings != nil {
		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesGetSettingsService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("GET", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response; the map is keyed by index name.
	var ret map[string]*IndicesGetSettingsResponse
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
type IndicesGetSettingsResponse struct {
	Settings map[string]interface{} `json:"settings"`
}

// ---- new file: vendor/gopkg.in/olivere/elastic.v3/indices_get_template.go ----

// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"fmt"
	"net/url"
	"strings"

	"gopkg.in/olivere/elastic.v3/uritemplates"
)

// IndicesGetTemplateService returns an index template.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
type IndicesGetTemplateService struct {
	client       *Client
	pretty       bool
	name         []string
	flatSettings *bool
	local        *bool
}

// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
	return &IndicesGetTemplateService{
		client: client,
		name:   make([]string, 0),
	}
}

// Name is the name of the index template.
func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
	s.name = append(s.name, name...)
	return s
}

// FlatSettings returns settings in flat format (default: false).
func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
	s.flatSettings = &flatSettings
	return s
}

// Local indicates whether to return local information, i.e. do not retrieve
// the state from master node (default: false).
func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
	s.local = &local
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
	s.pretty = pretty
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
	// Build URL
	var err error
	var path string
	if len(s.name) > 0 {
		path, err = uritemplates.Expand("/_template/{name}", map[string]string{
			"name": strings.Join(s.name, ","),
		})
	} else {
		// No template names: fetch all templates.
		path = "/_template"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.flatSettings != nil {
		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesGetTemplateService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("GET", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response; the map is keyed by template name.
	var ret map[string]*IndicesGetTemplateResponse
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.
type IndicesGetTemplateResponse struct {
	Order    int                    `json:"order,omitempty"`
	Template string                 `json:"template,omitempty"`
	Settings map[string]interface{} `json:"settings,omitempty"`
	Mappings map[string]interface{} `json:"mappings,omitempty"`
	Aliases  map[string]interface{} `json:"aliases,omitempty"`
}

// ---- new file: vendor/gopkg.in/olivere/elastic.v3/indices_get_warmer.go ----

// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"fmt"
	"net/url"
	"strings"

	"gopkg.in/olivere/elastic.v3/uritemplates"
)

// IndicesGetWarmerService allows to get the definition of a warmer for a
// specific index (or alias, or several indices) based on its name.
// The provided name can be a simple wildcard expression or omitted to get
// all warmers.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html
// for more information.
type IndicesGetWarmerService struct {
	client            *Client
	pretty            bool
	index             []string
	name              []string
	typ               []string
	allowNoIndices    *bool
	expandWildcards   string
	ignoreUnavailable *bool
	local             *bool
}

// NewIndicesGetWarmerService creates a new IndicesGetWarmerService.
func NewIndicesGetWarmerService(client *Client) *IndicesGetWarmerService {
	return &IndicesGetWarmerService{
		client: client,
		typ:    make([]string, 0),
		index:  make([]string, 0),
		name:   make([]string, 0),
	}
}

// Index is a list of index names to restrict the operation; use `_all` to perform the operation on all indices.
func (s *IndicesGetWarmerService) Index(indices ...string) *IndicesGetWarmerService {
	s.index = append(s.index, indices...)
	return s
}

// Name is the name of the warmer (supports wildcards); leave empty to get all warmers.
func (s *IndicesGetWarmerService) Name(name ...string) *IndicesGetWarmerService {
	s.name = append(s.name, name...)
	return s
}

// Type is a list of type names the mapping should be added to
// (supports wildcards); use `_all` or omit to add the mapping on all types.
func (s *IndicesGetWarmerService) Type(typ ...string) *IndicesGetWarmerService {
	s.typ = append(s.typ, typ...)
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// This includes `_all` string or when no indices have been specified.
func (s *IndicesGetWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesGetWarmerService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesGetWarmerService) ExpandWildcards(expandWildcards string) *IndicesGetWarmerService {
	s.expandWildcards = expandWildcards
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesGetWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetWarmerService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// Local indicates whether or not to return local information,
// do not retrieve the state from master node (default: false).
func (s *IndicesGetWarmerService) Local(local bool) *IndicesGetWarmerService {
	s.local = &local
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesGetWarmerService) Pretty(pretty bool) *IndicesGetWarmerService {
	s.pretty = pretty
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesGetWarmerService) buildURL() (string, url.Values, error) {
	var err error
	var path string

	// One branch per combination of index/type/name being present or absent.
	if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) == 0 {
		path = "/_warmer"
	} else if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) > 0 {
		path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{
			"name": strings.Join(s.name, ","),
		})
	} else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) == 0 {
		path, err = uritemplates.Expand("/_all/{type}/_warmer", map[string]string{
			"type": strings.Join(s.typ, ","),
		})
	} else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) > 0 {
		path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{
			"type": strings.Join(s.typ, ","),
			"name": strings.Join(s.name, ","),
		})
	} else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) == 0 {
		path, err = uritemplates.Expand("/{index}/_warmer", map[string]string{
			"index": strings.Join(s.index, ","),
		})
	} else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) > 0 {
		path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{
			"index": strings.Join(s.index, ","),
			"name":  strings.Join(s.name, ","),
		})
	} else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) == 0 {
		path, err = uritemplates.Expand("/{index}/{type}/_warmer", map[string]string{
			"index": strings.Join(s.index, ","),
			"type":  strings.Join(s.typ, ","),
		})
	} else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) > 0 {
		path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{
			"index": strings.Join(s.index, ","),
			"type":  strings.Join(s.typ, ","),
			"name":  strings.Join(s.name, ","),
		})
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesGetWarmerService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *IndicesGetWarmerService) Do() (map[string]interface{}, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("GET", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	var ret map[string]interface{}
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// ---- new file: vendor/gopkg.in/olivere/elastic.v3/indices_open.go ----

// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"fmt"
	"net/url"

	"gopkg.in/olivere/elastic.v3/uritemplates"
)

// IndicesOpenService opens an index.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html
// for details.
type IndicesOpenService struct {
	client            *Client
	pretty            bool
	index             string
	timeout           string
	masterTimeout     string
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
}

// NewIndicesOpenService creates and initializes a new IndicesOpenService.
func NewIndicesOpenService(client *Client) *IndicesOpenService {
	return &IndicesOpenService{client: client}
}

// Index is the name of the index to open.
func (s *IndicesOpenService) Index(index string) *IndicesOpenService {
	s.index = index
	return s
}

// Timeout is an explicit operation timeout.
func (s *IndicesOpenService) Timeout(timeout string) *IndicesOpenService {
	s.timeout = timeout
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesOpenService) MasterTimeout(masterTimeout string) *IndicesOpenService {
	s.masterTimeout = masterTimeout
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should
// be ignored when unavailable (missing or closed).
func (s *IndicesOpenService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesOpenService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// (This includes `_all` string or when no indices have been specified).
func (s *IndicesOpenService) AllowNoIndices(allowNoIndices bool) *IndicesOpenService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesOpenService) ExpandWildcards(expandWildcards string) *IndicesOpenService {
	s.expandWildcards = expandWildcards
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesOpenService) Pretty(pretty bool) *IndicesOpenService {
	s.pretty = pretty
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesOpenService) buildURL() (string, url.Values, error) {
	// Build URL
	path, err := uritemplates.Expand("/{index}/_open", map[string]string{
		"index": s.index,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}

	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesOpenService) Validate() error {
	var invalid []string
	// Index is required; collect all missing fields for a single error.
	if s.index == "" {
		invalid = append(invalid, "Index")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *IndicesOpenService) Do() (*IndicesOpenResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("POST", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesOpenResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesOpenResponse is the response of IndicesOpenService.Do.
type IndicesOpenResponse struct {
	Acknowledged bool `json:"acknowledged"`
}

// ---- new file: vendor/gopkg.in/olivere/elastic.v3/indices_put_alias.go ----

// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"fmt"
	"net/url"
)

// AliasService collects add/remove alias actions and submits them in a
// single request against the `/_aliases` endpoint.
type AliasService struct {
	client  *Client
	actions []aliasAction
	pretty  bool
}

// aliasAction is one entry of the "actions" array sent to `/_aliases`.
type aliasAction struct {
	// "add" or "remove"
	Type string
	// Index name
	Index string
	// Alias name
	Alias string
	// Filter
	Filter Query
}

// NewAliasService creates a new AliasService with an empty action list.
func NewAliasService(client *Client) *AliasService {
	builder := &AliasService{
		client:  client,
		actions: make([]aliasAction, 0),
	}
	return builder
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *AliasService) Pretty(pretty bool) *AliasService {
	s.pretty = pretty
	return s
}

// Add queues an action that adds aliasName as an alias of indexName.
func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
	action := aliasAction{Type: "add", Index: indexName, Alias: aliasName}
	s.actions = append(s.actions, action)
	return s
}

// AddWithFilter queues an action that adds aliasName as a filtered alias
// of indexName, using the given filter query.
func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService {
	action := aliasAction{Type: "add", Index: indexName, Alias: aliasName, Filter: filter}
	s.actions = append(s.actions, action)
	return s
}

// Remove queues an action that removes aliasName as an alias of indexName.
func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
	action := aliasAction{Type: "remove", Index: indexName, Alias: aliasName}
	s.actions = append(s.actions, action)
	return s
}

// Do executes all queued alias actions in one POST to `/_aliases`.
func (s *AliasService) Do() (*AliasResult, error) {
	// Build url
	path := "/_aliases"

	// Parameters
	params := make(url.Values)
	if s.pretty {
		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
	}

	// Actions: serialize each queued action as {"add"|"remove": {...}}.
	body := make(map[string]interface{})
	actionsJson := make([]interface{}, 0)

	for _, action := range s.actions {
		actionJson := make(map[string]interface{})
		detailsJson := make(map[string]interface{})
		detailsJson["index"] = action.Index
		detailsJson["alias"] = action.Alias
		if action.Filter != nil {
			src, err := action.Filter.Source()
			if err != nil {
				return nil, err
			}
			detailsJson["filter"] = src
		}
		actionJson[action.Type] = detailsJson
		actionsJson = append(actionsJson, actionJson)
	}

	body["actions"] = actionsJson

	// Get response
	res, err := s.client.PerformRequest("POST", path, params, body)
	if err != nil {
		return nil, err
	}

	// Return results
	ret := new(AliasResult)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// -- Result of an alias request.

// AliasResult is the response of AliasService.Do.
type AliasResult struct {
	Acknowledged bool `json:"acknowledged"`
}

// ---- new file: vendor/gopkg.in/olivere/elastic.v3/indices_put_mapping.go ----

// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"fmt"
	"net/url"
	"strings"

	"gopkg.in/olivere/elastic.v3/uritemplates"
)

// IndicesPutMappingService allows to register specific mapping definition
// for a specific type.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html
// for details.
type IndicesPutMappingService struct {
	client            *Client
	pretty            bool
	typ               string
	index             []string
	masterTimeout     string
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
	ignoreConflicts   *bool
	timeout           string
	bodyJson          map[string]interface{}
	bodyString        string
}

// NewPutMappingService is an alias for NewIndicesPutMappingService.
// Use NewIndicesPutMappingService.
func NewPutMappingService(client *Client) *IndicesPutMappingService {
	return NewIndicesPutMappingService(client)
}

// NewIndicesPutMappingService creates a new IndicesPutMappingService.
func NewIndicesPutMappingService(client *Client) *IndicesPutMappingService {
	return &IndicesPutMappingService{
		client: client,
		index:  make([]string, 0),
	}
}

// Index is a list of index names the mapping should be added to
// (supports wildcards); use `_all` or omit to add the mapping on all indices.
func (s *IndicesPutMappingService) Index(indices ...string) *IndicesPutMappingService {
	s.index = append(s.index, indices...)
	return s
}

// Type is the name of the document type.
func (s *IndicesPutMappingService) Type(typ string) *IndicesPutMappingService {
	s.typ = typ
	return s
}

// Timeout is an explicit operation timeout.
func (s *IndicesPutMappingService) Timeout(timeout string) *IndicesPutMappingService {
	s.timeout = timeout
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesPutMappingService) MasterTimeout(masterTimeout string) *IndicesPutMappingService {
	s.masterTimeout = masterTimeout
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesPutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutMappingService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// This includes `_all` string or when no indices have been specified.
func (s *IndicesPutMappingService) AllowNoIndices(allowNoIndices bool) *IndicesPutMappingService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesPutMappingService) ExpandWildcards(expandWildcards string) *IndicesPutMappingService {
	s.expandWildcards = expandWildcards
	return s
}

// IgnoreConflicts specifies whether to ignore conflicts while updating
// the mapping (default: false).
func (s *IndicesPutMappingService) IgnoreConflicts(ignoreConflicts bool) *IndicesPutMappingService {
	s.ignoreConflicts = &ignoreConflicts
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesPutMappingService) Pretty(pretty bool) *IndicesPutMappingService {
	s.pretty = pretty
	return s
}

// BodyJson contains the mapping definition.
func (s *IndicesPutMappingService) BodyJson(mapping map[string]interface{}) *IndicesPutMappingService {
	s.bodyJson = mapping
	return s
}

// BodyString is the mapping definition serialized as a string.
func (s *IndicesPutMappingService) BodyString(mapping string) *IndicesPutMappingService {
	s.bodyString = mapping
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) {
	var err error
	var path string

	// Build URL: Typ MUST be specified and is verified in Validate.
	if len(s.index) > 0 {
		path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
			"index": strings.Join(s.index, ","),
			"type":  s.typ,
		})
	} else {
		path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{
			"type": s.typ,
		})
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.ignoreConflicts != nil {
		params.Set("ignore_conflicts", fmt.Sprintf("%v", *s.ignoreConflicts))
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesPutMappingService) Validate() error {
	var invalid []string
	if s.typ == "" {
		invalid = append(invalid, "Type")
	}
	// Either a JSON or a string body must be provided.
	if s.bodyString == "" && s.bodyJson == nil {
		invalid = append(invalid, "BodyJson")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *IndicesPutMappingService) Do() (*PutMappingResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body: JSON body takes precedence over the string form.
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("PUT", path, params, body)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(PutMappingResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// PutMappingResponse is the response of IndicesPutMappingService.Do.
type PutMappingResponse struct {
	Acknowledged bool `json:"acknowledged"`
}

// ---- new file: vendor/gopkg.in/olivere/elastic.v3/indices_put_settings.go ----

// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"fmt"
	"net/url"
	"strings"

	"gopkg.in/olivere/elastic.v3/uritemplates"
)

// IndicesPutSettingsService changes specific index level settings in
// real time.
//
// See the documentation at
// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html.
type IndicesPutSettingsService struct {
	client            *Client
	pretty            bool
	index             []string
	allowNoIndices    *bool
	expandWildcards   string
	flatSettings      *bool
	ignoreUnavailable *bool
	masterTimeout     string
	bodyJson          interface{}
	bodyString        string
}

// NewIndicesPutSettingsService creates a new IndicesPutSettingsService.
func NewIndicesPutSettingsService(client *Client) *IndicesPutSettingsService {
	return &IndicesPutSettingsService{
		client: client,
		index:  make([]string, 0),
	}
}

// Index is a list of index names the mapping should be added to
// (supports wildcards); use `_all` or omit to add the mapping on all indices.
func (s *IndicesPutSettingsService) Index(indices ...string) *IndicesPutSettingsService {
	s.index = append(s.index, indices...)
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices. (This includes `_all`
// string or when no indices have been specified).
func (s *IndicesPutSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesPutSettingsService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards specifies whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesPutSettingsService) ExpandWildcards(expandWildcards string) *IndicesPutSettingsService {
	s.expandWildcards = expandWildcards
	return s
}

// FlatSettings indicates whether to return settings in flat format (default: false).
func (s *IndicesPutSettingsService) FlatSettings(flatSettings bool) *IndicesPutSettingsService {
	s.flatSettings = &flatSettings
	return s
}

// IgnoreUnavailable specifies whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesPutSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutSettingsService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// MasterTimeout is the timeout for connection to master.
func (s *IndicesPutSettingsService) MasterTimeout(masterTimeout string) *IndicesPutSettingsService {
	s.masterTimeout = masterTimeout
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesPutSettingsService) Pretty(pretty bool) *IndicesPutSettingsService {
	s.pretty = pretty
	return s
}

// BodyJson is documented as: The index settings to be updated.
func (s *IndicesPutSettingsService) BodyJson(body interface{}) *IndicesPutSettingsService {
	s.bodyJson = body
	return s
}

// BodyString is documented as: The index settings to be updated.
func (s *IndicesPutSettingsService) BodyString(body string) *IndicesPutSettingsService {
	s.bodyString = body
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) {
	// Build URL
	var err error
	var path string

	if len(s.index) > 0 {
		path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
			"index": strings.Join(s.index, ","),
		})
	} else {
		// No indices: update settings on all indices.
		path = "/_settings"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.flatSettings != nil {
		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesPutSettingsService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *IndicesPutSettingsService) Do() (*IndicesPutSettingsResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body: JSON body takes precedence over the string form.
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("PUT", path, params, body)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesPutSettingsResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do.
type IndicesPutSettingsResponse struct {
	Acknowledged bool `json:"acknowledged"`
}

// ---- new file: vendor/gopkg.in/olivere/elastic.v3/indices_put_template.go ----

// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"fmt"
	"net/url"

	"gopkg.in/olivere/elastic.v3/uritemplates"
)

// IndicesPutTemplateService creates or updates index mappings.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
type IndicesPutTemplateService struct {
	client        *Client
	pretty        bool
	name          string
	order         interface{}
	create        *bool
	timeout       string
	masterTimeout string
	flatSettings  *bool
	bodyJson      interface{}
	bodyString    string
}

// NewIndicesPutTemplateService creates a new IndicesPutTemplateService.
+func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService { + return &IndicesPutTemplateService{ + client: client, + } +} + +// Name is the name of the index template. +func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService { + s.name = name + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService { + s.masterTimeout = masterTimeout + return s +} + +// FlatSettings indicates whether to return settings in flat format (default: false). +func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService { + s.flatSettings = &flatSettings + return s +} + +// Order is the order for this template when merging multiple matching ones +// (higher numbers are merged later, overriding the lower numbers). +func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService { + s.order = order + return s +} + +// Create indicates whether the index template should only be added if +// new or can also replace an existing one. +func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService { + s.create = &create + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService { + s.pretty = pretty + return s +} + +// BodyJson is documented as: The template definition. +func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService { + s.bodyJson = body + return s +} + +// BodyString is documented as: The template definition. 
+func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.order != nil { + params.Set("order", fmt.Sprintf("%v", s.order)) + } + if s.create != nil { + params.Set("create", fmt.Sprintf("%v", *s.create)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesPutTemplateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do. +type IndicesPutTemplateResponse struct { + Acknowledged bool `json:"acknowledged,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer.go b/vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer.go new file mode 100644 index 000000000..8974e7b4b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer.go @@ -0,0 +1,221 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesPutWarmerService allows to register a warmer. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html. +type IndicesPutWarmerService struct { + client *Client + pretty bool + typ []string + index []string + name string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + requestCache *bool + expandWildcards string + bodyJson map[string]interface{} + bodyString string +} + +// NewIndicesPutWarmerService creates a new IndicesPutWarmerService. 
+func NewIndicesPutWarmerService(client *Client) *IndicesPutWarmerService { + return &IndicesPutWarmerService{ + client: client, + index: make([]string, 0), + typ: make([]string, 0), + } +} + +// Index is a list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +func (s *IndicesPutWarmerService) Index(indices ...string) *IndicesPutWarmerService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of type names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all types. +func (s *IndicesPutWarmerService) Type(typ ...string) *IndicesPutWarmerService { + s.typ = append(s.typ, typ...) + return s +} + +// Name specifies the name of the warmer (supports wildcards); +// leave empty to get all warmers +func (s *IndicesPutWarmerService) Name(name string) *IndicesPutWarmerService { + s.name = name + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesPutWarmerService) MasterTimeout(masterTimeout string) *IndicesPutWarmerService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesPutWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutWarmerService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// This includes `_all` string or when no indices have been specified. 
+func (s *IndicesPutWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesPutWarmerService { + s.allowNoIndices = &allowNoIndices + return s +} + +// RequestCache specifies whether the request to be warmed should use the request cache, +// defaults to index level setting +func (s *IndicesPutWarmerService) RequestCache(requestCache bool) *IndicesPutWarmerService { + s.requestCache = &requestCache + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesPutWarmerService) ExpandWildcards(expandWildcards string) *IndicesPutWarmerService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutWarmerService) Pretty(pretty bool) *IndicesPutWarmerService { + s.pretty = pretty + return s +} + +// BodyJson contains the mapping definition. +func (s *IndicesPutWarmerService) BodyJson(mapping map[string]interface{}) *IndicesPutWarmerService { + s.bodyJson = mapping + return s +} + +// BodyString is the mapping definition serialized as a string. +func (s *IndicesPutWarmerService) BodyString(mapping string) *IndicesPutWarmerService { + s.bodyString = mapping + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesPutWarmerService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) == 0 && len(s.typ) == 0 { + path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{ + "name": s.name, + }) + } else if len(s.index) == 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{ + "type": strings.Join(s.typ, ","), + "name": s.name, + }) + } else if len(s.index) > 0 && len(s.typ) == 0 { + path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "name": s.name, + }) + } else { + path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + "name": s.name, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutWarmerService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *IndicesPutWarmerService) Do() (*PutWarmerResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PutWarmerResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PutWarmerResponse is the response of IndicesPutWarmerService.Do. +type PutWarmerResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_refresh.go b/vendor/gopkg.in/olivere/elastic.v3/indices_refresh.go new file mode 100644 index 000000000..dc8ebe254 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_refresh.go @@ -0,0 +1,93 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +type RefreshService struct { + client *Client + indices []string + force *bool + pretty bool +} + +func NewRefreshService(client *Client) *RefreshService { + builder := &RefreshService{ + client: client, + indices: make([]string, 0), + } + return builder +} + +func (s *RefreshService) Index(indices ...string) *RefreshService { + s.indices = append(s.indices, indices...) 
+ return s +} + +func (s *RefreshService) Force(force bool) *RefreshService { + s.force = &force + return s +} + +func (s *RefreshService) Pretty(pretty bool) *RefreshService { + s.pretty = pretty + return s +} + +func (s *RefreshService) Do() (*RefreshResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + path += "/_refresh" + + // Parameters + params := make(url.Values) + if s.force != nil { + params.Set("force", fmt.Sprintf("%v", *s.force)) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(RefreshResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a refresh request. + +type RefreshResult struct { + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_stats.go b/vendor/gopkg.in/olivere/elastic.v3/indices_stats.go new file mode 100644 index 000000000..5ba1adc31 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_stats.go @@ -0,0 +1,384 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesStatsService provides stats on various metrics of one or more +// indices. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-stats.html. 
+type IndicesStatsService struct { + client *Client + pretty bool + metric []string + index []string + level string + types []string + completionFields []string + fielddataFields []string + fields []string + groups []string + human *bool +} + +// NewIndicesStatsService creates a new IndicesStatsService. +func NewIndicesStatsService(client *Client) *IndicesStatsService { + return &IndicesStatsService{ + client: client, + index: make([]string, 0), + metric: make([]string, 0), + completionFields: make([]string, 0), + fielddataFields: make([]string, 0), + fields: make([]string, 0), + groups: make([]string, 0), + types: make([]string, 0), + } +} + +// Metric limits the information returned the specific metrics. Options are: +// docs, store, indexing, get, search, completion, fielddata, flush, merge, +// query_cache, refresh, suggest, and warmer. +func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService { + s.metric = append(s.metric, metric...) + return s +} + +// Index is the list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *IndicesStatsService) Index(indices ...string) *IndicesStatsService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types for the `indexing` index metric. +func (s *IndicesStatsService) Type(types ...string) *IndicesStatsService { + s.types = append(s.types, types...) + return s +} + +// Level returns stats aggregated at cluster, index or shard level. +func (s *IndicesStatsService) Level(level string) *IndicesStatsService { + s.level = level + return s +} + +// CompletionFields is a list of fields for `fielddata` and `suggest` +// index metric (supports wildcards). +func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService { + s.completionFields = append(s.completionFields, completionFields...) + return s +} + +// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards). 
+func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService { + s.fielddataFields = append(s.fielddataFields, fielddataFields...) + return s +} + +// Fields is a list of fields for `fielddata` and `completion` index metric +// (supports wildcards). +func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService { + s.fields = append(s.fields, fields...) + return s +} + +// Groups is a list of search groups for `search` index metric. +func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService { + s.groups = append(s.groups, groups...) + return s +} + +// Human indicates whether to return time and byte values in human-readable format. +func (s *IndicesStatsService) Human(human bool) *IndicesStatsService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesStatsService) buildURL() (string, url.Values, error) { + var err error + var path string + if len(s.index) > 0 && len(s.metric) > 0 { + path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{ + "index": strings.Join(s.index, ","), + "metric": strings.Join(s.metric, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_stats", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.metric) > 0 { + path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{ + "metric": strings.Join(s.metric, ","), + }) + } else { + path = "/_stats" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.groups) > 0 { + params.Set("groups", strings.Join(s.groups, ",")) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.level != "" { + params.Set("level", s.level) + } + if len(s.types) > 0 { + params.Set("types", strings.Join(s.types, ",")) + } + if len(s.completionFields) > 0 { + params.Set("completion_fields", strings.Join(s.completionFields, ",")) + } + if len(s.fielddataFields) > 0 { + params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesStatsService) Validate() error { + return nil +} + +// Do executes the operation. 
+func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesStatsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesStatsResponse is the response of IndicesStatsService.Do. +type IndicesStatsResponse struct { + // Shards provides information returned from shards. + Shards shardsInfo `json:"_shards"` + + // All provides summary stats about all indices. + All *IndexStats `json:"_all,omitempty"` + + // Indices provides a map into the stats of an index. The key of the + // map is the index name. + Indices map[string]*IndexStats `json:"indices,omitempty"` +} + +// IndexStats is index stats for a specific index. 
+type IndexStats struct { + Primaries *IndexStatsDetails `json:"primaries,omitempty"` + Total *IndexStatsDetails `json:"total,omitempty"` +} + +type IndexStatsDetails struct { + Docs *IndexStatsDocs `json:"docs,omitempty"` + Store *IndexStatsStore `json:"store,omitempty"` + Indexing *IndexStatsIndexing `json:"indexing,omitempty"` + Get *IndexStatsGet `json:"get,omitempty"` + Search *IndexStatsSearch `json:"search,omitempty"` + Merges *IndexStatsMerges `json:"merges,omitempty"` + Refresh *IndexStatsRefresh `json:"refresh,omitempty"` + Flush *IndexStatsFlush `json:"flush,omitempty"` + Warmer *IndexStatsWarmer `json:"warmer,omitempty"` + FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"` + IdCache *IndexStatsIdCache `json:"id_cache,omitempty"` + Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"` + Percolate *IndexStatsPercolate `json:"percolate,omitempty"` + Completion *IndexStatsCompletion `json:"completion,omitempty"` + Segments *IndexStatsSegments `json:"segments,omitempty"` + Translog *IndexStatsTranslog `json:"translog,omitempty"` + Suggest *IndexStatsSuggest `json:"suggest,omitempty"` + QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"` +} + +type IndexStatsDocs struct { + Count int64 `json:"count,omitempty"` + Deleted int64 `json:"deleted,omitempty"` +} + +type IndexStatsStore struct { + Size string `json:"size,omitempty"` // human size, e.g. 119.3mb + SizeInBytes int64 `json:"size_in_bytes,omitempty"` + ThrottleTime string `json:"throttle_time,omitempty"` // human time, e.g. 
0s + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` +} + +type IndexStatsIndexing struct { + IndexTotal int64 `json:"index_total,omitempty"` + IndexTime string `json:"index_time,omitempty"` + IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"` + IndexCurrent int64 `json:"index_current,omitempty"` + DeleteTotal int64 `json:"delete_total,omitempty"` + DeleteTime string `json:"delete_time,omitempty"` + DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"` + DeleteCurrent int64 `json:"delete_current,omitempty"` + NoopUpdateTotal int64 `json:"noop_update_total,omitempty"` + IsThrottled bool `json:"is_throttled,omitempty"` + ThrottleTime string `json:"throttle_time,omitempty"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` +} + +type IndexStatsGet struct { + Total int64 `json:"total,omitempty"` + GetTime string `json:"get_time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + ExistsTotal int64 `json:"exists_total,omitempty"` + ExistsTime string `json:"exists_time,omitempty"` + ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"` + MissingTotal int64 `json:"missing_total,omitempty"` + MissingTime string `json:"missing_time,omitempty"` + MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` +} + +type IndexStatsSearch struct { + OpenContexts int64 `json:"open_contexts,omitempty"` + QueryTotal int64 `json:"query_total,omitempty"` + QueryTime string `json:"query_time,omitempty"` + QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"` + QueryCurrent int64 `json:"query_current,omitempty"` + FetchTotal int64 `json:"fetch_total,omitempty"` + FetchTime string `json:"fetch_time,omitempty"` + FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"` + FetchCurrent int64 `json:"fetch_current,omitempty"` +} + +type IndexStatsMerges struct { + Current int64 `json:"current,omitempty"` + CurrentDocs int64 
`json:"current_docs,omitempty"` + CurrentSize string `json:"current_size,omitempty"` + CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"` + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` + TotalDocs int64 `json:"total_docs,omitempty"` + TotalSize string `json:"total_size,omitempty"` + TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"` +} + +type IndexStatsRefresh struct { + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsFlush struct { + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsWarmer struct { + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsFilterCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` +} + +type IndexStatsIdCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` +} + +type IndexStatsFielddata struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` +} + +type IndexStatsPercolate struct { + Total int64 `json:"total,omitempty"` + GetTime string `json:"get_time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Queries 
int64 `json:"queries,omitempty"` +} + +type IndexStatsCompletion struct { + Size string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes,omitempty"` +} + +type IndexStatsSegments struct { + Count int64 `json:"count,omitempty"` + Memory string `json:"memory,omitempty"` + MemoryInBytes int64 `json:"memory_in_bytes,omitempty"` + IndexWriterMemory string `json:"index_writer_memory,omitempty"` + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"` + IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"` + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"` + VersionMapMemory string `json:"version_map_memory,omitempty"` + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"` + FixedBitSetMemory string `json:"fixed_bit_set,omitempty"` + FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"` +} + +type IndexStatsTranslog struct { + Operations int64 `json:"operations,omitempty"` + Size string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes,omitempty"` +} + +type IndexStatsSuggest struct { + Total int64 `json:"total,omitempty"` + Time string `json:"time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` +} + +type IndexStatsQueryCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` + HitCount int64 `json:"hit_count,omitempty"` + MissCount int64 `json:"miss_count,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/inner_hit.go b/vendor/gopkg.in/olivere/elastic.v3/inner_hit.go new file mode 100644 index 000000000..1330df1ee --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/inner_hit.go @@ -0,0 +1,160 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// InnerHit implements a simple join for parent/child, nested, and even +// top-level documents in Elasticsearch. +// It is an experimental feature for Elasticsearch versions 1.5 (or greater). +// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html +// for documentation. +// +// See the tests for SearchSource, HasChildFilter, HasChildQuery, +// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery +// for usage examples. +type InnerHit struct { + source *SearchSource + path string + typ string + + name string +} + +// NewInnerHit creates a new InnerHit. +func NewInnerHit() *InnerHit { + return &InnerHit{source: NewSearchSource()} +} + +func (hit *InnerHit) Path(path string) *InnerHit { + hit.path = path + return hit +} + +func (hit *InnerHit) Type(typ string) *InnerHit { + hit.typ = typ + return hit +} + +func (hit *InnerHit) Query(query Query) *InnerHit { + hit.source.Query(query) + return hit +} + +func (hit *InnerHit) From(from int) *InnerHit { + hit.source.From(from) + return hit +} + +func (hit *InnerHit) Size(size int) *InnerHit { + hit.source.Size(size) + return hit +} + +func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit { + hit.source.TrackScores(trackScores) + return hit +} + +func (hit *InnerHit) Explain(explain bool) *InnerHit { + hit.source.Explain(explain) + return hit +} + +func (hit *InnerHit) Version(version bool) *InnerHit { + hit.source.Version(version) + return hit +} + +func (hit *InnerHit) Field(fieldName string) *InnerHit { + hit.source.Field(fieldName) + return hit +} + +func (hit *InnerHit) Fields(fieldNames ...string) *InnerHit { + hit.source.Fields(fieldNames...) 
+ return hit +} + +func (hit *InnerHit) NoFields() *InnerHit { + hit.source.NoFields() + return hit +} + +func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit { + hit.source.FetchSource(fetchSource) + return hit +} + +func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit { + hit.source.FetchSourceContext(fetchSourceContext) + return hit +} + +func (hit *InnerHit) FieldDataFields(fieldDataFields ...string) *InnerHit { + hit.source.FieldDataFields(fieldDataFields...) + return hit +} + +func (hit *InnerHit) FieldDataField(fieldDataField string) *InnerHit { + hit.source.FieldDataField(fieldDataField) + return hit +} + +func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit { + hit.source.ScriptFields(scriptFields...) + return hit +} + +func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit { + hit.source.ScriptField(scriptField) + return hit +} + +func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit { + hit.source.Sort(field, ascending) + return hit +} + +func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit { + hit.source.SortWithInfo(info) + return hit +} + +func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit { + hit.source.SortBy(sorter...) + return hit +} + +func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit { + hit.source.Highlight(highlight) + return hit +} + +func (hit *InnerHit) Highlighter() *Highlight { + return hit.source.Highlighter() +} + +func (hit *InnerHit) Name(name string) *InnerHit { + hit.name = name + return hit +} + +func (hit *InnerHit) Source() (interface{}, error) { + src, err := hit.source.Source() + if err != nil { + return nil, err + } + source, ok := src.(map[string]interface{}) + if !ok { + return nil, nil + } + + // Notice that hit.typ and hit.path are not exported here. + // They are only used with SearchSource and serialized there. 
+ + if hit.name != "" { + source["name"] = hit.name + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/logger.go b/vendor/gopkg.in/olivere/elastic.v3/logger.go new file mode 100644 index 000000000..0fb16b19f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/logger.go @@ -0,0 +1,10 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Logger specifies the interface for all log operations. +type Logger interface { + Printf(format string, v ...interface{}) +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/mget.go b/vendor/gopkg.in/olivere/elastic.v3/mget.go new file mode 100644 index 000000000..fcb36dded --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/mget.go @@ -0,0 +1,218 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" +) + +// MgetService allows to get multiple documents based on an index, +// type (optional) and id (possibly routing). The response includes +// a docs array with all the fetched documents, each element similar +// in structure to a document provided by the Get API. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html +// for details. 
+type MgetService struct { + client *Client + pretty bool + preference string + realtime *bool + refresh *bool + items []*MultiGetItem +} + +func NewMgetService(client *Client) *MgetService { + builder := &MgetService{ + client: client, + items: make([]*MultiGetItem, 0), + } + return builder +} + +func (b *MgetService) Preference(preference string) *MgetService { + b.preference = preference + return b +} + +func (b *MgetService) Refresh(refresh bool) *MgetService { + b.refresh = &refresh + return b +} + +func (b *MgetService) Realtime(realtime bool) *MgetService { + b.realtime = &realtime + return b +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *MgetService) Pretty(pretty bool) *MgetService { + s.pretty = pretty + return s +} + +func (b *MgetService) Add(items ...*MultiGetItem) *MgetService { + b.items = append(b.items, items...) + return b +} + +func (b *MgetService) Source() (interface{}, error) { + source := make(map[string]interface{}) + items := make([]interface{}, len(b.items)) + for i, item := range b.items { + src, err := item.Source() + if err != nil { + return nil, err + } + items[i] = src + } + source["docs"] = items + return source, nil +} + +func (b *MgetService) Do() (*MgetResponse, error) { + // Build url + path := "/_mget" + + params := make(url.Values) + if b.realtime != nil { + params.Add("realtime", fmt.Sprintf("%v", *b.realtime)) + } + if b.preference != "" { + params.Add("preference", b.preference) + } + if b.refresh != nil { + params.Add("refresh", fmt.Sprintf("%v", *b.refresh)) + } + + // Set body + body, err := b.Source() + if err != nil { + return nil, err + } + + // Get response + res, err := b.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(MgetResponse) + if err := b.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Multi Get Item -- + +// MultiGetItem is a single 
document to retrieve via the MgetService. +type MultiGetItem struct { + index string + typ string + id string + routing string + fields []string + version *int64 // see org.elasticsearch.common.lucene.uid.Versions + versionType string // see org.elasticsearch.index.VersionType + fsc *FetchSourceContext +} + +func NewMultiGetItem() *MultiGetItem { + return &MultiGetItem{} +} + +func (item *MultiGetItem) Index(index string) *MultiGetItem { + item.index = index + return item +} + +func (item *MultiGetItem) Type(typ string) *MultiGetItem { + item.typ = typ + return item +} + +func (item *MultiGetItem) Id(id string) *MultiGetItem { + item.id = id + return item +} + +func (item *MultiGetItem) Routing(routing string) *MultiGetItem { + item.routing = routing + return item +} + +func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem { + if item.fields == nil { + item.fields = make([]string, 0) + } + item.fields = append(item.fields, fields...) + return item +} + +// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1), +// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions. +// The default in Elasticsearch is MatchAny (-3). +func (item *MultiGetItem) Version(version int64) *MultiGetItem { + item.version = &version + return item +} + +// VersionType can be "internal", "external", "external_gt", "external_gte", +// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source. +// It is "internal" by default. +func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem { + item.versionType = versionType + return item +} + +func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem { + item.fsc = fetchSourceContext + return item +} + +// Source returns the serialized JSON to be sent to Elasticsearch as +// part of a MultiGet search. 
+func (item *MultiGetItem) Source() (interface{}, error) { + source := make(map[string]interface{}) + + source["_id"] = item.id + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.fsc != nil { + src, err := item.fsc.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + if item.fields != nil { + source["fields"] = item.fields + } + if item.routing != "" { + source["_routing"] = item.routing + } + if item.version != nil { + source["version"] = fmt.Sprintf("%d", *item.version) + } + if item.versionType != "" { + source["version_type"] = item.versionType + } + + return source, nil +} + +// -- Result of a Multi Get request. + +type MgetResponse struct { + Docs []*GetResult `json:"docs,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/msearch.go b/vendor/gopkg.in/olivere/elastic.v3/msearch.go new file mode 100644 index 000000000..ab0c7fd18 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/msearch.go @@ -0,0 +1,96 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" +) + +// MultiSearch executes one or more searches in one roundtrip. +// See http://www.elasticsearch.org/guide/reference/api/multi-search/ +type MultiSearchService struct { + client *Client + requests []*SearchRequest + indices []string + pretty bool + routing string + preference string +} + +func NewMultiSearchService(client *Client) *MultiSearchService { + builder := &MultiSearchService{ + client: client, + requests: make([]*SearchRequest, 0), + indices: make([]string, 0), + } + return builder +} + +func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService { + s.requests = append(s.requests, requests...) 
+ return s +} + +func (s *MultiSearchService) Index(indices ...string) *MultiSearchService { + s.indices = append(s.indices, indices...) + return s +} + +func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService { + s.pretty = pretty + return s +} + +func (s *MultiSearchService) Do() (*MultiSearchResult, error) { + // Build url + path := "/_msearch" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Set body + lines := make([]string, 0) + for _, sr := range s.requests { + // Set default indices if not specified in the request + if !sr.HasIndices() && len(s.indices) > 0 { + sr = sr.Index(s.indices...) + } + + header, err := json.Marshal(sr.header()) + if err != nil { + return nil, err + } + body, err := json.Marshal(sr.body()) + if err != nil { + return nil, err + } + lines = append(lines, string(header)) + lines = append(lines, string(body)) + } + body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n + + // Get response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(MultiSearchResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +type MultiSearchResult struct { + Responses []*SearchResult `json:"responses,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/mtermvectors.go b/vendor/gopkg.in/olivere/elastic.v3/mtermvectors.go new file mode 100644 index 000000000..2dead3255 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/mtermvectors.go @@ -0,0 +1,469 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// MultiTermvectorService returns information and statistics on terms in the +// fields of a particular document. The document could be stored in the +// index or artificially provided by the user. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html +// for documentation. +type MultiTermvectorService struct { + client *Client + pretty bool + index string + typ string + fieldStatistics *bool + fields []string + ids []string + offsets *bool + parent string + payloads *bool + positions *bool + preference string + realtime *bool + routing string + termStatistics *bool + version interface{} + versionType string + bodyJson interface{} + bodyString string + docs []*MultiTermvectorItem +} + +// NewMultiTermvectorService creates a new MultiTermvectorService. +func NewMultiTermvectorService(client *Client) *MultiTermvectorService { + return &MultiTermvectorService{ + client: client, + } +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *MultiTermvectorService) Pretty(pretty bool) *MultiTermvectorService { + s.pretty = pretty + return s +} + +// Add adds documents to MultiTermvectors service. +func (s *MultiTermvectorService) Add(docs ...*MultiTermvectorItem) *MultiTermvectorService { + s.docs = append(s.docs, docs...) + return s +} + +// Index in which the document resides. +func (s *MultiTermvectorService) Index(index string) *MultiTermvectorService { + s.index = index + return s +} + +// Type of the document. +func (s *MultiTermvectorService) Type(typ string) *MultiTermvectorService { + s.typ = typ + return s +} + +// FieldStatistics specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". 
+func (s *MultiTermvectorService) FieldStatistics(fieldStatistics bool) *MultiTermvectorService { + s.fieldStatistics = &fieldStatistics + return s +} + +// Fields is a comma-separated list of fields to return. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Fields(fields []string) *MultiTermvectorService { + s.fields = fields + return s +} + +// Ids is a comma-separated list of documents ids. You must define ids as parameter or set "ids" or "docs" in the request body. +func (s *MultiTermvectorService) Ids(ids []string) *MultiTermvectorService { + s.ids = ids + return s +} + +// Offsets specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Offsets(offsets bool) *MultiTermvectorService { + s.offsets = &offsets + return s +} + +// Parent id of documents. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Parent(parent string) *MultiTermvectorService { + s.parent = parent + return s +} + +// Payloads specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Payloads(payloads bool) *MultiTermvectorService { + s.payloads = &payloads + return s +} + +// Positions specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Positions(positions bool) *MultiTermvectorService { + s.positions = &positions + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). Applies to all returned documents unless otherwise specified in body "params" or "docs". 
+func (s *MultiTermvectorService) Preference(preference string) *MultiTermvectorService { + s.preference = preference + return s +} + +// Realtime specifies if requests are real-time as opposed to near-real-time (default: true). +func (s *MultiTermvectorService) Realtime(realtime bool) *MultiTermvectorService { + s.realtime = &realtime + return s +} + +// Routing specific routing value. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Routing(routing string) *MultiTermvectorService { + s.routing = routing + return s +} + +// TermStatistics specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) TermStatistics(termStatistics bool) *MultiTermvectorService { + s.termStatistics = &termStatistics + return s +} + +// Version is explicit version number for concurrency control. +func (s *MultiTermvectorService) Version(version interface{}) *MultiTermvectorService { + s.version = version + return s +} + +// VersionType is specific version type. +func (s *MultiTermvectorService) VersionType(versionType string) *MultiTermvectorService { + s.versionType = versionType + return s +} + +// BodyJson is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.. +func (s *MultiTermvectorService) BodyJson(body interface{}) *MultiTermvectorService { + s.bodyJson = body + return s +} + +// BodyString is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.. 
+func (s *MultiTermvectorService) BodyString(body string) *MultiTermvectorService { + s.bodyString = body + return s +} + +func (s *MultiTermvectorService) Source() interface{} { + source := make(map[string]interface{}) + docs := make([]interface{}, len(s.docs)) + for i, doc := range s.docs { + docs[i] = doc.Source() + } + source["docs"] = docs + return source +} + +// buildURL builds the URL for the operation. +func (s *MultiTermvectorService) buildURL() (string, url.Values, error) { + var path string + var err error + + if s.index != "" && s.typ != "" { + path, err = uritemplates.Expand("/{index}/{type}/_mtermvectors", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } else if s.index != "" && s.typ == "" { + path, err = uritemplates.Expand("/{index}/_mtermvectors", map[string]string{ + "index": s.index, + }) + } else { + path = "/_mtermvectors" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.fieldStatistics != nil { + params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics)) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if len(s.ids) > 0 { + params.Set("ids", strings.Join(s.ids, ",")) + } + if s.offsets != nil { + params.Set("offsets", fmt.Sprintf("%v", *s.offsets)) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.payloads != nil { + params.Set("payloads", fmt.Sprintf("%v", *s.payloads)) + } + if s.positions != nil { + params.Set("positions", fmt.Sprintf("%v", *s.positions)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.termStatistics != nil { + params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics)) + } + if s.version != nil { + params.Set("version", 
fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *MultiTermvectorService) Validate() error { + var invalid []string + if s.index == "" && s.typ != "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *MultiTermvectorService) Do() (*MultiTermvectorResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if len(s.bodyString) > 0 { + body = s.bodyString + } else { + body = s.Source() + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(MultiTermvectorResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// MultiTermvectorResponse is the response of MultiTermvectorService.Do. +type MultiTermvectorResponse struct { + Docs []*TermvectorsResponse `json:"docs"` +} + +// -- MultiTermvectorItem -- + +// MultiTermvectorItem is a single document to retrieve via MultiTermvectorService. 
+type MultiTermvectorItem struct { + index string + typ string + id string + doc interface{} + fieldStatistics *bool + fields []string + perFieldAnalyzer map[string]string + offsets *bool + parent string + payloads *bool + positions *bool + preference string + realtime *bool + routing string + termStatistics *bool +} + +func NewMultiTermvectorItem() *MultiTermvectorItem { + return &MultiTermvectorItem{} +} + +func (s *MultiTermvectorItem) Index(index string) *MultiTermvectorItem { + s.index = index + return s +} + +func (s *MultiTermvectorItem) Type(typ string) *MultiTermvectorItem { + s.typ = typ + return s +} + +func (s *MultiTermvectorItem) Id(id string) *MultiTermvectorItem { + s.id = id + return s +} + +// Doc is the document to analyze. +func (s *MultiTermvectorItem) Doc(doc interface{}) *MultiTermvectorItem { + s.doc = doc + return s +} + +// FieldStatistics specifies if document count, sum of document frequencies +// and sum of total term frequencies should be returned. +func (s *MultiTermvectorItem) FieldStatistics(fieldStatistics bool) *MultiTermvectorItem { + s.fieldStatistics = &fieldStatistics + return s +} + +// Fields a list of fields to return. +func (s *MultiTermvectorItem) Fields(fields ...string) *MultiTermvectorItem { + if s.fields == nil { + s.fields = make([]string, 0) + } + s.fields = append(s.fields, fields...) + return s +} + +// PerFieldAnalyzer allows to specify a different analyzer than the one +// at the field. +func (s *MultiTermvectorItem) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *MultiTermvectorItem { + s.perFieldAnalyzer = perFieldAnalyzer + return s +} + +// Offsets specifies if term offsets should be returned. +func (s *MultiTermvectorItem) Offsets(offsets bool) *MultiTermvectorItem { + s.offsets = &offsets + return s +} + +// Parent id of documents. +func (s *MultiTermvectorItem) Parent(parent string) *MultiTermvectorItem { + s.parent = parent + return s +} + +// Payloads specifies if term payloads should be returned. 
+func (s *MultiTermvectorItem) Payloads(payloads bool) *MultiTermvectorItem { + s.payloads = &payloads + return s +} + +// Positions specifies if term positions should be returned. +func (s *MultiTermvectorItem) Positions(positions bool) *MultiTermvectorItem { + s.positions = &positions + return s +} + +// Preference specify the node or shard the operation +// should be performed on (default: random). +func (s *MultiTermvectorItem) Preference(preference string) *MultiTermvectorItem { + s.preference = preference + return s +} + +// Realtime specifies if request is real-time as opposed to +// near-real-time (default: true). +func (s *MultiTermvectorItem) Realtime(realtime bool) *MultiTermvectorItem { + s.realtime = &realtime + return s +} + +// Routing is a specific routing value. +func (s *MultiTermvectorItem) Routing(routing string) *MultiTermvectorItem { + s.routing = routing + return s +} + +// TermStatistics specifies if total term frequency and document frequency +// should be returned. +func (s *MultiTermvectorItem) TermStatistics(termStatistics bool) *MultiTermvectorItem { + s.termStatistics = &termStatistics + return s +} + +// Source returns the serialized JSON to be sent to Elasticsearch as +// part of a MultiTermvector. 
+func (s *MultiTermvectorItem) Source() interface{} { + source := make(map[string]interface{}) + + source["_id"] = s.id + + if s.index != "" { + source["_index"] = s.index + } + if s.typ != "" { + source["_type"] = s.typ + } + if s.fields != nil { + source["fields"] = s.fields + } + if s.fieldStatistics != nil { + source["field_statistics"] = fmt.Sprintf("%v", *s.fieldStatistics) + } + if s.offsets != nil { + source["offsets"] = s.offsets + } + if s.parent != "" { + source["parent"] = s.parent + } + if s.payloads != nil { + source["payloads"] = fmt.Sprintf("%v", *s.payloads) + } + if s.positions != nil { + source["positions"] = fmt.Sprintf("%v", *s.positions) + } + if s.preference != "" { + source["preference"] = s.preference + } + if s.realtime != nil { + source["realtime"] = fmt.Sprintf("%v", *s.realtime) + } + if s.routing != "" { + source["routing"] = s.routing + } + if s.termStatistics != nil { + source["term_statistics"] = fmt.Sprintf("%v", *s.termStatistics) + } + if s.doc != nil { + source["doc"] = s.doc + } + if s.perFieldAnalyzer != nil && len(s.perFieldAnalyzer) > 0 { + source["per_field_analyzer"] = s.perFieldAnalyzer + } + + return source +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/nodes_info.go b/vendor/gopkg.in/olivere/elastic.v3/nodes_info.go new file mode 100644 index 000000000..29540f972 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/nodes_info.go @@ -0,0 +1,317 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "log" + "net/url" + "strings" + "time" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +var ( + _ = fmt.Print + _ = log.Print + _ = strings.Index + _ = uritemplates.Expand + _ = url.Parse +) + +// NodesInfoService allows to retrieve one or more or all of the +// cluster nodes information. 
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html. +type NodesInfoService struct { + client *Client + pretty bool + nodeId []string + metric []string + flatSettings *bool + human *bool +} + +// NewNodesInfoService creates a new NodesInfoService. +func NewNodesInfoService(client *Client) *NodesInfoService { + return &NodesInfoService{ + client: client, + nodeId: []string{"_all"}, + metric: []string{"_all"}, + } +} + +// NodeId is a list of node IDs or names to limit the returned information. +// Use "_local" to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService { + s.nodeId = append(s.nodeId, nodeId...) + return s +} + +// Metric is a list of metrics you wish returned. Leave empty to return all. +// Valid metrics are: settings, os, process, jvm, thread_pool, network, +// transport, http, and plugins. +func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService { + s.metric = append(s.metric, metric...) + return s +} + +// FlatSettings returns settings in flat format (default: false). +func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService { + s.flatSettings = &flatSettings + return s +} + +// Human indicates whether to return time and byte values in human-readable format. +func (s *NodesInfoService) Human(human bool) *NodesInfoService { + s.human = &human + return s +} + +// Pretty indicates whether to indent the returned JSON. +func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *NodesInfoService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{ + "node_id": strings.Join(s.nodeId, ","), + "metric": strings.Join(s.metric, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.pretty { + params.Set("pretty", "1") + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *NodesInfoService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *NodesInfoService) Do() (*NodesInfoResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(NodesInfoResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// NodesInfoResponse is the response of NodesInfoService.Do. +type NodesInfoResponse struct { + ClusterName string `json:"cluster_name"` + Nodes map[string]*NodesInfoNode `json:"nodes"` +} + +type NodesInfoNode struct { + // Name of the node, e.g. "Mister Fear" + Name string `json:"name"` + // TransportAddress, e.g. "127.0.0.1:9300" + TransportAddress string `json:"transport_address"` + // Host is the host name, e.g. "macbookair" + Host string `json:"host"` + // IP is the IP address, e.g. "192.168.1.2" + IP string `json:"ip"` + // Version is the Elasticsearch version running on the node, e.g. 
"1.4.3" + Version string `json:"version"` + // Build is the Elasticsearch build, e.g. "36a29a7" + Build string `json:"build"` + // HTTPAddress, e.g. "127.0.0.1:9200" + HTTPAddress string `json:"http_address"` + // HTTPSAddress, e.g. "127.0.0.1:9200" + HTTPSAddress string `json:"https_address"` + + // Attributes of the node. + Attributes map[string]interface{} `json:"attributes"` + + // Settings of the node, e.g. paths and pidfile. + Settings map[string]interface{} `json:"settings"` + + // OS information, e.g. CPU and memory. + OS *NodesInfoNodeOS `json:"os"` + + // Process information, e.g. max file descriptors. + Process *NodesInfoNodeProcess `json:"process"` + + // JVM information, e.g. VM version. + JVM *NodesInfoNodeProcess `json:"jvm"` + + // ThreadPool information. + ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"` + + // Network information. + Network *NodesInfoNodeNetwork `json:"network"` + + // Network information. + Transport *NodesInfoNodeTransport `json:"transport"` + + // HTTP information. + HTTP *NodesInfoNodeHTTP `json:"http"` + + // Plugins information. + Plugins []*NodesInfoNodePlugin `json:"plugins"` +} + +type NodesInfoNodeOS struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000 + AvailableProcessors int `json:"available_processors"` // e.g. 4 + + // CPU information + CPU struct { + Vendor string `json:"vendor"` // e.g. Intel + Model string `json:"model"` // e.g. iMac15,1 + MHz int `json:"mhz"` // e.g. 3500 + TotalCores int `json:"total_cores"` // e.g. 4 + TotalSockets int `json:"total_sockets"` // e.g. 4 + CoresPerSocket int `json:"cores_per_socket"` // e.g. 16 + CacheSizeInBytes int `json:"cache_size_in_bytes"` // e.g. 256 + } `json:"cpu"` + + // Mem information + Mem struct { + Total string `json:"total"` // e.g. 16gb + TotalInBytes int `json:"total_in_bytes"` // e.g. 
17179869184 + } `json:"mem"` + + // Swap information + Swap struct { + Total string `json:"total"` // e.g. 1gb + TotalInBytes int `json:"total_in_bytes"` // e.g. 1073741824 + } `json:"swap"` +} + +type NodesInfoNodeProcess struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000 + ID int `json:"id"` // process id, e.g. 87079 + MaxFileDescriptors int `json:"max_file_descriptors"` // e.g. 32768 + Mlockall bool `json:"mlockall"` // e.g. false +} + +type NodesInfoNodeJVM struct { + PID int `json:"pid"` // process id, e.g. 87079 + Version string `json:"version"` // e.g. "1.8.0_25" + VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" + VMVersion string `json:"vm_version"` // e.g. "25.25-b02" + VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation" + StartTime time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z" + StartTimeInMillis int64 `json:"start_time_in_millis"` + + // Mem information + Mem struct { + HeapInit string `json:"heap_init"` // e.g. 1gb + HeapInitInBytes int `json:"heap_init_in_bytes"` + HeapMax string `json:"heap_max"` // e.g. 4gb + HeapMaxInBytes int `json:"heap_max_in_bytes"` + NonHeapInit string `json:"non_heap_init"` // e.g. 2.4mb + NonHeapInitInBytes int `json:"non_heap_init_in_bytes"` + NonHeapMax string `json:"non_heap_max"` // e.g. 0b + NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"` + DirectMax string `json:"direct_max"` // e.g. 4gb + DirectMaxInBytes int `json:"direct_max_in_bytes"` + } `json:"mem"` + + GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"] + MemoryPools []string `json:"memory_pools"` // e.g. 
["Code Cache", "Metaspace"] +} + +type NodesInfoNodeThreadPool struct { + Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"` + Bench *NodesInfoNodeThreadPoolSection `json:"bench"` + Listener *NodesInfoNodeThreadPoolSection `json:"listener"` + Index *NodesInfoNodeThreadPoolSection `json:"index"` + Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"` + Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"` + Generic *NodesInfoNodeThreadPoolSection `json:"generic"` + Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"` + Search *NodesInfoNodeThreadPoolSection `json:"search"` + Flush *NodesInfoNodeThreadPoolSection `json:"flush"` + Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"` + Management *NodesInfoNodeThreadPoolSection `json:"management"` + Get *NodesInfoNodeThreadPoolSection `json:"get"` + Merge *NodesInfoNodeThreadPoolSection `json:"merge"` + Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"` + Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"` +} + +type NodesInfoNodeThreadPoolSection struct { + Type string `json:"type"` // e.g. fixed + Min int `json:"min"` // e.g. 4 + Max int `json:"max"` // e.g. 4 + KeepAlive string `json:"keep_alive"` // e.g. "5m" + QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1 +} + +type NodesInfoNodeNetwork struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000 + PrimaryInterface struct { + Address string `json:"address"` // e.g. 192.168.1.2 + Name string `json:"name"` // e.g. en0 + MACAddress string `json:"mac_address"` // e.g. 
11:22:33:44:55:66 + } `json:"primary_interface"` +} + +type NodesInfoNodeTransport struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` + Profiles map[string]*NodesInfoNodeTransportProfile `json:"profiles"` +} + +type NodesInfoNodeTransportProfile struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` +} + +type NodesInfoNodeHTTP struct { + BoundAddress []string `json:"bound_address"` // e.g. ["127.0.0.1:9200", "[fe80::1]:9200", "[::1]:9200"] + PublishAddress string `json:"publish_address"` // e.g. "127.0.0.1:9300" + MaxContentLength string `json:"max_content_length"` // e.g. "100mb" + MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"` +} + +type NodesInfoNodePlugin struct { + Name string `json:"name"` + Description string `json:"description"` + Site bool `json:"site"` + JVM bool `json:"jvm"` + URL string `json:"url"` // e.g. /_plugin/dummy/ +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/optimize.go b/vendor/gopkg.in/olivere/elastic.v3/optimize.go new file mode 100644 index 000000000..0e94cea23 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/optimize.go @@ -0,0 +1,129 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +type OptimizeService struct { + client *Client + indices []string + maxNumSegments *int + onlyExpungeDeletes *bool + flush *bool + waitForMerge *bool + force *bool + pretty bool +} + +func NewOptimizeService(client *Client) *OptimizeService { + builder := &OptimizeService{ + client: client, + indices: make([]string, 0), + } + return builder +} + +func (s *OptimizeService) Index(indices ...string) *OptimizeService { + s.indices = append(s.indices, indices...) 
+ return s +} + +func (s *OptimizeService) MaxNumSegments(maxNumSegments int) *OptimizeService { + s.maxNumSegments = &maxNumSegments + return s +} + +func (s *OptimizeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *OptimizeService { + s.onlyExpungeDeletes = &onlyExpungeDeletes + return s +} + +func (s *OptimizeService) Flush(flush bool) *OptimizeService { + s.flush = &flush + return s +} + +func (s *OptimizeService) WaitForMerge(waitForMerge bool) *OptimizeService { + s.waitForMerge = &waitForMerge + return s +} + +func (s *OptimizeService) Force(force bool) *OptimizeService { + s.force = &force + return s +} + +func (s *OptimizeService) Pretty(pretty bool) *OptimizeService { + s.pretty = pretty + return s +} + +func (s *OptimizeService) Do() (*OptimizeResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + path += "/_optimize" + + // Parameters + params := make(url.Values) + if s.maxNumSegments != nil { + params.Set("max_num_segments", fmt.Sprintf("%d", *s.maxNumSegments)) + } + if s.onlyExpungeDeletes != nil { + params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes)) + } + if s.flush != nil { + params.Set("flush", fmt.Sprintf("%v", *s.flush)) + } + if s.waitForMerge != nil { + params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge)) + } + if s.force != nil { + params.Set("force", fmt.Sprintf("%v", *s.force)) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(OptimizeResult) + if err := s.client.decoder.Decode(res.Body, ret); err 
!= nil { + return nil, err + } + return ret, nil +} + +// -- Result of an optimize request. + +type OptimizeResult struct { + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/percolate.go b/vendor/gopkg.in/olivere/elastic.v3/percolate.go new file mode 100644 index 000000000..31518a932 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/percolate.go @@ -0,0 +1,308 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// PercolateService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-percolate.html. +type PercolateService struct { + client *Client + pretty bool + index string + typ string + id string + version interface{} + versionType string + routing []string + preference string + ignoreUnavailable *bool + percolateIndex string + percolatePreference string + percolateRouting string + source string + allowNoIndices *bool + expandWildcards string + percolateFormat string + percolateType string + bodyJson interface{} + bodyString string +} + +// NewPercolateService creates a new PercolateService. +func NewPercolateService(client *Client) *PercolateService { + return &PercolateService{ + client: client, + routing: make([]string, 0), + } +} + +// Index is the name of the index of the document being percolated. +func (s *PercolateService) Index(index string) *PercolateService { + s.index = index + return s +} + +// Type is the type of the document being percolated. +func (s *PercolateService) Type(typ string) *PercolateService { + s.typ = typ + return s +} + +// Id is to substitute the document in the request body with a +// document that is known by the specified id. 
On top of the id, +// the index and type parameter will be used to retrieve +// the document from within the cluster. +func (s *PercolateService) Id(id string) *PercolateService { + s.id = id + return s +} + +// ExpandWildcards indicates whether to expand wildcard expressions +// to concrete indices that are open, closed or both. +func (s *PercolateService) ExpandWildcards(expandWildcards string) *PercolateService { + s.expandWildcards = expandWildcards + return s +} + +// PercolateFormat indicates whether to return an array of matching +// query IDs instead of objects. +func (s *PercolateService) PercolateFormat(percolateFormat string) *PercolateService { + s.percolateFormat = percolateFormat + return s +} + +// PercolateType is the type to percolate document into. Defaults to type. +func (s *PercolateService) PercolateType(percolateType string) *PercolateService { + s.percolateType = percolateType + return s +} + +// PercolateRouting is the routing value to use when percolating +// the existing document. +func (s *PercolateService) PercolateRouting(percolateRouting string) *PercolateService { + s.percolateRouting = percolateRouting + return s +} + +// Source is the URL-encoded request definition. +func (s *PercolateService) Source(source string) *PercolateService { + s.source = source + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *PercolateService) AllowNoIndices(allowNoIndices bool) *PercolateService { + s.allowNoIndices = &allowNoIndices + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *PercolateService) IgnoreUnavailable(ignoreUnavailable bool) *PercolateService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// PercolateIndex is the index to percolate the document into. 
Defaults to index. +func (s *PercolateService) PercolateIndex(percolateIndex string) *PercolateService { + s.percolateIndex = percolateIndex + return s +} + +// PercolatePreference defines which shard to prefer when executing +// the percolate request. +func (s *PercolateService) PercolatePreference(percolatePreference string) *PercolateService { + s.percolatePreference = percolatePreference + return s +} + +// Version is an explicit version number for concurrency control. +func (s *PercolateService) Version(version interface{}) *PercolateService { + s.version = version + return s +} + +// VersionType is the specific version type. +func (s *PercolateService) VersionType(versionType string) *PercolateService { + s.versionType = versionType + return s +} + +// Routing is a list of specific routing values. +func (s *PercolateService) Routing(routing []string) *PercolateService { + s.routing = routing + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: random). +func (s *PercolateService) Preference(preference string) *PercolateService { + s.preference = preference + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *PercolateService) Pretty(pretty bool) *PercolateService { + s.pretty = pretty + return s +} + +// Doc wraps the given document into the "doc" key of the body. +func (s *PercolateService) Doc(doc interface{}) *PercolateService { + return s.BodyJson(map[string]interface{}{"doc": doc}) +} + +// BodyJson is the percolator request definition using the percolate DSL. +func (s *PercolateService) BodyJson(body interface{}) *PercolateService { + s.bodyJson = body + return s +} + +// BodyString is the percolator request definition using the percolate DSL. +func (s *PercolateService) BodyString(body string) *PercolateService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
+func (s *PercolateService) buildURL() (string, url.Values, error) { + // Build URL + var path string + var err error + if s.id == "" { + path, err = uritemplates.Expand("/{index}/{type}/_percolate", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } else { + path, err = uritemplates.Expand("/{index}/{type}/{id}/_percolate", map[string]string{ + "index": s.index, + "type": s.typ, + "id": s.id, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if len(s.routing) > 0 { + params.Set("routing", strings.Join(s.routing, ",")) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.percolateIndex != "" { + params.Set("percolate_index", s.percolateIndex) + } + if s.percolatePreference != "" { + params.Set("percolate_preference", s.percolatePreference) + } + if s.percolateRouting != "" { + params.Set("percolate_routing", s.percolateRouting) + } + if s.source != "" { + params.Set("source", s.source) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.percolateFormat != "" { + params.Set("percolate_format", s.percolateFormat) + } + if s.percolateType != "" { + params.Set("percolate_type", s.percolateType) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *PercolateService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *PercolateService) Do() (*PercolateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PercolateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PercolateResponse is the response of PercolateService.Do. +type PercolateResponse struct { + TookInMillis int64 `json:"took"` // search time in milliseconds + Total int64 `json:"total"` // total matches + Matches []*PercolateMatch `json:"matches,omitempty"` + Aggregations Aggregations `json:"aggregations,omitempty"` // results from aggregations +} + +// PercolateMatch returns a single match in a PercolateResponse. +type PercolateMatch struct { + Index string `json:"_index,omitempty"` + Id string `json:"_id"` + Score float64 `json:"_score,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/ping.go b/vendor/gopkg.in/olivere/elastic.v3/ping.go new file mode 100644 index 000000000..fada22817 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/ping.go @@ -0,0 +1,126 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "net/http" + "net/url" +) + +// PingService checks if an Elasticsearch server on a given URL is alive. +// When asked for, it can also return various information about the +// Elasticsearch server, e.g. the Elasticsearch version number. +// +// Ping simply starts a HTTP GET request to the URL of the server. +// If the server responds with HTTP Status code 200 OK, the server is alive. +type PingService struct { + client *Client + url string + timeout string + httpHeadOnly bool + pretty bool +} + +// PingResult is the result returned from querying the Elasticsearch server. +type PingResult struct { + Name string `json:"name"` + ClusterName string `json:"cluster_name"` + Version struct { + Number string `json:"number"` + BuildHash string `json:"build_hash"` + BuildTimestamp string `json:"build_timestamp"` + BuildSnapshot bool `json:"build_snapshot"` + LuceneVersion string `json:"lucene_version"` + } `json:"version"` + TagLine string `json:"tagline"` +} + +func NewPingService(client *Client) *PingService { + return &PingService{ + client: client, + url: DefaultURL, + httpHeadOnly: false, + pretty: false, + } +} + +func (s *PingService) URL(url string) *PingService { + s.url = url + return s +} + +func (s *PingService) Timeout(timeout string) *PingService { + s.timeout = timeout + return s +} + +// HeadOnly makes the service to only return the status code in Do; +// the PingResult will be nil. +func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService { + s.httpHeadOnly = httpHeadOnly + return s +} + +func (s *PingService) Pretty(pretty bool) *PingService { + s.pretty = pretty + return s +} + +// Do returns the PingResult, the HTTP status code of the Elasticsearch +// server, and an error. 
+func (s *PingService) Do() (*PingResult, int, error) { + s.client.mu.RLock() + basicAuth := s.client.basicAuth + basicAuthUsername := s.client.basicAuthUsername + basicAuthPassword := s.client.basicAuthPassword + s.client.mu.RUnlock() + + url_ := s.url + "/" + + params := make(url.Values) + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.pretty { + params.Set("pretty", "1") + } + if len(params) > 0 { + url_ += "?" + params.Encode() + } + + var method string + if s.httpHeadOnly { + method = "HEAD" + } else { + method = "GET" + } + + // Notice: This service must NOT use PerformRequest! + req, err := NewRequest(method, url_) + if err != nil { + return nil, 0, err + } + + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + + res, err := s.client.c.Do((*http.Request)(req)) + if err != nil { + return nil, 0, err + } + defer res.Body.Close() + + var ret *PingResult + if !s.httpHeadOnly { + ret = new(PingResult) + if err := json.NewDecoder(res.Body).Decode(ret); err != nil { + return nil, res.StatusCode, err + } + } + + return ret, res.StatusCode, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/plugins.go b/vendor/gopkg.in/olivere/elastic.v3/plugins.go new file mode 100644 index 000000000..3906d74d7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/plugins.go @@ -0,0 +1,38 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasPlugin indicates whether the cluster has the named plugin. +func (c *Client) HasPlugin(name string) (bool, error) { + plugins, err := c.Plugins() + if err != nil { + return false, nil + } + for _, plugin := range plugins { + if plugin == name { + return true, nil + } + } + return false, nil +} + +// Plugins returns the list of all registered plugins. 
+func (c *Client) Plugins() ([]string, error) { + stats, err := c.ClusterStats().Do() + if err != nil { + return nil, err + } + if stats == nil { + return nil, err + } + if stats.Nodes == nil { + return nil, err + } + var plugins []string + for _, plugin := range stats.Nodes.Plugins { + plugins = append(plugins, plugin.Name) + } + return plugins, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/query.go b/vendor/gopkg.in/olivere/elastic.v3/query.go new file mode 100644 index 000000000..0869eaecc --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/query.go @@ -0,0 +1,13 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Query represents the generic query interface. A query's sole purpose +// is to return the source of the query as a JSON-serializable object. +// Returning map[string]interface{} is the norm for queries. +type Query interface { + // Source returns the JSON-serializable query request. + Source() (interface{}, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/reindex.go b/vendor/gopkg.in/olivere/elastic.v3/reindex.go new file mode 100644 index 000000000..663bf25a6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/reindex.go @@ -0,0 +1,572 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" +) + +// ReindexService is a method to copy documents from one index to another. +// It was introduced in Elasticsearch 2.3.0. +// +// Notice that Elastic already had a Reindexer service that pre-dated +// the Reindex API. Use that if you're on an earlier version of Elasticsearch. +// +// It is documented at https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html. 
+type ReindexService struct { + client *Client + pretty bool + consistency string + refresh *bool + timeout string + waitForCompletion *bool + bodyJson interface{} + bodyString string + source *ReindexSource + destination *ReindexDestination + conflicts string + size *int + script *Script +} + +// NewReindexService creates a new ReindexService. +func NewReindexService(client *Client) *ReindexService { + return &ReindexService{ + client: client, + } +} + +// Consistency specifies an explicit write consistency setting for the operation. +func (s *ReindexService) Consistency(consistency string) *ReindexService { + s.consistency = consistency + return s +} + +// Refresh indicates whether Elasticsearch should refresh the effected indexes +// immediately. +func (s *ReindexService) Refresh(refresh bool) *ReindexService { + s.refresh = &refresh + return s +} + +// Timeout is the time each individual bulk request should wait for shards +// that are unavailable. +func (s *ReindexService) Timeout(timeout string) *ReindexService { + s.timeout = timeout + return s +} + +// WaitForCompletion indicates whether Elasticsearch should block until the +// reindex is complete. +func (s *ReindexService) WaitForCompletion(waitForCompletion bool) *ReindexService { + s.waitForCompletion = &waitForCompletion + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ReindexService) Pretty(pretty bool) *ReindexService { + s.pretty = pretty + return s +} + +// Source specifies the source of the reindexing process. +func (s *ReindexService) Source(source *ReindexSource) *ReindexService { + s.source = source + return s +} + +// SourceIndex specifies the source index of the reindexing process. +func (s *ReindexService) SourceIndex(index string) *ReindexService { + if s.source == nil { + s.source = NewReindexSource() + } + s.source = s.source.Index(index) + return s +} + +// Destination specifies the destination of the reindexing process. 
+func (s *ReindexService) Destination(destination *ReindexDestination) *ReindexService { + s.destination = destination + return s +} + +// DestinationIndex specifies the destination index of the reindexing process. +func (s *ReindexService) DestinationIndex(index string) *ReindexService { + if s.destination == nil { + s.destination = NewReindexDestination() + } + s.destination = s.destination.Index(index) + return s +} + +// DestinationIndexAndType specifies both the destination index and type +// of the reindexing process. +func (s *ReindexService) DestinationIndexAndType(index, typ string) *ReindexService { + if s.destination == nil { + s.destination = NewReindexDestination() + } + s.destination = s.destination.Index(index) + s.destination = s.destination.Type(typ) + return s +} + +// Conflicts indicates what to do when the process detects version conflicts. +// Possible values are "proceed" and "abort". +func (s *ReindexService) Conflicts(conflicts string) *ReindexService { + s.conflicts = conflicts + return s +} + +// AbortOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("abort"). +func (s *ReindexService) AbortOnVersionConflict() *ReindexService { + s.conflicts = "abort" + return s +} + +// ProceedOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("proceed"). +func (s *ReindexService) ProceedOnVersionConflict() *ReindexService { + s.conflicts = "proceed" + return s +} + +// Size sets an upper limit for the number of processed documents. +func (s *ReindexService) Size(size int) *ReindexService { + s.size = &size + return s +} + +// Script allows for modification of the documents as they are reindexed +// from source to destination. +func (s *ReindexService) Script(script *Script) *ReindexService { + s.script = script + return s +} + +// BodyJson specifies e.g. the query to restrict the results specified with the +// Query DSL (optional). 
The interface{} will be serialized to a JSON document, +// so use a map[string]interface{}. +func (s *ReindexService) BodyJson(body interface{}) *ReindexService { + s.bodyJson = body + return s +} + +// Body specifies e.g. a query to restrict the results specified with +// the Query DSL (optional). +func (s *ReindexService) BodyString(body string) *ReindexService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *ReindexService) buildURL() (string, url.Values, error) { + // Build URL path + path := "/_reindex" + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.consistency != "" { + params.Set("consistency", s.consistency) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ReindexService) Validate() error { + var invalid []string + if s.source == nil { + invalid = append(invalid, "Source") + } else { + if len(s.source.indices) == 0 { + invalid = append(invalid, "Source.Index") + } + } + if s.destination == nil { + invalid = append(invalid, "Destination") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// body returns the body part of the document request. 
+func (s *ReindexService) body() (interface{}, error) { + if s.bodyJson != nil { + return s.bodyJson, nil + } + if s.bodyString != "" { + return s.bodyString, nil + } + + body := make(map[string]interface{}) + + if s.conflicts != "" { + body["conflicts"] = s.conflicts + } + if s.size != nil { + body["size"] = *s.size + } + if s.script != nil { + out, err := s.script.Source() + if err != nil { + return nil, err + } + body["script"] = out + } + + src, err := s.source.Source() + if err != nil { + return nil, err + } + body["source"] = src + + dst, err := s.destination.Source() + if err != nil { + return nil, err + } + body["dest"] = dst + + return body, nil +} + +// Do executes the operation. +func (s *ReindexService) Do() (*ReindexResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body, err := s.body() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ReindexResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ReindexResponse is the response of ReindexService.Do. 
+type ReindexResponse struct { + Took interface{} `json:"took"` // 2.3.0 returns "37.7ms" while 2.2 returns 38 for took + TimedOut bool `json:"timed_out"` + Total int64 `json:"total"` + Updated int64 `json:"updated"` + Created int64 `json:"created"` + Deleted int64 `json:"deleted"` + Batches int64 `json:"batches"` + VersionConflicts int64 `json:"version_conflicts"` + Noops int64 `json:"noops"` + Retries int64 `json:"retries"` + Canceled string `json:"canceled"` + Failures []shardOperationFailure `json:"failures"` +} + +// -- Source of Reindex -- + +// ReindexSource specifies the source of a Reindex process. +type ReindexSource struct { + searchType string // default in ES is "query_then_fetch" + indices []string + types []string + routing *string + preference *string + requestCache *bool + scroll string + query Query + sorts []SortInfo + sorters []Sorter + searchSource *SearchSource +} + +// NewReindexSource creates a new ReindexSource. +func NewReindexSource() *ReindexSource { + return &ReindexSource{ + indices: make([]string, 0), + types: make([]string, 0), + sorts: make([]SortInfo, 0), + sorters: make([]Sorter, 0), + } +} + +// SearchType is the search operation type. Possible values are +// "query_then_fetch" and "dfs_query_then_fetch". +func (r *ReindexSource) SearchType(searchType string) *ReindexSource { + r.searchType = searchType + return r +} + +func (r *ReindexSource) SearchTypeDfsQueryThenFetch() *ReindexSource { + return r.SearchType("dfs_query_then_fetch") +} + +func (r *ReindexSource) SearchTypeQueryThenFetch() *ReindexSource { + return r.SearchType("query_then_fetch") +} + +func (r *ReindexSource) Index(indices ...string) *ReindexSource { + r.indices = append(r.indices, indices...) + return r +} + +func (r *ReindexSource) Type(types ...string) *ReindexSource { + r.types = append(r.types, types...) 
+ return r +} + +func (r *ReindexSource) Preference(preference string) *ReindexSource { + r.preference = &preference + return r +} + +func (r *ReindexSource) RequestCache(requestCache bool) *ReindexSource { + r.requestCache = &requestCache + return r +} + +func (r *ReindexSource) Scroll(scroll string) *ReindexSource { + r.scroll = scroll + return r +} + +func (r *ReindexSource) Query(query Query) *ReindexSource { + r.query = query + return r +} + +// Sort adds a sort order. +func (s *ReindexSource) Sort(field string, ascending bool) *ReindexSource { + s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending}) + return s +} + +// SortWithInfo adds a sort order. +func (s *ReindexSource) SortWithInfo(info SortInfo) *ReindexSource { + s.sorts = append(s.sorts, info) + return s +} + +// SortBy adds a sort order. +func (s *ReindexSource) SortBy(sorter ...Sorter) *ReindexSource { + s.sorters = append(s.sorters, sorter...) + return s +} + +// Source returns a serializable JSON request for the request. 
+func (r *ReindexSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if r.query != nil { + src, err := r.query.Source() + if err != nil { + return nil, err + } + source["query"] = src + } else if r.searchSource != nil { + src, err := r.searchSource.Source() + if err != nil { + return nil, err + } + source["source"] = src + } + + if r.searchType != "" { + source["search_type"] = r.searchType + } + + switch len(r.indices) { + case 0: + case 1: + source["index"] = r.indices[0] + default: + source["index"] = r.indices + } + + switch len(r.types) { + case 0: + case 1: + source["type"] = r.types[0] + default: + source["type"] = r.types + } + + if r.preference != nil && *r.preference != "" { + source["preference"] = *r.preference + } + + if r.requestCache != nil { + source["request_cache"] = fmt.Sprintf("%v", *r.requestCache) + } + + if r.scroll != "" { + source["scroll"] = r.scroll + } + + if len(r.sorters) > 0 { + sortarr := make([]interface{}, 0) + for _, sorter := range r.sorters { + src, err := sorter.Source() + if err != nil { + return nil, err + } + sortarr = append(sortarr, src) + } + source["sort"] = sortarr + } else if len(r.sorts) > 0 { + sortarr := make([]interface{}, 0) + for _, sort := range r.sorts { + src, err := sort.Source() + if err != nil { + return nil, err + } + sortarr = append(sortarr, src) + } + source["sort"] = sortarr + } + + return source, nil +} + +// -source Destination of Reindex -- + +// ReindexDestination is the destination of a Reindex API call. +// It is basically the meta data of a BulkIndexRequest. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/2.3/docs-reindex.html +// fsourcer details. +type ReindexDestination struct { + index string + typ string + routing string + parent string + opType string + version int64 // default is MATCH_ANY + versionType string // default is "internal" +} + +// NewReindexDestination returns a new ReindexDestination. 
func NewReindexDestination() *ReindexDestination {
	return &ReindexDestination{}
}

// Index specifies name of the Elasticsearch index to use as the destination
// of a reindexing process.
func (r *ReindexDestination) Index(index string) *ReindexDestination {
	r.index = index
	return r
}

// Type specifies the Elasticsearch type to use for reindexing.
func (r *ReindexDestination) Type(typ string) *ReindexDestination {
	r.typ = typ
	return r
}

// Routing specifies a routing value for the reindexing request.
// It can be "keep", "discard", or start with "=". The latter specifies
// the routing on the bulk request.
func (r *ReindexDestination) Routing(routing string) *ReindexDestination {
	r.routing = routing
	return r
}

// Keep sets the routing on the bulk request sent for each match to the routing
// of the match (the default).
func (r *ReindexDestination) Keep() *ReindexDestination {
	r.routing = "keep"
	return r
}

// Discard sets the routing on the bulk request sent for each match to null.
func (r *ReindexDestination) Discard() *ReindexDestination {
	r.routing = "discard"
	return r
}

// Parent specifies the identifier of the parent document (if available).
func (r *ReindexDestination) Parent(parent string) *ReindexDestination {
	r.parent = parent
	return r
}

// OpType specifies if this request should follow create-only or upsert
// behavior. This follows the OpType of the standard document index API.
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#operation-type
// for details.
func (r *ReindexDestination) OpType(opType string) *ReindexDestination {
	r.opType = opType
	return r
}

// Version indicates the version of the document as part of an optimistic
// concurrency model.
func (r *ReindexDestination) Version(version int64) *ReindexDestination {
	r.version = version
	return r
}

// VersionType specifies how versions are created.
+func (r *ReindexDestination) VersionType(versionType string) *ReindexDestination { + r.versionType = versionType + return r +} + +// Source returns a serializable JSON request for the request. +func (r *ReindexDestination) Source() (interface{}, error) { + source := make(map[string]interface{}) + if r.index != "" { + source["index"] = r.index + } + if r.typ != "" { + source["type"] = r.typ + } + if r.routing != "" { + source["routing"] = r.routing + } + if r.opType != "" { + source["op_type"] = r.opType + } + if r.parent != "" { + source["parent"] = r.parent + } + if r.version > 0 { + source["version"] = r.version + } + if r.versionType != "" { + source["version_type"] = r.versionType + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/reindexer.go b/vendor/gopkg.in/olivere/elastic.v3/reindexer.go new file mode 100644 index 000000000..7193a1337 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/reindexer.go @@ -0,0 +1,270 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "errors" +) + +// Reindexer simplifies the process of reindexing an index. You typically +// reindex a source index to a target index. However, you can also specify +// a query that filters out documents from the source index before bulk +// indexing them into the target index. The caller may also specify a +// different client for the target, e.g. when copying indices from one +// Elasticsearch cluster to another. +// +// Internally, the Reindex users a scan and scroll operation on the source +// index and bulk indexing to push data into the target index. +// +// By default the reindexer fetches the _source, _parent, and _routing +// attributes from the source index, using the provided CopyToTargetIndex +// will copy those attributes into the destinationIndex. 
+// This behaviour can be overridden by setting the ScanFields and providing a +// custom ReindexerFunc. +// +// The caller is responsible for setting up and/or clearing the target index +// before starting the reindex process. +// +// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html +// for more information about reindexing. +type Reindexer struct { + sourceClient, targetClient *Client + sourceIndex string + query Query + scanFields []string + bulkSize int + size int + scroll string + reindexerFunc ReindexerFunc + progress ReindexerProgressFunc + statsOnly bool +} + +// A ReindexerFunc receives each hit from the sourceIndex. +// It can choose to add any number of BulkableRequests to the bulkService. +type ReindexerFunc func(hit *SearchHit, bulkService *BulkService) error + +// CopyToTargetIndex returns a ReindexerFunc that copies the SearchHit's +// _source, _parent, and _routing attributes into the targetIndex +func CopyToTargetIndex(targetIndex string) ReindexerFunc { + return func(hit *SearchHit, bulkService *BulkService) error { + // TODO(oe) Do we need to deserialize here? + source := make(map[string]interface{}) + if err := json.Unmarshal(*hit.Source, &source); err != nil { + return err + } + req := NewBulkIndexRequest().Index(targetIndex).Type(hit.Type).Id(hit.Id).Doc(source) + if hit.Parent != "" { + req = req.Parent(hit.Parent) + } + if hit.Routing != "" { + req = req.Routing(hit.Routing) + } + bulkService.Add(req) + return nil + } +} + +// ReindexerProgressFunc is a callback that can be used with Reindexer +// to report progress while reindexing data. +type ReindexerProgressFunc func(current, total int64) + +// ReindexerResponse is returned from the Do func in a Reindexer. +// By default, it returns the number of succeeded and failed bulk operations. +// To return details about all failed items, set StatsOnly to false in +// Reindexer. 
type ReindexerResponse struct {
	Success int64
	Failed  int64
	Errors  []*BulkResponseItem
}

// NewReindexer returns a new Reindexer. Only the number of succeeded and
// failed documents is reported by default (statsOnly is true).
func NewReindexer(client *Client, source string, reindexerFunc ReindexerFunc) *Reindexer {
	return &Reindexer{
		sourceClient:  client,
		sourceIndex:   source,
		reindexerFunc: reindexerFunc,
		statsOnly:     true,
	}
}

// TargetClient specifies a different client for the target. This is
// necessary when the target index is in a different Elasticsearch cluster.
// By default, the source and target clients are the same.
func (ix *Reindexer) TargetClient(c *Client) *Reindexer {
	ix.targetClient = c
	return ix
}

// Query specifies the query to apply to the source. It filters out those
// documents to be indexed into target. A nil query does not filter out any
// documents.
func (ix *Reindexer) Query(q Query) *Reindexer {
	ix.query = q
	return ix
}

// ScanFields specifies the fields the scan query should load.
// The default fields are _source, _parent, _routing.
func (ix *Reindexer) ScanFields(scanFields ...string) *Reindexer {
	ix.scanFields = scanFields
	return ix
}

// BulkSize returns the number of documents to send to Elasticsearch per chunk.
// The default is 500.
func (ix *Reindexer) BulkSize(bulkSize int) *Reindexer {
	ix.bulkSize = bulkSize
	return ix
}

// Size is the number of results to return per shard, not per request.
// So a size of 10 which hits 5 shards will return a maximum of 50 results
// per scan request.
func (ix *Reindexer) Size(size int) *Reindexer {
	ix.size = size
	return ix
}

// Scroll specifies for how long the scroll operation on the source index
// should be maintained. The default is 5m.
func (ix *Reindexer) Scroll(timeout string) *Reindexer {
	ix.scroll = timeout
	return ix
}

// Progress indicates a callback that will be called while indexing.
+func (ix *Reindexer) Progress(f ReindexerProgressFunc) *Reindexer { + ix.progress = f + return ix +} + +// StatsOnly indicates whether the Do method should return details e.g. about +// the documents that failed while indexing. It is true by default, i.e. only +// the number of documents that succeeded/failed are returned. Set to false +// if you want all the details. +func (ix *Reindexer) StatsOnly(statsOnly bool) *Reindexer { + ix.statsOnly = statsOnly + return ix +} + +// Do starts the reindexing process. +func (ix *Reindexer) Do() (*ReindexerResponse, error) { + if ix.sourceClient == nil { + return nil, errors.New("no source client") + } + if ix.sourceIndex == "" { + return nil, errors.New("no source index") + } + if ix.targetClient == nil { + ix.targetClient = ix.sourceClient + } + if ix.scanFields == nil { + ix.scanFields = []string{"_source", "_parent", "_routing"} + } + if ix.bulkSize <= 0 { + ix.bulkSize = 500 + } + if ix.scroll == "" { + ix.scroll = "5m" + } + + // Count total to report progress (if necessary) + var err error + var current, total int64 + if ix.progress != nil { + total, err = ix.count() + if err != nil { + return nil, err + } + } + + // Prepare scan and scroll to iterate through the source index + scanner := ix.sourceClient.Scan(ix.sourceIndex).Scroll(ix.scroll).Fields(ix.scanFields...) + if ix.query != nil { + scanner = scanner.Query(ix.query) + } + if ix.size > 0 { + scanner = scanner.Size(ix.size) + } + cursor, err := scanner.Do() + + bulk := ix.targetClient.Bulk() + + ret := &ReindexerResponse{ + Errors: make([]*BulkResponseItem, 0), + } + + // Main loop iterates through the source index and bulk indexes into target. 
+ for { + docs, err := cursor.Next() + if err == EOS { + break + } + if err != nil { + return ret, err + } + + if docs.TotalHits() > 0 { + for _, hit := range docs.Hits.Hits { + if ix.progress != nil { + current++ + ix.progress(current, total) + } + + err := ix.reindexerFunc(hit, bulk) + if err != nil { + return ret, err + } + + if bulk.NumberOfActions() >= ix.bulkSize { + bulk, err = ix.commit(bulk, ret) + if err != nil { + return ret, err + } + } + } + } + } + + // Final flush + if bulk.NumberOfActions() > 0 { + bulk, err = ix.commit(bulk, ret) + if err != nil { + return ret, err + } + bulk = nil + } + + return ret, nil +} + +// count returns the number of documents in the source index. +// The query is taken into account, if specified. +func (ix *Reindexer) count() (int64, error) { + service := ix.sourceClient.Count(ix.sourceIndex) + if ix.query != nil { + service = service.Query(ix.query) + } + return service.Do() +} + +// commit commits a bulk, updates the stats, and returns a fresh bulk service. +func (ix *Reindexer) commit(bulk *BulkService, ret *ReindexerResponse) (*BulkService, error) { + bres, err := bulk.Do() + if err != nil { + return nil, err + } + ret.Success += int64(len(bres.Succeeded())) + failed := bres.Failed() + ret.Failed += int64(len(failed)) + if !ix.statsOnly { + ret.Errors = append(ret.Errors, failed...) + } + bulk = ix.targetClient.Bulk() + return bulk, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/request.go b/vendor/gopkg.in/olivere/elastic.v3/request.go new file mode 100644 index 000000000..1347e1b6f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/request.go @@ -0,0 +1,123 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "runtime" + "strings" +) + +// Elasticsearch-specific HTTP request +type Request http.Request + +// NewRequest is a http.Request and adds features such as encoding the body. +func NewRequest(method, url string) (*Request, error) { + req, err := http.NewRequest(method, url, nil) + if err != nil { + return nil, err + } + req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")") + req.Header.Add("Accept", "application/json") + return (*Request)(req), nil +} + +// SetBasicAuth wraps http.Request's SetBasicAuth. +func (r *Request) SetBasicAuth(username, password string) { + ((*http.Request)(r)).SetBasicAuth(username, password) +} + +// SetBody encodes the body in the request. Optionally, it performs GZIP compression. +func (r *Request) SetBody(body interface{}, gzipCompress bool) error { + switch b := body.(type) { + case string: + if gzipCompress { + return r.setBodyGzip(b) + } else { + return r.setBodyString(b) + } + default: + if gzipCompress { + return r.setBodyGzip(body) + } else { + return r.setBodyJson(body) + } + } +} + +// setBodyJson encodes the body as a struct to be marshaled via json.Marshal. +func (r *Request) setBodyJson(data interface{}) error { + body, err := json.Marshal(data) + if err != nil { + return err + } + r.Header.Set("Content-Type", "application/json") + r.setBodyReader(bytes.NewReader(body)) + return nil +} + +// setBodyString encodes the body as a string. +func (r *Request) setBodyString(body string) error { + return r.setBodyReader(strings.NewReader(body)) +} + +// setBodyGzip gzip's the body. It accepts both strings and structs as body. +// The latter will be encoded via json.Marshal. 
+func (r *Request) setBodyGzip(body interface{}) error { + switch b := body.(type) { + case string: + buf := new(bytes.Buffer) + w := gzip.NewWriter(buf) + if _, err := w.Write([]byte(b)); err != nil { + return err + } + if err := w.Close(); err != nil { + return err + } + r.Header.Add("Content-Encoding", "gzip") + r.Header.Add("Vary", "Accept-Encoding") + return r.setBodyReader(bytes.NewReader(buf.Bytes())) + default: + data, err := json.Marshal(b) + if err != nil { + return err + } + buf := new(bytes.Buffer) + w := gzip.NewWriter(buf) + if _, err := w.Write(data); err != nil { + return err + } + if err := w.Close(); err != nil { + return err + } + r.Header.Add("Content-Encoding", "gzip") + r.Header.Add("Vary", "Accept-Encoding") + r.Header.Set("Content-Type", "application/json") + return r.setBodyReader(bytes.NewReader(buf.Bytes())) + } +} + +// setBodyReader writes the body from an io.Reader. +func (r *Request) setBodyReader(body io.Reader) error { + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = ioutil.NopCloser(body) + } + r.Body = rc + if body != nil { + switch v := body.(type) { + case *strings.Reader: + r.ContentLength = int64(v.Len()) + case *bytes.Buffer: + r.ContentLength = int64(v.Len()) + } + } + return nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/rescore.go b/vendor/gopkg.in/olivere/elastic.v3/rescore.go new file mode 100644 index 000000000..0cbc06710 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/rescore.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +type Rescore struct { + rescorer Rescorer + windowSize *int + defaultRescoreWindowSize *int +} + +func NewRescore() *Rescore { + return &Rescore{} +} + +func (r *Rescore) WindowSize(windowSize int) *Rescore { + r.windowSize = &windowSize + return r +} + +func (r *Rescore) IsEmpty() bool { + return r.rescorer == nil +} + +func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore { + r.rescorer = rescorer + return r +} + +func (r *Rescore) Source() (interface{}, error) { + source := make(map[string]interface{}) + if r.windowSize != nil { + source["window_size"] = *r.windowSize + } else if r.defaultRescoreWindowSize != nil { + source["window_size"] = *r.defaultRescoreWindowSize + } + rescorerSrc, err := r.rescorer.Source() + if err != nil { + return nil, err + } + source[r.rescorer.Name()] = rescorerSrc + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/rescorer.go b/vendor/gopkg.in/olivere/elastic.v3/rescorer.go new file mode 100644 index 000000000..28ad59cbb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/rescorer.go @@ -0,0 +1,64 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +type Rescorer interface { + Name() string + Source() (interface{}, error) +} + +// -- Query Rescorer -- + +type QueryRescorer struct { + query Query + rescoreQueryWeight *float64 + queryWeight *float64 + scoreMode string +} + +func NewQueryRescorer(query Query) *QueryRescorer { + return &QueryRescorer{ + query: query, + } +} + +func (r *QueryRescorer) Name() string { + return "query" +} + +func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer { + r.rescoreQueryWeight = &rescoreQueryWeight + return r +} + +func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer { + r.queryWeight = &queryWeight + return r +} + +func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer { + r.scoreMode = scoreMode + return r +} + +func (r *QueryRescorer) Source() (interface{}, error) { + rescoreQuery, err := r.query.Source() + if err != nil { + return nil, err + } + + source := make(map[string]interface{}) + source["rescore_query"] = rescoreQuery + if r.queryWeight != nil { + source["query_weight"] = *r.queryWeight + } + if r.rescoreQueryWeight != nil { + source["rescore_query_weight"] = *r.rescoreQueryWeight + } + if r.scoreMode != "" { + source["score_mode"] = r.scoreMode + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/response.go b/vendor/gopkg.in/olivere/elastic.v3/response.go new file mode 100644 index 000000000..9426c23af --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/response.go @@ -0,0 +1,43 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "io/ioutil" + "net/http" +) + +// Response represents a response from Elasticsearch. +type Response struct { + // StatusCode is the HTTP status code, e.g. 200. + StatusCode int + // Header is the HTTP header from the HTTP response. 
+ // Keys in the map are canonicalized (see http.CanonicalHeaderKey). + Header http.Header + // Body is the deserialized response body. + Body json.RawMessage +} + +// newResponse creates a new response from the HTTP response. +func (c *Client) newResponse(res *http.Response) (*Response, error) { + r := &Response{ + StatusCode: res.StatusCode, + Header: res.Header, + } + if res.Body != nil { + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + // HEAD requests return a body but no content + if len(slurp) > 0 { + if err := c.decoder.Decode(slurp, &r.Body); err != nil { + return nil, err + } + } + } + return r, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/scan.go b/vendor/gopkg.in/olivere/elastic.v3/scan.go new file mode 100644 index 000000000..08f2ef38c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/scan.go @@ -0,0 +1,358 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "errors" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +const ( + defaultKeepAlive = "5m" +) + +var ( + // End of stream (or scan) + EOS = errors.New("EOS") + + // No ScrollId + ErrNoScrollId = errors.New("no scrollId") +) + +// ScanService manages a cursor through documents in Elasticsearch. +type ScanService struct { + client *Client + indices []string + types []string + keepAlive string + searchSource *SearchSource + pretty bool + routing string + preference string + size *int +} + +// NewScanService creates a new service to iterate through the results +// of a query. +func NewScanService(client *Client) *ScanService { + builder := &ScanService{ + client: client, + searchSource: NewSearchSource().Query(NewMatchAllQuery()), + } + return builder +} + +// Index sets the name(s) of the index to use for scan. 
+func (s *ScanService) Index(indices ...string) *ScanService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +// Types allows to restrict the scan to a list of types. +func (s *ScanService) Type(types ...string) *ScanService { + if s.types == nil { + s.types = make([]string, 0) + } + s.types = append(s.types, types...) + return s +} + +// Scroll is an alias for KeepAlive, the time to keep +// the cursor alive (e.g. "5m" for 5 minutes). +func (s *ScanService) Scroll(keepAlive string) *ScanService { + s.keepAlive = keepAlive + return s +} + +// KeepAlive sets the maximum time the cursor will be +// available before expiration (e.g. "5m" for 5 minutes). +func (s *ScanService) KeepAlive(keepAlive string) *ScanService { + s.keepAlive = keepAlive + return s +} + +// Fields tells Elasticsearch to only load specific fields from a search hit. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html. +func (s *ScanService) Fields(fields ...string) *ScanService { + s.searchSource = s.searchSource.Fields(fields...) + return s +} + +// SearchSource sets the search source builder to use with this service. +func (s *ScanService) SearchSource(searchSource *SearchSource) *ScanService { + s.searchSource = searchSource + if s.searchSource == nil { + s.searchSource = NewSearchSource().Query(NewMatchAllQuery()) + } + return s +} + +// Routing allows for (a comma-separated) list of specific routing values. +func (s *ScanService) Routing(routings ...string) *ScanService { + s.routing = strings.Join(routings, ",") + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: "random"). +func (s *ScanService) Preference(preference string) *ScanService { + s.preference = preference + return s +} + +// Query sets the query to perform, e.g. MatchAllQuery. 
func (s *ScanService) Query(query Query) *ScanService {
	s.searchSource = s.searchSource.Query(query)
	return s
}

// PostFilter is executed as the last filter. It only affects the
// search hits but not facets. See
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html
// for details.
func (s *ScanService) PostFilter(postFilter Query) *ScanService {
	s.searchSource = s.searchSource.PostFilter(postFilter)
	return s
}

// FetchSource indicates whether the response should contain the stored
// _source for every hit.
func (s *ScanService) FetchSource(fetchSource bool) *ScanService {
	s.searchSource = s.searchSource.FetchSource(fetchSource)
	return s
}

// FetchSourceContext indicates how the _source should be fetched.
func (s *ScanService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScanService {
	s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext)
	return s
}

// Version can be set to true to return a version for each search hit.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html.
func (s *ScanService) Version(version bool) *ScanService {
	s.searchSource = s.searchSource.Version(version)
	return s
}

// Sort the results by the given field, in the given order.
// Use the alternative SortWithInfo to use a struct to define the sorting.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
// for detailed documentation of sorting.
func (s *ScanService) Sort(field string, ascending bool) *ScanService {
	s.searchSource = s.searchSource.Sort(field, ascending)
	return s
}

// SortWithInfo defines how to sort results.
// Use the Sort func for a shortcut.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
// for detailed documentation of sorting.
func (s *ScanService) SortWithInfo(info SortInfo) *ScanService {
	s.searchSource = s.searchSource.SortWithInfo(info)
	return s
}

// SortBy defines how to sort results.
// Use the Sort func for a shortcut.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
// for detailed documentation of sorting.
func (s *ScanService) SortBy(sorter ...Sorter) *ScanService {
	s.searchSource = s.searchSource.SortBy(sorter...)
	return s
}

// Pretty enables the caller to indent the JSON output.
func (s *ScanService) Pretty(pretty bool) *ScanService {
	s.pretty = pretty
	return s
}

// Size is the number of results to return per shard, not per request.
// So a size of 10 which hits 5 shards will return a maximum of 50 results
// per scan request.
func (s *ScanService) Size(size int) *ScanService {
	s.size = &size
	return s
}

// Do executes the query and returns a "server-side cursor".
func (s *ScanService) Do() (*ScanCursor, error) {
	// Build url
	path := "/"

	// Indices part: each index name is URI-escaped individually.
	indexPart := make([]string, 0)
	for _, index := range s.indices {
		index, err := uritemplates.Expand("{index}", map[string]string{
			"index": index,
		})
		if err != nil {
			return nil, err
		}
		indexPart = append(indexPart, index)
	}
	if len(indexPart) > 0 {
		path += strings.Join(indexPart, ",")
	}

	// Types
	typesPart := make([]string, 0)
	for _, typ := range s.types {
		typ, err := uritemplates.Expand("{type}", map[string]string{
			"type": typ,
		})
		if err != nil {
			return nil, err
		}
		typesPart = append(typesPart, typ)
	}
	if len(typesPart) > 0 {
		path += "/" + strings.Join(typesPart, ",")
	}

	// Search
	path += "/_search"

	// Parameters
	params := make(url.Values)
	if !s.searchSource.hasSort() {
		// TODO: ES 2.1 deprecates search_type=scan. See https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated.
		params.Set("search_type", "scan")
	}
	if s.pretty {
		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
	}
	if s.keepAlive != "" {
		params.Set("scroll", s.keepAlive)
	} else {
		params.Set("scroll", defaultKeepAlive)
	}
	if s.size != nil && *s.size > 0 {
		params.Set("size", fmt.Sprintf("%d", *s.size))
	}
	if s.routing != "" {
		params.Set("routing", s.routing)
	}

	// Get response
	body, err := s.searchSource.Source()
	if err != nil {
		return nil, err
	}
	res, err := s.client.PerformRequest("POST", path, params, body)
	if err != nil {
		return nil, err
	}

	// Return result
	searchResult := new(SearchResult)
	if err := s.client.decoder.Decode(res.Body, searchResult); err != nil {
		return nil, err
	}

	cursor := NewScanCursor(s.client, s.keepAlive, s.pretty, searchResult)

	return cursor, nil
}

// ScanCursor represents a single page of results from
// an Elasticsearch Scan operation.
type ScanCursor struct {
	Results *SearchResult

	client      *Client
	keepAlive   string
	pretty      bool
	currentPage int
}

// NewScanCursor returns a new initialized instance
// of ScanCursor.
func NewScanCursor(client *Client, keepAlive string, pretty bool, searchResult *SearchResult) *ScanCursor {
	return &ScanCursor{
		client:    client,
		keepAlive: keepAlive,
		pretty:    pretty,
		Results:   searchResult,
	}
}

// TotalHits is a convenience method that returns the number
// of hits the cursor will iterate through.
func (c *ScanCursor) TotalHits() int64 {
	if c.Results.Hits == nil {
		return 0
	}
	return c.Results.Hits.TotalHits
}

// Next returns the next search result or nil when all
// documents have been scanned.
+// +// Usage: +// +// for { +// res, err := cursor.Next() +// if err == elastic.EOS { +// // End of stream (or scan) +// break +// } +// if err != nil { +// // Handle error +// } +// // Work with res +// } +// +func (c *ScanCursor) Next() (*SearchResult, error) { + if c.currentPage > 0 { + if c.Results.Hits == nil || len(c.Results.Hits.Hits) == 0 || c.Results.Hits.TotalHits == 0 { + return nil, EOS + } + } + if c.Results.ScrollId == "" { + return nil, EOS + } + + // Build url + path := "/_search/scroll" + + // Parameters + params := make(url.Values) + if c.pretty { + params.Set("pretty", fmt.Sprintf("%v", c.pretty)) + } + if c.keepAlive != "" { + params.Set("scroll", c.keepAlive) + } else { + params.Set("scroll", defaultKeepAlive) + } + + // Set body + body := c.Results.ScrollId + + // Get response + res, err := c.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + c.Results = &SearchResult{ScrollId: body} + if err := c.client.decoder.Decode(res.Body, c.Results); err != nil { + return nil, err + } + + c.currentPage += 1 + + return c.Results, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/script.go b/vendor/gopkg.in/olivere/elastic.v3/script.go new file mode 100644 index 000000000..a5c9e45e2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/script.go @@ -0,0 +1,131 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// Script holds all the paramaters necessary to compile or find in cache +// and then execute a script. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html +// for details of scripting. +type Script struct { + script string + typ string + lang string + params map[string]interface{} +} + +// NewScript creates and initializes a new Script. 
func NewScript(script string) *Script {
	return &Script{
		script: script,
		typ:    "", // default type is "inline"
		params: make(map[string]interface{}),
	}
}

// NewScriptInline creates and initializes a new Script of type "inline".
func NewScriptInline(script string) *Script {
	return NewScript(script).Type("inline")
}

// NewScriptId creates and initializes a new Script of type "id".
func NewScriptId(script string) *Script {
	return NewScript(script).Type("id")
}

// NewScriptFile creates and initializes a new Script of type "file".
func NewScriptFile(script string) *Script {
	return NewScript(script).Type("file")
}

// Script is either the cache key of the script to be compiled/executed
// or the actual script source code for inline scripts. For indexed
// scripts this is the id used in the request. For file scripts this is
// the file name.
func (s *Script) Script(script string) *Script {
	s.script = script
	return s
}

// Type sets the type of script: "inline", "id", or "file".
func (s *Script) Type(typ string) *Script {
	s.typ = typ
	return s
}

// Lang sets the language of the script. Permitted values are "groovy",
// "expression", "mustache", "mvel" (default), "javascript", "python".
// To use certain languages, you need to configure your server and/or
// add plugins. See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html
// for details.
func (s *Script) Lang(lang string) *Script {
	s.lang = lang
	return s
}

// Param adds a key/value pair to the parameters that this script will be executed with.
func (s *Script) Param(name string, value interface{}) *Script {
	// Guard against a nil map, e.g. after Params(nil) was called.
	if s.params == nil {
		s.params = make(map[string]interface{})
	}
	s.params[name] = value
	return s
}

// Params sets the map of parameters this script will be executed with.
+func (s *Script) Params(params map[string]interface{}) *Script { + s.params = params + return s +} + +// Source returns the JSON serializable data for this Script. +func (s *Script) Source() (interface{}, error) { + if s.typ == "" && s.lang == "" && len(s.params) == 0 { + return s.script, nil + } + source := make(map[string]interface{}) + if s.typ == "" { + source["inline"] = s.script + } else { + source[s.typ] = s.script + } + if s.lang != "" { + source["lang"] = s.lang + } + if len(s.params) > 0 { + source["params"] = s.params + } + return source, nil +} + +// -- Script Field -- + +// ScriptField is a single script field. +type ScriptField struct { + FieldName string // name of the field + + script *Script +} + +// NewScriptField creates and initializes a new ScriptField. +func NewScriptField(fieldName string, script *Script) *ScriptField { + return &ScriptField{FieldName: fieldName, script: script} +} + +// Source returns the serializable JSON for the ScriptField. +func (f *ScriptField) Source() (interface{}, error) { + if f.script == nil { + return nil, errors.New("ScriptField expects script") + } + source := make(map[string]interface{}) + src, err := f.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/scroll.go b/vendor/gopkg.in/olivere/elastic.v3/scroll.go new file mode 100644 index 000000000..00847f5aa --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/scroll.go @@ -0,0 +1,205 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ScrollService manages a cursor through documents in Elasticsearch. 
+type ScrollService struct { + client *Client + indices []string + types []string + keepAlive string + query Query + size *int + pretty bool + scrollId string +} + +func NewScrollService(client *Client) *ScrollService { + builder := &ScrollService{ + client: client, + query: NewMatchAllQuery(), + } + return builder +} + +func (s *ScrollService) Index(indices ...string) *ScrollService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +func (s *ScrollService) Type(types ...string) *ScrollService { + if s.types == nil { + s.types = make([]string, 0) + } + s.types = append(s.types, types...) + return s +} + +// Scroll is an alias for KeepAlive, the time to keep +// the cursor alive (e.g. "5m" for 5 minutes). +func (s *ScrollService) Scroll(keepAlive string) *ScrollService { + s.keepAlive = keepAlive + return s +} + +// KeepAlive sets the maximum time the cursor will be +// available before expiration (e.g. "5m" for 5 minutes). 
+func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService { + s.keepAlive = keepAlive + return s +} + +func (s *ScrollService) Query(query Query) *ScrollService { + s.query = query + return s +} + +func (s *ScrollService) Pretty(pretty bool) *ScrollService { + s.pretty = pretty + return s +} + +func (s *ScrollService) Size(size int) *ScrollService { + s.size = &size + return s +} + +func (s *ScrollService) ScrollId(scrollId string) *ScrollService { + s.scrollId = scrollId + return s +} + +func (s *ScrollService) Do() (*SearchResult, error) { + if s.scrollId == "" { + return s.GetFirstPage() + } + return s.GetNextPage() +} + +func (s *ScrollService) GetFirstPage() (*SearchResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + // Types + typesPart := make([]string, 0) + for _, typ := range s.types { + typ, err := uritemplates.Expand("{type}", map[string]string{ + "type": typ, + }) + if err != nil { + return nil, err + } + typesPart = append(typesPart, typ) + } + if len(typesPart) > 0 { + path += "/" + strings.Join(typesPart, ",") + } + + // Search + path += "/_search" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.keepAlive != "" { + params.Set("scroll", s.keepAlive) + } else { + params.Set("scroll", defaultKeepAlive) + } + if s.size != nil && *s.size > 0 { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + + // Set body + body := make(map[string]interface{}) + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + body["query"] = src + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, 
body) + if err != nil { + return nil, err + } + + // Return result + searchResult := new(SearchResult) + if err := s.client.decoder.Decode(res.Body, searchResult); err != nil { + return nil, err + } + + return searchResult, nil +} + +func (s *ScrollService) GetNextPage() (*SearchResult, error) { + if s.scrollId == "" { + return nil, EOS + } + + // Build url + path := "/_search/scroll" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.keepAlive != "" { + params.Set("scroll", s.keepAlive) + } else { + params.Set("scroll", defaultKeepAlive) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, s.scrollId) + if err != nil { + return nil, err + } + + // Return result + searchResult := new(SearchResult) + if err := s.client.decoder.Decode(res.Body, searchResult); err != nil { + return nil, err + } + + // Determine last page + if searchResult == nil || searchResult.Hits == nil || len(searchResult.Hits.Hits) == 0 || searchResult.Hits.TotalHits == 0 { + return nil, EOS + } + + return searchResult, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search.go b/vendor/gopkg.in/olivere/elastic.v3/search.go new file mode 100644 index 000000000..9965aa13c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search.go @@ -0,0 +1,477 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "reflect" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// Search for documents in Elasticsearch. 
+type SearchService struct { + client *Client + searchSource *SearchSource + source interface{} + pretty bool + searchType string + index []string + typ []string + routing string + preference string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewSearchService creates a new service for searching in Elasticsearch. +func NewSearchService(client *Client) *SearchService { + builder := &SearchService{ + client: client, + searchSource: NewSearchSource(), + } + return builder +} + +// SearchSource sets the search source builder to use with this service. +func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService { + s.searchSource = searchSource + if s.searchSource == nil { + s.searchSource = NewSearchSource() + } + return s +} + +// Source allows the user to set the request body manually without using +// any of the structs and interfaces in Elastic. +func (s *SearchService) Source(source interface{}) *SearchService { + s.source = source + return s +} + +// Index sets the names of the indices to use for search. +func (s *SearchService) Index(index ...string) *SearchService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) + return s +} + +// Types adds search restrictions for a list of types. +func (s *SearchService) Type(typ ...string) *SearchService { + if s.typ == nil { + s.typ = make([]string, 0) + } + s.typ = append(s.typ, typ...) + return s +} + +// Pretty enables the caller to indent the JSON output. +func (s *SearchService) Pretty(pretty bool) *SearchService { + s.pretty = pretty + return s +} + +// Timeout sets the timeout to use, e.g. "1s" or "1000ms". +func (s *SearchService) Timeout(timeout string) *SearchService { + s.searchSource = s.searchSource.Timeout(timeout) + return s +} + +// TimeoutInMillis sets the timeout in milliseconds. 
+func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService { + s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis) + return s +} + +// SearchType sets the search operation type. Valid values are: +// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", +// "dfs_query_and_fetch", "count", "scan". +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-type.html +// for details. +func (s *SearchService) SearchType(searchType string) *SearchService { + s.searchType = searchType + return s +} + +// Routing is a list of specific routing values to control the shards +// the search will be executed on. +func (s *SearchService) Routing(routings ...string) *SearchService { + s.routing = strings.Join(routings, ",") + return s +} + +// Preference sets the preference to execute the search. Defaults to +// randomize across shards ("random"). Can be set to "_local" to prefer +// local shards, "_primary" to execute on primary shards only, +// or a custom value which guarantees that the same order will be used +// across different requests. +func (s *SearchService) Preference(preference string) *SearchService { + s.preference = preference + return s +} + +// Query sets the query to perform, e.g. MatchAllQuery. +func (s *SearchService) Query(query Query) *SearchService { + s.searchSource = s.searchSource.Query(query) + return s +} + +// PostFilter will be executed after the query has been executed and +// only affects the search hits, not the aggregations. +// This filter is always executed as the last filtering mechanism. +func (s *SearchService) PostFilter(postFilter Query) *SearchService { + s.searchSource = s.searchSource.PostFilter(postFilter) + return s +} + +// FetchSource indicates whether the response should contain the stored +// _source for every hit. 
+func (s *SearchService) FetchSource(fetchSource bool) *SearchService { + s.searchSource = s.searchSource.FetchSource(fetchSource) + return s +} + +// FetchSourceContext indicates how the _source should be fetched. +func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService { + s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext) + return s +} + +// Highlight adds highlighting to the search. +func (s *SearchService) Highlight(highlight *Highlight) *SearchService { + s.searchSource = s.searchSource.Highlight(highlight) + return s +} + +// GlobalSuggestText defines the global text to use with all suggesters. +// This avoids repetition. +func (s *SearchService) GlobalSuggestText(globalText string) *SearchService { + s.searchSource = s.searchSource.GlobalSuggestText(globalText) + return s +} + +// Suggester adds a suggester to the search. +func (s *SearchService) Suggester(suggester Suggester) *SearchService { + s.searchSource = s.searchSource.Suggester(suggester) + return s +} + +// Aggregation adds an aggreation to perform as part of the search. +func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService { + s.searchSource = s.searchSource.Aggregation(name, aggregation) + return s +} + +// MinScore sets the minimum score below which docs will be filtered out. +func (s *SearchService) MinScore(minScore float64) *SearchService { + s.searchSource = s.searchSource.MinScore(minScore) + return s +} + +// From index to start the search from. Defaults to 0. +func (s *SearchService) From(from int) *SearchService { + s.searchSource = s.searchSource.From(from) + return s +} + +// Size is the number of search hits to return. Defaults to 10. +func (s *SearchService) Size(size int) *SearchService { + s.searchSource = s.searchSource.Size(size) + return s +} + +// Explain indicates whether each search hit should be returned with +// an explanation of the hit (ranking). 
+func (s *SearchService) Explain(explain bool) *SearchService { + s.searchSource = s.searchSource.Explain(explain) + return s +} + +// Version indicates whether each search hit should be returned with +// a version associated to it. +func (s *SearchService) Version(version bool) *SearchService { + s.searchSource = s.searchSource.Version(version) + return s +} + +// Sort adds a sort order. +func (s *SearchService) Sort(field string, ascending bool) *SearchService { + s.searchSource = s.searchSource.Sort(field, ascending) + return s +} + +// SortWithInfo adds a sort order. +func (s *SearchService) SortWithInfo(info SortInfo) *SearchService { + s.searchSource = s.searchSource.SortWithInfo(info) + return s +} + +// SortBy adds a sort order. +func (s *SearchService) SortBy(sorter ...Sorter) *SearchService { + s.searchSource = s.searchSource.SortBy(sorter...) + return s +} + +// NoFields indicates that no fields should be loaded, resulting in only +// id and type to be returned per field. +func (s *SearchService) NoFields() *SearchService { + s.searchSource = s.searchSource.NoFields() + return s +} + +// Field adds a single field to load and return (note, must be stored) as +// part of the search request. If none are specified, the source of the +// document will be returned. +func (s *SearchService) Field(fieldName string) *SearchService { + s.searchSource = s.searchSource.Field(fieldName) + return s +} + +// Fields sets the fields to load and return as part of the search request. +// If none are specified, the source of the document will be returned. +func (s *SearchService) Fields(fields ...string) *SearchService { + s.searchSource = s.searchSource.Fields(fields...) + return s +} + +// IgnoreUnavailable indicates whether the specified concrete indices +// should be ignored when unavailable (missing or closed). 
+func (s *SearchService) IgnoreUnavailable(ignoreUnavailable bool) *SearchService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string +// or when no indices have been specified). +func (s *SearchService) AllowNoIndices(allowNoIndices bool) *SearchService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *SearchService) ExpandWildcards(expandWildcards string) *SearchService { + s.expandWildcards = expandWildcards + return s +} + +// buildURL builds the URL for the operation. +func (s *SearchService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_search", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_search", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_search", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else { + path = "/_search" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.searchType != "" { + params.Set("search_type", s.searchType) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if 
s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *SearchService) Validate() error { + return nil +} + +// Do executes the search and returns a SearchResult. +func (s *SearchService) Do() (*SearchResult, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Perform request + var body interface{} + if s.source != nil { + body = s.source + } else { + src, err := s.searchSource.Source() + if err != nil { + return nil, err + } + body = src + } + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return search results + ret := new(SearchResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// SearchResult is the result of a search in Elasticsearch. +type SearchResult struct { + TookInMillis int64 `json:"took"` // search time in milliseconds + ScrollId string `json:"_scroll_id"` // only used with Scroll and Scan operations + Hits *SearchHits `json:"hits"` // the actual search hits + Suggest SearchSuggest `json:"suggest"` // results from suggesters + Aggregations Aggregations `json:"aggregations"` // results from aggregations + TimedOut bool `json:"timed_out"` // true if the search timed out + //Error string `json:"error,omitempty"` // used in MultiSearch only + // TODO double-check that MultiGet now returns details error information + Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet +} + +// TotalHits is a convenience function to return the number of hits for +// a search result. 
+func (r *SearchResult) TotalHits() int64 { + if r.Hits != nil { + return r.Hits.TotalHits + } + return 0 +} + +// Each is a utility function to iterate over all hits. It saves you from +// checking for nil values. Notice that Each will ignore errors in +// serializing JSON. +func (r *SearchResult) Each(typ reflect.Type) []interface{} { + if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 { + return nil + } + slice := make([]interface{}, 0) + for _, hit := range r.Hits.Hits { + v := reflect.New(typ).Elem() + if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil { + slice = append(slice, v.Interface()) + } + } + return slice +} + +// SearchHits specifies the list of search hits. +type SearchHits struct { + TotalHits int64 `json:"total"` // total number of hits found + MaxScore *float64 `json:"max_score"` // maximum score of all hits + Hits []*SearchHit `json:"hits"` // the actual hits returned +} + +// SearchHit is a single hit. +type SearchHit struct { + Score *float64 `json:"_score"` // computed score + Index string `json:"_index"` // index name + Type string `json:"_type"` // type meta field + Id string `json:"_id"` // external or internal + Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) + Timestamp int64 `json:"_timestamp"` // timestamp meta field + TTL int64 `json:"_ttl"` // ttl meta field + Routing string `json:"_routing"` // routing meta field + Parent string `json:"_parent"` // parent meta field + Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService + Sort []interface{} `json:"sort"` // sort information + Highlight SearchHitHighlight `json:"highlight"` // highlighter information + Source *json.RawMessage `json:"_source"` // stored document source + Fields map[string]interface{} `json:"fields"` // returned fields + Explanation *SearchExplanation `json:"_explanation"` // explains how the score was computed + MatchedQueries []string 
`json:"matched_queries"` // matched queries + InnerHits map[string]*SearchHitInnerHits `json:"inner_hits"` // inner hits with ES >= 1.5.0 + + // Shard + // HighlightFields + // SortValues + // MatchedFilters +} + +type SearchHitInnerHits struct { + Hits *SearchHits `json:"hits"` +} + +// SearchExplanation explains how the score for a hit was computed. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html. +type SearchExplanation struct { + Value float64 `json:"value"` // e.g. 1.0 + Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:" + Details []SearchExplanation `json:"details,omitempty"` // recursive details +} + +// Suggest + +// SearchSuggest is a map of suggestions. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggest map[string][]SearchSuggestion + +// SearchSuggestion is a single search suggestion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggestion struct { + Text string `json:"text"` + Offset int `json:"offset"` + Length int `json:"length"` + Options []SearchSuggestionOption `json:"options"` +} + +// SearchSuggestionOption is an option of a SearchSuggestion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggestionOption struct { + Text string `json:"text"` + Highlighted string `json:"highlighted"` + Score float64 `json:"score"` + CollateMatch bool `json:"collate_match"` + Freq int `json:"freq"` // deprecated in 2.x + Payload interface{} `json:"payload"` +} + +// Aggregations (see search_aggs.go) + +// Highlighting + +// SearchHitHighlight is the highlight information of a search hit. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html +// for a general discussion of highlighting. 
+type SearchHitHighlight map[string][]string diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs.go new file mode 100644 index 000000000..ff427d0ce --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs.go @@ -0,0 +1,1274 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "encoding/json" +) + +// Aggregations can be seen as a unit-of-work that build +// analytic information over a set of documents. It is +// (in many senses) the follow-up of facets in Elasticsearch. +// For more details about aggregations, visit: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html +type Aggregation interface { + // Source returns a JSON-serializable aggregation that is a fragment + // of the request sent to Elasticsearch. + Source() (interface{}, error) +} + +// Aggregations is a list of aggregations that are part of a search result. +type Aggregations map[string]*json.RawMessage + +// Min returns min aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html +func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Max returns max aggregation results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html +func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Sum returns sum aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html +func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Avg returns average aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html +func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ValueCount returns value-count aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html +func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Cardinality returns cardinality aggregation results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html +func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Stats returns stats aggregation results. +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html +func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ExtendedStats returns extended stats aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html +func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationExtendedStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Percentiles returns percentiles results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html +func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPercentilesMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// PercentileRanks returns percentile ranks results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html +func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPercentilesMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// TopHits returns top-hits aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html +func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationTopHitsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Global returns global results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html +func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Filter returns filter results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html +func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Filters returns filters results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html +func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketFilters) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Missing returns missing results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html +func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Nested returns nested results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html +func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ReverseNested returns reverse-nested results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html +func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Children returns children results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html +func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Terms returns terms aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html +func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SignificantTerms returns significant terms aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html +func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketSignificantTerms) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Sampler returns sampler aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-sampler-aggregation.html +func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Range returns range aggregation results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html +func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// KeyedRange returns keyed range aggregation results. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html. +func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyedRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// DateRange returns date range aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html +func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// IPv4Range returns IPv4 range aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-iprange-aggregation.html +func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Histogram returns histogram aggregation results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html +func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketHistogramItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// DateHistogram returns date histogram aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html +func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketHistogramItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoBounds returns geo-bounds aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html +func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationGeoBoundsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoHash returns geo-hash aggregation results. 
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html +func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoDistance returns geo distance aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geodistance-aggregation.html +func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// AvgBucket returns average bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-avg-bucket-aggregation.html +func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SumBucket returns sum bucket pipeline aggregation results. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-sum-bucket-aggregation.html +func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MaxBucket returns maximum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-max-bucket-aggregation.html +func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineBucketMetricValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MinBucket returns minimum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-min-bucket-aggregation.html +func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineBucketMetricValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MovAvg returns moving average pipeline aggregation results. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html +func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Derivative returns derivative pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html +func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineDerivative) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// CumulativeSum returns a cumulative sum pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html +func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// BucketScript returns bucket script pipeline aggregation results. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html +func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SerialDiff returns serial differencing pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html +func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// -- Single value metric -- + +// AggregationValueMetric is a single-value metric, returned e.g. by a +// Min or Max aggregation. +type AggregationValueMetric struct { + Aggregations + + Value *float64 //`json:"value"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure. +func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Stats metric -- + +// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation. 
+type AggregationStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + Min *float64 //`json:"min,omitempty"` + Max *float64 //`json:"max,omitempty"` + Avg *float64 //`json:"avg,omitempty"` + Sum *float64 //`json:"sum,omitempty"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure. +func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Extended stats metric -- + +// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation. +type AggregationExtendedStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + Min *float64 //`json:"min,omitempty"` + Max *float64 //`json:"max,omitempty"` + Avg *float64 //`json:"avg,omitempty"` + Sum *float64 //`json:"sum,omitempty"` + SumOfSquares *float64 //`json:"sum_of_squares,omitempty"` + Variance *float64 //`json:"variance,omitempty"` + StdDeviation *float64 //`json:"std_deviation,omitempty"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure. 
+func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["sum_of_squares"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfSquares) + } + if v, ok := aggs["variance"]; ok && v != nil { + json.Unmarshal(*v, &a.Variance) + } + if v, ok := aggs["std_deviation"]; ok && v != nil { + json.Unmarshal(*v, &a.StdDeviation) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Percentiles metric -- + +// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation. +type AggregationPercentilesMetric struct { + Aggregations + + Values map[string]float64 // `json:"values"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure. +func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["values"]; ok && v != nil { + json.Unmarshal(*v, &a.Values) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Top-hits metric -- + +// AggregationTopHitsMetric is a metric returned by a TopHits aggregation. 
+type AggregationTopHitsMetric struct { + Aggregations + + Hits *SearchHits //`json:"hits"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure. +func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + a.Aggregations = aggs + a.Hits = new(SearchHits) + if v, ok := aggs["hits"]; ok && v != nil { + json.Unmarshal(*v, &a.Hits) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + return nil +} + +// -- Geo-bounds metric -- + +// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation. +type AggregationGeoBoundsMetric struct { + Aggregations + + Bounds struct { + TopLeft struct { + Latitude float64 `json:"lat"` + Longitude float64 `json:"lon"` + } `json:"top_left"` + BottomRight struct { + Latitude float64 `json:"lat"` + Longitude float64 `json:"lon"` + } `json:"bottom_right"` + } `json:"bounds"` + + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure. +func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["bounds"]; ok && v != nil { + json.Unmarshal(*v, &a.Bounds) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Single bucket -- + +// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global. +type AggregationSingleBucket struct { + Aggregations + + DocCount int64 // `json:"doc_count"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure. 
+func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket range items -- + +// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned +// with a range aggregation. +type AggregationBucketRangeItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets []*AggregationBucketRangeItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure. +func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned +// with a keyed range aggregation. 
+type AggregationBucketKeyedRangeItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets map[string]*AggregationBucketRangeItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyedRangeItems structure. +func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure. +type AggregationBucketRangeItem struct { + Aggregations + + Key string //`json:"key"` + DocCount int64 //`json:"doc_count"` + From *float64 //`json:"from"` + FromAsString string //`json:"from_as_string"` + To *float64 //`json:"to"` + ToAsString string //`json:"to_as_string"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure. 
+func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["from"]; ok && v != nil { + json.Unmarshal(*v, &a.From) + } + if v, ok := aggs["from_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.FromAsString) + } + if v, ok := aggs["to"]; ok && v != nil { + json.Unmarshal(*v, &a.To) + } + if v, ok := aggs["to_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ToAsString) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket key items -- + +// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned +// with a terms aggregation. +type AggregationBucketKeyItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets []*AggregationBucketKeyItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure. +func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure. 
+type AggregationBucketKeyItem struct { + Aggregations + + Key interface{} //`json:"key"` + KeyAsString *string //`json:"key_as_string"` + KeyNumber json.Number + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure. +func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + if err := dec.Decode(&aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + json.Unmarshal(*v, &a.KeyNumber) + } + if v, ok := aggs["key_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.KeyAsString) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket types for significant terms -- + +// AggregationBucketSignificantTerms is a bucket aggregation returned +// with a significant terms aggregation. +type AggregationBucketSignificantTerms struct { + Aggregations + + DocCount int64 //`json:"doc_count"` + Buckets []*AggregationBucketSignificantTerm //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure. +func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure. 
+type AggregationBucketSignificantTerm struct { + Aggregations + + Key string //`json:"key"` + DocCount int64 //`json:"doc_count"` + BgCount int64 //`json:"bg_count"` + Score float64 //`json:"score"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure. +func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["bg_count"]; ok && v != nil { + json.Unmarshal(*v, &a.BgCount) + } + if v, ok := aggs["score"]; ok && v != nil { + json.Unmarshal(*v, &a.Score) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket filters -- + +// AggregationBucketFilters is a multi-bucket aggregation that is returned +// with a filters aggregation. +type AggregationBucketFilters struct { + Aggregations + + Buckets []*AggregationBucketKeyItem //`json:"buckets"` + NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure. +func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + json.Unmarshal(*v, &a.NamedBuckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket histogram items -- + +// AggregationBucketHistogramItems is a bucket aggregation that is returned +// with a date histogram aggregation. 
+type AggregationBucketHistogramItems struct { + Aggregations + + Buckets []*AggregationBucketHistogramItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure. +func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure. +type AggregationBucketHistogramItem struct { + Aggregations + + Key int64 //`json:"key"` + KeyAsString *string //`json:"key_as_string"` + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure. +func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["key_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.KeyAsString) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline simple value -- + +// AggregationPipelineSimpleValue is a simple value, returned e.g. by a +// MovAvg aggregation. +type AggregationPipelineSimpleValue struct { + Aggregations + + Value *float64 // `json:"value"` + ValueAsString string // `json:"value_as_string"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineSimpleValue structure. 
+func (a *AggregationPipelineSimpleValue) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ValueAsString) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline simple value -- + +// AggregationPipelineBucketMetricValue is a value returned e.g. by a +// MaxBucket aggregation. +type AggregationPipelineBucketMetricValue struct { + Aggregations + + Keys []interface{} // `json:"keys"` + Value *float64 // `json:"value"` + ValueAsString string // `json:"value_as_string"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineBucketMetricValue structure. +func (a *AggregationPipelineBucketMetricValue) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["keys"]; ok && v != nil { + json.Unmarshal(*v, &a.Keys) + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ValueAsString) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline derivative -- + +// AggregationPipelineDerivative is the value returned by a +// Derivative aggregation. 
+type AggregationPipelineDerivative struct { + Aggregations + + Value *float64 // `json:"value"` + ValueAsString string // `json:"value_as_string"` + NormalizedValue *float64 // `json:"normalized_value"` + NormalizedValueAsString string // `json:"normalized_value_as_string"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineDerivative structure. +func (a *AggregationPipelineDerivative) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ValueAsString) + } + if v, ok := aggs["normalized_value"]; ok && v != nil { + json.Unmarshal(*v, &a.NormalizedValue) + } + if v, ok := aggs["normalized_value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.NormalizedValueAsString) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go new file mode 100644 index 000000000..903e5461f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go @@ -0,0 +1,76 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ChildrenAggregation is a special single bucket aggregation that enables +// aggregating from buckets on parent document types to buckets on child documents. +// It is available from 1.4.0.Beta1 upwards. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html +type ChildrenAggregation struct { + typ string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewChildrenAggregation() *ChildrenAggregation { + return &ChildrenAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ChildrenAggregation) Type(typ string) *ChildrenAggregation { + a.typ = typ + return a +} + +func (a *ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) *ChildrenAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation { + a.meta = metaData + return a +} + +func (a *ChildrenAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "to-answers" : { + // "children": { + // "type" : "answer" + // } + // } + // } + // } + // This method returns only the { "type" : ... } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["children"] = opts + opts["type"] = a.typ + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go new file mode 100644 index 000000000..231c51ef8 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go @@ -0,0 +1,285 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DateHistogramAggregation is a multi-bucket aggregation similar to the +// histogram except it can only be applied on date values. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html +type DateHistogramAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + interval string + order string + orderAsc bool + minDocCount *int64 + extendedBoundsMin interface{} + extendedBoundsMax interface{} + timeZone string + format string + offset string +} + +// NewDateHistogramAggregation creates a new DateHistogramAggregation. +func NewDateHistogramAggregation() *DateHistogramAggregation { + return &DateHistogramAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +// Field on which the aggregation is processed. 
+func (a *DateHistogramAggregation) Field(field string) *DateHistogramAggregation { + a.field = field + return a +} + +func (a *DateHistogramAggregation) Script(script *Script) *DateHistogramAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *DateHistogramAggregation) Missing(missing interface{}) *DateHistogramAggregation { + a.missing = missing + return a +} + +func (a *DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *DateHistogramAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DateHistogramAggregation) Meta(metaData map[string]interface{}) *DateHistogramAggregation { + a.meta = metaData + return a +} + +// Interval by which the aggregation gets processed. +// Allowed values are: "year", "quarter", "month", "week", "day", +// "hour", "minute". It also supports time settings like "1.5h" +// (up to "w" for weeks). +func (a *DateHistogramAggregation) Interval(interval string) *DateHistogramAggregation { + a.interval = interval + return a +} + +// Order specifies the sort order. Valid values for order are: +// "_key", "_count", a sub-aggregation name, or a sub-aggregation name +// with a metric. 
+func (a *DateHistogramAggregation) Order(order string, asc bool) *DateHistogramAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByCount(asc bool) *DateHistogramAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByCountAsc() *DateHistogramAggregation { + return a.OrderByCount(true) +} + +func (a *DateHistogramAggregation) OrderByCountDesc() *DateHistogramAggregation { + return a.OrderByCount(false) +} + +func (a *DateHistogramAggregation) OrderByKey(asc bool) *DateHistogramAggregation { + // "order" : { "_key" : "asc" } + a.order = "_key" + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByKeyAsc() *DateHistogramAggregation { + return a.OrderByKey(true) +} + +func (a *DateHistogramAggregation) OrderByKeyDesc() *DateHistogramAggregation { + return a.OrderByKey(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) *DateHistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. +func (a *DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *DateHistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." 
+ metric + a.orderAsc = asc + return a +} + +// MinDocCount sets the minimum document count per bucket. +// Buckets with less documents than this min value will not be returned. +func (a *DateHistogramAggregation) MinDocCount(minDocCount int64) *DateHistogramAggregation { + a.minDocCount = &minDocCount + return a +} + +// TimeZone sets the timezone in which to translate dates before computing buckets. +func (a *DateHistogramAggregation) TimeZone(timeZone string) *DateHistogramAggregation { + a.timeZone = timeZone + return a +} + +// Format sets the format to use for dates. +func (a *DateHistogramAggregation) Format(format string) *DateHistogramAggregation { + a.format = format + return a +} + +// Offset sets the offset of time intervals in the histogram, e.g. "+6h". +func (a *DateHistogramAggregation) Offset(offset string) *DateHistogramAggregation { + a.offset = offset + return a +} + +// ExtendedBounds accepts int, int64, string, or time.Time values. +// In case the lower value in the histogram would be greater than min or the +// upper value would be less than max, empty buckets will be generated. +func (a *DateHistogramAggregation) ExtendedBounds(min, max interface{}) *DateHistogramAggregation { + a.extendedBoundsMin = min + a.extendedBoundsMax = max + return a +} + +// ExtendedBoundsMin accepts int, int64, string, or time.Time values. +func (a *DateHistogramAggregation) ExtendedBoundsMin(min interface{}) *DateHistogramAggregation { + a.extendedBoundsMin = min + return a +} + +// ExtendedBoundsMax accepts int, int64, string, or time.Time values. 
+func (a *DateHistogramAggregation) ExtendedBoundsMax(max interface{}) *DateHistogramAggregation { + a.extendedBoundsMax = max + return a +} + +func (a *DateHistogramAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "articles_over_time" : { + // "date_histogram" : { + // "field" : "date", + // "interval" : "month" + // } + // } + // } + // } + // + // This method returns only the { "date_histogram" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["date_histogram"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + opts["interval"] = a.interval + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if a.timeZone != "" { + opts["time_zone"] = a.timeZone + } + if a.offset != "" { + opts["offset"] = a.offset + } + if a.format != "" { + opts["format"] = a.format + } + if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { + bounds := make(map[string]interface{}) + if a.extendedBoundsMin != nil { + bounds["min"] = a.extendedBoundsMin + } + if a.extendedBoundsMax != nil { + bounds["max"] = a.extendedBoundsMax + } + opts["extended_bounds"] = bounds + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff 
--git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go new file mode 100644 index 000000000..82de0696b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go @@ -0,0 +1,234 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "time" +) + +// DateRangeAggregation is a range aggregation that is dedicated for +// date values. The main difference between this aggregation and the +// normal range aggregation is that the from and to values can be expressed +// in Date Math expressions, and it is also possible to specify a +// date format by which the from and to response fields will be returned. +// Note that this aggregration includes the from value and excludes the to +// value for each range. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html +type DateRangeAggregation struct { + field string + script *Script + subAggregations map[string]Aggregation + meta map[string]interface{} + keyed *bool + unmapped *bool + format string + entries []DateRangeAggregationEntry +} + +type DateRangeAggregationEntry struct { + Key string + From interface{} + To interface{} +} + +func NewDateRangeAggregation() *DateRangeAggregation { + return &DateRangeAggregation{ + subAggregations: make(map[string]Aggregation), + entries: make([]DateRangeAggregationEntry, 0), + } +} + +func (a *DateRangeAggregation) Field(field string) *DateRangeAggregation { + a.field = field + return a +} + +func (a *DateRangeAggregation) Script(script *Script) *DateRangeAggregation { + a.script = script + return a +} + +func (a *DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *DateRangeAggregation { + 
a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DateRangeAggregation) Meta(metaData map[string]interface{}) *DateRangeAggregation { + a.meta = metaData + return a +} + +func (a *DateRangeAggregation) Keyed(keyed bool) *DateRangeAggregation { + a.keyed = &keyed + return a +} + +func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation { + a.unmapped = &unmapped + return a +} + +func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation { + a.format = format + return a +} + +func (a *DateRangeAggregation) AddRange(from, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *DateRangeAggregation) AddUnboundedTo(from interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *DateRangeAggregation) AddUnboundedFrom(to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *DateRangeAggregation) Lt(to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *DateRangeAggregation) 
LtWithKey(key string, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *DateRangeAggregation) Between(from, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *DateRangeAggregation) Gt(from interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *DateRangeAggregation) GtWithKey(key string, from interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *DateRangeAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "range" : { + // "date_range": { + // "field": "date", + // "format": "MM-yyy", + // "ranges": [ + // { "to": "now-10M/M" }, + // { "from": "now-10M/M" } + // ] + // } + // } + // } + // } + // } + // + // This method returns only the { "date_range" : { ... } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["date_range"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.keyed != nil { + opts["keyed"] = *a.keyed + } + if a.unmapped != nil { + opts["unmapped"] = *a.unmapped + } + if a.format != "" { + opts["format"] = a.format + } + + ranges := make([]interface{}, 0) + for _, ent := range a.entries { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case time.Time: + r["from"] = from.Format(time.RFC3339) + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case time.Time: + r["to"] = to.Format(time.RFC3339) + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go new file mode 100644 index 000000000..101399882 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go @@ -0,0 +1,77 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FilterAggregation defines a single bucket of all the documents +// in the current document set context that match a specified filter. +// Often this will be used to narrow down the current aggregation context +// to a specific set of documents. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html +type FilterAggregation struct { + filter Query + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewFilterAggregation() *FilterAggregation { + return &FilterAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *FilterAggregation) SubAggregation(name string, subAggregation Aggregation) *FilterAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation { + a.meta = metaData + return a +} + +func (a *FilterAggregation) Filter(filter Query) *FilterAggregation { + a.filter = filter + return a +} + +func (a *FilterAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "in_stock_products" : { + // "filter" : { "range" : { "stock" : { "gt" : 0 } } } + // } + // } + // } + // This method returns only the { "filter" : {} } part. 
+ + src, err := a.filter.Source() + if err != nil { + return nil, err + } + source := make(map[string]interface{}) + source["filter"] = src + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go new file mode 100644 index 000000000..9acceb247 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go @@ -0,0 +1,138 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// FiltersAggregation defines a multi bucket aggregations where each bucket +// is associated with a filter. Each bucket will collect all documents that +// match its associated filter. +// +// Notice that the caller has to decide whether to add filters by name +// (using FilterWithName) or unnamed filters (using Filter or Filters). One cannot +// use both named and unnamed filters. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html +type FiltersAggregation struct { + unnamedFilters []Query + namedFilters map[string]Query + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +// NewFiltersAggregation initializes a new FiltersAggregation. 
+func NewFiltersAggregation() *FiltersAggregation { + return &FiltersAggregation{ + unnamedFilters: make([]Query, 0), + namedFilters: make(map[string]Query), + subAggregations: make(map[string]Aggregation), + } +} + +// Filter adds an unnamed filter. Notice that you can +// either use named or unnamed filters, but not both. +func (a *FiltersAggregation) Filter(filter Query) *FiltersAggregation { + a.unnamedFilters = append(a.unnamedFilters, filter) + return a +} + +// Filters adds one or more unnamed filters. Notice that you can +// either use named or unnamed filters, but not both. +func (a *FiltersAggregation) Filters(filters ...Query) *FiltersAggregation { + if len(filters) > 0 { + a.unnamedFilters = append(a.unnamedFilters, filters...) + } + return a +} + +// FilterWithName adds a filter with a specific name. Notice that you can +// either use named or unnamed filters, but not both. +func (a *FiltersAggregation) FilterWithName(name string, filter Query) *FiltersAggregation { + a.namedFilters[name] = filter + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) *FiltersAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *FiltersAggregation) Meta(metaData map[string]interface{}) *FiltersAggregation { + a.meta = metaData + return a +} + +// Source returns the a JSON-serializable interface. +// If the aggregation is invalid, an error is returned. This may e.g. happen +// if you mixed named and unnamed filters. 
+func (a *FiltersAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "messages" : { + // "filters" : { + // "filters" : { + // "errors" : { "term" : { "body" : "error" }}, + // "warnings" : { "term" : { "body" : "warning" }} + // } + // } + // } + // } + // } + // This method returns only the (outer) { "filters" : {} } part. + + source := make(map[string]interface{}) + filters := make(map[string]interface{}) + source["filters"] = filters + + if len(a.unnamedFilters) > 0 && len(a.namedFilters) > 0 { + return nil, errors.New("elastic: use either named or unnamed filters with FiltersAggregation but not both") + } + + if len(a.unnamedFilters) > 0 { + arr := make([]interface{}, len(a.unnamedFilters)) + for i, filter := range a.unnamedFilters { + src, err := filter.Source() + if err != nil { + return nil, err + } + arr[i] = src + } + filters["filters"] = arr + } else { + dict := make(map[string]interface{}) + for key, filter := range a.namedFilters { + src, err := filter.Source() + if err != nil { + return nil, err + } + dict[key] = src + } + filters["filters"] = dict + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go new file mode 100644 index 000000000..3a1372221 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go @@ -0,0 +1,194 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields +// and conceptually works very similar to the range aggregation. +// The user can define a point of origin and a set of distance range buckets. +// The aggregation evaluate the distance of each document value from +// the origin point and determines the buckets it belongs to based on +// the ranges (a document belongs to a bucket if the distance between the +// document and the origin falls within the distance range of the bucket). +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-geodistance-aggregation.html +type GeoDistanceAggregation struct { + field string + unit string + distanceType string + point string + ranges []geoDistAggRange + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +type geoDistAggRange struct { + Key string + From interface{} + To interface{} +} + +func NewGeoDistanceAggregation() *GeoDistanceAggregation { + return &GeoDistanceAggregation{ + subAggregations: make(map[string]Aggregation), + ranges: make([]geoDistAggRange, 0), + } +} + +func (a *GeoDistanceAggregation) Field(field string) *GeoDistanceAggregation { + a.field = field + return a +} + +func (a *GeoDistanceAggregation) Unit(unit string) *GeoDistanceAggregation { + a.unit = unit + return a +} + +func (a *GeoDistanceAggregation) DistanceType(distanceType string) *GeoDistanceAggregation { + a.distanceType = distanceType + return a +} + +func (a *GeoDistanceAggregation) Point(latLon string) *GeoDistanceAggregation { + a.point = latLon + return a +} + +func (a *GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoDistanceAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *GeoDistanceAggregation) Meta(metaData map[string]interface{}) *GeoDistanceAggregation { + a.meta = metaData + return a +} +func (a *GeoDistanceAggregation) AddRange(from, to interface{}) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to}) + return a +} + +func (a *GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to}) + return a +} + +func (a *GeoDistanceAggregation) AddUnboundedTo(from float64) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil}) + return a +} + +func (a *GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil}) + return a +} + +func (a *GeoDistanceAggregation) AddUnboundedFrom(to float64) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to}) + return a +} + +func (a *GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to}) + return a +} + +func (a *GeoDistanceAggregation) Between(from, to interface{}) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to}) + return a +} + +func (a *GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to}) + return a +} + +func (a *GeoDistanceAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "rings_around_amsterdam" : { + // "geo_distance" : { + // "field" : "location", + // "origin" : "52.3760, 4.894", + // "ranges" : [ + // { "to" : 100 }, + // { "from" : 100, "to" : 300 }, + // { "from" : 300 } + // ] + // } + // } + // } + // } + // + // This 
method returns only the { "range" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geo_distance"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.unit != "" { + opts["unit"] = a.unit + } + if a.distanceType != "" { + opts["distance_type"] = a.distanceType + } + if a.point != "" { + opts["origin"] = a.point + } + + ranges := make([]interface{}, 0) + for _, ent := range a.ranges { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case *int, *int16, *int32, *int64, *float32, *float64: + r["from"] = from + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case *int, *int16, *int32, *int64, *float32, *float64: + r["to"] = to + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geohash_grid.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geohash_grid.go new file mode 100644 index 000000000..07f61b331 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geohash_grid.go @@ -0,0 +1,102 @@ +package elastic + +type GeoHashGridAggregation struct { + field string + precision int + size int + shardSize int + subAggregations map[string]Aggregation + meta map[string]interface{} +} 
+ +func NewGeoHashGridAggregation() *GeoHashGridAggregation { + return &GeoHashGridAggregation{ + subAggregations: make(map[string]Aggregation), + precision: -1, + size: -1, + shardSize: -1, + } +} + +func (a *GeoHashGridAggregation) Field(field string) *GeoHashGridAggregation { + a.field = field + return a +} + +func (a *GeoHashGridAggregation) Precision(precision int) *GeoHashGridAggregation { + a.precision = precision + return a +} + +func (a *GeoHashGridAggregation) Size(size int) *GeoHashGridAggregation { + a.size = size + return a +} + +func (a *GeoHashGridAggregation) ShardSize(shardSize int) *GeoHashGridAggregation { + a.shardSize = shardSize + return a +} + +func (a *GeoHashGridAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoHashGridAggregation { + a.subAggregations[name] = subAggregation + return a +} + +func (a *GeoHashGridAggregation) Meta(metaData map[string]interface{}) *GeoHashGridAggregation { + a.meta = metaData + return a +} + +func (a *GeoHashGridAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs": { + // "new_york": { + // "geohash_grid": { + // "field": "location", + // "precision": 5 + // } + // } + // } + // } + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geohash_grid"] = opts + + if a.field != "" { + opts["field"] = a.field + } + + if a.precision != -1 { + opts["precision"] = a.precision + } + + if a.size != -1 { + opts["size"] = a.size + } + + if a.shardSize != -1 { + opts["shard_size"] = a.shardSize + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git 
a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go new file mode 100644 index 000000000..49e24d60f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go @@ -0,0 +1,71 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GlobalAggregation defines a single bucket of all the documents within +// the search execution context. This context is defined by the indices +// and the document types you’re searching on, but is not influenced +// by the search query itself. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html +type GlobalAggregation struct { + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGlobalAggregation() *GlobalAggregation { + return &GlobalAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) *GlobalAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *GlobalAggregation) Meta(metaData map[string]interface{}) *GlobalAggregation { + a.meta = metaData + return a +} + +func (a *GlobalAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "all_products" : { + // "global" : {}, + // "aggs" : { + // "avg_price" : { "avg" : { "field" : "price" } } + // } + // } + // } + // } + // This method returns only the { "global" : {} } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["global"] = opts + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go new file mode 100644 index 000000000..7821adbc0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go @@ -0,0 +1,253 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HistogramAggregation is a multi-bucket values source based aggregation +// that can be applied on numeric values extracted from the documents. +// It dynamically builds fixed size (a.k.a. interval) buckets over the +// values. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html +type HistogramAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + interval int64 + order string + orderAsc bool + minDocCount *int64 + extendedBoundsMin *int64 + extendedBoundsMax *int64 + offset *int64 +} + +func NewHistogramAggregation() *HistogramAggregation { + return &HistogramAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *HistogramAggregation) Field(field string) *HistogramAggregation { + a.field = field + return a +} + +func (a *HistogramAggregation) Script(script *Script) *HistogramAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *HistogramAggregation) Missing(missing interface{}) *HistogramAggregation { + a.missing = missing + return a +} + +func (a *HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *HistogramAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *HistogramAggregation) Meta(metaData map[string]interface{}) *HistogramAggregation { + a.meta = metaData + return a +} + +func (a *HistogramAggregation) Interval(interval int64) *HistogramAggregation { + a.interval = interval + return a +} + +// Order specifies the sort order. Valid values for order are: +// "_key", "_count", a sub-aggregation name, or a sub-aggregation name +// with a metric. 
+func (a *HistogramAggregation) Order(order string, asc bool) *HistogramAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByCount(asc bool) *HistogramAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByCountAsc() *HistogramAggregation { + return a.OrderByCount(true) +} + +func (a *HistogramAggregation) OrderByCountDesc() *HistogramAggregation { + return a.OrderByCount(false) +} + +func (a *HistogramAggregation) OrderByKey(asc bool) *HistogramAggregation { + // "order" : { "_key" : "asc" } + a.order = "_key" + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByKeyAsc() *HistogramAggregation { + return a.OrderByKey(true) +} + +func (a *HistogramAggregation) OrderByKeyDesc() *HistogramAggregation { + return a.OrderByKey(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *HistogramAggregation) OrderByAggregation(aggName string, asc bool) *HistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. +func (a *HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *HistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." 
+ metric + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) MinDocCount(minDocCount int64) *HistogramAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *HistogramAggregation) ExtendedBounds(min, max int64) *HistogramAggregation { + a.extendedBoundsMin = &min + a.extendedBoundsMax = &max + return a +} + +func (a *HistogramAggregation) ExtendedBoundsMin(min int64) *HistogramAggregation { + a.extendedBoundsMin = &min + return a +} + +func (a *HistogramAggregation) ExtendedBoundsMax(max int64) *HistogramAggregation { + a.extendedBoundsMax = &max + return a +} + +func (a *HistogramAggregation) Offset(offset int64) *HistogramAggregation { + a.offset = &offset + return a +} + +func (a *HistogramAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "prices" : { + // "histogram" : { + // "field" : "price", + // "interval" : 50 + // } + // } + // } + // } + // + // This method returns only the { "histogram" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["histogram"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + opts["interval"] = a.interval + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if a.offset != nil { + opts["offset"] = *a.offset + } + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { + bounds := make(map[string]interface{}) + if a.extendedBoundsMin != nil { + bounds["min"] = a.extendedBoundsMin + } + if a.extendedBoundsMax != nil { + bounds["max"] = a.extendedBoundsMax + } + opts["extended_bounds"] = bounds + } + + // 
AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go new file mode 100644 index 000000000..ca610c953 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go @@ -0,0 +1,81 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MissingAggregation is a field data based single bucket aggregation, +// that creates a bucket of all documents in the current document set context +// that are missing a field value (effectively, missing a field or having +// the configured NULL value set). This aggregator will often be used in +// conjunction with other field data bucket aggregators (such as ranges) +// to return information for all the documents that could not be placed +// in any of the other buckets due to missing field data values. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html +type MissingAggregation struct { + field string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMissingAggregation() *MissingAggregation { + return &MissingAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MissingAggregation) Field(field string) *MissingAggregation { + a.field = field + return a +} + +func (a *MissingAggregation) SubAggregation(name string, subAggregation Aggregation) *MissingAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MissingAggregation) Meta(metaData map[string]interface{}) *MissingAggregation { + a.meta = metaData + return a +} + +func (a *MissingAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "products_without_a_price" : { + // "missing" : { "field" : "price" } + // } + // } + // } + // This method returns only the { "missing" : { ... } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["missing"] = opts + + if a.field != "" { + opts["field"] = a.field + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go new file mode 100644 index 000000000..f65da8048 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go @@ -0,0 +1,82 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NestedAggregation is a special single bucket aggregation that enables +// aggregating nested documents. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-nested-aggregation.html +type NestedAggregation struct { + path string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewNestedAggregation() *NestedAggregation { + return &NestedAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *NestedAggregation) SubAggregation(name string, subAggregation Aggregation) *NestedAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *NestedAggregation) Meta(metaData map[string]interface{}) *NestedAggregation { + a.meta = metaData + return a +} + +func (a *NestedAggregation) Path(path string) *NestedAggregation { + a.path = path + return a +} + +func (a *NestedAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "match" : { "name" : "led tv" } + // } + // "aggs" : { + // "resellers" : { + // "nested" : { + // "path" : "resellers" + // }, + // "aggs" : { + // "min_price" : { "min" : { "field" : "resellers.price" } } + // } + // } + // } + // } + // This method returns only the { "nested" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["nested"] = opts + + opts["path"] = a.path + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go new file mode 100644 index 000000000..bc017c60f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go @@ -0,0 +1,232 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "time" +) + +// RangeAggregation is a multi-bucket value source based aggregation that +// enables the user to define a set of ranges - each representing a bucket. 
+// During the aggregation process, the values extracted from each document +// will be checked against each bucket range and "bucket" the +// relevant/matching document. Note that this aggregration includes the +// from value and excludes the to value for each range. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html +type RangeAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + keyed *bool + unmapped *bool + entries []rangeAggregationEntry +} + +type rangeAggregationEntry struct { + Key string + From interface{} + To interface{} +} + +func NewRangeAggregation() *RangeAggregation { + return &RangeAggregation{ + subAggregations: make(map[string]Aggregation), + entries: make([]rangeAggregationEntry, 0), + } +} + +func (a *RangeAggregation) Field(field string) *RangeAggregation { + a.field = field + return a +} + +func (a *RangeAggregation) Script(script *Script) *RangeAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *RangeAggregation) Missing(missing interface{}) *RangeAggregation { + a.missing = missing + return a +} + +func (a *RangeAggregation) SubAggregation(name string, subAggregation Aggregation) *RangeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *RangeAggregation) Meta(metaData map[string]interface{}) *RangeAggregation { + a.meta = metaData + return a +} + +func (a *RangeAggregation) Keyed(keyed bool) *RangeAggregation { + a.keyed = &keyed + return a +} + +func (a *RangeAggregation) Unmapped(unmapped bool) *RangeAggregation { + a.unmapped = &unmapped + return a +} + +func (a *RangeAggregation) AddRange(from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *RangeAggregation) AddRangeWithKey(key string, from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *RangeAggregation) AddUnboundedTo(from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *RangeAggregation) AddUnboundedFrom(to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *RangeAggregation) Lt(to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *RangeAggregation) LtWithKey(key string, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *RangeAggregation) Between(from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *RangeAggregation) 
BetweenWithKey(key string, from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *RangeAggregation) Gt(from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *RangeAggregation) GtWithKey(key string, from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *RangeAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "price_ranges" : { + // "range" : { + // "field" : "price", + // "ranges" : [ + // { "to" : 50 }, + // { "from" : 50, "to" : 100 }, + // { "from" : 100 } + // ] + // } + // } + // } + // } + // + // This method returns only the { "range" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["range"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + if a.keyed != nil { + opts["keyed"] = *a.keyed + } + if a.unmapped != nil { + opts["unmapped"] = *a.unmapped + } + + ranges := make([]interface{}, 0) + for _, ent := range a.entries { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case time.Time: + r["from"] = from.Format(time.RFC3339) + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case time.Time: + r["to"] = to.Format(time.RFC3339) + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + 
opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_reverse_nested.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_reverse_nested.go new file mode 100644 index 000000000..89c7531d2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_reverse_nested.go @@ -0,0 +1,86 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ReverseNestedAggregation defines a special single bucket aggregation +// that enables aggregating on parent docs from nested documents. +// Effectively this aggregation can break out of the nested block +// structure and link to other nested structures or the root document, +// which allows nesting other aggregations that aren’t part of +// the nested object in a nested aggregation. +// +// See: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html +type ReverseNestedAggregation struct { + path string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +// NewReverseNestedAggregation initializes a new ReverseNestedAggregation +// bucket aggregation. +func NewReverseNestedAggregation() *ReverseNestedAggregation { + return &ReverseNestedAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +// Path set the path to use for this nested aggregation. 
The path must match +// the path to a nested object in the mappings. If it is not specified +// then this aggregation will go back to the root document. +func (a *ReverseNestedAggregation) Path(path string) *ReverseNestedAggregation { + a.path = path + return a +} + +func (a *ReverseNestedAggregation) SubAggregation(name string, subAggregation Aggregation) *ReverseNestedAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ReverseNestedAggregation) Meta(metaData map[string]interface{}) *ReverseNestedAggregation { + a.meta = metaData + return a +} + +func (a *ReverseNestedAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "reverse_nested" : { + // "path": "..." + // } + // } + // } + // This method returns only the { "reverse_nested" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["reverse_nested"] = opts + + if a.path != "" { + opts["path"] = a.path + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go new file mode 100644 index 000000000..9a6df15ec --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go @@ -0,0 +1,145 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// SamplerAggregation is a filtering aggregation used to limit any +// sub aggregations' processing to a sample of the top-scoring documents. +// Optionally, diversity settings can be used to limit the number of matches +// that share a common value such as an "author". +// See: https://www.elastic.co/guide/en/elasticsearch/reference/2.x/search-aggregations-bucket-sampler-aggregation.html +type SamplerAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + shardSize int + maxDocsPerValue int + executionHint string +} + +func NewSamplerAggregation() *SamplerAggregation { + return &SamplerAggregation{ + shardSize: -1, + maxDocsPerValue: -1, + subAggregations: make(map[string]Aggregation), + } +} + +func (a *SamplerAggregation) Field(field string) *SamplerAggregation { + a.field = field + return a +} + +func (a *SamplerAggregation) Script(script *Script) *SamplerAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *SamplerAggregation) Missing(missing interface{}) *SamplerAggregation { + a.missing = missing + return a +} + +func (a *SamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *SamplerAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SamplerAggregation) Meta(metaData map[string]interface{}) *SamplerAggregation { + a.meta = metaData + return a +} + +// ShardSize sets the maximum number of docs returned from each shard. 
+func (a *SamplerAggregation) ShardSize(shardSize int) *SamplerAggregation { + a.shardSize = shardSize + return a +} + +func (a *SamplerAggregation) MaxDocsPerValue(maxDocsPerValue int) *SamplerAggregation { + a.maxDocsPerValue = maxDocsPerValue + return a +} + +func (a *SamplerAggregation) ExecutionHint(hint string) *SamplerAggregation { + a.executionHint = hint + return a +} + +func (a *SamplerAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "sample" : { + // "sampler" : { + // "field" : "user.id", + // "shard_size" : 200 + // }, + // "aggs": { + // "keywords": { + // "significant_terms": { + // "field": "text" + // } + // } + // } + // } + // } + // } + // + // This method returns only the { "sampler" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["sampler"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + if a.shardSize >= 0 { + opts["shard_size"] = a.shardSize + } + if a.maxDocsPerValue >= 0 { + opts["max_docs_per_value"] = a.maxDocsPerValue + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go new file mode 100644 index 
000000000..041bdb43d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go @@ -0,0 +1,389 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SignificantSignificantTermsAggregation is an aggregation that returns interesting +// or unusual occurrences of terms in a set. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html +type SignificantTermsAggregation struct { + field string + subAggregations map[string]Aggregation + meta map[string]interface{} + + minDocCount *int + shardMinDocCount *int + requiredSize *int + shardSize *int + filter Query + executionHint string + significanceHeuristic SignificanceHeuristic +} + +func NewSignificantTermsAggregation() *SignificantTermsAggregation { + return &SignificantTermsAggregation{ + subAggregations: make(map[string]Aggregation, 0), + } +} + +func (a *SignificantTermsAggregation) Field(field string) *SignificantTermsAggregation { + a.field = field + return a +} + +func (a *SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTermsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *SignificantTermsAggregation) Meta(metaData map[string]interface{}) *SignificantTermsAggregation { + a.meta = metaData + return a +} + +func (a *SignificantTermsAggregation) MinDocCount(minDocCount int) *SignificantTermsAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) *SignificantTermsAggregation { + a.shardMinDocCount = &shardMinDocCount + return a +} + +func (a *SignificantTermsAggregation) RequiredSize(requiredSize int) *SignificantTermsAggregation { + a.requiredSize = &requiredSize + return a +} + +func (a *SignificantTermsAggregation) ShardSize(shardSize int) *SignificantTermsAggregation { + a.shardSize = &shardSize + return a +} + +func (a *SignificantTermsAggregation) BackgroundFilter(filter Query) *SignificantTermsAggregation { + a.filter = filter + return a +} + +func (a *SignificantTermsAggregation) ExecutionHint(hint string) *SignificantTermsAggregation { + a.executionHint = hint + return a +} + +func (a *SignificantTermsAggregation) SignificanceHeuristic(heuristic SignificanceHeuristic) *SignificantTermsAggregation { + a.significanceHeuristic = heuristic + return a +} + +func (a *SignificantTermsAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "terms" : {"force" : [ "British Transport Police" ]} + // }, + // "aggregations" : { + // "significantCrimeTypes" : { + // "significant_terms" : { "field" : "crime_type" } + // } + // } + // } + // + // This method returns only the + // { "significant_terms" : { "field" : "crime_type" } + // part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["significant_terms"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.requiredSize != nil { + opts["size"] = *a.requiredSize // not a typo! 
+ } + if a.shardSize != nil { + opts["shard_size"] = *a.shardSize + } + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.shardMinDocCount != nil { + opts["shard_min_doc_count"] = *a.shardMinDocCount + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + if a.filter != nil { + src, err := a.filter.Source() + if err != nil { + return nil, err + } + opts["background_filter"] = src + } + if a.significanceHeuristic != nil { + name := a.significanceHeuristic.Name() + src, err := a.significanceHeuristic.Source() + if err != nil { + return nil, err + } + opts[name] = src + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} + +// -- Significance heuristics -- + +type SignificanceHeuristic interface { + Name() string + Source() (interface{}, error) +} + +// -- Chi Square -- + +// ChiSquareSignificanceHeuristic implements Chi square as described +// in "Information Retrieval", Manning et al., Chapter 13.5.2. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_chi_square +// for details. +type ChiSquareSignificanceHeuristic struct { + backgroundIsSuperset *bool + includeNegatives *bool +} + +// NewChiSquareSignificanceHeuristic initializes a new ChiSquareSignificanceHeuristic. +func NewChiSquareSignificanceHeuristic() *ChiSquareSignificanceHeuristic { + return &ChiSquareSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. 
+func (sh *ChiSquareSignificanceHeuristic) Name() string { + return "chi_square" +} + +// BackgroundIsSuperset indicates whether you defined a custom background +// filter that represents a difference set of documents that you want to +// compare to. +func (sh *ChiSquareSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *ChiSquareSignificanceHeuristic { + sh.backgroundIsSuperset = &backgroundIsSuperset + return sh +} + +// IncludeNegatives indicates whether to filter out the terms that appear +// much less in the subset than in the background without the subset. +func (sh *ChiSquareSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *ChiSquareSignificanceHeuristic { + sh.includeNegatives = &includeNegatives + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *ChiSquareSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.backgroundIsSuperset != nil { + source["background_is_superset"] = *sh.backgroundIsSuperset + } + if sh.includeNegatives != nil { + source["include_negatives"] = *sh.includeNegatives + } + return source, nil +} + +// -- GND -- + +// GNDSignificanceHeuristic implements the "Google Normalized Distance" +// as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, +// 2007. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_google_normalized_distance +// for details. +type GNDSignificanceHeuristic struct { + backgroundIsSuperset *bool +} + +// NewGNDSignificanceHeuristic implements a new GNDSignificanceHeuristic. +func NewGNDSignificanceHeuristic() *GNDSignificanceHeuristic { + return &GNDSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. 
+func (sh *GNDSignificanceHeuristic) Name() string { + return "gnd" +} + +// BackgroundIsSuperset indicates whether you defined a custom background +// filter that represents a difference set of documents that you want to +// compare to. +func (sh *GNDSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *GNDSignificanceHeuristic { + sh.backgroundIsSuperset = &backgroundIsSuperset + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *GNDSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.backgroundIsSuperset != nil { + source["background_is_superset"] = *sh.backgroundIsSuperset + } + return source, nil +} + +// -- JLH Score -- + +// JLHScoreSignificanceHeuristic implements the JLH score as described in +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_jlh_score. +type JLHScoreSignificanceHeuristic struct{} + +// NewJLHScoreSignificanceHeuristic initializes a new JLHScoreSignificanceHeuristic. +func NewJLHScoreSignificanceHeuristic() *JLHScoreSignificanceHeuristic { + return &JLHScoreSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *JLHScoreSignificanceHeuristic) Name() string { + return "jlh" +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *JLHScoreSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + return source, nil +} + +// -- Mutual Information -- + +// MutualInformationSignificanceHeuristic implements Mutual information +// as described in "Information Retrieval", Manning et al., Chapter 13.5.1. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_mutual_information +// for details. 
+type MutualInformationSignificanceHeuristic struct { + backgroundIsSuperset *bool + includeNegatives *bool +} + +// NewMutualInformationSignificanceHeuristic initializes a new instance of +// MutualInformationSignificanceHeuristic. +func NewMutualInformationSignificanceHeuristic() *MutualInformationSignificanceHeuristic { + return &MutualInformationSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *MutualInformationSignificanceHeuristic) Name() string { + return "mutual_information" +} + +// BackgroundIsSuperset indicates whether you defined a custom background +// filter that represents a difference set of documents that you want to +// compare to. +func (sh *MutualInformationSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *MutualInformationSignificanceHeuristic { + sh.backgroundIsSuperset = &backgroundIsSuperset + return sh +} + +// IncludeNegatives indicates whether to filter out the terms that appear +// much less in the subset than in the background without the subset. +func (sh *MutualInformationSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *MutualInformationSignificanceHeuristic { + sh.includeNegatives = &includeNegatives + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *MutualInformationSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.backgroundIsSuperset != nil { + source["background_is_superset"] = *sh.backgroundIsSuperset + } + if sh.includeNegatives != nil { + source["include_negatives"] = *sh.includeNegatives + } + return source, nil +} + +// -- Percentage Score -- + +// PercentageScoreSignificanceHeuristic implements the algorithm described +// in https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_percentage. 
+type PercentageScoreSignificanceHeuristic struct{} + +// NewPercentageScoreSignificanceHeuristic initializes a new instance of +// PercentageScoreSignificanceHeuristic. +func NewPercentageScoreSignificanceHeuristic() *PercentageScoreSignificanceHeuristic { + return &PercentageScoreSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *PercentageScoreSignificanceHeuristic) Name() string { + return "percentage" +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *PercentageScoreSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + return source, nil +} + +// -- Script -- + +// ScriptSignificanceHeuristic implements a scripted significance heuristic. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_scripted +// for details. +type ScriptSignificanceHeuristic struct { + script *Script +} + +// NewScriptSignificanceHeuristic initializes a new instance of +// ScriptSignificanceHeuristic. +func NewScriptSignificanceHeuristic() *ScriptSignificanceHeuristic { + return &ScriptSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *ScriptSignificanceHeuristic) Name() string { + return "script_heuristic" +} + +// Script specifies the script to use to get custom scores. The following +// parameters are available in the script: `_subset_freq`, `_superset_freq`, +// `_subset_size`, and `_superset_size`. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_scripted +// for details. +func (sh *ScriptSignificanceHeuristic) Script(script *Script) *ScriptSignificanceHeuristic { + sh.script = script + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. 
+func (sh *ScriptSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.script != nil { + src, err := sh.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go new file mode 100644 index 000000000..2d3c0d1ad --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go @@ -0,0 +1,341 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermsAggregation is a multi-bucket value source based aggregation +// where buckets are dynamically built - one per unique value. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html +type TermsAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + size *int + shardSize *int + requiredSize *int + minDocCount *int + shardMinDocCount *int + valueType string + order string + orderAsc bool + includePattern string + includeFlags *int + excludePattern string + excludeFlags *int + executionHint string + collectionMode string + showTermDocCountError *bool + includeTerms []string + excludeTerms []string +} + +func NewTermsAggregation() *TermsAggregation { + return &TermsAggregation{ + subAggregations: make(map[string]Aggregation, 0), + includeTerms: make([]string, 0), + excludeTerms: make([]string, 0), + } +} + +func (a *TermsAggregation) Field(field string) *TermsAggregation { + a.field = field + return a +} + +func (a *TermsAggregation) Script(script *Script) *TermsAggregation { + a.script = script + return a +} + +// Missing configures the value to use when 
documents miss a value. +func (a *TermsAggregation) Missing(missing interface{}) *TermsAggregation { + a.missing = missing + return a +} + +func (a *TermsAggregation) SubAggregation(name string, subAggregation Aggregation) *TermsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *TermsAggregation) Meta(metaData map[string]interface{}) *TermsAggregation { + a.meta = metaData + return a +} + +func (a *TermsAggregation) Size(size int) *TermsAggregation { + a.size = &size + return a +} + +func (a *TermsAggregation) RequiredSize(requiredSize int) *TermsAggregation { + a.requiredSize = &requiredSize + return a +} + +func (a *TermsAggregation) ShardSize(shardSize int) *TermsAggregation { + a.shardSize = &shardSize + return a +} + +func (a *TermsAggregation) MinDocCount(minDocCount int) *TermsAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *TermsAggregation) ShardMinDocCount(shardMinDocCount int) *TermsAggregation { + a.shardMinDocCount = &shardMinDocCount + return a +} + +func (a *TermsAggregation) Include(regexp string) *TermsAggregation { + a.includePattern = regexp + return a +} + +func (a *TermsAggregation) IncludeWithFlags(regexp string, flags int) *TermsAggregation { + a.includePattern = regexp + a.includeFlags = &flags + return a +} + +func (a *TermsAggregation) Exclude(regexp string) *TermsAggregation { + a.excludePattern = regexp + return a +} + +func (a *TermsAggregation) ExcludeWithFlags(regexp string, flags int) *TermsAggregation { + a.excludePattern = regexp + a.excludeFlags = &flags + return a +} + +// ValueType can be string, long, or double. 
+func (a *TermsAggregation) ValueType(valueType string) *TermsAggregation { + a.valueType = valueType + return a +} + +func (a *TermsAggregation) Order(order string, asc bool) *TermsAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByCount(asc bool) *TermsAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByCountAsc() *TermsAggregation { + return a.OrderByCount(true) +} + +func (a *TermsAggregation) OrderByCountDesc() *TermsAggregation { + return a.OrderByCount(false) +} + +func (a *TermsAggregation) OrderByTerm(asc bool) *TermsAggregation { + // "order" : { "_term" : "asc" } + a.order = "_term" + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByTermAsc() *TermsAggregation { + return a.OrderByTerm(true) +} + +func (a *TermsAggregation) OrderByTermDesc() *TermsAggregation { + return a.OrderByTerm(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *TermsAggregation) OrderByAggregation(aggName string, asc bool) *TermsAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. +func (a *TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *TermsAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." 
+ metric + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) ExecutionHint(hint string) *TermsAggregation { + a.executionHint = hint + return a +} + +// Collection mode can be depth_first or breadth_first as of 1.4.0. +func (a *TermsAggregation) CollectionMode(collectionMode string) *TermsAggregation { + a.collectionMode = collectionMode + return a +} + +func (a *TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) *TermsAggregation { + a.showTermDocCountError = &showTermDocCountError + return a +} + +func (a *TermsAggregation) IncludeTerms(terms ...string) *TermsAggregation { + a.includeTerms = append(a.includeTerms, terms...) + return a +} + +func (a *TermsAggregation) ExcludeTerms(terms ...string) *TermsAggregation { + a.excludeTerms = append(a.excludeTerms, terms...) + return a +} + +func (a *TermsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "genders" : { + // "terms" : { "field" : "gender" } + // } + // } + // } + // This method returns only the { "terms" : { "field" : "gender" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["terms"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + // TermsBuilder + if a.size != nil && *a.size >= 0 { + opts["size"] = *a.size + } + if a.shardSize != nil && *a.shardSize >= 0 { + opts["shard_size"] = *a.shardSize + } + if a.requiredSize != nil && *a.requiredSize >= 0 { + opts["required_size"] = *a.requiredSize + } + if a.minDocCount != nil && *a.minDocCount >= 0 { + opts["min_doc_count"] = *a.minDocCount + } + if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 { + opts["shard_min_doc_count"] = *a.shardMinDocCount + } + if a.showTermDocCountError != nil { + opts["show_term_doc_count_error"] = *a.showTermDocCountError + } + if a.collectionMode != "" { + opts["collect_mode"] = a.collectionMode + } + if a.valueType != "" { + opts["value_type"] = a.valueType + } + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if len(a.includeTerms) > 0 { + opts["include"] = a.includeTerms + } + if a.includePattern != "" { + if a.includeFlags == nil || *a.includeFlags == 0 { + opts["include"] = a.includePattern + } else { + p := make(map[string]interface{}) + p["pattern"] = a.includePattern + p["flags"] = *a.includeFlags + opts["include"] = p + } + } + if len(a.excludeTerms) > 0 { + opts["exclude"] = a.excludeTerms + } + if a.excludePattern != "" { + if a.excludeFlags == nil || *a.excludeFlags == 0 { + opts["exclude"] = a.excludePattern + } else { + p := make(map[string]interface{}) + p["pattern"] = a.excludePattern + p["flags"] = *a.excludeFlags + opts["exclude"] = p + } + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint 
+ } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go new file mode 100644 index 000000000..37ec2b7ad --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go @@ -0,0 +1,101 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AvgAggregation is a single-value metrics aggregation that computes +// the average of numeric values that are extracted from the +// aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by +// a provided script. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html +type AvgAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewAvgAggregation() *AvgAggregation { + return &AvgAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *AvgAggregation) Field(field string) *AvgAggregation { + a.field = field + return a +} + +func (a *AvgAggregation) Script(script *Script) *AvgAggregation { + a.script = script + return a +} + +func (a *AvgAggregation) Format(format string) *AvgAggregation { + a.format = format + return a +} + +func (a *AvgAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *AvgAggregation) Meta(metaData map[string]interface{}) *AvgAggregation { + a.meta = metaData + return a +} + +func (a *AvgAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "avg_grade" : { "avg" : { "field" : "grade" } } + // } + // } + // This method returns only the { "avg" : { "field" : "grade" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["avg"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go new file mode 100644 index 000000000..ebf247c79 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go @@ -0,0 +1,120 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CardinalityAggregation is a single-value metrics aggregation that +// calculates an approximate count of distinct values. +// Values can be extracted either from specific fields in the document +// or generated by a script. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html +type CardinalityAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + precisionThreshold *int64 + rehash *bool +} + +func NewCardinalityAggregation() *CardinalityAggregation { + return &CardinalityAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *CardinalityAggregation) Field(field string) *CardinalityAggregation { + a.field = field + return a +} + +func (a *CardinalityAggregation) Script(script *Script) *CardinalityAggregation { + a.script = script + return a +} + +func (a *CardinalityAggregation) Format(format string) *CardinalityAggregation { + a.format = format + return a +} + +func (a *CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) *CardinalityAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *CardinalityAggregation) Meta(metaData map[string]interface{}) *CardinalityAggregation { + a.meta = metaData + return a +} + +func (a *CardinalityAggregation) PrecisionThreshold(threshold int64) *CardinalityAggregation { + a.precisionThreshold = &threshold + return a +} + +func (a *CardinalityAggregation) Rehash(rehash bool) *CardinalityAggregation { + a.rehash = &rehash + return a +} + +func (a *CardinalityAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "author_count" : { + // "cardinality" : { "field" : "author" } + // } + // } + // } + // This method returns only the "cardinality" : { "field" : "author" } part. 
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["cardinality"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != nil {
+		src, err := a.script.Source()
+		if err != nil {
+			return nil, err
+		}
+		opts["script"] = src
+	}
+
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if a.precisionThreshold != nil {
+		opts["precision_threshold"] = *a.precisionThreshold
+	}
+	if a.rehash != nil {
+		opts["rehash"] = *a.rehash
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			src, err := aggregate.Source()
+			if err != nil {
+				return nil, err
+			}
+			aggsMap[name] = src
+		}
+	}
+
+	// Add Meta data if available
+	if len(a.meta) > 0 {
+		source["meta"] = a.meta
+	}
+
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go
new file mode 100644
index 000000000..69447409c
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go
@@ -0,0 +1,99 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExtendedStatsAggregation is a multi-value metrics aggregation that
+// computes stats over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html +type ExtendedStatsAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewExtendedStatsAggregation() *ExtendedStatsAggregation { + return &ExtendedStatsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ExtendedStatsAggregation) Field(field string) *ExtendedStatsAggregation { + a.field = field + return a +} + +func (a *ExtendedStatsAggregation) Script(script *Script) *ExtendedStatsAggregation { + a.script = script + return a +} + +func (a *ExtendedStatsAggregation) Format(format string) *ExtendedStatsAggregation { + a.format = format + return a +} + +func (a *ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *ExtendedStatsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ExtendedStatsAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsAggregation { + a.meta = metaData + return a +} + +func (a *ExtendedStatsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_stats" : { "extended_stats" : { "field" : "grade" } } + // } + // } + // This method returns only the { "extended_stats" : { "field" : "grade" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["extended_stats"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go new file mode 100644 index 000000000..647ba5139 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go @@ -0,0 +1,105 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoBoundsAggregation is a metric aggregation that computes the +// bounding box containing all geo_point values for a field. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html +type GeoBoundsAggregation struct { + field string + script *Script + wrapLongitude *bool + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGeoBoundsAggregation() *GeoBoundsAggregation { + return &GeoBoundsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *GeoBoundsAggregation) Field(field string) *GeoBoundsAggregation { + a.field = field + return a +} + +func (a *GeoBoundsAggregation) Script(script *Script) *GeoBoundsAggregation { + a.script = script + return a +} + +func (a *GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) *GeoBoundsAggregation { + a.wrapLongitude = &wrapLongitude + return a +} + +func (a *GeoBoundsAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoBoundsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *GeoBoundsAggregation) Meta(metaData map[string]interface{}) *GeoBoundsAggregation { + a.meta = metaData + return a +} + +func (a *GeoBoundsAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "match" : { "business_type" : "shop" } + // }, + // "aggs" : { + // "viewport" : { + // "geo_bounds" : { + // "field" : "location" + // "wrap_longitude" : "true" + // } + // } + // } + // } + // + // This method returns only the { "geo_bounds" : { ... } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geo_bounds"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.wrapLongitude != nil { + opts["wrap_longitude"] = *a.wrapLongitude + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go new file mode 100644 index 000000000..334cff020 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MaxAggregation is a single-value metrics aggregation that keeps track and +// returns the maximum value among the numeric values extracted from +// the aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by +// a provided script. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html +type MaxAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMaxAggregation() *MaxAggregation { + return &MaxAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MaxAggregation) Field(field string) *MaxAggregation { + a.field = field + return a +} + +func (a *MaxAggregation) Script(script *Script) *MaxAggregation { + a.script = script + return a +} + +func (a *MaxAggregation) Format(format string) *MaxAggregation { + a.format = format + return a +} + +func (a *MaxAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MaxAggregation) Meta(metaData map[string]interface{}) *MaxAggregation { + a.meta = metaData + return a +} +func (a *MaxAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "max_price" : { "max" : { "field" : "price" } } + // } + // } + // This method returns only the { "max" : { "field" : "price" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["max"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go new file mode 100644 index 000000000..f9e21f7a8 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go @@ -0,0 +1,100 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MinAggregation is a single-value metrics aggregation that keeps track and +// returns the minimum value among numeric values extracted from the +// aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by a +// provided script. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html +type MinAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMinAggregation() *MinAggregation { + return &MinAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MinAggregation) Field(field string) *MinAggregation { + a.field = field + return a +} + +func (a *MinAggregation) Script(script *Script) *MinAggregation { + a.script = script + return a +} + +func (a *MinAggregation) Format(format string) *MinAggregation { + a.format = format + return a +} + +func (a *MinAggregation) SubAggregation(name string, subAggregation Aggregation) *MinAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MinAggregation) Meta(metaData map[string]interface{}) *MinAggregation { + a.meta = metaData + return a +} + +func (a *MinAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "min_price" : { "min" : { "field" : "price" } } + // } + // } + // This method returns only the { "min" : { "field" : "price" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["min"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go new file mode 100644 index 000000000..c0b3aa663 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go @@ -0,0 +1,131 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// PercentileRanksAggregation +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html +type PercentileRanksAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + values []float64 + compression *float64 + estimator string +} + +func NewPercentileRanksAggregation() *PercentileRanksAggregation { + return &PercentileRanksAggregation{ + subAggregations: make(map[string]Aggregation), + values: make([]float64, 0), + } +} + +func (a *PercentileRanksAggregation) Field(field string) *PercentileRanksAggregation { + a.field = field + return a +} + +func (a *PercentileRanksAggregation) Script(script *Script) *PercentileRanksAggregation { + a.script = script + return a +} + +func (a *PercentileRanksAggregation) Format(format string) *PercentileRanksAggregation { + a.format = format + return a +} + +func (a *PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentileRanksAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *PercentileRanksAggregation) Meta(metaData map[string]interface{}) *PercentileRanksAggregation { + a.meta = metaData + return a +} + +func (a *PercentileRanksAggregation) Values(values ...float64) *PercentileRanksAggregation { + a.values = append(a.values, values...) 
+ return a +} + +func (a *PercentileRanksAggregation) Compression(compression float64) *PercentileRanksAggregation { + a.compression = &compression + return a +} + +func (a *PercentileRanksAggregation) Estimator(estimator string) *PercentileRanksAggregation { + a.estimator = estimator + return a +} + +func (a *PercentileRanksAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "load_time_outlier" : { + // "percentile_ranks" : { + // "field" : "load_time" + // "values" : [15, 30] + // } + // } + // } + // } + // This method returns only the + // { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } } + // part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["percentile_ranks"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + if len(a.values) > 0 { + opts["values"] = a.values + } + if a.compression != nil { + opts["compression"] = *a.compression + } + if a.estimator != "" { + opts["estimator"] = a.estimator + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go new file mode 100644 index 000000000..b1695ebb3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go @@ -0,0 +1,130 @@ +// Copyright 2012-2015 
Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PercentilesAggregation +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html +type PercentilesAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + percentiles []float64 + compression *float64 + estimator string +} + +func NewPercentilesAggregation() *PercentilesAggregation { + return &PercentilesAggregation{ + subAggregations: make(map[string]Aggregation), + percentiles: make([]float64, 0), + } +} + +func (a *PercentilesAggregation) Field(field string) *PercentilesAggregation { + a.field = field + return a +} + +func (a *PercentilesAggregation) Script(script *Script) *PercentilesAggregation { + a.script = script + return a +} + +func (a *PercentilesAggregation) Format(format string) *PercentilesAggregation { + a.format = format + return a +} + +func (a *PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *PercentilesAggregation) Meta(metaData map[string]interface{}) *PercentilesAggregation { + a.meta = metaData + return a +} + +func (a *PercentilesAggregation) Percentiles(percentiles ...float64) *PercentilesAggregation { + a.percentiles = append(a.percentiles, percentiles...) 
+ return a +} + +func (a *PercentilesAggregation) Compression(compression float64) *PercentilesAggregation { + a.compression = &compression + return a +} + +func (a *PercentilesAggregation) Estimator(estimator string) *PercentilesAggregation { + a.estimator = estimator + return a +} + +func (a *PercentilesAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "load_time_outlier" : { + // "percentiles" : { + // "field" : "load_time" + // } + // } + // } + // } + // This method returns only the + // { "percentiles" : { "field" : "load_time" } } + // part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["percentiles"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + if len(a.percentiles) > 0 { + opts["percents"] = a.percentiles + } + if a.compression != nil { + opts["compression"] = *a.compression + } + if a.estimator != "" { + opts["estimator"] = a.estimator + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go new file mode 100644 index 000000000..42da9c854 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// StatsAggregation is a multi-value metrics aggregation that computes stats +// over numeric values extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html +type StatsAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewStatsAggregation() *StatsAggregation { + return &StatsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *StatsAggregation) Field(field string) *StatsAggregation { + a.field = field + return a +} + +func (a *StatsAggregation) Script(script *Script) *StatsAggregation { + a.script = script + return a +} + +func (a *StatsAggregation) Format(format string) *StatsAggregation { + a.format = format + return a +} + +func (a *StatsAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *StatsAggregation) Meta(metaData map[string]interface{}) *StatsAggregation { + a.meta = metaData + return a +} + +func (a *StatsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_stats" : { "stats" : { "field" : "grade" } } + // } + // } + // This method returns only the { "stats" : { "field" : "grade" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["stats"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go new file mode 100644 index 000000000..6f783e7e1 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SumAggregation is a single-value metrics aggregation that sums up +// numeric values that are extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html +type SumAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewSumAggregation() *SumAggregation { + return &SumAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *SumAggregation) Field(field string) *SumAggregation { + a.field = field + return a +} + +func (a *SumAggregation) Script(script *Script) *SumAggregation { + a.script = script + return a +} + +func (a *SumAggregation) Format(format string) *SumAggregation { + a.format = format + return a +} + +func (a *SumAggregation) SubAggregation(name string, subAggregation Aggregation) *SumAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SumAggregation) Meta(metaData map[string]interface{}) *SumAggregation { + a.meta = metaData + return a +} + +func (a *SumAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "intraday_return" : { "sum" : { "field" : "change" } } + // } + // } + // This method returns only the { "sum" : { "field" : "change" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["sum"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go new file mode 100644 index 000000000..c017abb98 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go @@ -0,0 +1,143 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TopHitsAggregation keeps track of the most relevant document +// being aggregated. This aggregator is intended to be used as a +// sub aggregator, so that the top matching documents +// can be aggregated per bucket. +// +// It can effectively be used to group result sets by certain fields via +// a bucket aggregator. One or more bucket aggregators determines by +// which properties a result set get sliced into. 
+// +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html +type TopHitsAggregation struct { + searchSource *SearchSource +} + +func NewTopHitsAggregation() *TopHitsAggregation { + return &TopHitsAggregation{ + searchSource: NewSearchSource(), + } +} + +func (a *TopHitsAggregation) From(from int) *TopHitsAggregation { + a.searchSource = a.searchSource.From(from) + return a +} + +func (a *TopHitsAggregation) Size(size int) *TopHitsAggregation { + a.searchSource = a.searchSource.Size(size) + return a +} + +func (a *TopHitsAggregation) TrackScores(trackScores bool) *TopHitsAggregation { + a.searchSource = a.searchSource.TrackScores(trackScores) + return a +} + +func (a *TopHitsAggregation) Explain(explain bool) *TopHitsAggregation { + a.searchSource = a.searchSource.Explain(explain) + return a +} + +func (a *TopHitsAggregation) Version(version bool) *TopHitsAggregation { + a.searchSource = a.searchSource.Version(version) + return a +} + +func (a *TopHitsAggregation) NoFields() *TopHitsAggregation { + a.searchSource = a.searchSource.NoFields() + return a +} + +func (a *TopHitsAggregation) FetchSource(fetchSource bool) *TopHitsAggregation { + a.searchSource = a.searchSource.FetchSource(fetchSource) + return a +} + +func (a *TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) *TopHitsAggregation { + a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext) + return a +} + +func (a *TopHitsAggregation) FieldDataFields(fieldDataFields ...string) *TopHitsAggregation { + a.searchSource = a.searchSource.FieldDataFields(fieldDataFields...) 
+ return a +} + +func (a *TopHitsAggregation) FieldDataField(fieldDataField string) *TopHitsAggregation { + a.searchSource = a.searchSource.FieldDataField(fieldDataField) + return a +} + +func (a *TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) *TopHitsAggregation { + a.searchSource = a.searchSource.ScriptFields(scriptFields...) + return a +} + +func (a *TopHitsAggregation) ScriptField(scriptField *ScriptField) *TopHitsAggregation { + a.searchSource = a.searchSource.ScriptField(scriptField) + return a +} + +func (a *TopHitsAggregation) Sort(field string, ascending bool) *TopHitsAggregation { + a.searchSource = a.searchSource.Sort(field, ascending) + return a +} + +func (a *TopHitsAggregation) SortWithInfo(info SortInfo) *TopHitsAggregation { + a.searchSource = a.searchSource.SortWithInfo(info) + return a +} + +func (a *TopHitsAggregation) SortBy(sorter ...Sorter) *TopHitsAggregation { + a.searchSource = a.searchSource.SortBy(sorter...) + return a +} + +func (a *TopHitsAggregation) Highlight(highlight *Highlight) *TopHitsAggregation { + a.searchSource = a.searchSource.Highlight(highlight) + return a +} + +func (a *TopHitsAggregation) Highlighter() *Highlight { + return a.searchSource.Highlighter() +} + +func (a *TopHitsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs": { + // "top_tag_hits": { + // "top_hits": { + // "sort": [ + // { + // "last_activity_date": { + // "order": "desc" + // } + // } + // ], + // "_source": { + // "include": [ + // "title" + // ] + // }, + // "size" : 1 + // } + // } + // } + // } + // This method returns only the { "top_hits" : { ... } } part. 
+ + source := make(map[string]interface{}) + src, err := a.searchSource.Source() + if err != nil { + return nil, err + } + source["top_hits"] = src + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go new file mode 100644 index 000000000..b2e3e8241 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go @@ -0,0 +1,102 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ValueCountAggregation is a single-value metrics aggregation that counts +// the number of values that are extracted from the aggregated documents. +// These values can be extracted either from specific fields in the documents, +// or be generated by a provided script. Typically, this aggregator will be +// used in conjunction with other single-value aggregations. +// For example, when computing the avg one might be interested in the +// number of values the average is computed over. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html +type ValueCountAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewValueCountAggregation() *ValueCountAggregation { + return &ValueCountAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ValueCountAggregation) Field(field string) *ValueCountAggregation { + a.field = field + return a +} + +func (a *ValueCountAggregation) Script(script *Script) *ValueCountAggregation { + a.script = script + return a +} + +func (a *ValueCountAggregation) Format(format string) *ValueCountAggregation { + a.format = format + return a +} + +func (a *ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) *ValueCountAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ValueCountAggregation) Meta(metaData map[string]interface{}) *ValueCountAggregation { + a.meta = metaData + return a +} + +func (a *ValueCountAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_count" : { "value_count" : { "field" : "grade" } } + // } + // } + // This method returns only the { "value_count" : { "field" : "grade" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["value_count"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go new file mode 100644 index 000000000..5cd93d5cc --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AvgBucketAggregation is a sibling pipeline aggregation which calculates +// the (mean) average value of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-avg-bucket-aggregation.html +type AvgBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewAvgBucketAggregation creates and initializes a new AvgBucketAggregation. 
+func NewAvgBucketAggregation() *AvgBucketAggregation { + return &AvgBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *AvgBucketAggregation) Format(format string) *AvgBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *AvgBucketAggregation) GapPolicy(gapPolicy string) *AvgBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *AvgBucketAggregation) GapInsertZeros() *AvgBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *AvgBucketAggregation) GapSkip() *AvgBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *AvgBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *AvgBucketAggregation) Meta(metaData map[string]interface{}) *AvgBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *AvgBucketAggregation) BucketsPath(bucketsPaths ...string) *AvgBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *AvgBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["avg_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go new file mode 100644 index 000000000..44d6bc624 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go @@ -0,0 +1,132 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// BucketScriptAggregation is a parent pipeline aggregation which executes +// a script which can perform per bucket computations on specified metrics +// in the parent multi-bucket aggregation. The specified metric must be +// numeric and the script must return a numeric value. 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html +type BucketScriptAggregation struct { + format string + gapPolicy string + script *Script + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPathsMap map[string]string +} + +// NewBucketScriptAggregation creates and initializes a new BucketScriptAggregation. +func NewBucketScriptAggregation() *BucketScriptAggregation { + return &BucketScriptAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPathsMap: make(map[string]string), + } +} + +func (a *BucketScriptAggregation) Format(format string) *BucketScriptAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *BucketScriptAggregation) GapPolicy(gapPolicy string) *BucketScriptAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *BucketScriptAggregation) GapInsertZeros() *BucketScriptAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *BucketScriptAggregation) GapSkip() *BucketScriptAggregation { + a.gapPolicy = "skip" + return a +} + +// Script is the script to run. +func (a *BucketScriptAggregation) Script(script *Script) *BucketScriptAggregation { + a.script = script + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *BucketScriptAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketScriptAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *BucketScriptAggregation) Meta(metaData map[string]interface{}) *BucketScriptAggregation { + a.meta = metaData + return a +} + +// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. +func (a *BucketScriptAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketScriptAggregation { + a.bucketsPathsMap = bucketsPathsMap + return a +} + +// AddBucketsPath adds a bucket path to use for this pipeline aggregator. +func (a *BucketScriptAggregation) AddBucketsPath(name, path string) *BucketScriptAggregation { + if a.bucketsPathsMap == nil { + a.bucketsPathsMap = make(map[string]string) + } + a.bucketsPathsMap[name] = path + return a +} + +func (a *BucketScriptAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["bucket_script"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + } + + // Add buckets paths + if len(a.bucketsPathsMap) > 0 { + params["buckets_path"] = a.bucketsPathsMap + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go new file mode 100644 index 000000000..ce17ec1f6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go @@ -0,0 +1,134 @@ +// Copyright 
2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// BucketSelectorAggregation is a parent pipeline aggregation which +// determines whether the current bucket will be retained in the parent +// multi-bucket aggregation. The specific metric must be numeric and +// the script must return a boolean value. If the script language is +// expression then a numeric return value is permitted. In this case 0.0 +// will be evaluated as false and all other values will evaluate to true. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-selector-aggregation.html +type BucketSelectorAggregation struct { + format string + gapPolicy string + script *Script + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPathsMap map[string]string +} + +// NewBucketSelectorAggregation creates and initializes a new BucketSelectorAggregation. +func NewBucketSelectorAggregation() *BucketSelectorAggregation { + return &BucketSelectorAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPathsMap: make(map[string]string), + } +} + +func (a *BucketSelectorAggregation) Format(format string) *BucketSelectorAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *BucketSelectorAggregation) GapPolicy(gapPolicy string) *BucketSelectorAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *BucketSelectorAggregation) GapInsertZeros() *BucketSelectorAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. 
+func (a *BucketSelectorAggregation) GapSkip() *BucketSelectorAggregation { + a.gapPolicy = "skip" + return a +} + +// Script is the script to run. +func (a *BucketSelectorAggregation) Script(script *Script) *BucketSelectorAggregation { + a.script = script + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *BucketSelectorAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketSelectorAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *BucketSelectorAggregation) Meta(metaData map[string]interface{}) *BucketSelectorAggregation { + a.meta = metaData + return a +} + +// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. +func (a *BucketSelectorAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketSelectorAggregation { + a.bucketsPathsMap = bucketsPathsMap + return a +} + +// AddBucketsPath adds a bucket path to use for this pipeline aggregator. 
+func (a *BucketSelectorAggregation) AddBucketsPath(name, path string) *BucketSelectorAggregation { + if a.bucketsPathsMap == nil { + a.bucketsPathsMap = make(map[string]string) + } + a.bucketsPathsMap[name] = path + return a +} + +func (a *BucketSelectorAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["bucket_selector"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + } + + // Add buckets paths + if len(a.bucketsPathsMap) > 0 { + params["buckets_path"] = a.bucketsPathsMap + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go new file mode 100644 index 000000000..018eb918f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go @@ -0,0 +1,90 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CumulativeSumAggregation is a parent pipeline aggregation which calculates +// the cumulative sum of a specified metric in a parent histogram (or date_histogram) +// aggregation. 
The specified metric must be numeric and the enclosing +// histogram must have min_doc_count set to 0 (default for histogram aggregations). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html +type CumulativeSumAggregation struct { + format string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewCumulativeSumAggregation creates and initializes a new CumulativeSumAggregation. +func NewCumulativeSumAggregation() *CumulativeSumAggregation { + return &CumulativeSumAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *CumulativeSumAggregation) Format(format string) *CumulativeSumAggregation { + a.format = format + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *CumulativeSumAggregation) SubAggregation(name string, subAggregation Aggregation) *CumulativeSumAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *CumulativeSumAggregation) Meta(metaData map[string]interface{}) *CumulativeSumAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *CumulativeSumAggregation) BucketsPath(bucketsPaths ...string) *CumulativeSumAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *CumulativeSumAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["cumulative_sum"] = params + + if a.format != "" { + params["format"] = a.format + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go new file mode 100644 index 000000000..66611f46e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go @@ -0,0 +1,124 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DerivativeAggregation is a parent pipeline aggregation which calculates +// the derivative of a specified metric in a parent histogram (or date_histogram) +// aggregation. The specified metric must be numeric and the enclosing +// histogram must have min_doc_count set to 0 (default for histogram aggregations). 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html +type DerivativeAggregation struct { + format string + gapPolicy string + unit string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewDerivativeAggregation creates and initializes a new DerivativeAggregation. +func NewDerivativeAggregation() *DerivativeAggregation { + return &DerivativeAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *DerivativeAggregation) Format(format string) *DerivativeAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *DerivativeAggregation) GapPolicy(gapPolicy string) *DerivativeAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *DerivativeAggregation) GapInsertZeros() *DerivativeAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *DerivativeAggregation) GapSkip() *DerivativeAggregation { + a.gapPolicy = "skip" + return a +} + +// Unit sets the unit provided, e.g. "1d" or "1y". +// It is only useful when calculating the derivative using a date_histogram. +func (a *DerivativeAggregation) Unit(unit string) *DerivativeAggregation { + a.unit = unit + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *DerivativeAggregation) SubAggregation(name string, subAggregation Aggregation) *DerivativeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *DerivativeAggregation) Meta(metaData map[string]interface{}) *DerivativeAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *DerivativeAggregation) BucketsPath(bucketsPaths ...string) *DerivativeAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *DerivativeAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["derivative"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.unit != "" { + params["unit"] = a.unit + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go new file mode 100644 index 000000000..da6f9ef36 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go @@ -0,0 +1,114 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// MaxBucketAggregation is a sibling pipeline aggregation which identifies +// the bucket(s) with the maximum value of a specified metric in a sibling +// aggregation and outputs both the value and the key(s) of the bucket(s). +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-max-bucket-aggregation.html +type MaxBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewMaxBucketAggregation creates and initializes a new MaxBucketAggregation. +func NewMaxBucketAggregation() *MaxBucketAggregation { + return &MaxBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MaxBucketAggregation) Format(format string) *MaxBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MaxBucketAggregation) GapPolicy(gapPolicy string) *MaxBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MaxBucketAggregation) GapInsertZeros() *MaxBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MaxBucketAggregation) GapSkip() *MaxBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MaxBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *MaxBucketAggregation) Meta(metaData map[string]interface{}) *MaxBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MaxBucketAggregation) BucketsPath(bucketsPaths ...string) *MaxBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *MaxBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["max_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go new file mode 100644 index 000000000..325f00f03 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go @@ -0,0 +1,114 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+// MinBucketAggregation is a sibling pipeline aggregation which identifies
+// the bucket(s) with the minimum value of a specified metric in a sibling
+// aggregation and outputs both the value and the key(s) of the bucket(s).
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-min-bucket-aggregation.html
+type MinBucketAggregation struct {
+	format    string
+	gapPolicy string
+
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+	bucketsPaths    []string
+}
+
+// NewMinBucketAggregation creates and initializes a new MinBucketAggregation.
+func NewMinBucketAggregation() *MinBucketAggregation {
+	return &MinBucketAggregation{
+		subAggregations: make(map[string]Aggregation),
+		bucketsPaths:    make([]string, 0),
+	}
+}
+
+func (a *MinBucketAggregation) Format(format string) *MinBucketAggregation {
+	a.format = format
+	return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
+func (a *MinBucketAggregation) GapPolicy(gapPolicy string) *MinBucketAggregation {
+	a.gapPolicy = gapPolicy
+	return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *MinBucketAggregation) GapInsertZeros() *MinBucketAggregation {
+	a.gapPolicy = "insert_zeros"
+	return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *MinBucketAggregation) GapSkip() *MinBucketAggregation {
+	a.gapPolicy = "skip"
+	return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *MinBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MinBucketAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *MinBucketAggregation) Meta(metaData map[string]interface{}) *MinBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MinBucketAggregation) BucketsPath(bucketsPaths ...string) *MinBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *MinBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["min_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go new file mode 100644 index 000000000..021144ddc --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go @@ -0,0 +1,393 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MovAvgAggregation operates on a series of data. It will slide a window +// across the data and emit the average value of that window. 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html +type MovAvgAggregation struct { + format string + gapPolicy string + model MovAvgModel + window *int + predict *int + minimize *bool + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewMovAvgAggregation creates and initializes a new MovAvgAggregation. +func NewMovAvgAggregation() *MovAvgAggregation { + return &MovAvgAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MovAvgAggregation) Format(format string) *MovAvgAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MovAvgAggregation) GapPolicy(gapPolicy string) *MovAvgAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MovAvgAggregation) GapInsertZeros() *MovAvgAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MovAvgAggregation) GapSkip() *MovAvgAggregation { + a.gapPolicy = "skip" + return a +} + +// Model is used to define what type of moving average you want to use +// in the series. +func (a *MovAvgAggregation) Model(model MovAvgModel) *MovAvgAggregation { + a.model = model + return a +} + +// Window sets the window size for the moving average. This window will +// "slide" across the series, and the values inside that window will +// be used to calculate the moving avg value. +func (a *MovAvgAggregation) Window(window int) *MovAvgAggregation { + a.window = &window + return a +} + +// Predict sets the number of predictions that should be returned. +// Each prediction will be spaced at the intervals in the histogram. +// E.g. 
a predict of 2 will return two new buckets at the end of the +// histogram with the predicted values. +func (a *MovAvgAggregation) Predict(numPredictions int) *MovAvgAggregation { + a.predict = &numPredictions + return a +} + +// Minimize determines if the model should be fit to the data using a +// cost minimizing algorithm. +func (a *MovAvgAggregation) Minimize(minimize bool) *MovAvgAggregation { + a.minimize = &minimize + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MovAvgAggregation) SubAggregation(name string, subAggregation Aggregation) *MovAvgAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MovAvgAggregation) Meta(metaData map[string]interface{}) *MovAvgAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MovAvgAggregation) BucketsPath(bucketsPaths ...string) *MovAvgAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *MovAvgAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["moving_avg"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.model != nil { + params["model"] = a.model.Name() + settings := a.model.Settings() + if len(settings) > 0 { + params["settings"] = settings + } + } + if a.window != nil { + params["window"] = *a.window + } + if a.predict != nil { + params["predict"] = *a.predict + } + if a.minimize != nil { + params["minimize"] = *a.minimize + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} + +// -- Models for moving averages -- +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_models + +// MovAvgModel specifies the model to use with the MovAvgAggregation. +type MovAvgModel interface { + Name() string + Settings() map[string]interface{} +} + +// -- EWMA -- + +// EWMAMovAvgModel calculates an exponentially weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted +type EWMAMovAvgModel struct { + alpha *float64 +} + +// NewEWMAMovAvgModel creates and initializes a new EWMAMovAvgModel. 
+func NewEWMAMovAvgModel() *EWMAMovAvgModel { + return &EWMAMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *EWMAMovAvgModel) Alpha(alpha float64) *EWMAMovAvgModel { + m.alpha = &alpha + return m +} + +// Name of the model. +func (m *EWMAMovAvgModel) Name() string { + return "ewma" +} + +// Settings of the model. +func (m *EWMAMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + return settings +} + +// -- Holt linear -- + +// HoltLinearMovAvgModel calculates a doubly exponential weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear +type HoltLinearMovAvgModel struct { + alpha *float64 + beta *float64 +} + +// NewHoltLinearMovAvgModel creates and initializes a new HoltLinearMovAvgModel. +func NewHoltLinearMovAvgModel() *HoltLinearMovAvgModel { + return &HoltLinearMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *HoltLinearMovAvgModel) Alpha(alpha float64) *HoltLinearMovAvgModel { + m.alpha = &alpha + return m +} + +// Beta is equivalent to Alpha but controls the smoothing of the trend +// instead of the data. +func (m *HoltLinearMovAvgModel) Beta(beta float64) *HoltLinearMovAvgModel { + m.beta = &beta + return m +} + +// Name of the model. +func (m *HoltLinearMovAvgModel) Name() string { + return "holt" +} + +// Settings of the model. 
+func (m *HoltLinearMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + if m.beta != nil { + settings["beta"] = *m.beta + } + return settings +} + +// -- Holt Winters -- + +// HoltWintersMovAvgModel calculates a triple exponential weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters +type HoltWintersMovAvgModel struct { + alpha *float64 + beta *float64 + gamma *float64 + period *int + seasonalityType string + pad *bool +} + +// NewHoltWintersMovAvgModel creates and initializes a new HoltWintersMovAvgModel. +func NewHoltWintersMovAvgModel() *HoltWintersMovAvgModel { + return &HoltWintersMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *HoltWintersMovAvgModel) Alpha(alpha float64) *HoltWintersMovAvgModel { + m.alpha = &alpha + return m +} + +// Beta is equivalent to Alpha but controls the smoothing of the trend +// instead of the data. +func (m *HoltWintersMovAvgModel) Beta(beta float64) *HoltWintersMovAvgModel { + m.beta = &beta + return m +} + +func (m *HoltWintersMovAvgModel) Gamma(gamma float64) *HoltWintersMovAvgModel { + m.gamma = &gamma + return m +} + +func (m *HoltWintersMovAvgModel) Period(period int) *HoltWintersMovAvgModel { + m.period = &period + return m +} + +func (m *HoltWintersMovAvgModel) SeasonalityType(typ string) *HoltWintersMovAvgModel { + m.seasonalityType = typ + return m +} + +func (m *HoltWintersMovAvgModel) Pad(pad bool) *HoltWintersMovAvgModel { + m.pad = &pad + return m +} + +// Name of the model. 
+func (m *HoltWintersMovAvgModel) Name() string { + return "holt_winters" +} + +// Settings of the model. +func (m *HoltWintersMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + if m.beta != nil { + settings["beta"] = *m.beta + } + if m.gamma != nil { + settings["gamma"] = *m.gamma + } + if m.period != nil { + settings["period"] = *m.period + } + if m.pad != nil { + settings["pad"] = *m.pad + } + if m.seasonalityType != "" { + settings["type"] = m.seasonalityType + } + return settings +} + +// -- Linear -- + +// LinearMovAvgModel calculates a linearly weighted moving average, such +// that older values are linearly less important. "Time" is determined +// by position in collection. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_linear +type LinearMovAvgModel struct { +} + +// NewLinearMovAvgModel creates and initializes a new LinearMovAvgModel. +func NewLinearMovAvgModel() *LinearMovAvgModel { + return &LinearMovAvgModel{} +} + +// Name of the model. +func (m *LinearMovAvgModel) Name() string { + return "linear" +} + +// Settings of the model. +func (m *LinearMovAvgModel) Settings() map[string]interface{} { + return nil +} + +// -- Simple -- + +// SimpleMovAvgModel calculates a simple unweighted (arithmetic) moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_simple +type SimpleMovAvgModel struct { +} + +// NewSimpleMovAvgModel creates and initializes a new SimpleMovAvgModel. +func NewSimpleMovAvgModel() *SimpleMovAvgModel { + return &SimpleMovAvgModel{} +} + +// Name of the model. +func (m *SimpleMovAvgModel) Name() string { + return "simple" +} + +// Settings of the model. 
+func (m *SimpleMovAvgModel) Settings() map[string]interface{} { + return nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go new file mode 100644 index 000000000..db81d3cf4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go @@ -0,0 +1,124 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SerialDiffAggregation implements serial differencing. +// Serial differencing is a technique where values in a time series are +// subtracted from itself at different time lags or periods. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html +type SerialDiffAggregation struct { + format string + gapPolicy string + lag *int + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewSerialDiffAggregation creates and initializes a new SerialDiffAggregation. +func NewSerialDiffAggregation() *SerialDiffAggregation { + return &SerialDiffAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *SerialDiffAggregation) Format(format string) *SerialDiffAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *SerialDiffAggregation) GapPolicy(gapPolicy string) *SerialDiffAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. 
+func (a *SerialDiffAggregation) GapInsertZeros() *SerialDiffAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *SerialDiffAggregation) GapSkip() *SerialDiffAggregation { + a.gapPolicy = "skip" + return a +} + +// Lag specifies the historical bucket to subtract from the current value. +// E.g. a lag of 7 will subtract the current value from the value 7 buckets +// ago. Lag must be a positive, non-zero integer. +func (a *SerialDiffAggregation) Lag(lag int) *SerialDiffAggregation { + a.lag = &lag + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *SerialDiffAggregation) SubAggregation(name string, subAggregation Aggregation) *SerialDiffAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SerialDiffAggregation) Meta(metaData map[string]interface{}) *SerialDiffAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *SerialDiffAggregation) BucketsPath(bucketsPaths ...string) *SerialDiffAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *SerialDiffAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["serial_diff"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.lag != nil { + params["lag"] = *a.lag + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go new file mode 100644 index 000000000..16ef64986 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SumBucketAggregation is a sibling pipeline aggregation which calculates +// the sum across all buckets of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-sum-bucket-aggregation.html +type SumBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewSumBucketAggregation creates and initializes a new SumBucketAggregation. +func NewSumBucketAggregation() *SumBucketAggregation { + return &SumBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *SumBucketAggregation) Format(format string) *SumBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *SumBucketAggregation) GapPolicy(gapPolicy string) *SumBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *SumBucketAggregation) GapInsertZeros() *SumBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *SumBucketAggregation) GapSkip() *SumBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *SumBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *SumBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SumBucketAggregation) Meta(metaData map[string]interface{}) *SumBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *SumBucketAggregation) BucketsPath(bucketsPaths ...string) *SumBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *SumBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["sum_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool.go new file mode 100644 index 000000000..c2cc8697b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool.go @@ -0,0 +1,212 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "fmt" + +// A bool query matches documents matching boolean +// combinations of other queries. +// For more details, see: +// http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html +type BoolQuery struct { + Query + mustClauses []Query + mustNotClauses []Query + filterClauses []Query + shouldClauses []Query + boost *float64 + disableCoord *bool + minimumShouldMatch string + adjustPureNegative *bool + queryName string +} + +// Creates a new bool query. 
+func NewBoolQuery() *BoolQuery { + return &BoolQuery{ + mustClauses: make([]Query, 0), + mustNotClauses: make([]Query, 0), + filterClauses: make([]Query, 0), + shouldClauses: make([]Query, 0), + } +} + +func (q *BoolQuery) Must(queries ...Query) *BoolQuery { + q.mustClauses = append(q.mustClauses, queries...) + return q +} + +func (q *BoolQuery) MustNot(queries ...Query) *BoolQuery { + q.mustNotClauses = append(q.mustNotClauses, queries...) + return q +} + +func (q *BoolQuery) Filter(filters ...Query) *BoolQuery { + q.filterClauses = append(q.filterClauses, filters...) + return q +} + +func (q *BoolQuery) Should(queries ...Query) *BoolQuery { + q.shouldClauses = append(q.shouldClauses, queries...) + return q +} + +func (q *BoolQuery) Boost(boost float64) *BoolQuery { + q.boost = &boost + return q +} + +func (q *BoolQuery) DisableCoord(disableCoord bool) *BoolQuery { + q.disableCoord = &disableCoord + return q +} + +func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *BoolQuery) MinimumNumberShouldMatch(minimumNumberShouldMatch int) *BoolQuery { + q.minimumShouldMatch = fmt.Sprintf("%d", minimumNumberShouldMatch) + return q +} + +func (q *BoolQuery) AdjustPureNegative(adjustPureNegative bool) *BoolQuery { + q.adjustPureNegative = &adjustPureNegative + return q +} + +func (q *BoolQuery) QueryName(queryName string) *BoolQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the bool query. +func (q *BoolQuery) Source() (interface{}, error) { + // { + // "bool" : { + // "must" : { + // "term" : { "user" : "kimchy" } + // }, + // "must_not" : { + // "range" : { + // "age" : { "from" : 10, "to" : 20 } + // } + // }, + // "filter" : [ + // ... 
+ // ] + // "should" : [ + // { + // "term" : { "tag" : "wow" } + // }, + // { + // "term" : { "tag" : "elasticsearch" } + // } + // ], + // "minimum_number_should_match" : 1, + // "boost" : 1.0 + // } + // } + + query := make(map[string]interface{}) + + boolClause := make(map[string]interface{}) + query["bool"] = boolClause + + // must + if len(q.mustClauses) == 1 { + src, err := q.mustClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["must"] = src + } else if len(q.mustClauses) > 1 { + clauses := make([]interface{}, 0) + for _, subQuery := range q.mustClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["must"] = clauses + } + + // must_not + if len(q.mustNotClauses) == 1 { + src, err := q.mustNotClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["must_not"] = src + } else if len(q.mustNotClauses) > 1 { + clauses := make([]interface{}, 0) + for _, subQuery := range q.mustNotClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["must_not"] = clauses + } + + // filter + if len(q.filterClauses) == 1 { + src, err := q.filterClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["filter"] = src + } else if len(q.filterClauses) > 1 { + clauses := make([]interface{}, 0) + for _, subQuery := range q.filterClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["filter"] = clauses + } + + // should + if len(q.shouldClauses) == 1 { + src, err := q.shouldClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["should"] = src + } else if len(q.shouldClauses) > 1 { + clauses := make([]interface{}, 0) + for _, subQuery := range q.shouldClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + 
boolClause["should"] = clauses + } + + if q.boost != nil { + boolClause["boost"] = *q.boost + } + if q.disableCoord != nil { + boolClause["disable_coord"] = *q.disableCoord + } + if q.minimumShouldMatch != "" { + boolClause["minimum_should_match"] = q.minimumShouldMatch + } + if q.adjustPureNegative != nil { + boolClause["adjust_pure_negative"] = *q.adjustPureNegative + } + if q.queryName != "" { + boolClause["_name"] = q.queryName + } + + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting.go new file mode 100644 index 000000000..7f7a53b8b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting.go @@ -0,0 +1,97 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// A boosting query can be used to effectively +// demote results that match a given query. +// For more details, see: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-boosting-query.html +type BoostingQuery struct { + Query + positiveClause Query + negativeClause Query + negativeBoost *float64 + boost *float64 +} + +// Creates a new boosting query. +func NewBoostingQuery() *BoostingQuery { + return &BoostingQuery{} +} + +func (q *BoostingQuery) Positive(positive Query) *BoostingQuery { + q.positiveClause = positive + return q +} + +func (q *BoostingQuery) Negative(negative Query) *BoostingQuery { + q.negativeClause = negative + return q +} + +func (q *BoostingQuery) NegativeBoost(negativeBoost float64) *BoostingQuery { + q.negativeBoost = &negativeBoost + return q +} + +func (q *BoostingQuery) Boost(boost float64) *BoostingQuery { + q.boost = &boost + return q +} + +// Creates the query source for the boosting query. 
+func (q *BoostingQuery) Source() (interface{}, error) { + // { + // "boosting" : { + // "positive" : { + // "term" : { + // "field1" : "value1" + // } + // }, + // "negative" : { + // "term" : { + // "field2" : "value2" + // } + // }, + // "negative_boost" : 0.2 + // } + // } + + query := make(map[string]interface{}) + + boostingClause := make(map[string]interface{}) + query["boosting"] = boostingClause + + // Negative and positive clause as well as negative boost + // are mandatory in the Java client. + + // positive + if q.positiveClause != nil { + src, err := q.positiveClause.Source() + if err != nil { + return nil, err + } + boostingClause["positive"] = src + } + + // negative + if q.negativeClause != nil { + src, err := q.negativeClause.Source() + if err != nil { + return nil, err + } + boostingClause["negative"] = src + } + + if q.negativeBoost != nil { + boostingClause["negative_boost"] = *q.negativeBoost + } + + if q.boost != nil { + boostingClause["boost"] = *q.boost + } + + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go new file mode 100644 index 000000000..d45825067 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go @@ -0,0 +1,146 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CommonTermsQuery is a modern alternative to stopwords +// which improves the precision and recall of search results +// (by taking stopwords into account), without sacrificing performance. 
+// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-common-terms-query.html +type CommonTermsQuery struct { + Query + name string + text interface{} + cutoffFreq *float64 + highFreq *float64 + highFreqOp string + highFreqMinimumShouldMatch string + lowFreq *float64 + lowFreqOp string + lowFreqMinimumShouldMatch string + analyzer string + boost *float64 + disableCoord *bool + queryName string +} + +// NewCommonTermsQuery creates and initializes a new common terms query. +func NewCommonTermsQuery(name string, text interface{}) *CommonTermsQuery { + return &CommonTermsQuery{name: name, text: text} +} + +func (q *CommonTermsQuery) CutoffFrequency(f float64) *CommonTermsQuery { + q.cutoffFreq = &f + return q +} + +func (q *CommonTermsQuery) HighFreq(f float64) *CommonTermsQuery { + q.highFreq = &f + return q +} + +func (q *CommonTermsQuery) HighFreqOperator(op string) *CommonTermsQuery { + q.highFreqOp = op + return q +} + +func (q *CommonTermsQuery) HighFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { + q.highFreqMinimumShouldMatch = minShouldMatch + return q +} + +func (q *CommonTermsQuery) LowFreq(f float64) *CommonTermsQuery { + q.lowFreq = &f + return q +} + +func (q *CommonTermsQuery) LowFreqOperator(op string) *CommonTermsQuery { + q.lowFreqOp = op + return q +} + +func (q *CommonTermsQuery) LowFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { + q.lowFreqMinimumShouldMatch = minShouldMatch + return q +} + +func (q *CommonTermsQuery) Analyzer(analyzer string) *CommonTermsQuery { + q.analyzer = analyzer + return q +} + +func (q *CommonTermsQuery) Boost(boost float64) *CommonTermsQuery { + q.boost = &boost + return q +} + +func (q *CommonTermsQuery) DisableCoord(disableCoord bool) *CommonTermsQuery { + q.disableCoord = &disableCoord + return q +} + +func (q *CommonTermsQuery) QueryName(queryName string) *CommonTermsQuery { + q.queryName = queryName + return q +} + +// Creates the 
query source for the common query. +func (q *CommonTermsQuery) Source() (interface{}, error) { + // { + // "common": { + // "body": { + // "query": "this is bonsai cool", + // "cutoff_frequency": 0.001 + // } + // } + // } + source := make(map[string]interface{}) + body := make(map[string]interface{}) + query := make(map[string]interface{}) + + source["common"] = body + body[q.name] = query + query["query"] = q.text + + if q.cutoffFreq != nil { + query["cutoff_frequency"] = *q.cutoffFreq + } + if q.highFreq != nil { + query["high_freq"] = *q.highFreq + } + if q.highFreqOp != "" { + query["high_freq_operator"] = q.highFreqOp + } + if q.lowFreq != nil { + query["low_freq"] = *q.lowFreq + } + if q.lowFreqOp != "" { + query["low_freq_operator"] = q.lowFreqOp + } + if q.lowFreqMinimumShouldMatch != "" || q.highFreqMinimumShouldMatch != "" { + mm := make(map[string]interface{}) + if q.lowFreqMinimumShouldMatch != "" { + mm["low_freq"] = q.lowFreqMinimumShouldMatch + } + if q.highFreqMinimumShouldMatch != "" { + mm["high_freq"] = q.highFreqMinimumShouldMatch + } + query["minimum_should_match"] = mm + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.disableCoord != nil { + query["disable_coord"] = *q.disableCoord + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go new file mode 100644 index 000000000..c754d279d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go @@ -0,0 +1,59 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+// ConstantScoreQuery is a query that wraps a filter and simply returns
+// a constant score equal to the query boost for every document in the filter.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-constant-score-query.html
+type ConstantScoreQuery struct {
+	filter Query
+	boost  *float64
+}
+
+// NewConstantScoreQuery creates and initializes a new constant score query.
+func NewConstantScoreQuery(filter Query) *ConstantScoreQuery {
+	return &ConstantScoreQuery{
+		filter: filter,
+	}
+}
+
+// Boost sets the boost for this query. Documents matching this query
+// will (in addition to the normal weightings) have their score multiplied
+// by the boost provided.
+func (q *ConstantScoreQuery) Boost(boost float64) *ConstantScoreQuery {
+	q.boost = &boost
+	return q
+}
+
+// Source returns the query source.
+func (q *ConstantScoreQuery) Source() (interface{}, error) {
+	// "constant_score" : {
+	//     "filter" : {
+	//         ....
+	//     },
+	//     "boost" : 1.5
+	// }
+
+	query := make(map[string]interface{})
+
+	params := make(map[string]interface{})
+	query["constant_score"] = params
+
+	// filter
+	src, err := q.filter.Source()
+	if err != nil {
+		return nil, err
+	}
+	params["filter"] = src
+
+	// boost
+	if q.boost != nil {
+		params["boost"] = *q.boost
+	}
+
+	return query, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go
new file mode 100644
index 000000000..c47d6bb12
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go
@@ -0,0 +1,104 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+ +package elastic + +// DisMaxQuery is a query that generates the union of documents produced by +// its subqueries, and that scores each document with the maximum score +// for that document as produced by any subquery, plus a tie breaking +// increment for any additional matching subqueries. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-dis-max-query.html +type DisMaxQuery struct { + queries []Query + boost *float64 + tieBreaker *float64 + queryName string +} + +// NewDisMaxQuery creates and initializes a new dis max query. +func NewDisMaxQuery() *DisMaxQuery { + return &DisMaxQuery{ + queries: make([]Query, 0), + } +} + +// Query adds one or more queries to the dis max query. +func (q *DisMaxQuery) Query(queries ...Query) *DisMaxQuery { + q.queries = append(q.queries, queries...) + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by +// the boost provided. +func (q *DisMaxQuery) Boost(boost float64) *DisMaxQuery { + q.boost = &boost + return q +} + +// TieBreaker is the factor by which the score of each non-maximum disjunct +// for a document is multiplied with and added into the final score. +// +// If non-zero, the value should be small, on the order of 0.1, which says +// that 10 occurrences of word in a lower-scored field that is also in a +// higher scored field is just as good as a unique word in the lower scored +// field (i.e., one that is not in any higher scored field). +func (q *DisMaxQuery) TieBreaker(tieBreaker float64) *DisMaxQuery { + q.tieBreaker = &tieBreaker + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched filters per hit. +func (q *DisMaxQuery) QueryName(queryName string) *DisMaxQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON serializable content for this query. 
+func (q *DisMaxQuery) Source() (interface{}, error) {
+	// {
+	//  "dis_max" : {
+	//    "tie_breaker" : 0.7,
+	//    "boost" : 1.2,
+	//    "queries" : [
+	//      {
+	//        "term" : { "age" : 34 }
+	//      },
+	//      {
+	//        "term" : { "age" : 35 }
+	//      }
+	//    ]
+	//  }
+	// }
+
+	query := make(map[string]interface{})
+	params := make(map[string]interface{})
+	query["dis_max"] = params
+
+	if q.tieBreaker != nil {
+		params["tie_breaker"] = *q.tieBreaker
+	}
+	if q.boost != nil {
+		params["boost"] = *q.boost
+	}
+	if q.queryName != "" {
+		params["_name"] = q.queryName
+	}
+
+	// queries
+	clauses := make([]interface{}, 0)
+	for _, subQuery := range q.queries {
+		src, err := subQuery.Source()
+		if err != nil {
+			return nil, err
+		}
+		clauses = append(clauses, src)
+	}
+	params["queries"] = clauses
+
+	return query, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists.go
new file mode 100644
index 000000000..e117673bd
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists.go
@@ -0,0 +1,49 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExistsQuery is a query that only matches on documents that the field
+// has a value in them.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-exists-query.html
+type ExistsQuery struct {
+	name      string
+	queryName string
+}
+
+// NewExistsQuery creates and initializes a new exists query.
+func NewExistsQuery(name string) *ExistsQuery {
+	return &ExistsQuery{
+		name: name,
+	}
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched queries per hit.
+func (q *ExistsQuery) QueryName(queryName string) *ExistsQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON serializable content for this query. +func (q *ExistsQuery) Source() (interface{}, error) { + // { + // "exists" : { + // "field" : "user" + // } + // } + + query := make(map[string]interface{}) + params := make(map[string]interface{}) + query["exists"] = params + + params["field"] = q.name + if q.queryName != "" { + params["_name"] = q.queryName + } + + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq.go new file mode 100644 index 000000000..b7fa15e67 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq.go @@ -0,0 +1,172 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FunctionScoreQuery allows you to modify the score of documents that +// are retrieved by a query. This can be useful if, for example, +// a score function is computationally expensive and it is sufficient +// to compute the score on a filtered set of documents. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +type FunctionScoreQuery struct { + query Query + filter Query + boost *float64 + maxBoost *float64 + scoreMode string + boostMode string + filters []Query + scoreFuncs []ScoreFunction + minScore *float64 + weight *float64 +} + +// NewFunctionScoreQuery creates and initializes a new function score query. +func NewFunctionScoreQuery() *FunctionScoreQuery { + return &FunctionScoreQuery{ + filters: make([]Query, 0), + scoreFuncs: make([]ScoreFunction, 0), + } +} + +// Query sets the query for the function score query. 
+func (q *FunctionScoreQuery) Query(query Query) *FunctionScoreQuery { + q.query = query + q.filter = nil + return q +} + +// Filter sets the filter for the function score query. +func (q *FunctionScoreQuery) Filter(filter Query) *FunctionScoreQuery { + q.query = nil + q.filter = filter + return q +} + +// Add adds a score function that will execute on all the documents +// matching the filter. +func (q *FunctionScoreQuery) Add(filter Query, scoreFunc ScoreFunction) *FunctionScoreQuery { + q.filters = append(q.filters, filter) + q.scoreFuncs = append(q.scoreFuncs, scoreFunc) + return q +} + +// AddScoreFunc adds a score function that will execute the function on all documents. +func (q *FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) *FunctionScoreQuery { + q.filters = append(q.filters, nil) + q.scoreFuncs = append(q.scoreFuncs, scoreFunc) + return q +} + +// ScoreMode defines how results of individual score functions will be aggregated. +// Can be first, avg, max, sum, min, or multiply. +func (q *FunctionScoreQuery) ScoreMode(scoreMode string) *FunctionScoreQuery { + q.scoreMode = scoreMode + return q +} + +// BoostMode defines how the combined result of score functions will +// influence the final score together with the sub query score. +func (q *FunctionScoreQuery) BoostMode(boostMode string) *FunctionScoreQuery { + q.boostMode = boostMode + return q +} + +// MaxBoost is the maximum boost that will be applied by function score. +func (q *FunctionScoreQuery) MaxBoost(maxBoost float64) *FunctionScoreQuery { + q.maxBoost = &maxBoost + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by the +// boost provided. +func (q *FunctionScoreQuery) Boost(boost float64) *FunctionScoreQuery { + q.boost = &boost + return q +} + +// MinScore sets the minimum score. 
+func (q *FunctionScoreQuery) MinScore(minScore float64) *FunctionScoreQuery { + q.minScore = &minScore + return q +} + +// Source returns JSON for the function score query. +func (q *FunctionScoreQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["function_score"] = query + + if q.query != nil { + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + } else if q.filter != nil { + src, err := q.filter.Source() + if err != nil { + return nil, err + } + query["filter"] = src + } + + if len(q.filters) == 1 && q.filters[0] == nil { + // Weight needs to be serialized on this level. + if weight := q.scoreFuncs[0].GetWeight(); weight != nil { + query["weight"] = weight + } + // Serialize the score function + src, err := q.scoreFuncs[0].Source() + if err != nil { + return nil, err + } + query[q.scoreFuncs[0].Name()] = src + } else { + funcs := make([]interface{}, len(q.filters)) + for i, filter := range q.filters { + hsh := make(map[string]interface{}) + if filter != nil { + src, err := filter.Source() + if err != nil { + return nil, err + } + hsh["filter"] = src + } + // Weight needs to be serialized on this level. 
+ if weight := q.scoreFuncs[i].GetWeight(); weight != nil { + hsh["weight"] = weight + } + // Serialize the score function + src, err := q.scoreFuncs[i].Source() + if err != nil { + return nil, err + } + hsh[q.scoreFuncs[i].Name()] = src + funcs[i] = hsh + } + query["functions"] = funcs + } + + if q.scoreMode != "" { + query["score_mode"] = q.scoreMode + } + if q.boostMode != "" { + query["boost_mode"] = q.boostMode + } + if q.maxBoost != nil { + query["max_boost"] = *q.maxBoost + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.minScore != nil { + query["min_score"] = *q.minScore + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go new file mode 100644 index 000000000..fbce3577d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go @@ -0,0 +1,567 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "strings" +) + +// ScoreFunction is used in combination with the Function Score Query. +type ScoreFunction interface { + Name() string + GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery + Source() (interface{}, error) +} + +// -- Exponential Decay -- + +// ExponentialDecayFunction builds an exponential decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +// for details. +type ExponentialDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewExponentialDecayFunction creates a new ExponentialDecayFunction. 
+func NewExponentialDecayFunction() *ExponentialDecayFunction { + return &ExponentialDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *ExponentialDecayFunction) Name() string { + return "exp" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *ExponentialDecayFunction) FieldName(fieldName string) *ExponentialDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *ExponentialDecayFunction) Origin(origin interface{}) *ExponentialDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *ExponentialDecayFunction) Scale(scale interface{}) *ExponentialDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *ExponentialDecayFunction) Decay(decay float64) *ExponentialDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *ExponentialDecayFunction) Offset(offset interface{}) *ExponentialDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *ExponentialDecayFunction) Weight(weight float64) *ExponentialDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. 
+func (fn *ExponentialDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *ExponentialDecayFunction) MultiValueMode(mode string) *ExponentialDecayFunction { + fn.multiValueMode = mode + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *ExponentialDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + return source, nil +} + +// -- Gauss Decay -- + +// GaussDecayFunction builds a gauss decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +// for details. +type GaussDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewGaussDecayFunction returns a new GaussDecayFunction. +func NewGaussDecayFunction() *GaussDecayFunction { + return &GaussDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *GaussDecayFunction) Name() string { + return "gauss" +} + +// FieldName specifies the name of the field to which this decay function is applied to. 
+func (fn *GaussDecayFunction) FieldName(fieldName string) *GaussDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *GaussDecayFunction) Origin(origin interface{}) *GaussDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *GaussDecayFunction) Scale(scale interface{}) *GaussDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *GaussDecayFunction) Decay(decay float64) *GaussDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *GaussDecayFunction) Offset(offset interface{}) *GaussDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *GaussDecayFunction) Weight(weight float64) *GaussDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *GaussDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *GaussDecayFunction) MultiValueMode(mode string) *GaussDecayFunction { + fn.multiValueMode = mode + return fn +} + +// Source returns the serializable JSON data of this score function. 
+func (fn *GaussDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Linear Decay -- + +// LinearDecayFunction builds a linear decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +// for details. +type LinearDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewLinearDecayFunction initializes and returns a new LinearDecayFunction. +func NewLinearDecayFunction() *LinearDecayFunction { + return &LinearDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *LinearDecayFunction) Name() string { + return "linear" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *LinearDecayFunction) FieldName(fieldName string) *LinearDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *LinearDecayFunction) Origin(origin interface{}) *LinearDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. 
+func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *LinearDecayFunction) Decay(decay float64) *LinearDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *LinearDecayFunction) Offset(offset interface{}) *LinearDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *LinearDecayFunction) Weight(weight float64) *LinearDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *LinearDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *LinearDecayFunction) MultiValueMode(mode string) *LinearDecayFunction { + fn.multiValueMode = mode + return fn +} + +// GetMultiValueMode returns how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *LinearDecayFunction) GetMultiValueMode() string { + return fn.multiValueMode +} + +// Source returns the serializable JSON data of this score function. 
+func (fn *LinearDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Script -- + +// ScriptFunction builds a script score function. It uses a script to +// compute or influence the score of documents that match with the inner +// query or filter. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_script_score +// for details. +type ScriptFunction struct { + script *Script + weight *float64 +} + +// NewScriptFunction initializes and returns a new ScriptFunction. +func NewScriptFunction(script *Script) *ScriptFunction { + return &ScriptFunction{ + script: script, + } +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *ScriptFunction) Name() string { + return "script_score" +} + +// Script specifies the script to be executed. +func (fn *ScriptFunction) Script(script *Script) *ScriptFunction { + fn.script = script + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *ScriptFunction) Weight(weight float64) *ScriptFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. 
+func (fn *ScriptFunction) GetWeight() *float64 { + return fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *ScriptFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.script != nil { + src, err := fn.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Field value factor -- + +// FieldValueFactorFunction is a function score function that allows you +// to use a field from a document to influence the score. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_field_value_factor. +type FieldValueFactorFunction struct { + field string + factor *float64 + missing *float64 + weight *float64 + modifier string +} + +// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction. +func NewFieldValueFactorFunction() *FieldValueFactorFunction { + return &FieldValueFactorFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *FieldValueFactorFunction) Name() string { + return "field_value_factor" +} + +// Field is the field to be extracted from the document. +func (fn *FieldValueFactorFunction) Field(field string) *FieldValueFactorFunction { + fn.field = field + return fn +} + +// Factor is the (optional) factor to multiply the field with. If you do not +// specify a factor, the default is 1. +func (fn *FieldValueFactorFunction) Factor(factor float64) *FieldValueFactorFunction { + fn.factor = &factor + return fn +} + +// Modifier to apply to the field value. It can be one of: none, log, log1p, +// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none. 
+func (fn *FieldValueFactorFunction) Modifier(modifier string) *FieldValueFactorFunction { + fn.modifier = modifier + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *FieldValueFactorFunction) Weight(weight float64) *FieldValueFactorFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *FieldValueFactorFunction) GetWeight() *float64 { + return fn.weight +} + +// Missing is used if a document does not have that field. +func (fn *FieldValueFactorFunction) Missing(missing float64) *FieldValueFactorFunction { + fn.missing = &missing + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *FieldValueFactorFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.field != "" { + source["field"] = fn.field + } + if fn.factor != nil { + source["factor"] = *fn.factor + } + if fn.missing != nil { + source["missing"] = *fn.missing + } + if fn.modifier != "" { + source["modifier"] = strings.ToLower(fn.modifier) + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Weight Factor -- + +// WeightFactorFunction builds a weight factor function that multiplies +// the weight to the score. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_weight +// for details. +type WeightFactorFunction struct { + weight float64 +} + +// NewWeightFactorFunction initializes and returns a new WeightFactorFunction. 
+func NewWeightFactorFunction(weight float64) *WeightFactorFunction { + return &WeightFactorFunction{weight: weight} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *WeightFactorFunction) Name() string { + return "weight" +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *WeightFactorFunction) Weight(weight float64) *WeightFactorFunction { + fn.weight = weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *WeightFactorFunction) GetWeight() *float64 { + return &fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *WeightFactorFunction) Source() (interface{}, error) { + // Notice that the weight has to be serialized in FunctionScoreQuery. + return fn.weight, nil +} + +// -- Random -- + +// RandomFunction builds a random score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_random +// for details. +type RandomFunction struct { + seed interface{} + weight *float64 +} + +// NewRandomFunction initializes and returns a new RandomFunction. +func NewRandomFunction() *RandomFunction { + return &RandomFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *RandomFunction) Name() string { + return "random_score" +} + +// Seed is documented in 1.6 as a numeric value. However, in the source code +// of the Java client, it also accepts strings. So we accept both here, too. 
+func (fn *RandomFunction) Seed(seed interface{}) *RandomFunction { + fn.seed = seed + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *RandomFunction) Weight(weight float64) *RandomFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *RandomFunction) GetWeight() *float64 { + return fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *RandomFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.seed != nil { + source["seed"] = fn.seed + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go new file mode 100644 index 000000000..da79dc7e6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go @@ -0,0 +1,120 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FuzzyQuery uses similarity based on Levenshtein edit distance for +// string fields, and a +/- margin on numeric and date fields. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html +type FuzzyQuery struct { + name string + value interface{} + boost *float64 + fuzziness interface{} + prefixLength *int + maxExpansions *int + transpositions *bool + rewrite string + queryName string +} + +// NewFuzzyQuery creates a new fuzzy query. 
+func NewFuzzyQuery(name string, value interface{}) *FuzzyQuery { + q := &FuzzyQuery{ + name: name, + value: value, + } + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by +// the boost provided. +func (q *FuzzyQuery) Boost(boost float64) *FuzzyQuery { + q.boost = &boost + return q +} + +// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings +// like "auto", "0..1", "1..4" or "0.0..1.0". +func (q *FuzzyQuery) Fuzziness(fuzziness interface{}) *FuzzyQuery { + q.fuzziness = fuzziness + return q +} + +func (q *FuzzyQuery) PrefixLength(prefixLength int) *FuzzyQuery { + q.prefixLength = &prefixLength + return q +} + +func (q *FuzzyQuery) MaxExpansions(maxExpansions int) *FuzzyQuery { + q.maxExpansions = &maxExpansions + return q +} + +func (q *FuzzyQuery) Transpositions(transpositions bool) *FuzzyQuery { + q.transpositions = &transpositions + return q +} + +func (q *FuzzyQuery) Rewrite(rewrite string) *FuzzyQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *FuzzyQuery) QueryName(queryName string) *FuzzyQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. 
+func (q *FuzzyQuery) Source() (interface{}, error) { + // { + // "fuzzy" : { + // "user" : { + // "value" : "ki", + // "boost" : 1.0, + // "fuzziness" : 2, + // "prefix_length" : 0, + // "max_expansions" : 100 + // } + // } + + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["fuzzy"] = query + + fq := make(map[string]interface{}) + query[q.name] = fq + + fq["value"] = q.value + + if q.boost != nil { + fq["boost"] = *q.boost + } + if q.transpositions != nil { + fq["transpositions"] = *q.transpositions + } + if q.fuzziness != nil { + fq["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + fq["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + fq["max_expansions"] = *q.maxExpansions + } + if q.rewrite != "" { + fq["rewrite"] = q.rewrite + } + if q.queryName != "" { + fq["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go new file mode 100644 index 000000000..808ce82df --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go @@ -0,0 +1,121 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// GeoBoundingBoxQuery allows to filter hits based on a point location using +// a bounding box. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-bounding-box-query.html +type GeoBoundingBoxQuery struct { + name string + top *float64 + left *float64 + bottom *float64 + right *float64 + typ string + queryName string +} + +// NewGeoBoundingBoxQuery creates and initializes a new GeoBoundingBoxQuery. 
+func NewGeoBoundingBoxQuery(name string) *GeoBoundingBoxQuery { + return &GeoBoundingBoxQuery{ + name: name, + } +} + +func (q *GeoBoundingBoxQuery) TopLeft(top, left float64) *GeoBoundingBoxQuery { + q.top = &top + q.left = &left + return q +} + +func (q *GeoBoundingBoxQuery) TopLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.TopLeft(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) BottomRight(bottom, right float64) *GeoBoundingBoxQuery { + q.bottom = &bottom + q.right = &right + return q +} + +func (q *GeoBoundingBoxQuery) BottomRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.BottomRight(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) BottomLeft(bottom, left float64) *GeoBoundingBoxQuery { + q.bottom = &bottom + q.left = &left + return q +} + +func (q *GeoBoundingBoxQuery) BottomLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.BottomLeft(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) TopRight(top, right float64) *GeoBoundingBoxQuery { + q.top = &top + q.right = &right + return q +} + +func (q *GeoBoundingBoxQuery) TopRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.TopRight(point.Lat, point.Lon) +} + +// Type sets the type of executing the geo bounding box. It can be either +// memory or indexed. It defaults to memory. +func (q *GeoBoundingBoxQuery) Type(typ string) *GeoBoundingBoxQuery { + q.typ = typ + return q +} + +func (q *GeoBoundingBoxQuery) QueryName(queryName string) *GeoBoundingBoxQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *GeoBoundingBoxQuery) Source() (interface{}, error) { + // { + // "geo_bbox" : { + // ... 
+ // } + // } + + if q.top == nil { + return nil, errors.New("geo_bounding_box requires top latitude to be set") + } + if q.bottom == nil { + return nil, errors.New("geo_bounding_box requires bottom latitude to be set") + } + if q.right == nil { + return nil, errors.New("geo_bounding_box requires right longitude to be set") + } + if q.left == nil { + return nil, errors.New("geo_bounding_box requires left longitude to be set") + } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["geo_bbox"] = params + + box := make(map[string]interface{}) + box["top_left"] = []float64{*q.left, *q.top} + box["bottom_right"] = []float64{*q.right, *q.bottom} + params[q.name] = box + + if q.typ != "" { + params["type"] = q.typ + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go new file mode 100644 index 000000000..c1eed8521 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go @@ -0,0 +1,116 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoDistanceQuery filters documents that include only hits that exists +// within a specific distance from a geo point. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-query.html +type GeoDistanceQuery struct { + name string + distance string + lat float64 + lon float64 + geohash string + distanceType string + optimizeBbox string + queryName string +} + +// NewGeoDistanceQuery creates and initializes a new GeoDistanceQuery. 
+func NewGeoDistanceQuery(name string) *GeoDistanceQuery { + return &GeoDistanceQuery{name: name} +} + +func (q *GeoDistanceQuery) GeoPoint(point *GeoPoint) *GeoDistanceQuery { + q.lat = point.Lat + q.lon = point.Lon + return q +} + +func (q *GeoDistanceQuery) Point(lat, lon float64) *GeoDistanceQuery { + q.lat = lat + q.lon = lon + return q +} + +func (q *GeoDistanceQuery) Lat(lat float64) *GeoDistanceQuery { + q.lat = lat + return q +} + +func (q *GeoDistanceQuery) Lon(lon float64) *GeoDistanceQuery { + q.lon = lon + return q +} + +func (q *GeoDistanceQuery) GeoHash(geohash string) *GeoDistanceQuery { + q.geohash = geohash + return q +} + +func (q *GeoDistanceQuery) Distance(distance string) *GeoDistanceQuery { + q.distance = distance + return q +} + +func (q *GeoDistanceQuery) DistanceType(distanceType string) *GeoDistanceQuery { + q.distanceType = distanceType + return q +} + +func (q *GeoDistanceQuery) OptimizeBbox(optimizeBbox string) *GeoDistanceQuery { + q.optimizeBbox = optimizeBbox + return q +} + +func (q *GeoDistanceQuery) QueryName(queryName string) *GeoDistanceQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. 
+func (q *GeoDistanceQuery) Source() (interface{}, error) { + // { + // "geo_distance" : { + // "distance" : "200km", + // "pin.location" : { + // "lat" : 40, + // "lon" : -70 + // } + // } + // } + + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + + if q.geohash != "" { + params[q.name] = q.geohash + } else { + location := make(map[string]interface{}) + location["lat"] = q.lat + location["lon"] = q.lon + params[q.name] = location + } + + if q.distance != "" { + params["distance"] = q.distance + } + if q.distanceType != "" { + params["distance_type"] = q.distanceType + } + if q.optimizeBbox != "" { + params["optimize_bbox"] = q.optimizeBbox + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + source["geo_distance"] = params + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go new file mode 100644 index 000000000..b08d7078a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go @@ -0,0 +1,72 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoPolygonQuery allows to include hits that only fall within a polygon of points. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-polygon-query.html +type GeoPolygonQuery struct { + name string + points []*GeoPoint + queryName string +} + +// NewGeoPolygonQuery creates and initializes a new GeoPolygonQuery. +func NewGeoPolygonQuery(name string) *GeoPolygonQuery { + return &GeoPolygonQuery{ + name: name, + points: make([]*GeoPoint, 0), + } +} + +// AddPoint adds a point from latitude and longitude. 
+func (q *GeoPolygonQuery) AddPoint(lat, lon float64) *GeoPolygonQuery { + q.points = append(q.points, GeoPointFromLatLon(lat, lon)) + return q +} + +// AddGeoPoint adds a GeoPoint. +func (q *GeoPolygonQuery) AddGeoPoint(point *GeoPoint) *GeoPolygonQuery { + q.points = append(q.points, point) + return q +} + +func (q *GeoPolygonQuery) QueryName(queryName string) *GeoPolygonQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *GeoPolygonQuery) Source() (interface{}, error) { + // "geo_polygon" : { + // "person.location" : { + // "points" : [ + // {"lat" : 40, "lon" : -70}, + // {"lat" : 30, "lon" : -80}, + // {"lat" : 20, "lon" : -90} + // ] + // } + // } + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + source["geo_polygon"] = params + + polygon := make(map[string]interface{}) + params[q.name] = polygon + + points := make([]interface{}, 0) + for _, point := range q.points { + points = append(points, point.Source()) + } + polygon["points"] = points + + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child.go new file mode 100644 index 000000000..a8907546b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child.go @@ -0,0 +1,129 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasChildQuery accepts a query and the child type to run against, and results +// in parent documents that have child docs matching the query. 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-has-child-query.html +type HasChildQuery struct { + query Query + childType string + boost *float64 + scoreType string + minChildren *int + maxChildren *int + shortCircuitCutoff *int + queryName string + innerHit *InnerHit +} + +// NewHasChildQuery creates and initializes a new has_child query. +func NewHasChildQuery(childType string, query Query) *HasChildQuery { + return &HasChildQuery{ + query: query, + childType: childType, + } +} + +// Boost sets the boost for this query. +func (q *HasChildQuery) Boost(boost float64) *HasChildQuery { + q.boost = &boost + return q +} + +// ScoreType defines how the scores from the matching child documents +// are mapped into the parent document. +func (q *HasChildQuery) ScoreType(scoreType string) *HasChildQuery { + q.scoreType = scoreType + return q +} + +// MinChildren defines the minimum number of children that are required +// to match for the parent to be considered a match. +func (q *HasChildQuery) MinChildren(minChildren int) *HasChildQuery { + q.minChildren = &minChildren + return q +} + +// MaxChildren defines the maximum number of children that are required +// to match for the parent to be considered a match. +func (q *HasChildQuery) MaxChildren(maxChildren int) *HasChildQuery { + q.maxChildren = &maxChildren + return q +} + +// ShortCircuitCutoff configures what cut off point only to evaluate +// parent documents that contain the matching parent id terms instead +// of evaluating all parent docs. +func (q *HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) *HasChildQuery { + q.shortCircuitCutoff = &shortCircuitCutoff + return q +} + +// QueryName specifies the query name for the filter that can be used when +// searching for matched filters per hit. 
+func (q *HasChildQuery) QueryName(queryName string) *HasChildQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this query and +// reusing the defined type and query. +func (q *HasChildQuery) InnerHit(innerHit *InnerHit) *HasChildQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the function score query. +func (q *HasChildQuery) Source() (interface{}, error) { + // { + // "has_child" : { + // "type" : "blog_tag", + // "query" : { + // "term" : { + // "tag" : "something" + // } + // } + // } + // } + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["has_child"] = query + + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + query["type"] = q.childType + if q.boost != nil { + query["boost"] = *q.boost + } + if q.scoreType != "" { + query["score_type"] = q.scoreType + } + if q.minChildren != nil { + query["min_children"] = *q.minChildren + } + if q.maxChildren != nil { + query["max_children"] = *q.maxChildren + } + if q.shortCircuitCutoff != nil { + query["short_circuit_cutoff"] = *q.shortCircuitCutoff + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + query["inner_hits"] = src + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go new file mode 100644 index 000000000..4db1dde7e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go @@ -0,0 +1,97 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasParentQuery accepts a query and a parent type. 
The query is executed +// in the parent document space which is specified by the parent type. +// This query returns child documents which associated parents have matched. +// For the rest has_parent query has the same options and works in the +// same manner as has_child query. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-has-parent-query.html +type HasParentQuery struct { + query Query + parentType string + boost *float64 + scoreType string + queryName string + innerHit *InnerHit +} + +// NewHasParentQuery creates and initializes a new has_parent query. +func NewHasParentQuery(parentType string, query Query) *HasParentQuery { + return &HasParentQuery{ + query: query, + parentType: parentType, + } +} + +// Boost sets the boost for this query. +func (q *HasParentQuery) Boost(boost float64) *HasParentQuery { + q.boost = &boost + return q +} + +// ScoreType defines how the parent score is mapped into the child documents. +func (q *HasParentQuery) ScoreType(scoreType string) *HasParentQuery { + q.scoreType = scoreType + return q +} + +// QueryName specifies the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *HasParentQuery) QueryName(queryName string) *HasParentQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this query and +// reusing the defined type and query. +func (q *HasParentQuery) InnerHit(innerHit *InnerHit) *HasParentQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the function score query. 
+func (q *HasParentQuery) Source() (interface{}, error) { + // { + // "has_parent" : { + // "parent_type" : "blog", + // "query" : { + // "term" : { + // "tag" : "something" + // } + // } + // } + // } + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["has_parent"] = query + + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + query["parent_type"] = q.parentType + if q.boost != nil { + query["boost"] = *q.boost + } + if q.scoreType != "" { + query["score_type"] = q.scoreType + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + query["inner_hits"] = src + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids.go new file mode 100644 index 000000000..96f463dc6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids.go @@ -0,0 +1,76 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// IdsQuery filters documents that only have the provided ids. +// Note, this query uses the _uid field. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-ids-query.html +type IdsQuery struct { + types []string + values []string + boost *float64 + queryName string +} + +// NewIdsQuery creates and initializes a new ids query. +func NewIdsQuery(types ...string) *IdsQuery { + return &IdsQuery{ + types: types, + values: make([]string, 0), + } +} + +// Ids adds ids to the filter. +func (q *IdsQuery) Ids(ids ...string) *IdsQuery { + q.values = append(q.values, ids...) + return q +} + +// Boost sets the boost for this query. 
+func (q *IdsQuery) Boost(boost float64) *IdsQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter. +func (q *IdsQuery) QueryName(queryName string) *IdsQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *IdsQuery) Source() (interface{}, error) { + // { + // "ids" : { + // "type" : "my_type", + // "values" : ["1", "4", "100"] + // } + // } + + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["ids"] = query + + // type(s) + if len(q.types) == 1 { + query["type"] = q.types[0] + } else if len(q.types) > 1 { + query["types"] = q.types + } + + // values + query["values"] = q.values + + if q.boost != nil { + query["boost"] = *q.boost + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices.go new file mode 100644 index 000000000..56efab3dd --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices.go @@ -0,0 +1,89 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// IndicesQuery can be used when executed across multiple indices, allowing +// to have a query that executes only when executed on an index that matches +// a specific list of indices, and another query that executes when it is +// executed on an index that does not match the listed indices. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-indices-query.html +type IndicesQuery struct { + query Query + indices []string + noMatchQueryType string + noMatchQuery Query + queryName string +} + +// NewIndicesQuery creates and initializes a new indices query. 
+func NewIndicesQuery(query Query, indices ...string) *IndicesQuery { + return &IndicesQuery{ + query: query, + indices: indices, + } +} + +// NoMatchQuery sets the query to use when it executes on an index that +// does not match the indices provided. +func (q *IndicesQuery) NoMatchQuery(query Query) *IndicesQuery { + q.noMatchQuery = query + return q +} + +// NoMatchQueryType sets the no match query which can be either all or none. +func (q *IndicesQuery) NoMatchQueryType(typ string) *IndicesQuery { + q.noMatchQueryType = typ + return q +} + +// QueryName sets the query name for the filter. +func (q *IndicesQuery) QueryName(queryName string) *IndicesQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *IndicesQuery) Source() (interface{}, error) { + // { + // "indices" : { + // "indices" : ["index1", "index2"], + // "query" : { + // "term" : { "tag" : "wow" } + // }, + // "no_match_query" : { + // "term" : { "tag" : "kow" } + // } + // } + // } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["indices"] = params + + params["indices"] = q.indices + + src, err := q.query.Source() + if err != nil { + return nil, err + } + params["query"] = src + + if q.noMatchQuery != nil { + src, err := q.noMatchQuery.Source() + if err != nil { + return nil, err + } + params["no_match_query"] = src + } else if q.noMatchQueryType != "" { + params["no_match_query"] = q.noMatchQueryType + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_match.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match.go new file mode 100644 index 000000000..b740b0f0d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match.go @@ -0,0 +1,214 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MatchQuery is a family of queries that accepts text/numerics/dates, +// analyzes them, and constructs a query. +// +// To create a new MatchQuery, use NewMatchQuery. To create specific types +// of queries, e.g. a match_phrase query, use NewMatchPhrQuery(...).Type("phrase"), +// or use one of the shortcuts e.g. NewMatchPhraseQuery(...). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html +type MatchQuery struct { + name string + text interface{} + typ string // boolean, phrase, phrase_prefix + operator string // or / and + analyzer string + boost *float64 + slop *int + fuzziness string + prefixLength *int + maxExpansions *int + minimumShouldMatch string + rewrite string + fuzzyRewrite string + lenient *bool + fuzzyTranspositions *bool + zeroTermsQuery string + cutoffFrequency *float64 + queryName string +} + +// NewMatchQuery creates and initializes a new MatchQuery. +func NewMatchQuery(name string, text interface{}) *MatchQuery { + return &MatchQuery{name: name, text: text} +} + +// NewMatchPhraseQuery creates and initializes a new MatchQuery of type phrase. +func NewMatchPhraseQuery(name string, text interface{}) *MatchQuery { + return &MatchQuery{name: name, text: text, typ: "phrase"} +} + +// NewMatchPhrasePrefixQuery creates and initializes a new MatchQuery of type phrase_prefix. +func NewMatchPhrasePrefixQuery(name string, text interface{}) *MatchQuery { + return &MatchQuery{name: name, text: text, typ: "phrase_prefix"} +} + +// Type can be "boolean", "phrase", or "phrase_prefix". Defaults to "boolean". +func (q *MatchQuery) Type(typ string) *MatchQuery { + q.typ = typ + return q +} + +// Operator sets the operator to use when using a boolean query. +// Can be "AND" or "OR" (default). 
+func (q *MatchQuery) Operator(operator string) *MatchQuery { + q.operator = operator + return q +} + +// Analyzer explicitly sets the analyzer to use. It defaults to use explicit +// mapping config for the field, or, if not set, the default search analyzer. +func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery { + q.analyzer = analyzer + return q +} + +// Boost sets the boost to apply to this query. +func (q *MatchQuery) Boost(boost float64) *MatchQuery { + q.boost = &boost + return q +} + +// Slop sets the phrase slop if evaluated to a phrase query type. +func (q *MatchQuery) Slop(slop int) *MatchQuery { + q.slop = &slop + return q +} + +// Fuzziness sets the fuzziness when evaluated to a fuzzy query type. +// Defaults to "AUTO". +func (q *MatchQuery) Fuzziness(fuzziness string) *MatchQuery { + q.fuzziness = fuzziness + return q +} + +func (q *MatchQuery) PrefixLength(prefixLength int) *MatchQuery { + q.prefixLength = &prefixLength + return q +} + +// MaxExpansions is used with fuzzy or prefix type queries. It specifies +// the number of term expansions to use. It defaults to unbounded so that +// its recommended to set it to a reasonable value for faster execution. +func (q *MatchQuery) MaxExpansions(maxExpansions int) *MatchQuery { + q.maxExpansions = &maxExpansions + return q +} + +// CutoffFrequency can be a value in [0..1] (or an absolute number >=1). +// It represents the maximum treshold of a terms document frequency to be +// considered a low frequency term. 
+func (q *MatchQuery) CutoffFrequency(cutoff float64) *MatchQuery { + q.cutoffFrequency = &cutoff + return q +} + +func (q *MatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MatchQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *MatchQuery) Rewrite(rewrite string) *MatchQuery { + q.rewrite = rewrite + return q +} + +func (q *MatchQuery) FuzzyRewrite(fuzzyRewrite string) *MatchQuery { + q.fuzzyRewrite = fuzzyRewrite + return q +} + +func (q *MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) *MatchQuery { + q.fuzzyTranspositions = &fuzzyTranspositions + return q +} + +// Lenient specifies whether format based failures will be ignored. +func (q *MatchQuery) Lenient(lenient bool) *MatchQuery { + q.lenient = &lenient + return q +} + +// ZeroTermsQuery can be "all" or "none". +func (q *MatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MatchQuery { + q.zeroTermsQuery = zeroTermsQuery + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *MatchQuery) QueryName(queryName string) *MatchQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. 
+func (q *MatchQuery) Source() (interface{}, error) { + // {"match":{"name":{"query":"value","type":"boolean/phrase"}}} + source := make(map[string]interface{}) + + match := make(map[string]interface{}) + source["match"] = match + + query := make(map[string]interface{}) + match[q.name] = query + + query["query"] = q.text + + if q.typ != "" { + query["type"] = q.typ + } + if q.operator != "" { + query["operator"] = q.operator + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.slop != nil { + query["slop"] = *q.slop + } + if q.fuzziness != "" { + query["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + query["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + query["max_expansions"] = *q.maxExpansions + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.rewrite != "" { + query["rewrite"] = q.rewrite + } + if q.fuzzyRewrite != "" { + query["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.fuzzyTranspositions != nil { + query["fuzzy_transpositions"] = *q.fuzzyTranspositions + } + if q.zeroTermsQuery != "" { + query["zero_terms_query"] = q.zeroTermsQuery + } + if q.cutoffFrequency != nil { + query["cutoff_frequency"] = q.cutoffFrequency + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all.go new file mode 100644 index 000000000..5b5ca590e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all.go @@ -0,0 +1,41 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// MatchAllQuery is the most simple query, which matches all documents, +// giving them all a _score of 1.0. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-match-all-query.html +type MatchAllQuery struct { + boost *float64 +} + +// NewMatchAllQuery creates and initializes a new match all query. +func NewMatchAllQuery() *MatchAllQuery { + return &MatchAllQuery{} +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by the +// boost provided. +func (q *MatchAllQuery) Boost(boost float64) *MatchAllQuery { + q.boost = &boost + return q +} + +// Source returns JSON for the function score query. +func (q MatchAllQuery) Source() (interface{}, error) { + // { + // "match_all" : { ... } + // } + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["match_all"] = params + if q.boost != nil { + params["boost"] = *q.boost + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_missing.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_missing.go new file mode 100644 index 000000000..0fff3f55c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_missing.go @@ -0,0 +1,67 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MissingQuery returns documents that have only null values or no value +// in the original field. +// +// For details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-missing-query.html +type MissingQuery struct { + name string + queryName string + nullValue *bool + existence *bool +} + +// NewMissingQuery creates and initializes a new MissingQuery. 
+func NewMissingQuery(name string) *MissingQuery { + return &MissingQuery{name: name} +} + +// QueryName sets the query name for the query that can be used when +// searching for matched filters hit. +func (q *MissingQuery) QueryName(queryName string) *MissingQuery { + q.queryName = queryName + return q +} + +// NullValue indicates whether the missing filter automatically includes +// fields with null value configured in the mappings. Defaults to false. +func (q *MissingQuery) NullValue(nullValue bool) *MissingQuery { + q.nullValue = &nullValue + return q +} + +// Existence indicates whether the missing filter includes documents where +// the field doesn't exist in the docs. +func (q *MissingQuery) Existence(existence bool) *MissingQuery { + q.existence = &existence + return q +} + +// Source returns JSON for the query. +func (q *MissingQuery) Source() (interface{}, error) { + // { + // "missing" : { + // "field" : "..." + // } + // } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["missing"] = params + params["field"] = q.name + if q.nullValue != nil { + params["null_value"] = *q.nullValue + } + if q.existence != nil { + params["existence"] = *q.existence + } + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go new file mode 100644 index 000000000..afce3f05c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go @@ -0,0 +1,412 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// MoreLikeThis query (MLT Query) finds documents that are "like" a given +// set of documents. 
In order to do so, MLT selects a set of representative +// terms of these input documents, forms a query using these terms, executes +// the query and returns the results. The user controls the input documents, +// how the terms should be selected and how the query is formed. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html +type MoreLikeThisQuery struct { + fields []string + docs []*MoreLikeThisQueryItem + unlikeDocs []*MoreLikeThisQueryItem + include *bool + minimumShouldMatch string + minTermFreq *int + maxQueryTerms *int + stopWords []string + minDocFreq *int + maxDocFreq *int + minWordLen *int + maxWordLen *int + boostTerms *float64 + boost *float64 + analyzer string + failOnUnsupportedField *bool + queryName string +} + +// NewMoreLikeThisQuery creates and initializes a new MoreLikeThisQuery. +func NewMoreLikeThisQuery() *MoreLikeThisQuery { + return &MoreLikeThisQuery{ + fields: make([]string, 0), + stopWords: make([]string, 0), + docs: make([]*MoreLikeThisQueryItem, 0), + unlikeDocs: make([]*MoreLikeThisQueryItem, 0), + } +} + +// Field adds one or more field names to the query. +func (q *MoreLikeThisQuery) Field(fields ...string) *MoreLikeThisQuery { + q.fields = append(q.fields, fields...) + return q +} + +// StopWord sets the stopwords. Any word in this set is considered +// "uninteresting" and ignored. Even if your Analyzer allows stopwords, +// you might want to tell the MoreLikeThis code to ignore them, as for +// the purposes of document similarity it seems reasonable to assume that +// "a stop word is never interesting". +func (q *MoreLikeThisQuery) StopWord(stopWords ...string) *MoreLikeThisQuery { + q.stopWords = append(q.stopWords, stopWords...) + return q +} + +// LikeText sets the text to use in order to find documents that are "like" this. 
+func (q *MoreLikeThisQuery) LikeText(likeTexts ...string) *MoreLikeThisQuery { + for _, s := range likeTexts { + item := NewMoreLikeThisQueryItem().LikeText(s) + q.docs = append(q.docs, item) + } + return q +} + +// LikeItems sets the documents to use in order to find documents that are "like" this. +func (q *MoreLikeThisQuery) LikeItems(docs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery { + q.docs = append(q.docs, docs...) + return q +} + +// IgnoreLikeText sets the text from which the terms should not be selected from. +func (q *MoreLikeThisQuery) IgnoreLikeText(ignoreLikeText ...string) *MoreLikeThisQuery { + for _, s := range ignoreLikeText { + item := NewMoreLikeThisQueryItem().LikeText(s) + q.unlikeDocs = append(q.unlikeDocs, item) + } + return q +} + +// IgnoreLikeItems sets the documents from which the terms should not be selected from. +func (q *MoreLikeThisQuery) IgnoreLikeItems(ignoreDocs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery { + q.unlikeDocs = append(q.unlikeDocs, ignoreDocs...) + return q +} + +// Ids sets the document ids to use in order to find documents that are "like" this. +func (q *MoreLikeThisQuery) Ids(ids ...string) *MoreLikeThisQuery { + for _, id := range ids { + item := NewMoreLikeThisQueryItem().Id(id) + q.docs = append(q.docs, item) + } + return q +} + +// Include specifies whether the input documents should also be included +// in the results returned. Defaults to false. +func (q *MoreLikeThisQuery) Include(include bool) *MoreLikeThisQuery { + q.include = &include + return q +} + +// MinimumShouldMatch sets the number of terms that must match the generated +// query expressed in the common syntax for minimum should match. +// The default value is "30%". +// +// This used to be "PercentTermsToMatch" in Elasticsearch versions before 2.0. 
+func (q *MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) *MoreLikeThisQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +// MinTermFreq is the frequency below which terms will be ignored in the +// source doc. The default frequency is 2. +func (q *MoreLikeThisQuery) MinTermFreq(minTermFreq int) *MoreLikeThisQuery { + q.minTermFreq = &minTermFreq + return q +} + +// MaxQueryTerms sets the maximum number of query terms that will be included +// in any generated query. It defaults to 25. +func (q *MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) *MoreLikeThisQuery { + q.maxQueryTerms = &maxQueryTerms + return q +} + +// MinDocFreq sets the frequency at which words will be ignored which do +// not occur in at least this many docs. The default is 5. +func (q *MoreLikeThisQuery) MinDocFreq(minDocFreq int) *MoreLikeThisQuery { + q.minDocFreq = &minDocFreq + return q +} + +// MaxDocFreq sets the maximum frequency for which words may still appear. +// Words that appear in more than this many docs will be ignored. +// It defaults to unbounded. +func (q *MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) *MoreLikeThisQuery { + q.maxDocFreq = &maxDocFreq + return q +} + +// MinWordLength sets the minimum word length below which words will be +// ignored. It defaults to 0. +func (q *MoreLikeThisQuery) MinWordLen(minWordLen int) *MoreLikeThisQuery { + q.minWordLen = &minWordLen + return q +} + +// MaxWordLen sets the maximum word length above which words will be ignored. +// Defaults to unbounded (0). +func (q *MoreLikeThisQuery) MaxWordLen(maxWordLen int) *MoreLikeThisQuery { + q.maxWordLen = &maxWordLen + return q +} + +// BoostTerms sets the boost factor to use when boosting terms. +// It defaults to 1. +func (q *MoreLikeThisQuery) BoostTerms(boostTerms float64) *MoreLikeThisQuery { + q.boostTerms = &boostTerms + return q +} + +// Analyzer specifies the analyzer that will be use to analyze the text. 
+// Defaults to the analyzer associated with the field. +func (q *MoreLikeThisQuery) Analyzer(analyzer string) *MoreLikeThisQuery { + q.analyzer = analyzer + return q +} + +// Boost sets the boost for this query. +func (q *MoreLikeThisQuery) Boost(boost float64) *MoreLikeThisQuery { + q.boost = &boost + return q +} + +// FailOnUnsupportedField indicates whether to fail or return no result +// when this query is run against a field which is not supported such as +// a binary/numeric field. +func (q *MoreLikeThisQuery) FailOnUnsupportedField(fail bool) *MoreLikeThisQuery { + q.failOnUnsupportedField = &fail + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *MoreLikeThisQuery) QueryName(queryName string) *MoreLikeThisQuery { + q.queryName = queryName + return q +} + +// Source creates the source for the MLT query. +// It may return an error if the caller forgot to specify any documents to +// be "liked" in the MoreLikeThisQuery. +func (q *MoreLikeThisQuery) Source() (interface{}, error) { + // { + // "match_all" : { ... 
} + // } + if len(q.docs) == 0 { + return nil, errors.New(`more_like_this requires some documents to be "liked"`) + } + + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + source["mlt"] = params + + if len(q.fields) > 0 { + params["fields"] = q.fields + } + + likes := make([]interface{}, 0) + for _, doc := range q.docs { + src, err := doc.Source() + if err != nil { + return nil, err + } + likes = append(likes, src) + } + params["like"] = likes + + if len(q.unlikeDocs) > 0 { + dontLikes := make([]interface{}, 0) + for _, doc := range q.unlikeDocs { + src, err := doc.Source() + if err != nil { + return nil, err + } + dontLikes = append(dontLikes, src) + } + params["unlike"] = dontLikes + } + + if q.minimumShouldMatch != "" { + params["minimum_should_match"] = q.minimumShouldMatch + } + if q.minTermFreq != nil { + params["min_term_freq"] = *q.minTermFreq + } + if q.maxQueryTerms != nil { + params["max_query_terms"] = *q.maxQueryTerms + } + if len(q.stopWords) > 0 { + params["stop_words"] = q.stopWords + } + if q.minDocFreq != nil { + params["min_doc_freq"] = *q.minDocFreq + } + if q.maxDocFreq != nil { + params["max_doc_freq"] = *q.maxDocFreq + } + if q.minWordLen != nil { + params["min_word_len"] = *q.minWordLen + } + if q.maxWordLen != nil { + params["max_word_len"] = *q.maxWordLen + } + if q.boostTerms != nil { + params["boost_terms"] = *q.boostTerms + } + if q.boost != nil { + params["boost"] = *q.boost + } + if q.analyzer != "" { + params["analyzer"] = q.analyzer + } + if q.failOnUnsupportedField != nil { + params["fail_on_unsupported_field"] = *q.failOnUnsupportedField + } + if q.queryName != "" { + params["_name"] = q.queryName + } + if q.include != nil { + params["include"] = *q.include + } + + return source, nil +} + +// -- MoreLikeThisQueryItem -- + +// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery +// to be "liked" or "unliked". 
+type MoreLikeThisQueryItem struct { + likeText string + + index string + typ string + id string + doc interface{} + fields []string + routing string + fsc *FetchSourceContext + version int64 + versionType string +} + +// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem. +func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem { + return &MoreLikeThisQueryItem{ + version: -1, + } +} + +// LikeText represents a text to be "liked". +func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem { + item.likeText = likeText + return item +} + +// Index represents the index of the item. +func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem { + item.index = index + return item +} + +// Type represents the document type of the item. +func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem { + item.typ = typ + return item +} + +// Id represents the document id of the item. +func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem { + item.id = id + return item +} + +// Doc represents a raw document template for the item. +func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem { + item.doc = doc + return item +} + +// Fields represents the list of fields of the item. +func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem { + item.fields = append(item.fields, fields...) + return item +} + +// Routing sets the routing associated with the item. +func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem { + item.routing = routing + return item +} + +// FetchSourceContext represents the fetch source of the item which controls +// if and how _source should be returned. +func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem { + item.fsc = fsc + return item +} + +// Version specifies the version of the item. 
+func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem { + item.version = version + return item +} + +// VersionType represents the version type of the item. +func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem { + item.versionType = versionType + return item +} + +// Source returns the JSON-serializable fragment of the entity. +func (item *MoreLikeThisQueryItem) Source() (interface{}, error) { + if item.likeText != "" { + return item.likeText, nil + } + + source := make(map[string]interface{}) + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.id != "" { + source["_id"] = item.id + } + if item.doc != nil { + source["doc"] = item.doc + } + if len(item.fields) > 0 { + source["fields"] = item.fields + } + if item.routing != "" { + source["_routing"] = item.routing + } + if item.fsc != nil { + src, err := item.fsc.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + if item.version >= 0 { + source["_version"] = item.version + } + if item.versionType != "" { + source["_version_type"] = item.versionType + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go new file mode 100644 index 000000000..b9f74a0d3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go @@ -0,0 +1,275 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strings" +) + +// MultiMatchQuery builds on the MatchQuery to allow multi-field queries. 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-multi-match-query.html +type MultiMatchQuery struct { + text interface{} + fields []string + fieldBoosts map[string]*float64 + typ string // best_fields, boolean, most_fields, cross_fields, phrase, phrase_prefix + operator string // AND or OR + analyzer string + boost *float64 + slop *int + fuzziness string + prefixLength *int + maxExpansions *int + minimumShouldMatch string + rewrite string + fuzzyRewrite string + tieBreaker *float64 + lenient *bool + cutoffFrequency *float64 + zeroTermsQuery string + queryName string +} + +// MultiMatchQuery creates and initializes a new MultiMatchQuery. +func NewMultiMatchQuery(text interface{}, fields ...string) *MultiMatchQuery { + q := &MultiMatchQuery{ + text: text, + fields: make([]string, 0), + fieldBoosts: make(map[string]*float64), + } + q.fields = append(q.fields, fields...) + return q +} + +// Field adds a field to run the multi match against. +func (q *MultiMatchQuery) Field(field string) *MultiMatchQuery { + q.fields = append(q.fields, field) + return q +} + +// FieldWithBoost adds a field to run the multi match against with a specific boost. +func (q *MultiMatchQuery) FieldWithBoost(field string, boost float64) *MultiMatchQuery { + q.fields = append(q.fields, field) + q.fieldBoosts[field] = &boost + return q +} + +// Type can be "best_fields", "boolean", "most_fields", "cross_fields", +// "phrase", or "phrase_prefix". 
+func (q *MultiMatchQuery) Type(typ string) *MultiMatchQuery { + var zero = float64(0.0) + var one = float64(1.0) + + switch strings.ToLower(typ) { + default: // best_fields / boolean + q.typ = "best_fields" + q.tieBreaker = &zero + case "most_fields": + q.typ = "most_fields" + q.tieBreaker = &one + case "cross_fields": + q.typ = "cross_fields" + q.tieBreaker = &zero + case "phrase": + q.typ = "phrase" + q.tieBreaker = &zero + case "phrase_prefix": + q.typ = "phrase_prefix" + q.tieBreaker = &zero + } + return q +} + +// Operator sets the operator to use when using boolean query. +// It can be either AND or OR (default). +func (q *MultiMatchQuery) Operator(operator string) *MultiMatchQuery { + q.operator = operator + return q +} + +// Analyzer sets the analyzer to use explicitly. It defaults to use explicit +// mapping config for the field, or, if not set, the default search analyzer. +func (q *MultiMatchQuery) Analyzer(analyzer string) *MultiMatchQuery { + q.analyzer = analyzer + return q +} + +// Boost sets the boost for this query. +func (q *MultiMatchQuery) Boost(boost float64) *MultiMatchQuery { + q.boost = &boost + return q +} + +// Slop sets the phrase slop if evaluated to a phrase query type. +func (q *MultiMatchQuery) Slop(slop int) *MultiMatchQuery { + q.slop = &slop + return q +} + +// Fuzziness sets the fuzziness used when evaluated to a fuzzy query type. +// It defaults to "AUTO". +func (q *MultiMatchQuery) Fuzziness(fuzziness string) *MultiMatchQuery { + q.fuzziness = fuzziness + return q +} + +// PrefixLength for the fuzzy process. +func (q *MultiMatchQuery) PrefixLength(prefixLength int) *MultiMatchQuery { + q.prefixLength = &prefixLength + return q +} + +// MaxExpansions is the number of term expansions to use when using fuzzy +// or prefix type query. It defaults to unbounded so it's recommended +// to set it to a reasonable value for faster execution. 
+func (q *MultiMatchQuery) MaxExpansions(maxExpansions int) *MultiMatchQuery { + q.maxExpansions = &maxExpansions + return q +} + +// MinimumShouldMatch represents the minimum number of optional should clauses +// to match. +func (q *MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MultiMatchQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *MultiMatchQuery) Rewrite(rewrite string) *MultiMatchQuery { + q.rewrite = rewrite + return q +} + +func (q *MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) *MultiMatchQuery { + q.fuzzyRewrite = fuzzyRewrite + return q +} + +// TieBreaker for "best-match" disjunction queries (OR queries). +// The tie breaker capability allows documents that match more than one +// query clause (in this case on more than one field) to be scored better +// than documents that match only the best of the fields, without confusing +// this with the better case of two distinct matches in the multiple fields. +// +// A tie-breaker value of 1.0 is interpreted as a signal to score queries as +// "most-match" queries where all matching query clauses are considered for scoring. +func (q *MultiMatchQuery) TieBreaker(tieBreaker float64) *MultiMatchQuery { + q.tieBreaker = &tieBreaker + return q +} + +// Lenient indicates whether format based failures will be ignored. +func (q *MultiMatchQuery) Lenient(lenient bool) *MultiMatchQuery { + q.lenient = &lenient + return q +} + +// CutoffFrequency sets a cutoff value in [0..1] (or absolute number >=1) +// representing the maximum threshold of a terms document frequency to be +// considered a low frequency term. +func (q *MultiMatchQuery) CutoffFrequency(cutoff float64) *MultiMatchQuery { + q.cutoffFrequency = &cutoff + return q +} + +// ZeroTermsQuery can be "all" or "none". 
+func (q *MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MultiMatchQuery { + q.zeroTermsQuery = zeroTermsQuery + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *MultiMatchQuery) QueryName(queryName string) *MultiMatchQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *MultiMatchQuery) Source() (interface{}, error) { + // + // { + // "multi_match" : { + // "query" : "this is a test", + // "fields" : [ "subject", "message" ] + // } + // } + + source := make(map[string]interface{}) + + multiMatch := make(map[string]interface{}) + source["multi_match"] = multiMatch + + multiMatch["query"] = q.text + + if len(q.fields) > 0 { + fields := make([]string, 0) + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + multiMatch["fields"] = fields + } + + if q.typ != "" { + multiMatch["type"] = q.typ + } + + if q.operator != "" { + multiMatch["operator"] = q.operator + } + if q.analyzer != "" { + multiMatch["analyzer"] = q.analyzer + } + if q.boost != nil { + multiMatch["boost"] = *q.boost + } + if q.slop != nil { + multiMatch["slop"] = *q.slop + } + if q.fuzziness != "" { + multiMatch["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + multiMatch["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + multiMatch["max_expansions"] = *q.maxExpansions + } + if q.minimumShouldMatch != "" { + multiMatch["minimum_should_match"] = q.minimumShouldMatch + } + if q.rewrite != "" { + multiMatch["rewrite"] = q.rewrite + } + if q.fuzzyRewrite != "" { + multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.tieBreaker != nil { + multiMatch["tie_breaker"] = *q.tieBreaker + } + if q.lenient != nil { + 
multiMatch["lenient"] = *q.lenient + } + if q.cutoffFrequency != nil { + multiMatch["cutoff_frequency"] = *q.cutoffFrequency + } + if q.zeroTermsQuery != "" { + multiMatch["zero_terms_query"] = q.zeroTermsQuery + } + if q.queryName != "" { + multiMatch["_name"] = q.queryName + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested.go new file mode 100644 index 000000000..0a598f8bf --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested.go @@ -0,0 +1,85 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NestedQuery allows to query nested objects / docs. +// The query is executed against the nested objects / docs as if they were +// indexed as separate docs (they are, internally) and resulting in the +// root parent doc (or parent nested mapping). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-nested-query.html +type NestedQuery struct { + query Query + path string + scoreMode string + boost *float64 + queryName string + innerHit *InnerHit +} + +// NewNestedQuery creates and initializes a new NestedQuery. +func NewNestedQuery(path string, query Query) *NestedQuery { + return &NestedQuery{path: path, query: query} +} + +// ScoreMode specifies the score mode. +func (q *NestedQuery) ScoreMode(scoreMode string) *NestedQuery { + q.scoreMode = scoreMode + return q +} + +// Boost sets the boost for this query. 
+func (q *NestedQuery) Boost(boost float64) *NestedQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *NestedQuery) QueryName(queryName string) *NestedQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this nested query +// and reusing the defined path and query. +func (q *NestedQuery) InnerHit(innerHit *InnerHit) *NestedQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the query. +func (q *NestedQuery) Source() (interface{}, error) { + query := make(map[string]interface{}) + nq := make(map[string]interface{}) + query["nested"] = nq + + src, err := q.query.Source() + if err != nil { + return nil, err + } + nq["query"] = src + + nq["path"] = q.path + + if q.scoreMode != "" { + nq["score_mode"] = q.scoreMode + } + if q.boost != nil { + nq["boost"] = *q.boost + } + if q.queryName != "" { + nq["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + nq["inner_hits"] = src + } + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_not.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_not.go new file mode 100644 index 000000000..7a1ee8e08 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_not.go @@ -0,0 +1,45 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NotQuery filters out matched documents using a query. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-not-query.html +type NotQuery struct { + filter Query + queryName string +} + +// NewNotQuery creates and initializes a new NotQuery. 
+func NewNotQuery(filter Query) *NotQuery { + return &NotQuery{ + filter: filter, + } +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *NotQuery) QueryName(queryName string) *NotQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *NotQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["not"] = params + + src, err := q.filter.Source() + if err != nil { + return nil, err + } + params["query"] = src + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix.go new file mode 100644 index 000000000..1628ba8cc --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix.go @@ -0,0 +1,67 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PrefixQuery matches documents that have fields containing terms +// with a specified prefix (not analyzed). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-prefix-query.html +type PrefixQuery struct { + name string + prefix string + boost *float64 + rewrite string + queryName string +} + +// NewPrefixQuery creates and initializes a new PrefixQuery. +func NewPrefixQuery(name string, prefix string) *PrefixQuery { + return &PrefixQuery{name: name, prefix: prefix} +} + +// Boost sets the boost for this query. 
+func (q *PrefixQuery) Boost(boost float64) *PrefixQuery { + q.boost = &boost + return q +} + +func (q *PrefixQuery) Rewrite(rewrite string) *PrefixQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *PrefixQuery) QueryName(queryName string) *PrefixQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *PrefixQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["prefix"] = query + + if q.boost == nil && q.rewrite == "" && q.queryName == "" { + query[q.name] = q.prefix + } else { + subQuery := make(map[string]interface{}) + subQuery["prefix"] = q.prefix + if q.boost != nil { + subQuery["boost"] = *q.boost + } + if q.rewrite != "" { + subQuery["rewrite"] = q.rewrite + } + if q.queryName != "" { + subQuery["_name"] = q.queryName + } + query[q.name] = subQuery + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string.go new file mode 100644 index 000000000..f1f767a47 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string.go @@ -0,0 +1,359 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// QueryStringQuery uses the query parser in order to parse its content. 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html +type QueryStringQuery struct { + queryString string + defaultField string + defaultOperator string + analyzer string + quoteAnalyzer string + quoteFieldSuffix string + autoGeneratePhraseQueries *bool + allowLeadingWildcard *bool + lowercaseExpandedTerms *bool + enablePositionIncrements *bool + analyzeWildcard *bool + locale string + boost *float64 + fuzziness string + fuzzyPrefixLength *int + fuzzyMaxExpansions *int + fuzzyRewrite string + phraseSlop *int + fields []string + fieldBoosts map[string]*float64 + useDisMax *bool + tieBreaker *float64 + rewrite string + minimumShouldMatch string + lenient *bool + queryName string + timeZone string + maxDeterminizedStates *int + escape *bool +} + +// NewQueryStringQuery creates and initializes a new QueryStringQuery. +func NewQueryStringQuery(queryString string) *QueryStringQuery { + return &QueryStringQuery{ + queryString: queryString, + fields: make([]string, 0), + fieldBoosts: make(map[string]*float64), + } +} + +// DefaultField specifies the field to run against when no prefix field +// is specified. Only relevant when not explicitly adding fields the query +// string will run against. +func (q *QueryStringQuery) DefaultField(defaultField string) *QueryStringQuery { + q.defaultField = defaultField + return q +} + +// Field adds a field to run the query string against. +func (q *QueryStringQuery) Field(field string) *QueryStringQuery { + q.fields = append(q.fields, field) + return q +} + +// FieldWithBoost adds a field to run the query string against with a specific boost. +func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStringQuery { + q.fields = append(q.fields, field) + q.fieldBoosts[field] = &boost + return q +} + +// UseDisMax specifies whether to combine queries using dis max or boolean +// query when more zhan one field is used with the query string. 
Defaults +// to dismax (true). +func (q *QueryStringQuery) UseDisMax(useDisMax bool) *QueryStringQuery { + q.useDisMax = &useDisMax + return q +} + +// TieBreaker is used when more than one field is used with the query string, +// and combined queries are using dismax. +func (q *QueryStringQuery) TieBreaker(tieBreaker float64) *QueryStringQuery { + q.tieBreaker = &tieBreaker + return q +} + +// DefaultOperator sets the boolean operator of the query parser used to +// parse the query string. +// +// In default mode (OR) terms without any modifiers +// are considered optional, e.g. "capital of Hungary" is equal to +// "capital OR of OR Hungary". +// +// In AND mode, terms are considered to be in conjunction. The above mentioned +// query is then parsed as "capital AND of AND Hungary". +func (q *QueryStringQuery) DefaultOperator(operator string) *QueryStringQuery { + q.defaultOperator = operator + return q +} + +// Analyzer is an optional analyzer used to analyze the query string. +// Note, if a field has search analyzer defined for it, then it will be used +// automatically. Defaults to the smart search analyzer. +func (q *QueryStringQuery) Analyzer(analyzer string) *QueryStringQuery { + q.analyzer = analyzer + return q +} + +// QuoteAnalyzer is an optional analyzer to be used to analyze the query string +// for phrase searches. Note, if a field has search analyzer defined for it, +// then it will be used automatically. Defaults to the smart search analyzer. +func (q *QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) *QueryStringQuery { + q.quoteAnalyzer = quoteAnalyzer + return q +} + +// AutoGeneratePhraseQueries indicates whether or not phrase queries will +// be automatically generated when the analyzer returns more then one term +// from whitespace delimited text. Set to false if phrase queries should only +// be generated when surrounded by double quotes. 
+func (q *QueryStringQuery) AutoGeneratePhraseQueries(autoGeneratePhraseQueries bool) *QueryStringQuery { + q.autoGeneratePhraseQueries = &autoGeneratePhraseQueries + return q +} + +// MaxDeterminizedState protects against too-difficult regular expression queries. +func (q *QueryStringQuery) MaxDeterminizedState(maxDeterminizedStates int) *QueryStringQuery { + q.maxDeterminizedStates = &maxDeterminizedStates + return q +} + +// AllowLeadingWildcard specifies whether leading wildcards should be allowed +// or not (defaults to true). +func (q *QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) *QueryStringQuery { + q.allowLeadingWildcard = &allowLeadingWildcard + return q +} + +// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy +// and range queries are automatically lower-cased or not. Default is true. +func (q *QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *QueryStringQuery { + q.lowercaseExpandedTerms = &lowercaseExpandedTerms + return q +} + +// EnablePositionIncrements indicates whether to enable position increments +// in result query. Defaults to true. +// +// When set, result phrase and multi-phrase queries will be aware of position +// increments. Useful when e.g. a StopFilter increases the position increment +// of the token that follows an omitted token. +func (q *QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) *QueryStringQuery { + q.enablePositionIncrements = &enablePositionIncrements + return q +} + +// Fuzziness sets the edit distance for fuzzy queries. Default is "AUTO". +func (q *QueryStringQuery) Fuzziness(fuzziness string) *QueryStringQuery { + q.fuzziness = fuzziness + return q +} + +// FuzzyPrefixLength sets the minimum prefix length for fuzzy queries. +// Default is 1. 
+func (q *QueryStringQuery) FuzzyPrefixLength(fuzzyPrefixLength int) *QueryStringQuery { + q.fuzzyPrefixLength = &fuzzyPrefixLength + return q +} + +func (q *QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) *QueryStringQuery { + q.fuzzyMaxExpansions = &fuzzyMaxExpansions + return q +} + +func (q *QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) *QueryStringQuery { + q.fuzzyRewrite = fuzzyRewrite + return q +} + +// PhraseSlop sets the default slop for phrases. If zero, then exact matches +// are required. Default value is zero. +func (q *QueryStringQuery) PhraseSlop(phraseSlop int) *QueryStringQuery { + q.phraseSlop = &phraseSlop + return q +} + +// AnalyzeWildcard indicates whether to enabled analysis on wildcard and prefix queries. +func (q *QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *QueryStringQuery { + q.analyzeWildcard = &analyzeWildcard + return q +} + +func (q *QueryStringQuery) Rewrite(rewrite string) *QueryStringQuery { + q.rewrite = rewrite + return q +} + +func (q *QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *QueryStringQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +// Boost sets the boost for this query. +func (q *QueryStringQuery) Boost(boost float64) *QueryStringQuery { + q.boost = &boost + return q +} + +// QuoteFieldSuffix is an optional field name suffix to automatically +// try and add to the field searched when using quoted text. +func (q *QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) *QueryStringQuery { + q.quoteFieldSuffix = quoteFieldSuffix + return q +} + +// Lenient indicates whether the query string parser should be lenient +// when parsing field values. It defaults to the index setting and if not +// set, defaults to false. +func (q *QueryStringQuery) Lenient(lenient bool) *QueryStringQuery { + q.lenient = &lenient + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. 
+func (q *QueryStringQuery) QueryName(queryName string) *QueryStringQuery { + q.queryName = queryName + return q +} + +func (q *QueryStringQuery) Locale(locale string) *QueryStringQuery { + q.locale = locale + return q +} + +// TimeZone can be used to automatically adjust to/from fields using a +// timezone. Only used with date fields, of course. +func (q *QueryStringQuery) TimeZone(timeZone string) *QueryStringQuery { + q.timeZone = timeZone + return q +} + +// Escape performs escaping of the query string. +func (q *QueryStringQuery) Escape(escape bool) *QueryStringQuery { + q.escape = &escape + return q +} + +// Source returns JSON for the query. +func (q *QueryStringQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["query_string"] = query + + query["query"] = q.queryString + + if q.defaultField != "" { + query["default_field"] = q.defaultField + } + + if len(q.fields) > 0 { + fields := make([]string, 0) + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + query["fields"] = fields + } + + if q.tieBreaker != nil { + query["tie_breaker"] = *q.tieBreaker + } + if q.useDisMax != nil { + query["use_dis_max"] = *q.useDisMax + } + if q.defaultOperator != "" { + query["default_operator"] = q.defaultOperator + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.quoteAnalyzer != "" { + query["quote_analyzer"] = q.quoteAnalyzer + } + if q.autoGeneratePhraseQueries != nil { + query["auto_generate_phrase_queries"] = *q.autoGeneratePhraseQueries + } + if q.maxDeterminizedStates != nil { + query["max_determinized_states"] = *q.maxDeterminizedStates + } + if q.allowLeadingWildcard != nil { + query["allow_leading_wildcard"] = *q.allowLeadingWildcard + } + if 
q.lowercaseExpandedTerms != nil { + query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms + } + if q.enablePositionIncrements != nil { + query["enable_position_increments"] = *q.enablePositionIncrements + } + if q.fuzziness != "" { + query["fuzziness"] = q.fuzziness + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.fuzzyPrefixLength != nil { + query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength + } + if q.fuzzyMaxExpansions != nil { + query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions + } + if q.fuzzyRewrite != "" { + query["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.phraseSlop != nil { + query["phrase_slop"] = *q.phraseSlop + } + if q.analyzeWildcard != nil { + query["analyze_wildcard"] = *q.analyzeWildcard + } + if q.rewrite != "" { + query["rewrite"] = q.rewrite + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.quoteFieldSuffix != "" { + query["quote_field_suffix"] = q.quoteFieldSuffix + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.locale != "" { + query["locale"] = q.locale + } + if q.timeZone != "" { + query["time_zone"] = q.timeZone + } + if q.escape != nil { + query["escape"] = *q.escape + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_range.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_range.go new file mode 100644 index 000000000..23e28597f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_range.go @@ -0,0 +1,144 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// RangeQuery matches documents with fields that have terms within a certain range. 
+// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html +type RangeQuery struct { + name string + from interface{} + to interface{} + timeZone string + includeLower bool + includeUpper bool + boost *float64 + queryName string + format string +} + +// NewRangeQuery creates and initializes a new RangeQuery. +func NewRangeQuery(name string) *RangeQuery { + return &RangeQuery{name: name, includeLower: true, includeUpper: true} +} + +// From indicates the from part of the RangeQuery. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) From(from interface{}) *RangeQuery { + q.from = from + return q +} + +// Gt indicates a greater-than value for the from part. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) Gt(from interface{}) *RangeQuery { + q.from = from + q.includeLower = false + return q +} + +// Gte indicates a greater-than-or-equal value for the from part. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) Gte(from interface{}) *RangeQuery { + q.from = from + q.includeLower = true + return q +} + +// To indicates the to part of the RangeQuery. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) To(to interface{}) *RangeQuery { + q.to = to + return q +} + +// Lt indicates a less-than value for the to part. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) Lt(to interface{}) *RangeQuery { + q.to = to + q.includeUpper = false + return q +} + +// Lte indicates a less-than-or-equal value for the to part. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) Lte(to interface{}) *RangeQuery { + q.to = to + q.includeUpper = true + return q +} + +// IncludeLower indicates whether the lower bound should be included or not. +// Defaults to true. 
+func (q *RangeQuery) IncludeLower(includeLower bool) *RangeQuery { + q.includeLower = includeLower + return q +} + +// IncludeUpper indicates whether the upper bound should be included or not. +// Defaults to true. +func (q *RangeQuery) IncludeUpper(includeUpper bool) *RangeQuery { + q.includeUpper = includeUpper + return q +} + +// Boost sets the boost for this query. +func (q *RangeQuery) Boost(boost float64) *RangeQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *RangeQuery) QueryName(queryName string) *RangeQuery { + q.queryName = queryName + return q +} + +// TimeZone is used for date fields. In that case, we can adjust the +// from/to fields using a timezone. +func (q *RangeQuery) TimeZone(timeZone string) *RangeQuery { + q.timeZone = timeZone + return q +} + +// Format is used for date fields. In that case, we can set the format +// to be used instead of the mapper format. +func (q *RangeQuery) Format(format string) *RangeQuery { + q.format = format + return q +} + +// Source returns JSON for the query. 
+func (q *RangeQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + rangeQ := make(map[string]interface{}) + source["range"] = rangeQ + + params := make(map[string]interface{}) + rangeQ[q.name] = params + + params["from"] = q.from + params["to"] = q.to + if q.timeZone != "" { + params["time_zone"] = q.timeZone + } + if q.format != "" { + params["format"] = q.format + } + if q.boost != nil { + params["boost"] = *q.boost + } + params["include_lower"] = q.includeLower + params["include_upper"] = q.includeUpper + + if q.queryName != "" { + rangeQ["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp.go new file mode 100644 index 000000000..ecd9f7fe0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp.go @@ -0,0 +1,82 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// RegexpQuery allows you to use regular expression term queries. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html +type RegexpQuery struct { + name string + regexp string + flags string + boost *float64 + rewrite string + queryName string + maxDeterminizedStates *int +} + +// NewRegexpQuery creates and initializes a new RegexpQuery. +func NewRegexpQuery(name string, regexp string) *RegexpQuery { + return &RegexpQuery{name: name, regexp: regexp} +} + +// Flags sets the regexp flags. +func (q *RegexpQuery) Flags(flags string) *RegexpQuery { + q.flags = flags + return q +} + +// MaxDeterminizedStates protects against complex regular expressions. 
+func (q *RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) *RegexpQuery { + q.maxDeterminizedStates = &maxDeterminizedStates + return q +} + +// Boost sets the boost for this query. +func (q *RegexpQuery) Boost(boost float64) *RegexpQuery { + q.boost = &boost + return q +} + +func (q *RegexpQuery) Rewrite(rewrite string) *RegexpQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *RegexpQuery) QueryName(queryName string) *RegexpQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON-serializable query data. +func (q *RegexpQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["regexp"] = query + + x := make(map[string]interface{}) + x["value"] = q.regexp + if q.flags != "" { + x["flags"] = q.flags + } + if q.maxDeterminizedStates != nil { + x["max_determinized_states"] = *q.maxDeterminizedStates + } + if q.boost != nil { + x["boost"] = *q.boost + } + if q.rewrite != "" { + x["rewrite"] = q.rewrite + } + if q.queryName != "" { + x["name"] = q.queryName + } + query[q.name] = x + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_script.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_script.go new file mode 100644 index 000000000..3baa90574 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_script.go @@ -0,0 +1,51 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// ScriptQuery allows to define scripts as filters. 
+// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-query.html +type ScriptQuery struct { + script *Script + queryName string +} + +// NewScriptQuery creates and initializes a new ScriptQuery. +func NewScriptQuery(script *Script) *ScriptQuery { + return &ScriptQuery{ + script: script, + } +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *ScriptQuery) QueryName(queryName string) *ScriptQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *ScriptQuery) Source() (interface{}, error) { + if q.script == nil { + return nil, errors.New("ScriptQuery expected a script") + } + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["script"] = params + + src, err := q.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go new file mode 100644 index 000000000..fb0a2a9b9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go @@ -0,0 +1,185 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strings" +) + +// SimpleQueryStringQuery is a query that uses the SimpleQueryParser +// to parse its context. Unlike the regular query_string query, +// the simple_query_string query will never throw an exception, +// and discards invalid parts of the query. 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html +type SimpleQueryStringQuery struct { + queryText string + analyzer string + operator string + fields []string + fieldBoosts map[string]*float64 + minimumShouldMatch string + flags string + boost *float64 + lowercaseExpandedTerms *bool + lenient *bool + analyzeWildcard *bool + locale string + queryName string +} + +// NewSimpleQueryStringQuery creates and initializes a new SimpleQueryStringQuery. +func NewSimpleQueryStringQuery(text string) *SimpleQueryStringQuery { + return &SimpleQueryStringQuery{ + queryText: text, + fields: make([]string, 0), + fieldBoosts: make(map[string]*float64), + } +} + +// Field adds a field to run the query against. +func (q *SimpleQueryStringQuery) Field(field string) *SimpleQueryStringQuery { + q.fields = append(q.fields, field) + return q +} + +// Field adds a field to run the query against with a specific boost. +func (q *SimpleQueryStringQuery) FieldWithBoost(field string, boost float64) *SimpleQueryStringQuery { + q.fields = append(q.fields, field) + q.fieldBoosts[field] = &boost + return q +} + +// Boost sets the boost for this query. +func (q *SimpleQueryStringQuery) Boost(boost float64) *SimpleQueryStringQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *SimpleQueryStringQuery) QueryName(queryName string) *SimpleQueryStringQuery { + q.queryName = queryName + return q +} + +// Analyzer specifies the analyzer to use for the query. +func (q *SimpleQueryStringQuery) Analyzer(analyzer string) *SimpleQueryStringQuery { + q.analyzer = analyzer + return q +} + +// DefaultOperator specifies the default operator for the query. 
+func (q *SimpleQueryStringQuery) DefaultOperator(defaultOperator string) *SimpleQueryStringQuery { + q.operator = defaultOperator + return q +} + +// Flags sets the flags for the query. +func (q *SimpleQueryStringQuery) Flags(flags string) *SimpleQueryStringQuery { + q.flags = flags + return q +} + +// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy +// and range queries are automatically lower-cased or not. Default is true. +func (q *SimpleQueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *SimpleQueryStringQuery { + q.lowercaseExpandedTerms = &lowercaseExpandedTerms + return q +} + +func (q *SimpleQueryStringQuery) Locale(locale string) *SimpleQueryStringQuery { + q.locale = locale + return q +} + +// Lenient indicates whether the query string parser should be lenient +// when parsing field values. It defaults to the index setting and if not +// set, defaults to false. +func (q *SimpleQueryStringQuery) Lenient(lenient bool) *SimpleQueryStringQuery { + q.lenient = &lenient + return q +} + +// AnalyzeWildcard indicates whether to enabled analysis on wildcard and prefix queries. +func (q *SimpleQueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *SimpleQueryStringQuery { + q.analyzeWildcard = &analyzeWildcard + return q +} + +func (q *SimpleQueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *SimpleQueryStringQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +// Source returns JSON for the query. 
+func (q *SimpleQueryStringQuery) Source() (interface{}, error) { + // { + // "simple_query_string" : { + // "query" : "\"fried eggs\" +(eggplant | potato) -frittata", + // "analyzer" : "snowball", + // "fields" : ["body^5","_all"], + // "default_operator" : "and" + // } + // } + + source := make(map[string]interface{}) + + query := make(map[string]interface{}) + source["simple_query_string"] = query + + query["query"] = q.queryText + + if len(q.fields) > 0 { + fields := make([]string, 0) + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + query["fields"] = fields + } + + if q.flags != "" { + query["flags"] = q.flags + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.operator != "" { + query["default_operator"] = strings.ToLower(q.operator) + } + if q.lowercaseExpandedTerms != nil { + query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.analyzeWildcard != nil { + query["analyze_wildcard"] = *q.analyzeWildcard + } + if q.locale != "" { + query["locale"] = q.locale + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.boost != nil { + query["boost"] = *q.boost + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query.go new file mode 100644 index 000000000..0611c3ea4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query.go @@ -0,0 +1,84 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TemplateQuery is a query that accepts a query template and a +// map of key/value pairs to fill in template parameters. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-template-query.html +type TemplateQuery struct { + template string + templateType string + vars map[string]interface{} +} + +// NewTemplateQuery creates and initializes a new TemplateQuery. +func NewTemplateQuery(name string) *TemplateQuery { + return &TemplateQuery{ + template: name, + vars: make(map[string]interface{}), + } +} + +// Template specifies the name of the template. +func (q *TemplateQuery) Template(name string) *TemplateQuery { + q.template = name + return q +} + +// TemplateType defines which kind of query we use. The values can be: +// inline, indexed, or file. If undefined, inline is used. +func (q *TemplateQuery) TemplateType(typ string) *TemplateQuery { + q.templateType = typ + return q +} + +// Var sets a single parameter pair. +func (q *TemplateQuery) Var(name string, value interface{}) *TemplateQuery { + q.vars[name] = value + return q +} + +// Vars sets parameters for the template query. +func (q *TemplateQuery) Vars(vars map[string]interface{}) *TemplateQuery { + q.vars = vars + return q +} + +// Source returns the JSON serializable content for the search. 
+func (q *TemplateQuery) Source() (interface{}, error) {
+	// {
+	//   "template" : {
+	//     "query" : {"match_{{template}}": {}},
+	//     "params" : {
+	//       "template": "all"
+	//     }
+	//   }
+	// }
+
+	query := make(map[string]interface{})
+
+	tmpl := make(map[string]interface{})
+	query["template"] = tmpl
+
+	// TODO(oe): Implementation differs from online documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
+	var fieldname string
+	switch q.templateType {
+	case "file": // file
+		fieldname = "file"
+	case "indexed", "id": // indexed
+		fieldname = "id"
+	default: // inline
+		fieldname = "query"
+	}
+
+	tmpl[fieldname] = q.template
+	if len(q.vars) > 0 {
+		tmpl["params"] = q.vars
+	}
+
+	return query, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_term.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_term.go
new file mode 100644
index 000000000..c20c5c66e
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_term.go
@@ -0,0 +1,58 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermQuery finds documents that contain the exact term specified
+// in the inverted index.
+//
+// For details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-term-query.html
+type TermQuery struct {
+	name      string
+	value     interface{}
+	boost     *float64
+	queryName string
+}
+
+// NewTermQuery creates and initializes a new TermQuery.
+func NewTermQuery(name string, value interface{}) *TermQuery {
+	return &TermQuery{name: name, value: value}
+}
+
+// Boost sets the boost for this query.
+func (q *TermQuery) Boost(boost float64) *TermQuery {
+	q.boost = &boost
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *TermQuery) QueryName(queryName string) *TermQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns JSON for the query.
+func (q *TermQuery) Source() (interface{}, error) {
+	// {"term":{"name":"value"}}
+	source := make(map[string]interface{})
+	tq := make(map[string]interface{})
+	source["term"] = tq
+
+	if q.boost == nil && q.queryName == "" {
+		tq[q.name] = q.value
+	} else {
+		subQ := make(map[string]interface{})
+		subQ["value"] = q.value
+		if q.boost != nil {
+			subQ["boost"] = *q.boost
+		}
+		if q.queryName != "" {
+			subQ["_name"] = q.queryName
+		}
+		tq[q.name] = subQ
+	}
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms.go
new file mode 100644
index 000000000..a7e158859
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms.go
@@ -0,0 +1,58 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermsQuery filters documents that have fields that match any
+// of the provided terms (not analyzed).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-terms-query.html
+type TermsQuery struct {
+	name      string
+	values    []interface{}
+	queryName string
+	boost     *float64
+}
+
+// NewTermsQuery creates and initializes a new TermsQuery.
+func NewTermsQuery(name string, values ...interface{}) *TermsQuery {
+	q := &TermsQuery{
+		name:   name,
+		values: make([]interface{}, 0),
+	}
+	if len(values) > 0 {
+		q.values = append(q.values, values...)
+	}
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *TermsQuery) Boost(boost float64) *TermsQuery {
+	q.boost = &boost
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *TermsQuery) QueryName(queryName string) *TermsQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source creates the query source for the terms query.
+func (q *TermsQuery) Source() (interface{}, error) {
+	// {"terms":{"name":["value1","value2"]}}
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["terms"] = params
+	params[q.name] = q.values
+	if q.boost != nil {
+		params["boost"] = *q.boost
+	}
+	if q.queryName != "" {
+		params["_name"] = q.queryName
+	}
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_type.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_type.go
new file mode 100644
index 000000000..884d4ae7b
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_type.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TypeQuery filters documents matching the provided document / mapping type.
+//
+// For details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html
+type TypeQuery struct {
+	typ string
+}
+
+// NewTypeQuery creates and initializes a new TypeQuery.
+func NewTypeQuery(typ string) *TypeQuery {
+	return &TypeQuery{typ: typ}
+}
+
+// Source returns JSON for the query.
+func (q *TypeQuery) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["type"] = params
+	params["value"] = q.typ
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go
new file mode 100644
index 000000000..127332da3
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go
@@ -0,0 +1,82 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// WildcardQuery matches documents that have fields matching a wildcard
+// expression (not analyzed). Supported wildcards are *, which matches
+// any character sequence (including the empty one), and ?, which matches
+// any single character. Note this query can be slow, as it needs to iterate
+// over many terms. In order to prevent extremely slow wildcard queries,
+// a wildcard term should not start with one of the wildcards * or ?.
+// The wildcard query maps to Lucene WildcardQuery.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html
+type WildcardQuery struct {
+	name      string
+	wildcard  string
+	boost     *float64
+	rewrite   string
+	queryName string
+}
+
+// NewWildcardQuery creates and initializes a new WildcardQuery.
+func NewWildcardQuery(name, wildcard string) *WildcardQuery {
+	return &WildcardQuery{
+		name:     name,
+		wildcard: wildcard,
+	}
+}
+
+// Boost sets the boost for this query.
+func (q *WildcardQuery) Boost(boost float64) *WildcardQuery {
+	q.boost = &boost
+	return q
+}
+
+// Rewrite sets the rewrite method to use when rewriting this query.
+func (q *WildcardQuery) Rewrite(rewrite string) *WildcardQuery {
+	q.rewrite = rewrite
+	return q
+}
+
+// QueryName sets the name of this query.
+func (q *WildcardQuery) QueryName(queryName string) *WildcardQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns the JSON serializable body of this query.
+func (q *WildcardQuery) Source() (interface{}, error) {
+	// {
+	//	"wildcard" : {
+	//		"user" : {
+	//			"wildcard" : "ki*y",
+	//			"boost" : 1.0
+	//		}
+	//	}
+	// }
+
+	source := make(map[string]interface{})
+
+	query := make(map[string]interface{})
+	source["wildcard"] = query
+
+	wq := make(map[string]interface{})
+	query[q.name] = wq
+
+	wq["wildcard"] = q.wildcard
+
+	if q.boost != nil {
+		wq["boost"] = *q.boost
+	}
+	if q.rewrite != "" {
+		wq["rewrite"] = q.rewrite
+	}
+	if q.queryName != "" {
+		wq["_name"] = q.queryName
+	}
+
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_request.go b/vendor/gopkg.in/olivere/elastic.v3/search_request.go
new file mode 100644
index 000000000..f294cdb7a
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_request.go
@@ -0,0 +1,178 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"strings"
+)
+
+// SearchRequest combines a search request and its
+// query details (see SearchSource).
+// It is used in combination with MultiSearch.
+type SearchRequest struct {
+	searchType   string // default in ES is "query_then_fetch"
+	indices      []string
+	types        []string
+	routing      *string
+	preference   *string
+	requestCache *bool
+	scroll       string
+	source       interface{}
+}
+
+// NewSearchRequest creates a new search request.
+func NewSearchRequest() *SearchRequest {
+	return &SearchRequest{
+		indices: make([]string, 0),
+		types:   make([]string, 0),
+	}
+}
+
+// SearchType must be one of "query_then_fetch", "query_and_fetch",
+// "scan", "count", "dfs_query_then_fetch", or "dfs_query_and_fetch".
+// Use one of the constants defined via SearchType.
+func (r *SearchRequest) SearchType(searchType string) *SearchRequest {
+	r.searchType = searchType
+	return r
+}
+
+func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest {
+	return r.SearchType("dfs_query_then_fetch")
+}
+
+func (r *SearchRequest) SearchTypeDfsQueryAndFetch() *SearchRequest {
+	return r.SearchType("dfs_query_and_fetch")
+}
+
+func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest {
+	return r.SearchType("query_then_fetch")
+}
+
+func (r *SearchRequest) SearchTypeQueryAndFetch() *SearchRequest {
+	return r.SearchType("query_and_fetch")
+}
+
+func (r *SearchRequest) SearchTypeScan() *SearchRequest {
+	return r.SearchType("scan")
+}
+
+func (r *SearchRequest) SearchTypeCount() *SearchRequest {
+	return r.SearchType("count")
+}
+
+func (r *SearchRequest) Index(indices ...string) *SearchRequest {
+	r.indices = append(r.indices, indices...)
+	return r
+}
+
+func (r *SearchRequest) HasIndices() bool {
+	return len(r.indices) > 0
+}
+
+func (r *SearchRequest) Type(types ...string) *SearchRequest {
+	r.types = append(r.types, types...)
+	return r
+}
+
+func (r *SearchRequest) Routing(routing string) *SearchRequest {
+	r.routing = &routing
+	return r
+}
+
+func (r *SearchRequest) Routings(routings ...string) *SearchRequest {
+	if routings != nil {
+		routings := strings.Join(routings, ",")
+		r.routing = &routings
+	} else {
+		r.routing = nil
+	}
+	return r
+}
+
+func (r *SearchRequest) Preference(preference string) *SearchRequest {
+	r.preference = &preference
+	return r
+}
+
+func (r *SearchRequest) RequestCache(requestCache bool) *SearchRequest {
+	r.requestCache = &requestCache
+	return r
+}
+
+func (r *SearchRequest) Scroll(scroll string) *SearchRequest {
+	r.scroll = scroll
+	return r
+}
+
+func (r *SearchRequest) SearchSource(searchSource *SearchSource) *SearchRequest {
+	return r.Source(searchSource)
+}
+
+func (r *SearchRequest) Source(source interface{}) *SearchRequest {
+	switch v := source.(type) {
+	case *SearchSource:
+		src, err := v.Source()
+		if err != nil {
+			// Do not do anything in case of an error
+			return r
+		}
+		r.source = src
+	default:
+		r.source = source
+	}
+	return r
+}
+
+// header is used e.g. by MultiSearch to get information about the search header
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) header() interface{} {
+	h := make(map[string]interface{})
+	if r.searchType != "" {
+		h["search_type"] = r.searchType
+	}
+
+	switch len(r.indices) {
+	case 0:
+	case 1:
+		h["index"] = r.indices[0]
+	default:
+		h["indices"] = r.indices
+	}
+
+	switch len(r.types) {
+	case 0:
+	case 1:
+		h["type"] = r.types[0]
+	default:
+		h["types"] = r.types
+	}
+
+	if r.routing != nil && *r.routing != "" {
+		h["routing"] = *r.routing
+	}
+
+	if r.preference != nil && *r.preference != "" {
+		h["preference"] = *r.preference
+	}
+
+	if r.requestCache != nil {
+		h["request_cache"] = fmt.Sprintf("%v", *r.requestCache)
+	}
+
+	if r.scroll != "" {
+		h["scroll"] = r.scroll
+	}
+
+	return h
+}
+
+// body is used by MultiSearch to get information about the search body
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) body() interface{} {
+	return r.source
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_source.go b/vendor/gopkg.in/olivere/elastic.v3/search_source.go
new file mode 100644
index 000000000..d3d041f10
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_source.go
@@ -0,0 +1,499 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+)
+
+// SearchSource enables users to build the search source.
+// It resembles the SearchSourceBuilder in Elasticsearch.
+type SearchSource struct {
+	query                    Query
+	postQuery                Query
+	from                     int
+	size                     int
+	explain                  *bool
+	version                  *bool
+	sorters                  []Sorter
+	trackScores              bool
+	minScore                 *float64
+	timeout                  string
+	terminateAfter           *int
+	fieldNames               []string
+	fieldDataFields          []string
+	scriptFields             []*ScriptField
+	fetchSourceContext       *FetchSourceContext
+	aggregations             map[string]Aggregation
+	highlight                *Highlight
+	globalSuggestText        string
+	suggesters               []Suggester
+	rescores                 []*Rescore
+	defaultRescoreWindowSize *int
+	indexBoosts              map[string]float64
+	stats                    []string
+	innerHits                map[string]*InnerHit
+}
+
+// NewSearchSource initializes a new SearchSource.
+func NewSearchSource() *SearchSource {
+	return &SearchSource{
+		from:            -1,
+		size:            -1,
+		trackScores:     false,
+		sorters:         make([]Sorter, 0),
+		fieldDataFields: make([]string, 0),
+		scriptFields:    make([]*ScriptField, 0),
+		aggregations:    make(map[string]Aggregation),
+		rescores:        make([]*Rescore, 0),
+		indexBoosts:     make(map[string]float64),
+		stats:           make([]string, 0),
+		innerHits:       make(map[string]*InnerHit),
+	}
+}
+
+// Query sets the query to use with this search source.
+func (s *SearchSource) Query(query Query) *SearchSource {
+	s.query = query
+	return s
+}
+
+// PostFilter will be executed after the query has been executed and
+// only affects the search hits, not the aggregations.
+// This filter is always executed as the last filtering mechanism.
+func (s *SearchSource) PostFilter(postFilter Query) *SearchSource {
+	s.postQuery = postFilter
+	return s
+}
+
+// From index to start the search from. Defaults to 0.
+func (s *SearchSource) From(from int) *SearchSource {
+	s.from = from
+	return s
+}
+
+// Size is the number of search hits to return. Defaults to 10.
+func (s *SearchSource) Size(size int) *SearchSource {
+	s.size = size
+	return s
+}
+
+// MinScore sets the minimum score below which docs will be filtered out.
+func (s *SearchSource) MinScore(minScore float64) *SearchSource {
+	s.minScore = &minScore
+	return s
+}
+
+// Explain indicates whether each search hit should be returned with
+// an explanation of the hit (ranking).
+func (s *SearchSource) Explain(explain bool) *SearchSource {
+	s.explain = &explain
+	return s
+}
+
+// Version indicates whether each search hit should be returned with
+// a version associated to it.
+func (s *SearchSource) Version(version bool) *SearchSource {
+	s.version = &version
+	return s
+}
+
+// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms".
+func (s *SearchSource) Timeout(timeout string) *SearchSource {
+	s.timeout = timeout
+	return s
+}
+
+// TimeoutInMillis controls how many milliseconds a search is allowed
+// to take before it is canceled.
+func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource {
+	s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
+	return s
+}
+
+// TerminateAfter allows the request to stop after the given number
+// of search hits are collected.
+func (s *SearchSource) TerminateAfter(terminateAfter int) *SearchSource {
+	s.terminateAfter = &terminateAfter
+	return s
+}
+
+// Sort adds a sort order.
+func (s *SearchSource) Sort(field string, ascending bool) *SearchSource {
+	s.sorters = append(s.sorters, SortInfo{Field: field, Ascending: ascending})
+	return s
+}
+
+// SortWithInfo adds a sort order.
+func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource {
+	s.sorters = append(s.sorters, info)
+	return s
+}
+
+// SortBy adds a sort order.
+func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource {
+	s.sorters = append(s.sorters, sorter...)
+	return s
+}
+
+func (s *SearchSource) hasSort() bool {
+	return len(s.sorters) > 0
+}
+
+// TrackScores is applied when sorting and controls if scores will be
+// tracked as well. Defaults to false.
+func (s *SearchSource) TrackScores(trackScores bool) *SearchSource {
+	s.trackScores = trackScores
+	return s
+}
+
+// Aggregation adds an aggregation to perform as part of the search.
+func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource {
+	s.aggregations[name] = aggregation
+	return s
+}
+
+// DefaultRescoreWindowSize sets the rescore window size for rescores
+// that don't specify their window.
+func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource {
+	s.defaultRescoreWindowSize = &defaultRescoreWindowSize
+	return s
+}
+
+// Highlight adds highlighting to the search.
+func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource {
+	s.highlight = highlight
+	return s
+}
+
+// Highlighter returns the highlighter.
+func (s *SearchSource) Highlighter() *Highlight {
+	if s.highlight == nil {
+		s.highlight = NewHighlight()
+	}
+	return s.highlight
+}
+
+// GlobalSuggestText defines the global text to use with all suggesters.
+// This avoids repetition.
+func (s *SearchSource) GlobalSuggestText(text string) *SearchSource {
+	s.globalSuggestText = text
+	return s
+}
+
+// Suggester adds a suggester to the search.
+func (s *SearchSource) Suggester(suggester Suggester) *SearchSource {
+	s.suggesters = append(s.suggesters, suggester)
+	return s
+}
+
+// Rescorer adds a rescorer to the search.
+func (s *SearchSource) Rescorer(rescore *Rescore) *SearchSource {
+	s.rescores = append(s.rescores, rescore)
+	return s
+}
+
+// ClearRescorers removes all rescorers from the search.
+func (s *SearchSource) ClearRescorers() *SearchSource {
+	s.rescores = make([]*Rescore, 0)
+	return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource {
+	if s.fetchSourceContext == nil {
+		s.fetchSourceContext = NewFetchSourceContext(fetchSource)
+	} else {
+		s.fetchSourceContext.SetFetchSource(fetchSource)
+	}
+	return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource {
+	s.fetchSourceContext = fetchSourceContext
+	return s
+}
+
+// NoFields indicates that no fields should be loaded, resulting in only
+// id and type to be returned per field.
+func (s *SearchSource) NoFields() *SearchSource {
+	s.fieldNames = make([]string, 0)
+	return s
+}
+
+// Field adds a single field to load and return (note, must be stored) as
+// part of the search request. If none are specified, the source of the
+// document will be returned.
+func (s *SearchSource) Field(fieldName string) *SearchSource {
+	if s.fieldNames == nil {
+		s.fieldNames = make([]string, 0)
+	}
+	s.fieldNames = append(s.fieldNames, fieldName)
+	return s
+}
+
+// Fields sets the fields to load and return as part of the search request.
+// If none are specified, the source of the document will be returned.
+func (s *SearchSource) Fields(fieldNames ...string) *SearchSource {
+	if s.fieldNames == nil {
+		s.fieldNames = make([]string, 0)
+	}
+	s.fieldNames = append(s.fieldNames, fieldNames...)
+	return s
+}
+
+// FieldDataField adds a single field to load from the field data cache
+// and return as part of the search request.
+func (s *SearchSource) FieldDataField(fieldDataField string) *SearchSource {
+	s.fieldDataFields = append(s.fieldDataFields, fieldDataField)
+	return s
+}
+
+// FieldDataFields adds one or more fields to load from the field data cache
+// and return as part of the search request.
+func (s *SearchSource) FieldDataFields(fieldDataFields ...string) *SearchSource {
+	s.fieldDataFields = append(s.fieldDataFields, fieldDataFields...)
+	return s
+}
+
+// ScriptField adds a single script field with the provided script.
+func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource {
+	s.scriptFields = append(s.scriptFields, scriptField)
+	return s
+}
+
+// ScriptFields adds one or more script fields with the provided scripts.
+func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource {
+	s.scriptFields = append(s.scriptFields, scriptFields...)
+	return s
+}
+
+// IndexBoost sets the boost that a specific index will receive when the
+// query is executed against it.
+func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource {
+	s.indexBoosts[index] = boost
+	return s
+}
+
+// Stats group this request will be aggregated under.
+func (s *SearchSource) Stats(statsGroup ...string) *SearchSource {
+	s.stats = append(s.stats, statsGroup...)
+	return s
+}
+
+// InnerHit adds an inner hit to return with the result.
+func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource {
+	s.innerHits[name] = innerHit
+	return s
+}
+
+// Source returns the serializable JSON for the source builder.
+func (s *SearchSource) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+
+	if s.from != -1 {
+		source["from"] = s.from
+	}
+	if s.size != -1 {
+		source["size"] = s.size
+	}
+	if s.timeout != "" {
+		source["timeout"] = s.timeout
+	}
+	if s.terminateAfter != nil {
+		source["terminate_after"] = *s.terminateAfter
+	}
+	if s.query != nil {
+		src, err := s.query.Source()
+		if err != nil {
+			return nil, err
+		}
+		source["query"] = src
+	}
+	if s.postQuery != nil {
+		src, err := s.postQuery.Source()
+		if err != nil {
+			return nil, err
+		}
+		source["post_filter"] = src
+	}
+	if s.minScore != nil {
+		source["min_score"] = *s.minScore
+	}
+	if s.version != nil {
+		source["version"] = *s.version
+	}
+	if s.explain != nil {
+		source["explain"] = *s.explain
+	}
+	if s.fetchSourceContext != nil {
+		src, err := s.fetchSourceContext.Source()
+		if err != nil {
+			return nil, err
+		}
+		source["_source"] = src
+	}
+
+	if s.fieldNames != nil {
+		switch len(s.fieldNames) {
+		case 1:
+			source["fields"] = s.fieldNames[0]
+		default:
+			source["fields"] = s.fieldNames
+		}
+	}
+
+	if len(s.fieldDataFields) > 0 {
+		source["fielddata_fields"] = s.fieldDataFields
+	}
+
+	if len(s.scriptFields) > 0 {
+		sfmap := make(map[string]interface{})
+		for _, scriptField := range s.scriptFields {
+			src, err := scriptField.Source()
+			if err != nil {
+				return nil, err
+			}
+			sfmap[scriptField.FieldName] = src
+		}
+		source["script_fields"] = sfmap
+	}
+
+	if len(s.sorters) > 0 {
+		sortarr := make([]interface{}, 0)
+		for _, sorter := range s.sorters {
+			src, err := sorter.Source()
+			if err != nil {
+				return nil, err
+			}
+			sortarr = append(sortarr, src)
+		}
+		source["sort"] = sortarr
+	}
+
+	if s.trackScores {
+		source["track_scores"] = s.trackScores
+	}
+
+	if len(s.indexBoosts) > 0 {
+		source["indices_boost"] = s.indexBoosts
+	}
+
+	if len(s.aggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		for name, aggregate := range s.aggregations {
+			src, err := aggregate.Source()
+			if err != nil {
+				return nil, err
+			}
+			aggsMap[name] = src
+		}
+		source["aggregations"] = aggsMap
+	}
+
+	if s.highlight != nil {
+		src, err := s.highlight.Source()
+		if err != nil {
+			return nil, err
+		}
+		source["highlight"] = src
+	}
+
+	if len(s.suggesters) > 0 {
+		suggesters := make(map[string]interface{})
+		for _, s := range s.suggesters { // NOTE: s shadows the receiver inside this loop
+			src, err := s.Source(false)
+			if err != nil {
+				return nil, err
+			}
+			suggesters[s.Name()] = src
+		}
+		if s.globalSuggestText != "" {
+			suggesters["text"] = s.globalSuggestText
+		}
+		source["suggest"] = suggesters
+	}
+
+	if len(s.rescores) > 0 {
+		// Strip empty rescores from request
+		rescores := make([]*Rescore, 0)
+		for _, r := range s.rescores {
+			if !r.IsEmpty() {
+				rescores = append(rescores, r)
+			}
+		}
+
+		if len(rescores) == 1 {
+			rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize
+			src, err := rescores[0].Source()
+			if err != nil {
+				return nil, err
+			}
+			source["rescore"] = src
+		} else {
+			slice := make([]interface{}, 0)
+			for _, r := range rescores {
+				r.defaultRescoreWindowSize = s.defaultRescoreWindowSize
+				src, err := r.Source()
+				if err != nil {
+					return nil, err
+				}
+				slice = append(slice, src)
+			}
+			source["rescore"] = slice
+		}
+	}
+
+	if len(s.stats) > 0 {
+		source["stats"] = s.stats
+	}
+
+	if len(s.innerHits) > 0 {
+		// Top-level inner hits
+		// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits
+		// "inner_hits": {
+		//   "<inner_hits_name>": {
+		//     "<nested|parent|children>": {
+		//       "<path|type>": {
+		//         <inner_hits_body>
+		//         [,"inner_hits" : { [<sub_inner_hits>]+ } ]?
+		//       }
+		//     }
+		//   },
+		//   [,"<inner_hits_name_2>" : { ... } ]*
+		// }
+		m := make(map[string]interface{})
+		for name, hit := range s.innerHits {
+			if hit.path != "" {
+				src, err := hit.Source()
+				if err != nil {
+					return nil, err
+				}
+				path := make(map[string]interface{})
+				path[hit.path] = src
+				m[name] = map[string]interface{}{
+					"path": path,
+				}
+			} else if hit.typ != "" {
+				src, err := hit.Source()
+				if err != nil {
+					return nil, err
+				}
+				typ := make(map[string]interface{})
+				typ[hit.typ] = src
+				m[name] = map[string]interface{}{
+					"type": typ,
+				}
+			} else {
+				// TODO the Java client throws here, because either path or typ must be specified
+			}
+		}
+		source["inner_hits"] = m
+	}
+
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_template.go b/vendor/gopkg.in/olivere/elastic.v3/search_template.go
new file mode 100644
index 000000000..ee8809005
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_template.go
@@ -0,0 +1,151 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// PutTemplateService creates or updates a search template.
+// The documentation can be found at
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type PutTemplateService struct {
+	client      *Client
+	pretty      bool
+	id          string
+	opType      string
+	version     *int
+	versionType string
+	bodyJson    interface{}
+	bodyString  string
+}
+
+// NewPutTemplateService creates a new PutTemplateService.
+func NewPutTemplateService(client *Client) *PutTemplateService {
+	return &PutTemplateService{
+		client: client,
+	}
+}
+
+// Id is the template ID.
+func (s *PutTemplateService) Id(id string) *PutTemplateService {
+	s.id = id
+	return s
+}
+
+// OpType is an explicit operation type.
+func (s *PutTemplateService) OpType(opType string) *PutTemplateService {
+	s.opType = opType
+	return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *PutTemplateService) Version(version int) *PutTemplateService {
+	s.version = &version
+	return s
+}
+
+// VersionType is a specific version type.
+func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService {
+	s.versionType = versionType
+	return s
+}
+
+// BodyJson is the document as a JSON serializable object.
+func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService {
+	s.bodyJson = body
+	return s
+}
+
+// BodyString is the document as a string.
+func (s *PutTemplateService) BodyString(body string) *PutTemplateService {
+	s.bodyString = body
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *PutTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+		"id": s.id,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *s.version))
+	}
+	if s.versionType != "" {
+		params.Set("version_type", s.versionType)
+	}
+	if s.opType != "" {
+		params.Set("op_type", s.opType)
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *PutTemplateService) Validate() error {
+	var invalid []string
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if s.bodyString == "" && s.bodyJson == nil {
+		invalid = append(invalid, "BodyJson")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *PutTemplateService) Do() (*PutTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	var body interface{}
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	} else {
+		body = s.bodyString
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(PutTemplateResponse)
+	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// PutTemplateResponse is the response of PutTemplateService.Do.
+type PutTemplateResponse struct {
+	Id      string `json:"_id"`
+	Version int    `json:"_version"`
+	Created bool   `json:"created"`
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/sort.go b/vendor/gopkg.in/olivere/elastic.v3/sort.go
new file mode 100644
index 000000000..191426141
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/sort.go
@@ -0,0 +1,484 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// -- Sorter --
+
+// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html.
+type Sorter interface {
+	Source() (interface{}, error)
+}
+
+// -- SortInfo --
+
+// SortInfo contains information about sorting a field.
+type SortInfo struct {
+	Sorter
+	Field          string
+	Ascending      bool
+	Missing        interface{}
+	IgnoreUnmapped *bool
+	SortMode       string
+	NestedFilter   Query
+	NestedPath     string
+}
+
+func (info SortInfo) Source() (interface{}, error) {
+	prop := make(map[string]interface{})
+	if info.Ascending {
+		prop["order"] = "asc"
+	} else {
+		prop["order"] = "desc"
+	}
+	if info.Missing != nil {
+		prop["missing"] = info.Missing
+	}
+	if info.IgnoreUnmapped != nil {
+		prop["ignore_unmapped"] = *info.IgnoreUnmapped
+	}
+	if info.SortMode != "" {
+		prop["mode"] = info.SortMode
+	}
+	if info.NestedFilter != nil {
+		src, err := info.NestedFilter.Source()
+		if err != nil {
+			return nil, err
+		}
+		prop["nested_filter"] = src
+	}
+	if info.NestedPath != "" {
+		prop["nested_path"] = info.NestedPath
+	}
+	source := make(map[string]interface{})
+	source[info.Field] = prop
+	return source, nil
+}
+
+// -- ScoreSort --
+
+// ScoreSort sorts by relevancy score.
+type ScoreSort struct {
+	Sorter
+	ascending bool
+}
+
+// NewScoreSort creates a new ScoreSort.
+func NewScoreSort() ScoreSort {
+	return ScoreSort{ascending: false} // Descending by default!
+}
+
+// Order defines whether sorting ascending or descending (the default for ScoreSort).
+func (s ScoreSort) Order(ascending bool) ScoreSort {
+	s.ascending = ascending
+	return s
+}
+
+// Asc sets ascending sort order.
+func (s ScoreSort) Asc() ScoreSort {
+	s.ascending = true
+	return s
+}
+
+// Desc sets descending sort order.
+func (s ScoreSort) Desc() ScoreSort {
+	s.ascending = false
+	return s
+}
+
+// Source returns the JSON-serializable data.
+func (s ScoreSort) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	x := make(map[string]interface{})
+	source["_score"] = x
+	if s.ascending {
+		x["reverse"] = true
+	}
+	return source, nil
+}
+
+// -- FieldSort --
+
+// FieldSort sorts by a given field.
+type FieldSort struct {
+	Sorter
+	fieldName      string
+	ascending      bool
+	missing        interface{}
+	ignoreUnmapped *bool
+	unmappedType   *string
+	sortMode       *string
+	nestedFilter   Query
+	nestedPath     *string
+}
+
+// NewFieldSort creates a new FieldSort.
+func NewFieldSort(fieldName string) FieldSort {
+	return FieldSort{
+		fieldName: fieldName,
+		ascending: true,
+	}
+}
+
+// FieldName specifies the name of the field to be used for sorting.
+func (s FieldSort) FieldName(fieldName string) FieldSort {
+	s.fieldName = fieldName
+	return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s FieldSort) Order(ascending bool) FieldSort {
+	s.ascending = ascending
+	return s
+}
+
+// Asc sets ascending sort order.
+func (s FieldSort) Asc() FieldSort {
+	s.ascending = true
+	return s
+}
+
+// Desc sets descending sort order.
+func (s FieldSort) Desc() FieldSort {
+	s.ascending = false
+	return s
+}
+
+// Missing sets the value to be used when a field is missing in a document.
+// You can also use "_last" or "_first" to sort missing last or first
+// respectively.
+func (s FieldSort) Missing(missing interface{}) FieldSort {
+	s.missing = missing
+	return s
+}
+
+// IgnoreUnmapped specifies what happens if the field does not exist in
+// the index. Set it to true to ignore, or set it to false to not ignore (default).
+func (s FieldSort) IgnoreUnmapped(ignoreUnmapped bool) FieldSort {
+	s.ignoreUnmapped = &ignoreUnmapped
+	return s
+}
+
+// UnmappedType sets the type to use when the current field is not mapped
+// in an index.
+func (s FieldSort) UnmappedType(typ string) FieldSort {
+	s.unmappedType = &typ
+	return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min, max, sum, and avg.
+func (s FieldSort) SortMode(sortMode string) FieldSort {
+	s.sortMode = &sortMode
+	return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s FieldSort) NestedFilter(nestedFilter Query) FieldSort {
+	s.nestedFilter = nestedFilter
+	return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s FieldSort) NestedPath(nestedPath string) FieldSort {
+	s.nestedPath = &nestedPath
+	return s
+}
+
+// Source returns the JSON-serializable data.
+func (s FieldSort) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	x := make(map[string]interface{})
+	source[s.fieldName] = x
+	if s.ascending {
+		x["order"] = "asc"
+	} else {
+		x["order"] = "desc"
+	}
+	if s.missing != nil {
+		x["missing"] = s.missing
+	}
+	if s.ignoreUnmapped != nil {
+		x["ignore_unmapped"] = *s.ignoreUnmapped
+	}
+	if s.unmappedType != nil {
+		x["unmapped_type"] = *s.unmappedType
+	}
+	if s.sortMode != nil {
+		x["mode"] = *s.sortMode
+	}
+	if s.nestedFilter != nil {
+		src, err := s.nestedFilter.Source()
+		if err != nil {
+			return nil, err
+		}
+		x["nested_filter"] = src
+	}
+	if s.nestedPath != nil {
+		x["nested_path"] = *s.nestedPath
+	}
+	return source, nil
+}
+
+// -- GeoDistanceSort --
+
+// GeoDistanceSort allows for sorting by geographic distance.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting.
+type GeoDistanceSort struct {
+	Sorter
+	fieldName    string
+	points       []*GeoPoint
+	geohashes    []string
+	geoDistance  *string
+	unit         string
+	ascending    bool
+	sortMode     *string
+	nestedFilter Query
+	nestedPath   *string
+}
+
+// NewGeoDistanceSort creates a new sorter for geo distances.
+func NewGeoDistanceSort(fieldName string) GeoDistanceSort {
+	return GeoDistanceSort{
+		fieldName: fieldName,
+		points:    make([]*GeoPoint, 0),
+		geohashes: make([]string, 0),
+		ascending: true,
+	}
+}
+
+// FieldName specifies the name of the (geo) field to use for sorting.
+func (s GeoDistanceSort) FieldName(fieldName string) GeoDistanceSort {
+	s.fieldName = fieldName
+	return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s GeoDistanceSort) Order(ascending bool) GeoDistanceSort {
+	s.ascending = ascending
+	return s
+}
+
+// Asc sets ascending sort order.
+func (s GeoDistanceSort) Asc() GeoDistanceSort {
+	s.ascending = true
+	return s
+}
+
+// Desc sets descending sort order.
+func (s GeoDistanceSort) Desc() GeoDistanceSort {
+	s.ascending = false
+	return s
+}
+
+// Point specifies a point to create the range distance aggregations from.
+func (s GeoDistanceSort) Point(lat, lon float64) GeoDistanceSort {
+	s.points = append(s.points, GeoPointFromLatLon(lat, lon))
+	return s
+}
+
+// Points specifies the geo point(s) to create the range distance aggregations from.
+func (s GeoDistanceSort) Points(points ...*GeoPoint) GeoDistanceSort {
+	s.points = append(s.points, points...)
+	return s
+}
+
+// GeoHashes specifies the geo point to create the range distance aggregations from.
+func (s GeoDistanceSort) GeoHashes(geohashes ...string) GeoDistanceSort {
+	s.geohashes = append(s.geohashes, geohashes...)
+	return s
+}
+
+// GeoDistance represents how to compute the distance.
+// It can be sloppy_arc (default), arc, or plane.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting.
+func (s GeoDistanceSort) GeoDistance(geoDistance string) GeoDistanceSort {
+	s.geoDistance = &geoDistance
+	return s
+}
+
+// Unit specifies the distance unit to use. It defaults to km.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#distance-units
+// for details.
+func (s GeoDistanceSort) Unit(unit string) GeoDistanceSort {
+	s.unit = unit
+	return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min, max, sum, and avg.
+func (s GeoDistanceSort) SortMode(sortMode string) GeoDistanceSort {
+	s.sortMode = &sortMode
+	return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s GeoDistanceSort) NestedFilter(nestedFilter Query) GeoDistanceSort {
+	s.nestedFilter = nestedFilter
+	return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s GeoDistanceSort) NestedPath(nestedPath string) GeoDistanceSort {
+	s.nestedPath = &nestedPath
+	return s
+}
+
+// Source returns the JSON-serializable data.
+func (s GeoDistanceSort) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	x := make(map[string]interface{})
+	source["_geo_distance"] = x
+
+	// Points
+	ptarr := make([]interface{}, 0)
+	for _, pt := range s.points {
+		ptarr = append(ptarr, pt.Source())
+	}
+	for _, geohash := range s.geohashes {
+		ptarr = append(ptarr, geohash)
+	}
+	x[s.fieldName] = ptarr
+
+	if s.unit != "" {
+		x["unit"] = s.unit
+	}
+	if s.geoDistance != nil {
+		x["distance_type"] = *s.geoDistance
+	}
+
+	if !s.ascending {
+		x["reverse"] = true
+	}
+	if s.sortMode != nil {
+		x["mode"] = *s.sortMode
+	}
+	if s.nestedFilter != nil {
+		src, err := s.nestedFilter.Source()
+		if err != nil {
+			return nil, err
+		}
+		x["nested_filter"] = src
+	}
+	if s.nestedPath != nil {
+		x["nested_path"] = *s.nestedPath
+	}
+	return source, nil
+}
+
+// -- ScriptSort --
+
+// ScriptSort sorts by a custom script.
See +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#modules-scripting +// for details about scripting. +type ScriptSort struct { + Sorter + script *Script + typ string + ascending bool + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewScriptSort creates and initializes a new ScriptSort. +// You must provide a script and a type, e.g. "string" or "number". +func NewScriptSort(script *Script, typ string) ScriptSort { + return ScriptSort{ + script: script, + typ: typ, + ascending: true, + } +} + +// Type sets the script type, which can be either "string" or "number". +func (s ScriptSort) Type(typ string) ScriptSort { + s.typ = typ + return s +} + +// Order defines whether sorting ascending (default) or descending. +func (s ScriptSort) Order(ascending bool) ScriptSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s ScriptSort) Asc() ScriptSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s ScriptSort) Desc() ScriptSort { + s.ascending = false + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min or max. +func (s ScriptSort) SortMode(sortMode string) ScriptSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s ScriptSort) NestedFilter(nestedFilter Query) ScriptSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s ScriptSort) NestedPath(nestedPath string) ScriptSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. 
+func (s ScriptSort) Source() (interface{}, error) { + if s.script == nil { + return nil, errors.New("ScriptSort expected a script") + } + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_script"] = x + + src, err := s.script.Source() + if err != nil { + return nil, err + } + x["script"] = src + + x["type"] = s.typ + + if !s.ascending { + x["reverse"] = true + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggest.go b/vendor/gopkg.in/olivere/elastic.v3/suggest.go new file mode 100644 index 000000000..f51437f6a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/suggest.go @@ -0,0 +1,144 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// SuggestService returns suggestions for text. +type SuggestService struct { + client *Client + pretty bool + routing string + preference string + indices []string + suggesters []Suggester +} + +func NewSuggestService(client *Client) *SuggestService { + builder := &SuggestService{ + client: client, + indices: make([]string, 0), + suggesters: make([]Suggester, 0), + } + return builder +} + +func (s *SuggestService) Index(indices ...string) *SuggestService { + s.indices = append(s.indices, indices...) 
+ return s +} + +func (s *SuggestService) Pretty(pretty bool) *SuggestService { + s.pretty = pretty + return s +} + +func (s *SuggestService) Routing(routing string) *SuggestService { + s.routing = routing + return s +} + +func (s *SuggestService) Preference(preference string) *SuggestService { + s.preference = preference + return s +} + +func (s *SuggestService) Suggester(suggester Suggester) *SuggestService { + s.suggesters = append(s.suggesters, suggester) + return s +} + +func (s *SuggestService) Do() (SuggestResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + path += strings.Join(indexPart, ",") + + // Suggest + path += "/_suggest" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + + // Set body + body := make(map[string]interface{}) + for _, s := range s.suggesters { + src, err := s.Source(false) + if err != nil { + return nil, err + } + body[s.Name()] = src + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // There is a _shard object that cannot be deserialized. + // So we use json.RawMessage instead. 
+ var suggestions map[string]*json.RawMessage + if err := s.client.decoder.Decode(res.Body, &suggestions); err != nil { + return nil, err + } + + ret := make(SuggestResult) + for name, result := range suggestions { + if name != "_shards" { + var sug []Suggestion + if err := s.client.decoder.Decode(*result, &sug); err != nil { + return nil, err + } + ret[name] = sug + } + } + + return ret, nil +} + +type SuggestResult map[string][]Suggestion + +type Suggestion struct { + Text string `json:"text"` + Offset int `json:"offset"` + Length int `json:"length"` + Options []suggestionOption `json:"options"` +} + +type suggestionOption struct { + Text string `json:"text"` + Score float64 `json:"score"` + Freq int `json:"freq"` + Payload interface{} `json:"payload"` + CollateMatch bool `json:"collate_match"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggest_field.go b/vendor/gopkg.in/olivere/elastic.v3/suggest_field.go new file mode 100644 index 000000000..4738d9910 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/suggest_field.go @@ -0,0 +1,100 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" +) + +// SuggestField can be used by the caller to specify a suggest field +// at index time. For a detailed example, see e.g. +// http://www.elasticsearch.org/blog/you-complete-me/. +type SuggestField struct { + inputs []string + output *string + payload interface{} + weight int + contextQueries []SuggesterContextQuery +} + +func NewSuggestField() *SuggestField { + return &SuggestField{weight: -1} +} + +func (f *SuggestField) Input(input ...string) *SuggestField { + if f.inputs == nil { + f.inputs = make([]string, 0) + } + f.inputs = append(f.inputs, input...) 
+ return f +} + +func (f *SuggestField) Output(output string) *SuggestField { + f.output = &output + return f +} + +func (f *SuggestField) Payload(payload interface{}) *SuggestField { + f.payload = payload + return f +} + +func (f *SuggestField) Weight(weight int) *SuggestField { + f.weight = weight + return f +} + +func (f *SuggestField) ContextQuery(queries ...SuggesterContextQuery) *SuggestField { + f.contextQueries = append(f.contextQueries, queries...) + return f +} + +// MarshalJSON encodes SuggestField into JSON. +func (f *SuggestField) MarshalJSON() ([]byte, error) { + source := make(map[string]interface{}) + + if f.inputs != nil { + switch len(f.inputs) { + case 1: + source["input"] = f.inputs[0] + default: + source["input"] = f.inputs + } + } + + if f.output != nil { + source["output"] = *f.output + } + + if f.payload != nil { + source["payload"] = f.payload + } + + if f.weight >= 0 { + source["weight"] = f.weight + } + + switch len(f.contextQueries) { + case 0: + case 1: + src, err := f.contextQueries[0].Source() + if err != nil { + return nil, err + } + source["context"] = src + default: + var ctxq []interface{} + for _, query := range f.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + source["context"] = ctxq + } + + return json.Marshal(source) +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester.go b/vendor/gopkg.in/olivere/elastic.v3/suggester.go new file mode 100644 index 000000000..c342b10d3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester.go @@ -0,0 +1,15 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Represents the generic suggester interface. +// A suggester's only purpose is to return the +// source of the query as a JSON-serializable +// object. 
Returning a map[string]interface{} +// will do. +type Suggester interface { + Name() string + Source(includeName bool) (interface{}, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_completion.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion.go new file mode 100644 index 000000000..e0f5a3861 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion.go @@ -0,0 +1,129 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CompletionSuggester is a fast suggester for e.g. type-ahead completion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html +// for more details. +type CompletionSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery +} + +// Creates a new completion suggester. 
+func NewCompletionSuggester(name string) *CompletionSuggester { + return &CompletionSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *CompletionSuggester) Name() string { + return q.name +} + +func (q *CompletionSuggester) Text(text string) *CompletionSuggester { + q.text = text + return q +} + +func (q *CompletionSuggester) Field(field string) *CompletionSuggester { + q.field = field + return q +} + +func (q *CompletionSuggester) Analyzer(analyzer string) *CompletionSuggester { + q.analyzer = analyzer + return q +} + +func (q *CompletionSuggester) Size(size int) *CompletionSuggester { + q.size = &size + return q +} + +func (q *CompletionSuggester) ShardSize(shardSize int) *CompletionSuggester { + q.shardSize = &shardSize + return q +} + +func (q *CompletionSuggester) ContextQuery(query SuggesterContextQuery) *CompletionSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *CompletionSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +// completionSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. +// We got into trouble when using plain maps because the text element +// needs to go before the completion element. +type completionSuggesterRequest struct { + Text string `json:"text"` + Completion interface{} `json:"completion"` +} + +// Creates the source for the completion suggester. 
+func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) { + cs := &completionSuggesterRequest{} + + if q.text != "" { + cs.Text = q.text + } + + suggester := make(map[string]interface{}) + cs.Completion = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make([]interface{}, 0) + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // TODO(oe) Add completion-suggester specific parameters here + + if !includeName { + return cs, nil + } + + source := make(map[string]interface{}) + source[q.name] = cs + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go new file mode 100644 index 000000000..1c4455a61 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go @@ -0,0 +1,179 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FuzzyCompletionSuggester is a CompletionSuggester that allows fuzzy +// completion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html +// for details, and +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html#fuzzy +// for details about the fuzzy completion suggester. 
+type FuzzyCompletionSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + fuzziness interface{} + fuzzyTranspositions *bool + fuzzyMinLength *int + fuzzyPrefixLength *int + unicodeAware *bool +} + +// Fuzziness defines the fuzziness which is used in FuzzyCompletionSuggester. +type Fuzziness struct { +} + +// Creates a new completion suggester. +func NewFuzzyCompletionSuggester(name string) *FuzzyCompletionSuggester { + return &FuzzyCompletionSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *FuzzyCompletionSuggester) Name() string { + return q.name +} + +func (q *FuzzyCompletionSuggester) Text(text string) *FuzzyCompletionSuggester { + q.text = text + return q +} + +func (q *FuzzyCompletionSuggester) Field(field string) *FuzzyCompletionSuggester { + q.field = field + return q +} + +func (q *FuzzyCompletionSuggester) Analyzer(analyzer string) *FuzzyCompletionSuggester { + q.analyzer = analyzer + return q +} + +func (q *FuzzyCompletionSuggester) Size(size int) *FuzzyCompletionSuggester { + q.size = &size + return q +} + +func (q *FuzzyCompletionSuggester) ShardSize(shardSize int) *FuzzyCompletionSuggester { + q.shardSize = &shardSize + return q +} + +func (q *FuzzyCompletionSuggester) ContextQuery(query SuggesterContextQuery) *FuzzyCompletionSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *FuzzyCompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *FuzzyCompletionSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +// Fuzziness defines the strategy used to describe what "fuzzy" actually +// means for the suggester, e.g. 1, 2, "0", "1..2", ">4", or "AUTO". +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#fuzziness +// for a detailed description. 
+func (q *FuzzyCompletionSuggester) Fuzziness(fuzziness interface{}) *FuzzyCompletionSuggester { + q.fuzziness = fuzziness + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyTranspositions(fuzzyTranspositions bool) *FuzzyCompletionSuggester { + q.fuzzyTranspositions = &fuzzyTranspositions + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyMinLength(minLength int) *FuzzyCompletionSuggester { + q.fuzzyMinLength = &minLength + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyPrefixLength(prefixLength int) *FuzzyCompletionSuggester { + q.fuzzyPrefixLength = &prefixLength + return q +} + +func (q *FuzzyCompletionSuggester) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggester { + q.unicodeAware = &unicodeAware + return q +} + +// Creates the source for the completion suggester. +func (q *FuzzyCompletionSuggester) Source(includeName bool) (interface{}, error) { + cs := &completionSuggesterRequest{} + + if q.text != "" { + cs.Text = q.text + } + + suggester := make(map[string]interface{}) + cs.Completion = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make([]interface{}, 0) + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // Fuzzy Completion Suggester fields + fuzzy := make(map[string]interface{}) + suggester["fuzzy"] = fuzzy + if q.fuzziness != nil { + fuzzy["fuzziness"] = q.fuzziness + } + if q.fuzzyTranspositions != nil { + fuzzy["transpositions"] = *q.fuzzyTranspositions + } + if q.fuzzyMinLength != nil { + 
fuzzy["min_length"] = *q.fuzzyMinLength + } + if q.fuzzyPrefixLength != nil { + fuzzy["prefix_length"] = *q.fuzzyPrefixLength + } + if q.unicodeAware != nil { + fuzzy["unicode_aware"] = *q.unicodeAware + } + + if !includeName { + return cs, nil + } + + source := make(map[string]interface{}) + source[q.name] = cs + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_context.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_context.go new file mode 100644 index 000000000..0903f2171 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_context.go @@ -0,0 +1,11 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SuggesterContextQuery is used to define context information within +// a suggestion request. +type SuggesterContextQuery interface { + Source() (interface{}, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category.go new file mode 100644 index 000000000..4b8e43f88 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// -- SuggesterCategoryMapping -- + +// SuggesterCategoryMapping provides a mapping for a category context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_mapping. +type SuggesterCategoryMapping struct { + name string + fieldName string + defaultValues []string +} + +// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping. 
+func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping { + return &SuggesterCategoryMapping{ + name: name, + defaultValues: make([]string, 0), + } +} + +func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping { + q.defaultValues = append(q.defaultValues, values...) + return q +} + +func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping { + q.fieldName = fieldName + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterCategoryMapping) Source() (interface{}, error) { + source := make(map[string]interface{}) + + x := make(map[string]interface{}) + source[q.name] = x + + x["type"] = "category" + + switch len(q.defaultValues) { + case 0: + x["default"] = q.defaultValues + case 1: + x["default"] = q.defaultValues[0] + default: + x["default"] = q.defaultValues + } + + if q.fieldName != "" { + x["path"] = q.fieldName + } + return source, nil +} + +// -- SuggesterCategoryQuery -- + +// SuggesterCategoryQuery provides querying a category context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_query. +type SuggesterCategoryQuery struct { + name string + values []string +} + +// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery. +func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery { + q := &SuggesterCategoryQuery{ + name: name, + values: make([]string, 0), + } + if len(values) > 0 { + q.values = append(q.values, values...) + } + return q +} + +func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery { + q.values = append(q.values, values...) + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. 
+func (q *SuggesterCategoryQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + switch len(q.values) { + case 0: + source[q.name] = q.values + case 1: + source[q.name] = q.values[0] + default: + source[q.name] = q.values + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo.go new file mode 100644 index 000000000..bde1a4067 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo.go @@ -0,0 +1,132 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// -- SuggesterGeoMapping -- + +// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_mapping. +type SuggesterGeoMapping struct { + name string + defaultLocations []*GeoPoint + precision []string + neighbors *bool + fieldName string +} + +// NewSuggesterGeoMapping creates a new SuggesterGeoMapping. +func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping { + return &SuggesterGeoMapping{ + name: name, + defaultLocations: make([]*GeoPoint, 0), + precision: make([]string, 0), + } +} + +func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping { + q.defaultLocations = append(q.defaultLocations, locations...) + return q +} + +func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping { + q.precision = append(q.precision, precision...) 
+ return q +} + +func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping { + q.neighbors = &neighbors + return q +} + +func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping { + q.fieldName = fieldName + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterGeoMapping) Source() (interface{}, error) { + source := make(map[string]interface{}) + + x := make(map[string]interface{}) + source[q.name] = x + + x["type"] = "geo" + + if len(q.precision) > 0 { + x["precision"] = q.precision + } + if q.neighbors != nil { + x["neighbors"] = *q.neighbors + } + + switch len(q.defaultLocations) { + case 0: + case 1: + x["default"] = q.defaultLocations[0].Source() + default: + arr := make([]interface{}, 0) + for _, p := range q.defaultLocations { + arr = append(arr, p.Source()) + } + x["default"] = arr + } + + if q.fieldName != "" { + x["path"] = q.fieldName + } + return source, nil +} + +// -- SuggesterGeoQuery -- + +// SuggesterGeoQuery provides querying a geolocation context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_query +type SuggesterGeoQuery struct { + name string + location *GeoPoint + precision []string +} + +// NewSuggesterGeoQuery creates a new SuggesterGeoQuery. +func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery { + return &SuggesterGeoQuery{ + name: name, + location: location, + precision: make([]string, 0), + } +} + +func (q *SuggesterGeoQuery) Precision(precision ...string) *SuggesterGeoQuery { + q.precision = append(q.precision, precision...) + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. 
+func (q *SuggesterGeoQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if len(q.precision) == 0 { + if q.location != nil { + source[q.name] = q.location.Source() + } + } else { + x := make(map[string]interface{}) + source[q.name] = x + + if q.location != nil { + x["value"] = q.location.Source() + } + + switch len(q.precision) { + case 0: + case 1: + x["precision"] = q.precision[0] + default: + x["precision"] = q.precision + } + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase.go new file mode 100644 index 000000000..ee46329f0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase.go @@ -0,0 +1,545 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// For more details, see +// http://www.elasticsearch.org/guide/reference/api/search/phrase-suggest/ +type PhraseSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + // fields specific to a phrase suggester + maxErrors *float64 + separator *string + realWordErrorLikelihood *float64 + confidence *float64 + generators map[string][]CandidateGenerator + gramSize *int + smoothingModel SmoothingModel + forceUnigrams *bool + tokenLimit *int + preTag, postTag *string + collateQuery *string + collatePreference *string + collateParams map[string]interface{} + collatePrune *bool +} + +// Creates a new phrase suggester. 
+func NewPhraseSuggester(name string) *PhraseSuggester { + return &PhraseSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + collateParams: make(map[string]interface{}), + } +} + +func (q *PhraseSuggester) Name() string { + return q.name +} + +func (q *PhraseSuggester) Text(text string) *PhraseSuggester { + q.text = text + return q +} + +func (q *PhraseSuggester) Field(field string) *PhraseSuggester { + q.field = field + return q +} + +func (q *PhraseSuggester) Analyzer(analyzer string) *PhraseSuggester { + q.analyzer = analyzer + return q +} + +func (q *PhraseSuggester) Size(size int) *PhraseSuggester { + q.size = &size + return q +} + +func (q *PhraseSuggester) ShardSize(shardSize int) *PhraseSuggester { + q.shardSize = &shardSize + return q +} + +func (q *PhraseSuggester) ContextQuery(query SuggesterContextQuery) *PhraseSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) *PhraseSuggester { + q.contextQueries = append(q.contextQueries, queries...) 
+ return q +} + +func (q *PhraseSuggester) GramSize(gramSize int) *PhraseSuggester { + if gramSize >= 1 { + q.gramSize = &gramSize + } + return q +} + +func (q *PhraseSuggester) MaxErrors(maxErrors float64) *PhraseSuggester { + q.maxErrors = &maxErrors + return q +} + +func (q *PhraseSuggester) Separator(separator string) *PhraseSuggester { + q.separator = &separator + return q +} + +func (q *PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float64) *PhraseSuggester { + q.realWordErrorLikelihood = &realWordErrorLikelihood + return q +} + +func (q *PhraseSuggester) Confidence(confidence float64) *PhraseSuggester { + q.confidence = &confidence + return q +} + +func (q *PhraseSuggester) CandidateGenerator(generator CandidateGenerator) *PhraseSuggester { + if q.generators == nil { + q.generators = make(map[string][]CandidateGenerator) + } + typ := generator.Type() + if _, found := q.generators[typ]; !found { + q.generators[typ] = make([]CandidateGenerator, 0) + } + q.generators[typ] = append(q.generators[typ], generator) + return q +} + +func (q *PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) *PhraseSuggester { + for _, g := range generators { + q = q.CandidateGenerator(g) + } + return q +} + +func (q *PhraseSuggester) ClearCandidateGenerator() *PhraseSuggester { + q.generators = nil + return q +} + +func (q *PhraseSuggester) ForceUnigrams(forceUnigrams bool) *PhraseSuggester { + q.forceUnigrams = &forceUnigrams + return q +} + +func (q *PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) *PhraseSuggester { + q.smoothingModel = smoothingModel + return q +} + +func (q *PhraseSuggester) TokenLimit(tokenLimit int) *PhraseSuggester { + q.tokenLimit = &tokenLimit + return q +} + +func (q *PhraseSuggester) Highlight(preTag, postTag string) *PhraseSuggester { + q.preTag = &preTag + q.postTag = &postTag + return q +} + +func (q *PhraseSuggester) CollateQuery(collateQuery string) *PhraseSuggester { + q.collateQuery = 
&collateQuery + return q +} + +func (q *PhraseSuggester) CollatePreference(collatePreference string) *PhraseSuggester { + q.collatePreference = &collatePreference + return q +} + +func (q *PhraseSuggester) CollateParams(collateParams map[string]interface{}) *PhraseSuggester { + q.collateParams = collateParams + return q +} + +func (q *PhraseSuggester) CollatePrune(collatePrune bool) *PhraseSuggester { + q.collatePrune = &collatePrune + return q +} + +// simplePhraseSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. +// We got into trouble when using plain maps because the text element +// needs to go before the simple_phrase element. +type phraseSuggesterRequest struct { + Text string `json:"text"` + Phrase interface{} `json:"phrase"` +} + +// Creates the source for the phrase suggester. +func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) { + ps := &phraseSuggesterRequest{} + + if q.text != "" { + ps.Text = q.text + } + + suggester := make(map[string]interface{}) + ps.Phrase = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make([]interface{}, 0) + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // Phase-specified parameters + if q.realWordErrorLikelihood != nil { + suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood + } + if q.confidence != nil { + suggester["confidence"] = *q.confidence + } + if q.separator != nil { + suggester["separator"] 
= *q.separator + } + if q.maxErrors != nil { + suggester["max_errors"] = *q.maxErrors + } + if q.gramSize != nil { + suggester["gram_size"] = *q.gramSize + } + if q.forceUnigrams != nil { + suggester["force_unigrams"] = *q.forceUnigrams + } + if q.tokenLimit != nil { + suggester["token_limit"] = *q.tokenLimit + } + if q.generators != nil && len(q.generators) > 0 { + for typ, generators := range q.generators { + arr := make([]interface{}, 0) + for _, g := range generators { + src, err := g.Source() + if err != nil { + return nil, err + } + arr = append(arr, src) + } + suggester[typ] = arr + } + } + if q.smoothingModel != nil { + src, err := q.smoothingModel.Source() + if err != nil { + return nil, err + } + x := make(map[string]interface{}) + x[q.smoothingModel.Type()] = src + suggester["smoothing"] = x + } + if q.preTag != nil { + hl := make(map[string]string) + hl["pre_tag"] = *q.preTag + if q.postTag != nil { + hl["post_tag"] = *q.postTag + } + suggester["highlight"] = hl + } + if q.collateQuery != nil { + collate := make(map[string]interface{}) + suggester["collate"] = collate + if q.collateQuery != nil { + collate["query"] = *q.collateQuery + } + if q.collatePreference != nil { + collate["preference"] = *q.collatePreference + } + if len(q.collateParams) > 0 { + collate["params"] = q.collateParams + } + if q.collatePrune != nil { + collate["prune"] = *q.collatePrune + } + } + + if !includeName { + return ps, nil + } + + source := make(map[string]interface{}) + source[q.name] = ps + return source, nil +} + +// -- Smoothing models -- + +type SmoothingModel interface { + Type() string + Source() (interface{}, error) +} + +// StupidBackoffSmoothingModel implements a stupid backoff smoothing model. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. 
+type StupidBackoffSmoothingModel struct { + discount float64 +} + +func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel { + return &StupidBackoffSmoothingModel{ + discount: discount, + } +} + +func (sm *StupidBackoffSmoothingModel) Type() string { + return "stupid_backoff" +} + +func (sm *StupidBackoffSmoothingModel) Source() (interface{}, error) { + source := make(map[string]interface{}) + source["discount"] = sm.discount + return source, nil +} + +// -- + +// LaplaceSmoothingModel implements a laplace smoothing model. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. +type LaplaceSmoothingModel struct { + alpha float64 +} + +func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel { + return &LaplaceSmoothingModel{ + alpha: alpha, + } +} + +func (sm *LaplaceSmoothingModel) Type() string { + return "laplace" +} + +func (sm *LaplaceSmoothingModel) Source() (interface{}, error) { + source := make(map[string]interface{}) + source["alpha"] = sm.alpha + return source, nil +} + +// -- + +// LinearInterpolationSmoothingModel implements a linear interpolation +// smoothing model. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. 
+type LinearInterpolationSmoothingModel struct { + trigramLamda float64 + bigramLambda float64 + unigramLambda float64 +} + +func NewLinearInterpolationSmoothingModel(trigramLamda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel { + return &LinearInterpolationSmoothingModel{ + trigramLamda: trigramLamda, + bigramLambda: bigramLambda, + unigramLambda: unigramLambda, + } +} + +func (sm *LinearInterpolationSmoothingModel) Type() string { + return "linear_interpolation" +} + +func (sm *LinearInterpolationSmoothingModel) Source() (interface{}, error) { + source := make(map[string]interface{}) + source["trigram_lambda"] = sm.trigramLamda + source["bigram_lambda"] = sm.bigramLambda + source["unigram_lambda"] = sm.unigramLambda + return source, nil +} + +// -- CandidateGenerator -- + +type CandidateGenerator interface { + Type() string + Source() (interface{}, error) +} + +// DirectCandidateGenerator implements a direct candidate generator. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. 
+type DirectCandidateGenerator struct { + field string + preFilter *string + postFilter *string + suggestMode *string + accuracy *float64 + size *int + sort *string + stringDistance *string + maxEdits *int + maxInspections *int + maxTermFreq *float64 + prefixLength *int + minWordLength *int + minDocFreq *float64 +} + +func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator { + return &DirectCandidateGenerator{ + field: field, + } +} + +func (g *DirectCandidateGenerator) Type() string { + return "direct_generator" +} + +func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator { + g.field = field + return g +} + +func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator { + g.preFilter = &preFilter + return g +} + +func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator { + g.postFilter = &postFilter + return g +} + +func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator { + g.suggestMode = &suggestMode + return g +} + +func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator { + g.accuracy = &accuracy + return g +} + +func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator { + g.size = &size + return g +} + +func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator { + g.sort = &sort + return g +} + +func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator { + g.stringDistance = &stringDistance + return g +} + +func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator { + g.maxEdits = &maxEdits + return g +} + +func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator { + g.maxInspections = &maxInspections + return g +} + +func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator { + g.maxTermFreq = &maxTermFreq + 
return g +} + +func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator { + g.prefixLength = &prefixLength + return g +} + +func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator { + g.minWordLength = &minWordLength + return g +} + +func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator { + g.minDocFreq = &minDocFreq + return g +} + +func (g *DirectCandidateGenerator) Source() (interface{}, error) { + source := make(map[string]interface{}) + if g.field != "" { + source["field"] = g.field + } + if g.suggestMode != nil { + source["suggest_mode"] = *g.suggestMode + } + if g.accuracy != nil { + source["accuracy"] = *g.accuracy + } + if g.size != nil { + source["size"] = *g.size + } + if g.sort != nil { + source["sort"] = *g.sort + } + if g.stringDistance != nil { + source["string_distance"] = *g.stringDistance + } + if g.maxEdits != nil { + source["max_edits"] = *g.maxEdits + } + if g.maxInspections != nil { + source["max_inspections"] = *g.maxInspections + } + if g.maxTermFreq != nil { + source["max_term_freq"] = *g.maxTermFreq + } + if g.prefixLength != nil { + source["prefix_length"] = *g.prefixLength + } + if g.minWordLength != nil { + source["min_word_length"] = *g.minWordLength + } + if g.minDocFreq != nil { + source["min_doc_freq"] = *g.minDocFreq + } + if g.preFilter != nil { + source["pre_filter"] = *g.preFilter + } + if g.postFilter != nil { + source["post_filter"] = *g.postFilter + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_term.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_term.go new file mode 100644 index 000000000..116af405a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_term.go @@ -0,0 +1,233 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// For more details, see +// http://www.elasticsearch.org/guide/reference/api/search/term-suggest/ +type TermSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + // fields specific to term suggester + suggestMode string + accuracy *float64 + sort string + stringDistance string + maxEdits *int + maxInspections *int + maxTermFreq *float64 + prefixLength *int + minWordLength *int + minDocFreq *float64 +} + +// Creates a new term suggester. +func NewTermSuggester(name string) *TermSuggester { + return &TermSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *TermSuggester) Name() string { + return q.name +} + +func (q *TermSuggester) Text(text string) *TermSuggester { + q.text = text + return q +} + +func (q *TermSuggester) Field(field string) *TermSuggester { + q.field = field + return q +} + +func (q *TermSuggester) Analyzer(analyzer string) *TermSuggester { + q.analyzer = analyzer + return q +} + +func (q *TermSuggester) Size(size int) *TermSuggester { + q.size = &size + return q +} + +func (q *TermSuggester) ShardSize(shardSize int) *TermSuggester { + q.shardSize = &shardSize + return q +} + +func (q *TermSuggester) ContextQuery(query SuggesterContextQuery) *TermSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *TermSuggester) ContextQueries(queries ...SuggesterContextQuery) *TermSuggester { + q.contextQueries = append(q.contextQueries, queries...) 
+ return q +} + +func (q *TermSuggester) SuggestMode(suggestMode string) *TermSuggester { + q.suggestMode = suggestMode + return q +} + +func (q *TermSuggester) Accuracy(accuracy float64) *TermSuggester { + q.accuracy = &accuracy + return q +} + +func (q *TermSuggester) Sort(sort string) *TermSuggester { + q.sort = sort + return q +} + +func (q *TermSuggester) StringDistance(stringDistance string) *TermSuggester { + q.stringDistance = stringDistance + return q +} + +func (q *TermSuggester) MaxEdits(maxEdits int) *TermSuggester { + q.maxEdits = &maxEdits + return q +} + +func (q *TermSuggester) MaxInspections(maxInspections int) *TermSuggester { + q.maxInspections = &maxInspections + return q +} + +func (q *TermSuggester) MaxTermFreq(maxTermFreq float64) *TermSuggester { + q.maxTermFreq = &maxTermFreq + return q +} + +func (q *TermSuggester) PrefixLength(prefixLength int) *TermSuggester { + q.prefixLength = &prefixLength + return q +} + +func (q *TermSuggester) MinWordLength(minWordLength int) *TermSuggester { + q.minWordLength = &minWordLength + return q +} + +func (q *TermSuggester) MinDocFreq(minDocFreq float64) *TermSuggester { + q.minDocFreq = &minDocFreq + return q +} + +// termSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. +// We got into trouble when using plain maps because the text element +// needs to go before the term element. +type termSuggesterRequest struct { + Text string `json:"text"` + Term interface{} `json:"term"` +} + +// Creates the source for the term suggester. 
+func (q *TermSuggester) Source(includeName bool) (interface{}, error) { + // "suggest" : { + // "my-suggest-1" : { + // "text" : "the amsterdma meetpu", + // "term" : { + // "field" : "body" + // } + // }, + // "my-suggest-2" : { + // "text" : "the rottredam meetpu", + // "term" : { + // "field" : "title", + // } + // } + // } + ts := &termSuggesterRequest{} + if q.text != "" { + ts.Text = q.text + } + + suggester := make(map[string]interface{}) + ts.Term = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make([]interface{}, 0) + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // Specific to term suggester + if q.suggestMode != "" { + suggester["suggest_mode"] = q.suggestMode + } + if q.accuracy != nil { + suggester["accuracy"] = *q.accuracy + } + if q.sort != "" { + suggester["sort"] = q.sort + } + if q.stringDistance != "" { + suggester["string_distance"] = q.stringDistance + } + if q.maxEdits != nil { + suggester["max_edits"] = *q.maxEdits + } + if q.maxInspections != nil { + suggester["max_inspections"] = *q.maxInspections + } + if q.maxTermFreq != nil { + suggester["max_term_freq"] = *q.maxTermFreq + } + if q.prefixLength != nil { + suggester["prefix_len"] = *q.prefixLength + } + if q.minWordLength != nil { + suggester["min_word_len"] = *q.minWordLength + } + if q.minDocFreq != nil { + suggester["min_doc_freq"] = *q.minDocFreq + } + + if !includeName { + return ts, nil + } + + source := make(map[string]interface{}) + source[q.name] = ts + 
return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel.go b/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel.go new file mode 100644 index 000000000..7ee5c5cc3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel.go @@ -0,0 +1,144 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// TasksCancelService can cancel long-running tasks. +// It is supported as of Elasticsearch 2.3.0. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/master/tasks-cancel.html +// for details. +type TasksCancelService struct { + client *Client + pretty bool + taskId *int64 + actions []string + nodeId []string + parentNode string + parentTask *int64 +} + +// NewTasksCancelService creates a new TasksCancelService. +func NewTasksCancelService(client *Client) *TasksCancelService { + return &TasksCancelService{ + client: client, + actions: make([]string, 0), + nodeId: make([]string, 0), + } +} + +// TaskId specifies the task to cancel. Set to -1 to cancel all tasks. +func (s *TasksCancelService) TaskId(taskId int64) *TasksCancelService { + s.taskId = &taskId + return s +} + +// Actions is a list of actions that should be cancelled. Leave empty to cancel all. +func (s *TasksCancelService) Actions(actions []string) *TasksCancelService { + s.actions = actions + return s +} + +// NodeId is a list of node IDs or names to limit the returned information; +// use `_local` to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *TasksCancelService) NodeId(nodeId []string) *TasksCancelService { + s.nodeId = nodeId + return s +} + +// ParentNode specifies to cancel tasks with specified parent node. 
+func (s *TasksCancelService) ParentNode(parentNode string) *TasksCancelService { + s.parentNode = parentNode + return s +} + +// ParentTask specifies to cancel tasks with specified parent task id. +// Set to -1 to cancel all. +func (s *TasksCancelService) ParentTask(parentTask int64) *TasksCancelService { + s.parentTask = &parentTask + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *TasksCancelService) Pretty(pretty bool) *TasksCancelService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *TasksCancelService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if s.taskId != nil { + path, err = uritemplates.Expand("/_tasks/{task_id}/_cancel", map[string]string{ + "task_id": fmt.Sprintf("%d", *s.taskId), + }) + } else { + path = "/_tasks/_cancel" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.actions) > 0 { + params.Set("actions", strings.Join(s.actions, ",")) + } + if len(s.nodeId) > 0 { + params.Set("node_id", strings.Join(s.nodeId, ",")) + } + if s.parentNode != "" { + params.Set("parent_node", s.parentNode) + } + if s.parentTask != nil { + params.Set("parent_task", fmt.Sprintf("%v", *s.parentTask)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *TasksCancelService) Validate() error { + return nil +} + +// Do executes the operation. 
+func (s *TasksCancelService) Do() (*TasksListResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(TasksListResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/tasks_list.go b/vendor/gopkg.in/olivere/elastic.v3/tasks_list.go new file mode 100644 index 000000000..f74ede19f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/tasks_list.go @@ -0,0 +1,213 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// TasksListService retrieves the list of currently executing tasks +// on one ore more nodes in the cluster. It is part of the Task Management API +// documented at http://www.elastic.co/guide/en/elasticsearch/reference/master/tasks-list.html. +// +// It is supported as of Elasticsearch 2.3.0. +type TasksListService struct { + client *Client + pretty bool + taskId []int64 + actions []string + detailed *bool + nodeId []string + parentNode string + parentTask *int64 + waitForCompletion *bool +} + +// NewTasksListService creates a new TasksListService. +func NewTasksListService(client *Client) *TasksListService { + return &TasksListService{ + client: client, + taskId: make([]int64, 0), + actions: make([]string, 0), + nodeId: make([]string, 0), + } +} + +// TaskId indicates to returns the task(s) with specified id(s). 
+func (s *TasksListService) TaskId(taskId ...int64) *TasksListService { + s.taskId = append(s.taskId, taskId...) + return s +} + +// Actions is a list of actions that should be returned. Leave empty to return all. +func (s *TasksListService) Actions(actions ...string) *TasksListService { + s.actions = append(s.actions, actions...) + return s +} + +// Detailed indicates whether to return detailed task information (default: false). +func (s *TasksListService) Detailed(detailed bool) *TasksListService { + s.detailed = &detailed + return s +} + +// NodeId is a list of node IDs or names to limit the returned information; +// use `_local` to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *TasksListService) NodeId(nodeId ...string) *TasksListService { + s.nodeId = append(s.nodeId, nodeId...) + return s +} + +// ParentNode returns tasks with specified parent node. +func (s *TasksListService) ParentNode(parentNode string) *TasksListService { + s.parentNode = parentNode + return s +} + +// ParentTask returns tasks with specified parent task id. Set to -1 to return all. +func (s *TasksListService) ParentTask(parentTask int64) *TasksListService { + s.parentTask = &parentTask + return s +} + +// WaitForCompletion indicates whether to wait for the matching tasks +// to complete (default: false). +func (s *TasksListService) WaitForCompletion(waitForCompletion bool) *TasksListService { + s.waitForCompletion = &waitForCompletion + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *TasksListService) Pretty(pretty bool) *TasksListService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *TasksListService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.taskId) > 0 { + var tasks []string + for _, taskId := range s.taskId { + tasks = append(tasks, fmt.Sprintf("%d", taskId)) + } + path, err = uritemplates.Expand("/_tasks/{task_id}", map[string]string{ + "task_id": strings.Join(tasks, ","), + }) + } else { + path = "/_tasks" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.actions) > 0 { + params.Set("actions", strings.Join(s.actions, ",")) + } + if s.detailed != nil { + params.Set("detailed", fmt.Sprintf("%v", *s.detailed)) + } + if len(s.nodeId) > 0 { + params.Set("node_id", strings.Join(s.nodeId, ",")) + } + if s.parentNode != "" { + params.Set("parent_node", s.parentNode) + } + if s.parentTask != nil { + params.Set("parent_task", fmt.Sprintf("%v", *s.parentTask)) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *TasksListService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *TasksListService) Do() (*TasksListResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(TasksListResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// TasksListResponse is the response of TasksListService.Do. 
+type TasksListResponse struct { + TaskFailures []*TaskOperationFailure `json:"task_failures"` + NodeFailures []*FailedNodeException `json:"node_failures"` + // Nodes returns the tasks per node. The key is the node id. + Nodes map[string]*DiscoveryNode `json:"nodes"` +} + +type TaskOperationFailure struct { + TaskId int64 `json:"task_id"` + NodeId string `json:"node_id"` + Status string `json:"status"` + Reason *ErrorDetails `json:"reason"` +} + +type FailedNodeException struct { + *ErrorDetails + NodeId string `json:"node_id"` +} + +type DiscoveryNode struct { + Name string `json:"name"` + TransportAddress string `json:"transport_address"` + Host string `json:"host"` + IP string `json:"ip"` + Attributes map[string]interface{} `json:"attributes"` + // Tasks returns the tasks by its id (as a string). + Tasks map[string]*TaskInfo `json:"tasks"` +} + +type TaskInfo struct { + Node string `json:"node"` + Id int64 `json:"id"` // the task id + Type string `json:"type"` + Action string `json:"action"` + Status interface{} `json:"status"` + Description interface{} `json:"description"` + StartTime string `json:"start_time"` + StartTimeInMillis int64 `json:"start_time_in_millis"` + RunningTime string `json:"running_time"` + RunningTimeInNanos int64 `json:"running_time_in_nanos"` + ParentTaskId string `json:"parent_task_id"` // like "YxJnVYjwSBm_AUbzddTajQ:12356" +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/termvectors.go b/vendor/gopkg.in/olivere/elastic.v3/termvectors.go new file mode 100644 index 000000000..c00a3363b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/termvectors.go @@ -0,0 +1,458 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// TermvectorsService returns information and statistics on terms in the +// fields of a particular document. The document could be stored in the +// index or artificially provided by the user. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/2.1/docs-termvectors.html +// for documentation. +type TermvectorsService struct { + client *Client + pretty bool + id string + index string + typ string + dfs *bool + doc interface{} + fieldStatistics *bool + fields []string + filter *TermvectorsFilterSettings + perFieldAnalyzer map[string]string + offsets *bool + parent string + payloads *bool + positions *bool + preference string + realtime *bool + routing string + termStatistics *bool + version interface{} + versionType string + bodyJson interface{} + bodyString string +} + +// NewTermvectorsService creates a new TermvectorsService. +func NewTermvectorsService(client *Client) *TermvectorsService { + return &TermvectorsService{ + client: client, + } +} + +// Index in which the document resides. +func (s *TermvectorsService) Index(index string) *TermvectorsService { + s.index = index + return s +} + +// Type of the document. +func (s *TermvectorsService) Type(typ string) *TermvectorsService { + s.typ = typ + return s +} + +// Id of the document. +func (s *TermvectorsService) Id(id string) *TermvectorsService { + s.id = id + return s +} + +// Dfs specifies if distributed frequencies should be returned instead +// shard frequencies. +func (s *TermvectorsService) Dfs(dfs bool) *TermvectorsService { + s.dfs = &dfs + return s +} + +// Doc is the document to analyze. +func (s *TermvectorsService) Doc(doc interface{}) *TermvectorsService { + s.doc = doc + return s +} + +// FieldStatistics specifies if document count, sum of document frequencies +// and sum of total term frequencies should be returned. 
+func (s *TermvectorsService) FieldStatistics(fieldStatistics bool) *TermvectorsService { + s.fieldStatistics = &fieldStatistics + return s +} + +// Fields a list of fields to return. +func (s *TermvectorsService) Fields(fields ...string) *TermvectorsService { + if s.fields == nil { + s.fields = make([]string, 0) + } + s.fields = append(s.fields, fields...) + return s +} + +// Filter adds terms filter settings. +func (s *TermvectorsService) Filter(filter *TermvectorsFilterSettings) *TermvectorsService { + s.filter = filter + return s +} + +// PerFieldAnalyzer allows to specify a different analyzer than the one +// at the field. +func (s *TermvectorsService) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *TermvectorsService { + s.perFieldAnalyzer = perFieldAnalyzer + return s +} + +// Offsets specifies if term offsets should be returned. +func (s *TermvectorsService) Offsets(offsets bool) *TermvectorsService { + s.offsets = &offsets + return s +} + +// Parent id of documents. +func (s *TermvectorsService) Parent(parent string) *TermvectorsService { + s.parent = parent + return s +} + +// Payloads specifies if term payloads should be returned. +func (s *TermvectorsService) Payloads(payloads bool) *TermvectorsService { + s.payloads = &payloads + return s +} + +// Positions specifies if term positions should be returned. +func (s *TermvectorsService) Positions(positions bool) *TermvectorsService { + s.positions = &positions + return s +} + +// Preference specify the node or shard the operation +// should be performed on (default: random). +func (s *TermvectorsService) Preference(preference string) *TermvectorsService { + s.preference = preference + return s +} + +// Realtime specifies if request is real-time as opposed to +// near-real-time (default: true). +func (s *TermvectorsService) Realtime(realtime bool) *TermvectorsService { + s.realtime = &realtime + return s +} + +// Routing is a specific routing value. 
+func (s *TermvectorsService) Routing(routing string) *TermvectorsService { + s.routing = routing + return s +} + +// TermStatistics specifies if total term frequency and document frequency +// should be returned. +func (s *TermvectorsService) TermStatistics(termStatistics bool) *TermvectorsService { + s.termStatistics = &termStatistics + return s +} + +// Version an explicit version number for concurrency control. +func (s *TermvectorsService) Version(version interface{}) *TermvectorsService { + s.version = version + return s +} + +// VersionType specifies a version type ("internal", "external", "external_gte", or "force"). +func (s *TermvectorsService) VersionType(versionType string) *TermvectorsService { + s.versionType = versionType + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *TermvectorsService) Pretty(pretty bool) *TermvectorsService { + s.pretty = pretty + return s +} + +// BodyJson defines the body parameters. See documentation. +func (s *TermvectorsService) BodyJson(body interface{}) *TermvectorsService { + s.bodyJson = body + return s +} + +// BodyString defines the body parameters as a string. See documentation. +func (s *TermvectorsService) BodyString(body string) *TermvectorsService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
+func (s *TermvectorsService) buildURL() (string, url.Values, error) { + var pathParam = map[string]string{ + "index": s.index, + "type": s.typ, + } + var path string + var err error + + // Build URL + if s.id != "" { + pathParam["id"] = s.id + path, err = uritemplates.Expand("/{index}/{type}/{id}/_termvectors", pathParam) + } else { + path, err = uritemplates.Expand("/{index}/{type}/_termvectors", pathParam) + } + + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.dfs != nil { + params.Set("dfs", fmt.Sprintf("%v", *s.dfs)) + } + if s.fieldStatistics != nil { + params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics)) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.offsets != nil { + params.Set("offsets", fmt.Sprintf("%v", *s.offsets)) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.payloads != nil { + params.Set("payloads", fmt.Sprintf("%v", *s.payloads)) + } + if s.positions != nil { + params.Set("positions", fmt.Sprintf("%v", *s.positions)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.termStatistics != nil { + params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *TermvectorsService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *TermvectorsService) Do() (*TermvectorsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } else { + data := make(map[string]interface{}) + if s.doc != nil { + data["doc"] = s.doc + } + if len(s.perFieldAnalyzer) > 0 { + data["per_field_analyzer"] = s.perFieldAnalyzer + } + if s.filter != nil { + src, err := s.filter.Source() + if err != nil { + return nil, err + } + data["filter"] = src + } + if len(data) > 0 { + body = data + } + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(TermvectorsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Filter settings -- + +// TermvectorsFilterSettings adds additional filters to a Termsvector request. +// It allows to filter terms based on their tf-idf scores. +// See https://www.elastic.co/guide/en/elasticsearch/reference/2.1/docs-termvectors.html#_terms_filtering +// for more information. +type TermvectorsFilterSettings struct { + maxNumTerms *int64 + minTermFreq *int64 + maxTermFreq *int64 + minDocFreq *int64 + maxDocFreq *int64 + minWordLength *int64 + maxWordLength *int64 +} + +// NewTermvectorsFilterSettings creates and initializes a new TermvectorsFilterSettings struct. 
+func NewTermvectorsFilterSettings() *TermvectorsFilterSettings {
+	return &TermvectorsFilterSettings{}
+}
+
+// MaxNumTerms specifies the maximum number of terms that must be returned per field.
+func (fs *TermvectorsFilterSettings) MaxNumTerms(value int64) *TermvectorsFilterSettings {
+	fs.maxNumTerms = &value
+	return fs
+}
+
+// MinTermFreq ignores words with less than this frequency in the source doc.
+func (fs *TermvectorsFilterSettings) MinTermFreq(value int64) *TermvectorsFilterSettings {
+	fs.minTermFreq = &value
+	return fs
+}
+
+// MaxTermFreq ignores words with more than this frequency in the source doc.
+func (fs *TermvectorsFilterSettings) MaxTermFreq(value int64) *TermvectorsFilterSettings {
+	fs.maxTermFreq = &value
+	return fs
+}
+
+// MinDocFreq ignores terms which do not occur in at least this many docs.
+func (fs *TermvectorsFilterSettings) MinDocFreq(value int64) *TermvectorsFilterSettings {
+	fs.minDocFreq = &value
+	return fs
+}
+
+// MaxDocFreq ignores terms which occur in more than this many docs.
+func (fs *TermvectorsFilterSettings) MaxDocFreq(value int64) *TermvectorsFilterSettings {
+	fs.maxDocFreq = &value
+	return fs
+}
+
+// MinWordLength specifies the minimum word length below which words will be ignored.
+func (fs *TermvectorsFilterSettings) MinWordLength(value int64) *TermvectorsFilterSettings {
+	fs.minWordLength = &value
+	return fs
+}
+
+// MaxWordLength specifies the maximum word length above which words will be ignored.
+func (fs *TermvectorsFilterSettings) MaxWordLength(value int64) *TermvectorsFilterSettings {
+	fs.maxWordLength = &value
+	return fs
+}
+
+// Source returns JSON for the query.
+func (fs *TermvectorsFilterSettings) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fs.maxNumTerms != nil { + source["max_num_terms"] = *fs.maxNumTerms + } + if fs.minTermFreq != nil { + source["min_term_freq"] = *fs.minTermFreq + } + if fs.maxTermFreq != nil { + source["max_term_freq"] = *fs.maxTermFreq + } + if fs.minDocFreq != nil { + source["min_doc_freq"] = *fs.minDocFreq + } + if fs.maxDocFreq != nil { + source["max_doc_freq"] = *fs.maxDocFreq + } + if fs.minWordLength != nil { + source["min_word_length"] = *fs.minWordLength + } + if fs.maxWordLength != nil { + source["max_word_length"] = *fs.maxWordLength + } + return source, nil +} + +// -- Response types -- + +type TokenInfo struct { + StartOffset int64 `json:"start_offset"` + EndOffset int64 `json:"end_offset"` + Position int64 `json:"position"` + Payload string `json:"payload"` +} + +type TermsInfo struct { + DocFreq int64 `json:"doc_freq"` + Score float64 `json:"score"` + TermFreq int64 `json:"term_freq"` + Ttf int64 `json:"ttf"` + Tokens []TokenInfo `json:"tokens"` +} + +type FieldStatistics struct { + DocCount int64 `json:"doc_count"` + SumDocFreq int64 `json:"sum_doc_freq"` + SumTtf int64 `json:"sum_ttf"` +} + +type TermVectorsFieldInfo struct { + FieldStatistics FieldStatistics `json:"field_statistics"` + Terms map[string]TermsInfo `json:"terms"` +} + +// TermvectorsResponse is the response of TermvectorsService.Do. 
+type TermvectorsResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id,omitempty"` + Version int `json:"_version"` + Found bool `json:"found"` + Took int64 `json:"took"` + TermVectors map[string]TermVectorsFieldInfo `json:"term_vectors"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/update.go b/vendor/gopkg.in/olivere/elastic.v3/update.go new file mode 100644 index 000000000..3d99ee860 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/update.go @@ -0,0 +1,299 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// UpdateService updates a document in Elasticsearch. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-update.html +// for details. +type UpdateService struct { + client *Client + index string + typ string + id string + routing string + parent string + script *Script + fields []string + version *int64 + versionType string + retryOnConflict *int + refresh *bool + replicationType string + consistencyLevel string + upsert interface{} + scriptedUpsert *bool + docAsUpsert *bool + detectNoop *bool + doc interface{} + timeout string + pretty bool +} + +// NewUpdateService creates the service to update documents in Elasticsearch. +func NewUpdateService(client *Client) *UpdateService { + builder := &UpdateService{ + client: client, + fields: make([]string, 0), + } + return builder +} + +// Index is the name of the Elasticsearch index (required). +func (b *UpdateService) Index(name string) *UpdateService { + b.index = name + return b +} + +// Type is the type of the document (required). 
+func (b *UpdateService) Type(typ string) *UpdateService { + b.typ = typ + return b +} + +// Id is the identifier of the document to update (required). +func (b *UpdateService) Id(id string) *UpdateService { + b.id = id + return b +} + +// Routing specifies a specific routing value. +func (b *UpdateService) Routing(routing string) *UpdateService { + b.routing = routing + return b +} + +// Parent sets the id of the parent document. +func (b *UpdateService) Parent(parent string) *UpdateService { + b.parent = parent + return b +} + +// Script is the script definition. +func (b *UpdateService) Script(script *Script) *UpdateService { + b.script = script + return b +} + +// RetryOnConflict specifies how many times the operation should be retried +// when a conflict occurs (default: 0). +func (b *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService { + b.retryOnConflict = &retryOnConflict + return b +} + +// Fields is a list of fields to return in the response. +func (b *UpdateService) Fields(fields ...string) *UpdateService { + b.fields = make([]string, 0, len(fields)) + b.fields = append(b.fields, fields...) + return b +} + +// Version defines the explicit version number for concurrency control. +func (b *UpdateService) Version(version int64) *UpdateService { + b.version = &version + return b +} + +// VersionType is one of "internal" or "force". +func (b *UpdateService) VersionType(versionType string) *UpdateService { + b.versionType = versionType + return b +} + +// Refresh the index after performing the update. +func (b *UpdateService) Refresh(refresh bool) *UpdateService { + b.refresh = &refresh + return b +} + +// ReplicationType is one of "sync" or "async". +func (b *UpdateService) ReplicationType(replicationType string) *UpdateService { + b.replicationType = replicationType + return b +} + +// ConsistencyLevel is one of "one", "quorum", or "all". +// It sets the write consistency setting for the update operation. 
+func (b *UpdateService) ConsistencyLevel(consistencyLevel string) *UpdateService {
+	b.consistencyLevel = consistencyLevel
+	return b
+}
+
+// Doc allows for updating a partial document.
+func (b *UpdateService) Doc(doc interface{}) *UpdateService {
+	b.doc = doc
+	return b
+}
+
+// Upsert can be used to index the document when it doesn't exist yet.
+// Use this e.g. to initialize a document with a default value.
+func (b *UpdateService) Upsert(doc interface{}) *UpdateService {
+	b.upsert = doc
+	return b
+}
+
+// DocAsUpsert can be used to insert the document if it doesn't already exist.
+func (b *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService {
+	b.docAsUpsert = &docAsUpsert
+	return b
+}
+
+// DetectNoop will instruct Elasticsearch to check if changes will occur
+// when updating via Doc. If there aren't any changes, the request will
+// turn into a no-op.
+func (b *UpdateService) DetectNoop(detectNoop bool) *UpdateService {
+	b.detectNoop = &detectNoop
+	return b
+}
+
+// ScriptedUpsert should be set to true if the referenced script
+// (defined in Script or ScriptId) should be called to perform an insert.
+// The default is false.
+func (b *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService {
+	b.scriptedUpsert = &scriptedUpsert
+	return b
+}
+
+// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms".
+func (b *UpdateService) Timeout(timeout string) *UpdateService {
+	b.timeout = timeout
+	return b
+}
+
+// Pretty instructs to return human readable, prettified JSON.
+func (b *UpdateService) Pretty(pretty bool) *UpdateService {
+	b.pretty = pretty
+	return b
+}
+
+// url returns the URL part of the document request.
+func (b *UpdateService) url() (string, url.Values, error) { + // Build url + path := "/{index}/{type}/{id}/_update" + path, err := uritemplates.Expand(path, map[string]string{ + "index": b.index, + "type": b.typ, + "id": b.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Parameters + params := make(url.Values) + if b.pretty { + params.Set("pretty", "true") + } + if b.routing != "" { + params.Set("routing", b.routing) + } + if b.parent != "" { + params.Set("parent", b.parent) + } + if b.timeout != "" { + params.Set("timeout", b.timeout) + } + if b.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *b.refresh)) + } + if b.replicationType != "" { + params.Set("replication", b.replicationType) + } + if b.consistencyLevel != "" { + params.Set("consistency", b.consistencyLevel) + } + if len(b.fields) > 0 { + params.Set("fields", strings.Join(b.fields, ",")) + } + if b.version != nil { + params.Set("version", fmt.Sprintf("%d", *b.version)) + } + if b.versionType != "" { + params.Set("version_type", b.versionType) + } + if b.retryOnConflict != nil { + params.Set("retry_on_conflict", fmt.Sprintf("%v", *b.retryOnConflict)) + } + + return path, params, nil +} + +// body returns the body part of the document request. +func (b *UpdateService) body() (interface{}, error) { + source := make(map[string]interface{}) + + if b.script != nil { + src, err := b.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + + if b.scriptedUpsert != nil { + source["scripted_upsert"] = *b.scriptedUpsert + } + + if b.upsert != nil { + source["upsert"] = b.upsert + } + + if b.doc != nil { + source["doc"] = b.doc + } + if b.docAsUpsert != nil { + source["doc_as_upsert"] = *b.docAsUpsert + } + if b.detectNoop != nil { + source["detect_noop"] = *b.detectNoop + } + + return source, nil +} + +// Do executes the update operation. 
+func (b *UpdateService) Do() (*UpdateResponse, error) { + path, params, err := b.url() + if err != nil { + return nil, err + } + + // Get body of the request + body, err := b.body() + if err != nil { + return nil, err + } + + // Get response + res, err := b.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(UpdateResponse) + if err := b.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// UpdateResponse is the result of updating a document in Elasticsearch. +type UpdateResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` + GetResult *GetResult `json:"get"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/update_by_query.go b/vendor/gopkg.in/olivere/elastic.v3/update_by_query.go new file mode 100644 index 000000000..28664c7df --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/update_by_query.go @@ -0,0 +1,655 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// UpdateByQueryService is documented at https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html. 
+type UpdateByQueryService struct { + client *Client + pretty bool + index []string + typ []string + xSource []string + xSourceExclude []string + xSourceInclude []string + allowNoIndices *bool + analyzeWildcard *bool + analyzer string + conflicts string + consistency string + defaultOperator string + df string + expandWildcards string + explain *bool + fielddataFields []string + fields []string + from *int + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + preference string + q string + refresh *bool + requestCache *bool + routing []string + scroll string + scrollSize *int + searchTimeout string + searchType string + size *int + sort []string + stats []string + suggestField string + suggestMode string + suggestSize *int + suggestText string + terminateAfter *int + timeout string + trackScores *bool + version *bool + versionType *bool + waitForCompletion *bool + script *Script + query Query + bodyJson interface{} + bodyString string +} + +// NewUpdateByQueryService creates a new UpdateByQueryService. +func NewUpdateByQueryService(client *Client) *UpdateByQueryService { + return &UpdateByQueryService{ + client: client, + xSource: make([]string, 0), + xSourceExclude: make([]string, 0), + xSourceInclude: make([]string, 0), + fielddataFields: make([]string, 0), + fields: make([]string, 0), + routing: make([]string, 0), + sort: make([]string, 0), + stats: make([]string, 0), + } +} + +// Type is a list of document types to search; leave empty to perform +// the operation on all types. +func (s *UpdateByQueryService) Type(typ ...string) *UpdateByQueryService { + s.typ = append(s.typ, typ...) + return s +} + +// Index is a list of index names to search; use `_all` or empty string to +// perform the operation on all indices. +func (s *UpdateByQueryService) Index(index ...string) *UpdateByQueryService { + s.index = append(s.index, index...) 
+ return s +} + +// XSource is true or false to return the _source field or not, +// or a list of fields to return. +func (s *UpdateByQueryService) XSource(xSource ...string) *UpdateByQueryService { + s.xSource = append(s.xSource, xSource...) + return s +} + +// XSourceExclude represents a list of fields to exclude from the returned _source field. +func (s *UpdateByQueryService) XSourceExclude(xSourceExclude ...string) *UpdateByQueryService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// XSourceInclude represents a list of fields to extract and return from the _source field. +func (s *UpdateByQueryService) XSourceInclude(xSourceInclude ...string) *UpdateByQueryService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. (This includes `_all` string or when +// no indices have been specified). +func (s *UpdateByQueryService) AllowNoIndices(allowNoIndices bool) *UpdateByQueryService { + s.allowNoIndices = &allowNoIndices + return s +} + +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *UpdateByQueryService) AnalyzeWildcard(analyzeWildcard bool) *UpdateByQueryService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer specifies the analyzer to use for the query string. +func (s *UpdateByQueryService) Analyzer(analyzer string) *UpdateByQueryService { + s.analyzer = analyzer + return s +} + +// Conflicts indicates what to do when the process detects version conflicts. +// Possible values are "proceed" and "abort". +func (s *UpdateByQueryService) Conflicts(conflicts string) *UpdateByQueryService { + s.conflicts = conflicts + return s +} + +// AbortOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("abort"). 
+func (s *UpdateByQueryService) AbortOnVersionConflict() *UpdateByQueryService {
+	s.conflicts = "abort"
+	return s
+}
+
+// ProceedOnVersionConflict proceeds with the request in spite of version conflicts.
+// It is an alias to setting Conflicts("proceed").
+func (s *UpdateByQueryService) ProceedOnVersionConflict() *UpdateByQueryService {
+	s.conflicts = "proceed"
+	return s
+}
+
+// Consistency sets an explicit write consistency setting for the operation.
+// Possible values are "one", "quorum", and "all".
+func (s *UpdateByQueryService) Consistency(consistency string) *UpdateByQueryService {
+	s.consistency = consistency
+	return s
+}
+
+// DefaultOperator is the default operator for query string query (AND or OR).
+func (s *UpdateByQueryService) DefaultOperator(defaultOperator string) *UpdateByQueryService {
+	s.defaultOperator = defaultOperator
+	return s
+}
+
+// Df specifies the field to use as default where no field prefix is given in the query string.
+func (s *UpdateByQueryService) Df(df string) *UpdateByQueryService {
+	s.df = df
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *UpdateByQueryService) ExpandWildcards(expandWildcards string) *UpdateByQueryService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Explain specifies whether to return detailed information about score
+// computation as part of a hit.
+func (s *UpdateByQueryService) Explain(explain bool) *UpdateByQueryService {
+	s.explain = &explain
+	return s
+}
+
+// FielddataFields is a list of fields to return as the field data
+// representation of a field for each hit.
+func (s *UpdateByQueryService) FielddataFields(fielddataFields ...string) *UpdateByQueryService {
+	s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+	return s
+}
+
+// Fields is a list of fields to return as part of a hit.
+func (s *UpdateByQueryService) Fields(fields ...string) *UpdateByQueryService { + s.fields = append(s.fields, fields...) + return s +} + +// From is the starting offset (default: 0). +func (s *UpdateByQueryService) From(from int) *UpdateByQueryService { + s.from = &from + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *UpdateByQueryService) IgnoreUnavailable(ignoreUnavailable bool) *UpdateByQueryService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored. +func (s *UpdateByQueryService) Lenient(lenient bool) *UpdateByQueryService { + s.lenient = &lenient + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *UpdateByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *UpdateByQueryService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// Preference specifies the node or shard the operation should be performed on +// (default: random). +func (s *UpdateByQueryService) Preference(preference string) *UpdateByQueryService { + s.preference = preference + return s +} + +// Query in the Lucene query string syntax. +func (s *UpdateByQueryService) Q(q string) *UpdateByQueryService { + s.q = q + return s +} + +// Refresh indicates whether the effected indexes should be refreshed. +func (s *UpdateByQueryService) Refresh(refresh bool) *UpdateByQueryService { + s.refresh = &refresh + return s +} + +// RequestCache specifies if request cache should be used for this request +// or not, defaults to index level setting. +func (s *UpdateByQueryService) RequestCache(requestCache bool) *UpdateByQueryService { + s.requestCache = &requestCache + return s +} + +// Routing is a list of specific routing values. 
+func (s *UpdateByQueryService) Routing(routing ...string) *UpdateByQueryService {
+	s.routing = append(s.routing, routing...)
+	return s
+}
+
+// Scroll specifies how long a consistent view of the index should be maintained
+// for scrolled search.
+func (s *UpdateByQueryService) Scroll(scroll string) *UpdateByQueryService {
+	s.scroll = scroll
+	return s
+}
+
+// ScrollSize is the size on the scroll request powering the update_by_query.
+func (s *UpdateByQueryService) ScrollSize(scrollSize int) *UpdateByQueryService {
+	s.scrollSize = &scrollSize
+	return s
+}
+
+// SearchTimeout defines an explicit timeout for each search request.
+// Defaults to no timeout.
+func (s *UpdateByQueryService) SearchTimeout(searchTimeout string) *UpdateByQueryService {
+	s.searchTimeout = searchTimeout
+	return s
+}
+
+// SearchType is the search operation type. Possible values are
+// "query_then_fetch" and "dfs_query_then_fetch".
+func (s *UpdateByQueryService) SearchType(searchType string) *UpdateByQueryService {
+	s.searchType = searchType
+	return s
+}
+
+// Size represents the number of hits to return (default: 10).
+func (s *UpdateByQueryService) Size(size int) *UpdateByQueryService {
+	s.size = &size
+	return s
+}
+
+// Sort is a list of <field>:<direction> pairs.
+func (s *UpdateByQueryService) Sort(sort ...string) *UpdateByQueryService {
+	s.sort = append(s.sort, sort...)
+	return s
+}
+
+// SortByField adds a sort order.
+func (s *UpdateByQueryService) SortByField(field string, ascending bool) *UpdateByQueryService {
+	if ascending {
+		s.sort = append(s.sort, fmt.Sprintf("%s:asc", field))
+	} else {
+		s.sort = append(s.sort, fmt.Sprintf("%s:desc", field))
+	}
+	return s
+}
+
+// Stats specifies specific tag(s) of the request for logging and statistical purposes.
+func (s *UpdateByQueryService) Stats(stats ...string) *UpdateByQueryService {
+	s.stats = append(s.stats, stats...)
+	return s
+}
+
+// SuggestField specifies which field to use for suggestions.
+func (s *UpdateByQueryService) SuggestField(suggestField string) *UpdateByQueryService { + s.suggestField = suggestField + return s +} + +// SuggestMode specifies the suggest mode. Possible values are +// "missing", "popular", and "always". +func (s *UpdateByQueryService) SuggestMode(suggestMode string) *UpdateByQueryService { + s.suggestMode = suggestMode + return s +} + +// SuggestSize specifies how many suggestions to return in response. +func (s *UpdateByQueryService) SuggestSize(suggestSize int) *UpdateByQueryService { + s.suggestSize = &suggestSize + return s +} + +// SuggestText specifies the source text for which the suggestions should be returned. +func (s *UpdateByQueryService) SuggestText(suggestText string) *UpdateByQueryService { + s.suggestText = suggestText + return s +} + +// TerminateAfter indicates the maximum number of documents to collect +// for each shard, upon reaching which the query execution will terminate early. +func (s *UpdateByQueryService) TerminateAfter(terminateAfter int) *UpdateByQueryService { + s.terminateAfter = &terminateAfter + return s +} + +// Timeout is the time each individual bulk request should wait for shards +// that are unavailable. +func (s *UpdateByQueryService) Timeout(timeout string) *UpdateByQueryService { + s.timeout = timeout + return s +} + +// TimeoutInMillis sets the timeout in milliseconds. +func (s *UpdateByQueryService) TimeoutInMillis(timeoutInMillis int) *UpdateByQueryService { + s.timeout = fmt.Sprintf("%dms", timeoutInMillis) + return s +} + +// TrackScores indicates whether to calculate and return scores even if +// they are not used for sorting. +func (s *UpdateByQueryService) TrackScores(trackScores bool) *UpdateByQueryService { + s.trackScores = &trackScores + return s +} + +// Version specifies whether to return document version as part of a hit. 
+func (s *UpdateByQueryService) Version(version bool) *UpdateByQueryService { + s.version = &version + return s +} + +// VersionType indicates if the document increment the version number (internal) +// on hit or not (reindex). +func (s *UpdateByQueryService) VersionType(versionType bool) *UpdateByQueryService { + s.versionType = &versionType + return s +} + +// WaitForCompletion indicates if the request should block until the reindex is complete. +func (s *UpdateByQueryService) WaitForCompletion(waitForCompletion bool) *UpdateByQueryService { + s.waitForCompletion = &waitForCompletion + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *UpdateByQueryService) Pretty(pretty bool) *UpdateByQueryService { + s.pretty = pretty + return s +} + +// Script sets an update script. +func (s *UpdateByQueryService) Script(script *Script) *UpdateByQueryService { + s.script = script + return s +} + +// Query sets a query definition using the Query DSL. +func (s *UpdateByQueryService) Query(query Query) *UpdateByQueryService { + s.query = query + return s +} + +// BodyJson specifies e.g. the query to restrict the results specified with the +// Query DSL (optional). The interface{} will be serialized to a JSON document, +// so use a map[string]interface{}. +func (s *UpdateByQueryService) BodyJson(body interface{}) *UpdateByQueryService { + s.bodyJson = body + return s +} + +// Body specifies e.g. a query to restrict the results specified with +// the Query DSL (optional). +func (s *UpdateByQueryService) BodyString(body string) *UpdateByQueryService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
+func (s *UpdateByQueryService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.index) > 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_update_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 && len(s.typ) == 0 { + path, err = uritemplates.Expand("/{index}/_update_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.index) == 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_update_by_query", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else { + path = "/_all/_update_by_query" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) + } + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.conflicts != "" { + params.Set("conflicts", s.conflicts) + } + if s.consistency != "" { + params.Set("consistency", s.consistency) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.explain != nil { + params.Set("explain", fmt.Sprintf("%v", *s.explain)) + } + if len(s.fielddataFields) > 0 { + params.Set("fielddata_fields", 
strings.Join(s.fielddataFields, ",")) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.from != nil { + params.Set("from", fmt.Sprintf("%d", *s.from)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if len(s.routing) > 0 { + params.Set("routing", strings.Join(s.routing, ",")) + } + if s.scroll != "" { + params.Set("scroll", s.scroll) + } + if s.scrollSize != nil { + params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize)) + } + if s.searchTimeout != "" { + params.Set("search_timeout", s.searchTimeout) + } + if s.searchType != "" { + params.Set("search_type", s.searchType) + } + if s.size != nil { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + if len(s.sort) > 0 { + params.Set("sort", strings.Join(s.sort, ",")) + } + if len(s.stats) > 0 { + params.Set("stats", strings.Join(s.stats, ",")) + } + if s.suggestField != "" { + params.Set("suggest_field", s.suggestField) + } + if s.suggestMode != "" { + params.Set("suggest_mode", s.suggestMode) + } + if s.suggestSize != nil { + params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize)) + } + if s.suggestText != "" { + params.Set("suggest_text", s.suggestText) + } + if s.terminateAfter != nil { + params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.trackScores != nil { + params.Set("track_scores", 
fmt.Sprintf("%v", *s.trackScores)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", *s.version)) + } + if s.versionType != nil { + params.Set("version_type", fmt.Sprintf("%v", *s.versionType)) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *UpdateByQueryService) Validate() error { + return nil +} + +// body returns the body part of the document request. +func (s *UpdateByQueryService) body() (interface{}, error) { + if s.bodyJson != nil { + return s.bodyJson, nil + } + if s.bodyString != "" { + return s.bodyString, nil + } + + source := make(map[string]interface{}) + + if s.script != nil { + src, err := s.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + source["query"] = src + } + + return source, nil +} + +// Do executes the operation. +func (s *UpdateByQueryService) Do() (*UpdateByQueryResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body, err := s.body() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(UpdateByQueryResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// UpdateByQueryResponse is the response of UpdateByQueryService.Do. 
+type UpdateByQueryResponse struct { + Took int64 `json:"took"` + TimedOut bool `json:"timed_out"` + Total int64 `json:"total"` + Updated int64 `json:"updated"` + Created int64 `json:"created"` + Deleted int64 `json:"deleted"` + Batches int64 `json:"batches"` + VersionConflicts int64 `json:"version_conflicts"` + Noops int64 `json:"noops"` + Retries int64 `json:"retries"` + Canceled string `json:"canceled"` + Failures []shardOperationFailure `json:"failures"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/uritemplates/LICENSE b/vendor/gopkg.in/olivere/elastic.v3/uritemplates/LICENSE new file mode 100644 index 000000000..de9c88cb6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/gopkg.in/olivere/elastic.v3/uritemplates/uritemplates.go b/vendor/gopkg.in/olivere/elastic.v3/uritemplates/uritemplates.go new file mode 100644 index 000000000..8a84813fe --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/uritemplates/uritemplates.go @@ -0,0 +1,359 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 4 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// +// To use uritemplates, parse a template string and expand it with a value +// map: +// +// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") +// values := make(map[string]interface{}) +// values["user"] = "jtacoma" +// values["repo"] = "uritemplates" +// expanded, _ := template.ExpandString(values) +// fmt.Printf(expanded) +// +package uritemplates + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) (escaped string) { + if allowReserved { + escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return escaped +} + +// A UriTemplate is a parsed representation of a URI template. +type UriTemplate struct { + raw string + parts []templatePart +} + +// Parse parses a URI template string into a UriTemplate object. 
+func Parse(rawtemplate string) (template *UriTemplate, err error) { + template = new(UriTemplate) + template.raw = rawtemplate + split := strings.Split(rawtemplate, "{") + template.parts = make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + err = errors.New("unexpected }") + break + } + template.parts[i].raw = s + } else { + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + err = errors.New("malformed template") + break + } + expression := subsplit[0] + template.parts[i*2-1], err = parseExpression(expression) + if err != nil { + break + } + template.parts[i*2].raw = subsplit[1] + } + } + if err != nil { + template = nil + } + return template, err +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" 
+ result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. 
+func (self *UriTemplate) Expand(value interface{}) (string, error) { + values, ismap := value.(map[string]interface{}) + if !ismap { + if m, ismap := struct2map(value); !ismap { + return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") + } else { + return self.Expand(m) + } + } + var buf bytes.Buffer + for _, p := range self.parts { + err := p.expand(&buf, values) + if err != nil { + return "", err + } + } + return buf.String(), nil +} + +func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { + if len(self.raw) > 0 { + buf.WriteString(self.raw) + return nil + } + var zeroLen = buf.Len() + buf.WriteString(self.first) + var firstLen = buf.Len() + for _, term := range self.terms { + value, exists := values[term.name] + if !exists { + continue + } + if buf.Len() != firstLen { + buf.WriteString(self.sep) + } + switch v := value.(type) { + case string: + self.expandString(buf, term, v) + case []interface{}: + self.expandArray(buf, term, v) + case map[string]interface{}: + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, v) + default: + if m, ismap := struct2map(value); ismap { + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, m) + } else { + str := fmt.Sprintf("%v", value) + self.expandString(buf, term, str) + } + } + } + if buf.Len() == firstLen { + original := buf.Bytes()[:zeroLen] + buf.Reset() + buf.Write(original) + } + return nil +} + +func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if self.named { + buf.WriteString(name) + if empty { + buf.WriteString(self.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + self.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, 
self.allowReserved)) +} + +func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { + if len(a) == 0 { + return + } else if !t.explode { + self.expandName(buf, t.name, false) + } + for i, value := range a { + if t.explode && i > 0 { + buf.WriteString(self.sep) + } else if i > 0 { + buf.WriteString(",") + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + if self.named && t.explode { + self.expandName(buf, t.name, len(s) == 0) + } + buf.WriteString(escape(s, self.allowReserved)) + } +} + +func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { + if len(m) == 0 { + return + } + if !t.explode { + self.expandName(buf, t.name, len(m) == 0) + } + var firstLen = buf.Len() + for k, value := range m { + if firstLen != buf.Len() { + if t.explode { + buf.WriteString(self.sep) + } else { + buf.WriteString(",") + } + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if t.explode { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune('=') + buf.WriteString(escape(s, self.allowReserved)) + } else { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune(',') + buf.WriteString(escape(s, self.allowReserved)) + } + } +} + +func struct2map(v interface{}) (map[string]interface{}, bool) { + value := reflect.ValueOf(v) + switch value.Type().Kind() { + case reflect.Ptr: + return struct2map(value.Elem().Interface()) + case reflect.Struct: + m := make(map[string]interface{}) + for i := 0; i < value.NumField(); i++ { + tag := value.Type().Field(i).Tag + var name string + if strings.Contains(string(tag), ":") { + name = tag.Get("uri") + } else { + name = strings.TrimSpace(string(tag)) + } + if len(name) == 0 { + name = value.Type().Field(i).Name + } + m[name] = value.Field(i).Interface() + } + return m, 
true + } + return nil, false +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/uritemplates/utils.go b/vendor/gopkg.in/olivere/elastic.v3/uritemplates/utils.go new file mode 100644 index 000000000..399ef4623 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/uritemplates/utils.go @@ -0,0 +1,13 @@ +package uritemplates + +func Expand(path string, expansions map[string]string) (string, error) { + template, err := Parse(path) + if err != nil { + return "", err + } + values := make(map[string]interface{}) + for k, v := range expansions { + values[k] = v + } + return template.Expand(values) +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 308a3ad72..4185a9fda 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -113,9 +113,10 @@ "revisionTime": "2016-07-02T02:15:29-07:00" }, { - "path": "github.com/mitchellh/go-homedir", - "revision": "d682a8f0cf139663a984ff12528da460ca963de9", - "revisionTime": "2015-10-24T22:24:27-07:00" + "checksumSHA1": "GOSe2XEQI4AYwrMoLZu8vtmzkJM=", + "path": "github.com/minio/redigo/redis", + "revision": "5e2117cd32d677a36dcd8c9c83776a065555653b", + "revisionTime": "2016-07-24T00:05:56Z" }, { "path": "github.com/pkg/profile", @@ -138,6 +139,12 @@ "revision": "762fd2bfc12e766d90478d638255981ab1966a3d", "revisionTime": "2016-03-24T19:44:43+05:30" }, + { + "checksumSHA1": "rKV8YLkXpeNG1Oix8hlYqVsEFb4=", + "path": "github.com/streadway/amqp", + "revision": "2e25825abdbd7752ff08b270d313b93519a0a232", + "revisionTime": "2016-03-11T21:55:03Z" + }, { "path": "golang.org/x/crypto/bcrypt", "revision": "7b85b097bf7527677d54d3220065e966a0e3b613", @@ -157,6 +164,24 @@ "path": "gopkg.in/check.v1", "revision": "11d3bc7aa68e238947792f30573146a3231fc0f1", "revisionTime": "2015-07-29T10:04:31+02:00" + }, + { + "checksumSHA1": "eC4OV7g1tug838fPWrwflstr/TM=", + "path": "gopkg.in/olivere/elastic.v3", + "revision": "f7ae701daf3abe5dfb99f57b3f47738ec93c9c26", + "revisionTime": "2016-07-16T10:42:39Z" + }, + { + "checksumSHA1": 
"lLdKOn9RPtFoTtPUNq5+sIInAiE=", + "path": "gopkg.in/olivere/elastic.v3/backoff", + "revision": "f7ae701daf3abe5dfb99f57b3f47738ec93c9c26", + "revisionTime": "2016-07-16T10:42:39Z" + }, + { + "checksumSHA1": "XQg6xG6l15Ke43KolthYYnVDCYo=", + "path": "gopkg.in/olivere/elastic.v3/uritemplates", + "revision": "f7ae701daf3abe5dfb99f57b3f47738ec93c9c26", + "revisionTime": "2016-07-16T10:42:39Z" } ], "rootPath": "github.com/minio/minio" diff --git a/xl-v1-bucket.go b/xl-v1-bucket.go index 38d0659a2..ad70f6d97 100644 --- a/xl-v1-bucket.go +++ b/xl-v1-bucket.go @@ -207,6 +207,10 @@ func (xl xlObjects) listBuckets() (bucketsInfo []BucketInfo, err error) { if !IsValidBucketName(volInfo.Name) { continue } + // Ignore the volume special bucket. + if volInfo.Name == minioMetaBucket { + continue + } bucketsInfo = append(bucketsInfo, BucketInfo{ Name: volInfo.Name, Created: volInfo.Created,