Add access format support for Elasticsearch notification target (#4006)

This change adds `access` format support for notifications to an Elasticsearch server, and it refactors the existing `namespace` format support.

In the `access` format, for each event in Minio a JSON document is inserted into Elasticsearch, with its timestamp set to the event's timestamp and with an ID generated automatically by Elasticsearch. No documents are modified or deleted in this mode.

In the `namespace` format, for each event in Minio a JSON document keyed by the bucket and object name is updated in Elasticsearch. When an object is created or over-written in Minio, a new or existing document is inserted into the Elasticsearch index. If an object is deleted in Minio, the corresponding document is deleted from the Elasticsearch index.

Additionally, this change upgrades Elasticsearch support to the 5.x series. This is a breaking change, and users of previous Elasticsearch versions should upgrade. The documentation on the Elasticsearch notification target is updated accordingly and now links to an Elasticsearch upgrade guide.

This is the last patch that finally resolves #3928.
This commit is contained in:
parent 2040d32ef8
commit a2a8d54bb6
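As a quick orientation for the hunks below, here is a minimal, illustrative sketch (not Minio source) of how the two formats write an event to Elasticsearch with the gopkg.in/olivere/elastic.v5 client that this patch switches to. The index name, document ID and payload are placeholders.

```go
// Illustrative sketch only: namespace vs. access format writes, using the
// same elastic.v5 calls that appear in this patch.
package example

import (
	"context"

	elastic "gopkg.in/olivere/elastic.v5"
)

// namespace format: one document per object, keyed by "bucket/objectname",
// so creates and overwrites upsert the same document.
func writeNamespaceDoc(client *elastic.Client, index, docID string, records interface{}) error {
	_, err := client.Index().Index(index).Type("event").
		Id(docID). // e.g. "images/myphoto.jpg"
		BodyJson(map[string]interface{}{"Records": records}).
		Do(context.Background())
	return err
}

// access format: one document per event, ID chosen by Elasticsearch, so the
// index only grows and nothing is modified or deleted.
func writeAccessDoc(client *elastic.Client, index string, records interface{}) error {
	_, err := client.Index().Index(index).Type("event").
		BodyJson(map[string]interface{}{"Records": records}).
		Do(context.Background())
	return err
}
```

In the actual target (see the `Fire` hook changes below), the namespace format additionally deletes the document on `s3:ObjectRemoved:*` events, and the access format stamps the document with the event's timestamp.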
@@ -18,6 +18,7 @@ package cmd
 
 import (
 	"errors"
+	"fmt"
 
 	"github.com/minio/minio/pkg/wildcard"
 )
@@ -221,3 +222,16 @@ func filterRuleMatch(object string, frs []filterRule) bool {
 	}
 	return prefixMatch && suffixMatch
 }
+
+// A type to represent dynamic error generation functions for
+// notifications.
+type notificationErrorFactoryFunc func(string, ...interface{}) error
+
+// A function to build dynamic error generation functions for
+// notifications by setting an error prefix string.
+func newNotificationErrorFactory(prefix string) notificationErrorFactoryFunc {
+	return func(msg string, a ...interface{}) error {
+		s := fmt.Sprintf(msg, a...)
+		return fmt.Errorf("%s: %s", prefix, s)
+	}
+}
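The `newNotificationErrorFactory` helper added above is reused by the Elasticsearch, MySQL, PostgreSQL and Redis targets further down to build consistently prefixed errors. A small usage sketch, with illustrative prefix and message text:

```go
// Usage sketch for the error factory added above.
package example

import "fmt"

type notificationErrorFactoryFunc func(string, ...interface{}) error

func newNotificationErrorFactory(prefix string) notificationErrorFactoryFunc {
	return func(msg string, a ...interface{}) error {
		return fmt.Errorf("%s: %s", prefix, fmt.Sprintf(msg, a...))
	}
}

var esErrFunc = newNotificationErrorFactory("Elasticsearch")

// dialErr reads: "Elasticsearch: Error dialing the server: connection refused"
var dialErr = esErrFunc("Error dialing the server: %v", fmt.Errorf("connection refused"))
```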
@@ -17,14 +17,20 @@
 package cmd
 
 import (
-	"encoding/hex"
-	"errors"
+	"context"
 	"fmt"
 	"io/ioutil"
+	"time"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/minio/sha256-simd"
-	"gopkg.in/olivere/elastic.v3"
+	"gopkg.in/olivere/elastic.v5"
+)
+
+var (
+	esErrFunc = newNotificationErrorFactory("Elasticsearch")
+
+	errESFormat = esErrFunc(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess)
+	errESIndex  = esErrFunc("Index name was not specified in the configuration.")
 )
 
 // elasticQueue is a elasticsearch event notification queue.
@@ -39,14 +45,15 @@ func (e *elasticSearchNotify) Validate() error {
 	if !e.Enable {
 		return nil
 	}
-	if e.Format != formatNamespace {
-		return fmt.Errorf(
-			"Elasticsearch Notifier Error: \"format\" must be \"%s\"",
-			formatNamespace)
+	if e.Format != formatNamespace && e.Format != formatAccess {
+		return errESFormat
 	}
 	if _, err := checkURL(e.URL); err != nil {
 		return err
 	}
+	if e.Index == "" {
+		return errESIndex
+	}
 	return nil
 }
 
@@ -60,15 +67,11 @@ func dialElastic(esNotify elasticSearchNotify) (*elastic.Client, error) {
 	if !esNotify.Enable {
 		return nil, errNotifyNotEnabled
 	}
-	client, err := elastic.NewClient(
+	return elastic.NewClient(
 		elastic.SetURL(esNotify.URL),
 		elastic.SetSniff(false),
 		elastic.SetMaxRetries(10),
 	)
-	if err != nil {
-		return nil, err
-	}
-	return client, nil
 }
 
@@ -77,23 +80,26 @@ func newElasticNotify(accountID string) (*logrus.Logger, error) {
 	// Dial to elastic search.
 	client, err := dialElastic(esNotify)
 	if err != nil {
-		return nil, err
+		return nil, esErrFunc("Error dialing the server: %v", err)
 	}
 
 	// Use the IndexExists service to check if a specified index exists.
-	exists, err := client.IndexExists(esNotify.Index).Do()
+	exists, err := client.IndexExists(esNotify.Index).
+		Do(context.Background())
 	if err != nil {
-		return nil, err
+		return nil, esErrFunc("Error checking if index exists: %v", err)
 	}
 	// Index does not exist, attempt to create it.
 	if !exists {
 		var createIndex *elastic.IndicesCreateResult
-		createIndex, err = client.CreateIndex(esNotify.Index).Do()
+		createIndex, err = client.CreateIndex(esNotify.Index).
+			Do(context.Background())
 		if err != nil {
-			return nil, err
+			return nil, esErrFunc("Error creating index `%s`: %v",
+				esNotify.Index, err)
 		}
 		if !createIndex.Acknowledged {
-			return nil, errors.New("Index not created")
+			return nil, esErrFunc("Index not created")
 		}
 	}
 
@@ -118,7 +124,7 @@ func newElasticNotify(accountID string) (*logrus.Logger, error) {
 }
 
 // Fire is required to implement logrus hook
-func (q elasticClient) Fire(entry *logrus.Entry) error {
+func (q elasticClient) Fire(entry *logrus.Entry) (err error) {
 	// Reflect on eventType and Key on their native type.
 	entryStr, ok := entry.Data["EventType"].(string)
 	if !ok {
@@ -129,25 +135,44 @@ func (q elasticClient) Fire(entry *logrus.Entry) error {
 		return nil
 	}
 
-	// Calculate a unique key id. Choosing sha256 here.
-	shaKey := sha256.Sum256([]byte(keyStr))
-	keyStr = hex.EncodeToString(shaKey[:])
-
-	// If event matches as delete, we purge the previous index.
-	if eventMatch(entryStr, []string{"s3:ObjectRemoved:*"}) {
-		_, err := q.Client.Delete().Index(q.params.Index).
-			Type("event").Id(keyStr).Do()
-		if err != nil {
-			return err
-		}
-		return nil
-	} // else we update elastic index or create a new one.
-	_, err := q.Client.Index().Index(q.params.Index).
-		Type("event").
-		BodyJson(map[string]interface{}{
-			"Records": entry.Data["Records"],
-		}).Id(keyStr).Do()
-	return err
+	switch q.params.Format {
+	case formatNamespace:
+		// If event matches as delete, we purge the previous index.
+		if eventMatch(entryStr, []string{"s3:ObjectRemoved:*"}) {
+			_, err = q.Client.Delete().Index(q.params.Index).
+				Type("event").Id(keyStr).Do(context.Background())
+			break
+		} // else we update elastic index or create a new one.
+		_, err = q.Client.Index().Index(q.params.Index).
+			Type("event").
+			BodyJson(map[string]interface{}{
+				"Records": entry.Data["Records"],
+			}).Id(keyStr).Do(context.Background())
+	case formatAccess:
+		// eventTime is taken from the first entry in the
+		// records.
+		events, ok := entry.Data["Records"].([]NotificationEvent)
+		if !ok {
+			return esErrFunc("Unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
+		}
+		var eventTime time.Time
+		eventTime, err = time.Parse(timeFormatAMZ, events[0].EventTime)
+		if err != nil {
+			return esErrFunc("Unable to parse event time \"%s\": %v",
+				events[0].EventTime, err)
+		}
+		// Extract event time in milliseconds for Elasticsearch.
+		eventTimeStr := fmt.Sprintf("%d", eventTime.UnixNano()/1000000)
+		_, err = q.Client.Index().Index(q.params.Index).Type("event").
+			Timestamp(eventTimeStr).
+			BodyJson(map[string]interface{}{
+				"Records": entry.Data["Records"],
+			}).Do(context.Background())
+	}
+	if err != nil {
+		return esErrFunc("Error inserting/deleting entry: %v", err)
+	}
+	return nil
 }
 
 // Required for logrus hook implementation
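One consequence of moving from `gopkg.in/olivere/elastic.v3` to `elastic.v5`, visible throughout the hunks above, is that every service call's `Do()` now takes a `context.Context`. A minimal sketch of the index-exists/create flow under the new calling convention, assuming an already connected client:

```go
// Sketch of the elastic.v5 calling convention used above: Do() takes a context.
package example

import (
	"context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func ensureIndex(client *elastic.Client, index string) error {
	exists, err := client.IndexExists(index).Do(context.Background())
	if err != nil || exists {
		return err
	}
	// Index is missing; create it (acknowledgement check omitted in this sketch).
	_, err = client.CreateIndex(index).Do(context.Background())
	return err
}
```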
@@ -90,14 +90,11 @@ VALUES (?, ?);`
 	tableExistsMySQL = `SELECT 1 FROM %s;`
 )
 
-func makeMySQLError(msg string, a ...interface{}) error {
-	s := fmt.Sprintf(msg, a...)
-	return fmt.Errorf("MySQL Notifier Error: %s", s)
-}
-
 var (
-	myNFormatError = makeMySQLError(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess)
-	myNTableError  = makeMySQLError("Table was not specified in the configuration.")
+	mysqlErrFunc = newNotificationErrorFactory("MySQL")
+
+	errMysqlFormat = mysqlErrFunc(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess)
+	errMysqlTable  = mysqlErrFunc("Table was not specified in the configuration.")
 )
 
 type mySQLNotify struct {
@@ -127,7 +124,7 @@ func (m *mySQLNotify) Validate() error {
 		return nil
 	}
 	if m.Format != formatNamespace && m.Format != formatAccess {
-		return myNFormatError
+		return errMysqlFormat
 	}
 	if m.DsnString == "" {
 		if _, err := checkURL(m.Host); err != nil {
@@ -135,7 +132,7 @@ func (m *mySQLNotify) Validate() error {
 		}
 	}
 	if m.Table == "" {
-		return myNTableError
+		return errMysqlTable
 	}
 	return nil
 }
@@ -169,7 +166,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
 
 	db, err := sql.Open("mysql", dsnStr)
 	if err != nil {
-		return mySQLConn{}, makeMySQLError(
+		return mySQLConn{}, mysqlErrFunc(
 			"Connection opening failure (dsnStr=%s): %v",
 			dsnStr, err)
 	}
@@ -177,7 +174,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
 	// ping to check that server is actually reachable.
 	err = db.Ping()
 	if err != nil {
-		return mySQLConn{}, makeMySQLError(
+		return mySQLConn{}, mysqlErrFunc(
 			"Ping to server failed with: %v", err)
 	}
 
@@ -193,7 +190,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
 		_, errCreate := db.Exec(fmt.Sprintf(createStmt, msql.Table))
 		if errCreate != nil {
 			// failed to create the table. error out.
-			return mySQLConn{}, makeMySQLError(
+			return mySQLConn{}, mysqlErrFunc(
 				"'Select' failed with %v, then 'Create Table' failed with %v",
 				err, errCreate,
 			)
@@ -209,21 +206,21 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
 			msql.Table))
 		if err != nil {
 			return mySQLConn{},
-				makeMySQLError("create UPSERT prepared statement failed with: %v", err)
+				mysqlErrFunc("create UPSERT prepared statement failed with: %v", err)
 		}
 		// delete statement
 		stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNSMySQL,
 			msql.Table))
 		if err != nil {
 			return mySQLConn{},
-				makeMySQLError("create DELETE prepared statement failed with: %v", err)
+				mysqlErrFunc("create DELETE prepared statement failed with: %v", err)
 		}
 	case formatAccess:
 		// insert statement
 		stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccessMySQL,
 			msql.Table))
 		if err != nil {
-			return mySQLConn{}, makeMySQLError(
+			return mySQLConn{}, mysqlErrFunc(
 				"create INSERT prepared statement failed with: %v", err)
 		}
 
@@ -274,7 +271,7 @@ func (myC mySQLConn) Fire(entry *logrus.Entry) error {
 		"Records": d,
 	})
 	if err != nil {
-		return nil, makeMySQLError(
+		return nil, mysqlErrFunc(
 			"Unable to encode event %v to JSON: %v", d, err)
 	}
 	return value, nil
@@ -287,7 +284,7 @@ func (myC mySQLConn) Fire(entry *logrus.Entry) error {
 		// delete row from the table
 		_, err := myC.preparedStmts["deleteRow"].Exec(entry.Data["Key"])
 		if err != nil {
-			return makeMySQLError(
+			return mysqlErrFunc(
 				"Error deleting event with key = %v - got mysql error - %v",
 				entry.Data["Key"], err,
 			)
@@ -301,7 +298,7 @@ func (myC mySQLConn) Fire(entry *logrus.Entry) error {
 		// upsert row into the table
 		_, err = myC.preparedStmts["upsertRow"].Exec(entry.Data["Key"], value)
 		if err != nil {
-			return makeMySQLError(
+			return mysqlErrFunc(
 				"Unable to upsert event with Key=%v and Value=%v - got mysql error - %v",
 				entry.Data["Key"], entry.Data["Records"], err,
 			)
@@ -312,11 +309,11 @@ func (myC mySQLConn) Fire(entry *logrus.Entry) error {
 		// records.
 		events, ok := entry.Data["Records"].([]NotificationEvent)
 		if !ok {
-			return makeMySQLError("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
+			return mysqlErrFunc("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
 		}
 		eventTime, err := time.Parse(timeFormatAMZ, events[0].EventTime)
 		if err != nil {
-			return makeMySQLError("unable to parse event time \"%s\": %v",
+			return mysqlErrFunc("unable to parse event time \"%s\": %v",
 				events[0].EventTime, err)
 		}
 
@@ -327,7 +324,7 @@ func (myC mySQLConn) Fire(entry *logrus.Entry) error {
 
 		_, err = myC.preparedStmts["insertRow"].Exec(eventTime, value)
 		if err != nil {
-			return makeMySQLError("Unable to insert event with value=%v: %v",
+			return mysqlErrFunc("Unable to insert event with value=%v: %v",
 				value, err)
 		}
 	}
@@ -98,14 +98,11 @@ VALUES ($1, $2);`
 	tableExists = `SELECT 1 FROM %s;`
 )
 
-func makePGError(msg string, a ...interface{}) error {
-	s := fmt.Sprintf(msg, a...)
-	return fmt.Errorf("PostgreSQL Notifier Error: %s", s)
-}
-
 var (
-	pgNFormatError = makePGError(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess)
-	pgNTableError  = makePGError("Table was not specified in the configuration.")
+	pgErrFunc = newNotificationErrorFactory("PostgreSQL")
+
+	errPGFormatError = pgErrFunc(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess)
+	errPGTableError  = pgErrFunc("Table was not specified in the configuration.")
 )
 
 type postgreSQLNotify struct {
@@ -135,7 +132,7 @@ func (p *postgreSQLNotify) Validate() error {
 		return nil
 	}
 	if p.Format != formatNamespace && p.Format != formatAccess {
-		return pgNFormatError
+		return errPGFormatError
 	}
 	if p.ConnectionString == "" {
 		if _, err := checkURL(p.Host); err != nil {
@@ -143,7 +140,7 @@ func (p *postgreSQLNotify) Validate() error {
 		}
 	}
 	if p.Table == "" {
-		return pgNTableError
+		return errPGTableError
 	}
 	return nil
 }
@@ -182,7 +179,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
 
 	db, err := sql.Open("postgres", connStr)
 	if err != nil {
-		return pgConn{}, makePGError(
+		return pgConn{}, pgErrFunc(
 			"Connection opening failure (connectionString=%s): %v",
 			connStr, err)
 	}
@@ -190,7 +187,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
 	// ping to check that server is actually reachable.
 	err = db.Ping()
 	if err != nil {
-		return pgConn{}, makePGError("Ping to server failed with: %v",
+		return pgConn{}, pgErrFunc("Ping to server failed with: %v",
 			err)
 	}
 
@@ -206,7 +203,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
 		_, errCreate := db.Exec(fmt.Sprintf(createStmt, pgN.Table))
 		if errCreate != nil {
 			// failed to create the table. error out.
-			return pgConn{}, makePGError(
+			return pgConn{}, pgErrFunc(
 				"'Select' failed with %v, then 'Create Table' failed with %v",
 				err, errCreate,
 			)
@@ -221,14 +218,14 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
 		stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNS,
 			pgN.Table))
 		if err != nil {
-			return pgConn{}, makePGError(
+			return pgConn{}, pgErrFunc(
 				"create UPSERT prepared statement failed with: %v", err)
 		}
 		// delete statement
 		stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNS,
 			pgN.Table))
 		if err != nil {
-			return pgConn{}, makePGError(
+			return pgConn{}, pgErrFunc(
 				"create DELETE prepared statement failed with: %v", err)
 		}
 	case formatAccess:
@@ -236,7 +233,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
 		stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccess,
 			pgN.Table))
 		if err != nil {
-			return pgConn{}, makePGError(
+			return pgConn{}, pgErrFunc(
 				"create INSERT prepared statement failed with: %v", err)
 		}
 	}
@@ -279,7 +276,7 @@ func jsonEncodeEventData(d interface{}) ([]byte, error) {
 		"Records": d,
 	})
 	if err != nil {
-		return nil, makePGError(
+		return nil, pgErrFunc(
 			"Unable to encode event %v to JSON: %v", d, err)
 	}
 	return value, nil
@@ -301,7 +298,7 @@ func (pgC pgConn) Fire(entry *logrus.Entry) error {
 		// delete row from the table
 		_, err := pgC.preparedStmts["deleteRow"].Exec(entry.Data["Key"])
 		if err != nil {
-			return makePGError(
+			return pgErrFunc(
 				"Error deleting event with key=%v: %v",
 				entry.Data["Key"], err,
 			)
@@ -315,7 +312,7 @@ func (pgC pgConn) Fire(entry *logrus.Entry) error {
 		// upsert row into the table
 		_, err = pgC.preparedStmts["upsertRow"].Exec(entry.Data["Key"], value)
 		if err != nil {
-			return makePGError(
+			return pgErrFunc(
 				"Unable to upsert event with key=%v and value=%v: %v",
 				entry.Data["Key"], entry.Data["Records"], err,
 			)
@@ -326,11 +323,11 @@ func (pgC pgConn) Fire(entry *logrus.Entry) error {
 		// records.
 		events, ok := entry.Data["Records"].([]NotificationEvent)
 		if !ok {
-			return makePGError("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
+			return pgErrFunc("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
 		}
 		eventTime, err := time.Parse(timeFormatAMZ, events[0].EventTime)
 		if err != nil {
-			return makePGError("unable to parse event time \"%s\": %v",
+			return pgErrFunc("unable to parse event time \"%s\": %v",
 				events[0].EventTime, err)
 		}
 
@@ -341,7 +338,7 @@ func (pgC pgConn) Fire(entry *logrus.Entry) error {
 
 		_, err = pgC.preparedStmts["insertRow"].Exec(eventTime, value)
 		if err != nil {
-			return makePGError("Unable to insert event with value=%v: %v",
+			return pgErrFunc("Unable to insert event with value=%v: %v",
 				value, err)
 		}
 	}
@@ -18,7 +18,6 @@ package cmd
 
 import (
 	"encoding/json"
-	"fmt"
 	"io/ioutil"
 	"net"
 	"time"
@@ -27,14 +26,11 @@ import (
 	"github.com/garyburd/redigo/redis"
 )
 
-func makeRedisError(msg string, a ...interface{}) error {
-	s := fmt.Sprintf(msg, a...)
-	return fmt.Errorf("Redis Notifier Error: %s", s)
-}
-
 var (
-	rdNFormatError = makeRedisError(`"format" value is invalid - it must be one of "access" or "namespace".`)
-	rdNKeyError    = makeRedisError("Key was not specified in the configuration.")
+	redisErrFunc = newNotificationErrorFactory("Redis")
+
+	errRedisFormat   = redisErrFunc(`"format" value is invalid - it must be one of "access" or "namespace".`)
+	errRedisKeyError = redisErrFunc("Key was not specified in the configuration.")
 )
 
 // redisNotify to send logs to Redis server
@@ -51,13 +47,13 @@ func (r *redisNotify) Validate() error {
 		return nil
 	}
 	if r.Format != formatNamespace && r.Format != formatAccess {
-		return rdNFormatError
+		return errRedisFormat
 	}
 	if _, _, err := net.SplitHostPort(r.Addr); err != nil {
 		return err
 	}
 	if r.Key == "" {
-		return rdNKeyError
+		return errRedisKeyError
 	}
 	return nil
 }
@@ -106,13 +102,13 @@ func dialRedis(rNotify redisNotify) (*redis.Pool, error) {
 	// Check connection.
 	_, err := rConn.Do("PING")
 	if err != nil {
-		return nil, makeRedisError("Error connecting to server: %v", err)
+		return nil, redisErrFunc("Error connecting to server: %v", err)
 	}
 
 	// Test that Key is of desired type
 	reply, err := redis.String(rConn.Do("TYPE", rNotify.Key))
 	if err != nil {
-		return nil, makeRedisError("Error getting type of Key=%s: %v",
+		return nil, redisErrFunc("Error getting type of Key=%s: %v",
 			rNotify.Key, err)
 	}
 	if reply != "none" {
@@ -121,7 +117,7 @@ func dialRedis(rNotify redisNotify) (*redis.Pool, error) {
 			expectedType = "list"
 		}
 		if reply != expectedType {
-			return nil, makeRedisError(
+			return nil, redisErrFunc(
 				"Key=%s has type %s, but we expect it to be a %s",
 				rNotify.Key, reply, expectedType)
 		}
@@ -137,7 +133,7 @@ func newRedisNotify(accountID string) (*logrus.Logger, error) {
 	// Dial redis.
 	rPool, err := dialRedis(rNotify)
 	if err != nil {
-		return nil, err
+		return nil, redisErrFunc("Error dialing server: %v", err)
 	}
 
 	rrConn := redisConn{
@@ -175,7 +171,7 @@ func (r redisConn) Fire(entry *logrus.Entry) error {
 		if eventMatch(entryStr, []string{"s3:ObjectRemoved:*"}) {
 			_, err := rConn.Do("HDEL", r.params.Key, entry.Data["Key"])
 			if err != nil {
-				return makeRedisError("Error deleting entry: %v",
+				return redisErrFunc("Error deleting entry: %v",
 					err)
 			}
 			return nil
@@ -185,14 +181,14 @@ func (r redisConn) Fire(entry *logrus.Entry) error {
 			"Records": entry.Data["Records"],
 		})
 		if err != nil {
-			return makeRedisError(
+			return redisErrFunc(
 				"Unable to encode event %v to JSON: %v",
 				entry.Data["Records"], err)
 		}
 		_, err = rConn.Do("HSET", r.params.Key, entry.Data["Key"],
 			value)
 		if err != nil {
-			return makeRedisError("Error updating hash entry: %v",
+			return redisErrFunc("Error updating hash entry: %v",
 				err)
 		}
 	case formatAccess:
@@ -200,18 +196,18 @@ func (r redisConn) Fire(entry *logrus.Entry) error {
 		// records.
 		events, ok := entry.Data["Records"].([]NotificationEvent)
 		if !ok {
-			return makeRedisError("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
+			return redisErrFunc("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
 		}
 		eventTime := events[0].EventTime
 
 		listEntry := []interface{}{eventTime, entry.Data["Records"]}
 		jsonValue, err := json.Marshal(listEntry)
 		if err != nil {
-			return makeRedisError("JSON encoding error: %v", err)
+			return redisErrFunc("JSON encoding error: %v", err)
 		}
 		_, err = rConn.Do("RPUSH", r.params.Key, jsonValue)
 		if err != nil {
-			return makeRedisError("Error appending to Redis list: %v",
+			return redisErrFunc("Error appending to Redis list: %v",
 				err)
 		}
 	}
@@ -137,29 +137,52 @@ python rabbit.py
 <a name="Elasticsearch"></a>
 ## Publish Minio events via Elasticsearch
 
-Install Elasticsearch 2.4 from [here](https://www.elastic.co/downloads/past-releases/elasticsearch-2-4-0).
+Install [Elasticsearch](https://www.elastic.co/downloads/elasticsearch) server. Minio server supports the latest major release series 5.x. Elasticsearch provides version upgrade migration guidelines [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html).
 
+This notification target supports two formats: _namespace_ and _access_.
+
+When the _namespace_ format is used, Minio synchronizes objects in the bucket with documents in the index. For each event in Minio, the server creates a document with the bucket and object name from the event as the document ID. Other details of the event are stored in the body of the document. Thus, if an existing object is over-written in Minio, the corresponding document in the Elasticsearch index is updated. If an object is deleted, the corresponding document is deleted from the index.
+
+When the _access_ format is used, Minio appends events as documents in an Elasticsearch index. For each event, a document with the event details, with the timestamp of the document set to the event's timestamp, is appended to the index. The ID of the document is randomly generated by Elasticsearch. No documents are deleted or modified in this format.
+
+The steps below show how to use this notification target in `namespace` format. The other format is very similar and is omitted for brevity.
+
-## Recipe steps
-
 ### Step 1: Add Elasticsearch endpoint to Minio
 
-The default location of Minio server configuration file is ``~/.minio/config.json``. Update the Elasticsearch configuration block in ``config.json`` as follows:
+The default location of Minio server configuration file is ``~/.minio/config.json``. The Elasticsearch configuration is located in the `elasticsearch` key under the `notify` top-level key. Create a configuration key-value pair here for your Elasticsearch instance. The key is a name for your Elasticsearch endpoint, and the value is a collection of key-value parameters described in the table below.
+
+| Parameter | Type | Description |
+|:---|:---|:---|
+| `enable` | _bool_ | (Required) Is this server endpoint configuration active/enabled? |
+| `format` | _string_ | (Required) Either `namespace` or `access`. |
+| `url` | _string_ | (Required) The Elasticsearch server's address. For example: `http://localhost:9200`. |
+| `index` | _string_ | (Required) The name of an Elasticsearch index in which Minio will store documents. |
+
+An example of Elasticsearch configuration is as follows:
 
 ```json
 "elasticsearch": {
     "1": {
         "enable": true,
+        "format": "namespace",
         "url": "http://127.0.0.1:9200",
-        "index": "bucketevents"
+        "index": "minio_events"
     }
 },
 ```
 
-Restart Minio server to reflect config changes. ``bucketevents`` is the index used by Elasticsearch.
+After updating the configuration file, restart the Minio server to put the changes into effect. The server will print a line like `SQS ARNs: arn:minio:sqs:us-east-1:1:elasticsearch` at start-up if there were no errors.
+
+Note that you can add as many Elasticsearch server endpoint configurations as needed by providing an identifier (like "1" in the example above) for the Elasticsearch instance and an object of per-server configuration parameters.
 
 ### Step 2: Enable bucket notification using Minio client
 
-We will enable bucket event notification to trigger whenever a JPEG image is uploaded or deleted from ``images`` bucket on ``myminio`` server. Here ARN value is ``arn:minio:sqs:us-east-1:1:elasticsearch``. To understand more about ARN please follow [AWS ARN](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) documentation.
+We will now enable bucket event notifications on a bucket named `images`. Whenever a JPEG image is created/overwritten, a new document is added or an existing document is updated in the Elasticsearch index configured above. When an existing object is deleted, the corresponding document is deleted from the index. Thus, the rows in the Elasticsearch index reflect the `.jpg` objects in the `images` bucket.
+
+To configure this bucket notification, we need the ARN printed by Minio in the previous step. Additional information about ARN is available [here](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+
+With the `mc` tool, the configuration is very simple to add. Let us say that the Minio server is aliased as `myminio` in our mc configuration. Execute the following:
 
 ```
 mc mb myminio/images
@@ -170,26 +193,18 @@ arn:minio:sqs:us-east-1:1:elasticsearch s3:ObjectCreated:*,s3:ObjectRemoved:* Fi
 
 ### Step 3: Test on Elasticsearch
 
-Upload a JPEG image into ``images`` bucket, this is the bucket which has been configured for event notification.
+Upload a JPEG image into ``images`` bucket.
 
 ```
 mc cp myphoto.jpg myminio/images
 ```
 
-Run ``curl`` to see new index name ``bucketevents`` in your Elasticsearch setup.
+Use curl to view contents of ``minio_events`` index.
 
 ```
-curl -XGET '127.0.0.1:9200/_cat/indices?v'
-health status index pri rep docs.count docs.deleted store.size pri.store.size
-yellow open bucketevents 5 1 1 0 7.8kb 7.8kb
-```
-
-Use curl to view contents of ``bucketevents`` index.
-
-```
-curl -XGET '127.0.0.1:9200/bucketevents/_search?pretty=1'
+$ curl "http://localhost:9200/minio_events/_search?pretty=true"
 {
-  "took" : 3,
+  "took" : 40,
   "timed_out" : false,
   "_shards" : {
     "total" : 5,
@@ -199,25 +214,30 @@ curl -XGET '127.0.0.1:9200/bucketevents/_search?pretty=1'
   "hits" : {
     "total" : 1,
     "max_score" : 1.0,
-    "hits" : [ {
-      "_index" : "bucketevents",
+    "hits" : [
+      {
+        "_index" : "minio_events",
         "_type" : "event",
-        "_id" : "AVcRVOlwe-uNB1tfj6bx",
+        "_id" : "images/myphoto.jpg",
         "_score" : 1.0,
         "_source" : {
-          "Records" : [ {
+          "Records" : [
+            {
               "eventVersion" : "2.0",
-              "eventSource" : "aws:s3",
+              "eventSource" : "minio:s3",
               "awsRegion" : "us-east-1",
-              "eventTime" : "2016-09-09T23:42:39.977Z",
+              "eventTime" : "2017-03-30T08:00:41Z",
              "eventName" : "s3:ObjectCreated:Put",
              "userIdentity" : {
                "principalId" : "minio"
              },
              "requestParameters" : {
-               "sourceIPAddress" : "10.1.10.150:52140"
+               "sourceIPAddress" : "127.0.0.1:38062"
+             },
+             "responseElements" : {
+               "x-amz-request-id" : "14B09A09703FC47B",
+               "x-minio-origin-endpoint" : "http://192.168.86.115:9000"
              },
-             "responseElements" : { },
              "s3" : {
                "s3SchemaVersion" : "1.0",
                "configurationId" : "Config",
@@ -230,18 +250,28 @@ curl -XGET '127.0.0.1:9200/bucketevents/_search?pretty=1'
              },
              "object" : {
                "key" : "myphoto.jpg",
-               "size" : 200436,
-               "sequencer" : "1472CC35E6971AF3"
+               "size" : 6474,
+               "eTag" : "a3410f4f8788b510d6f19c5067e60a90",
+               "sequencer" : "14B09A09703FC47B"
+             }
+           },
+           "source" : {
+             "host" : "127.0.0.1",
+             "port" : "38062",
+             "userAgent" : "Minio (linux; amd64) minio-go/2.0.3 mc/2017-02-15T17:57:25Z"
            }
          }
-       } ]
+       ]
      }
-   } ]
+   }
+  ]
 }
 }
 ```
 
-``curl`` output above states that an Elasticsearch index has been successfully created with notification contents.
+This output shows that a document has been created for the event in Elasticsearch.
+
+Here we see that the document ID is the bucket and object name. In case `access` format was used, the document ID would be automatically generated by Elasticsearch.
 
 <a name="Redis"></a>
 ## Publish Minio events via Redis
@@ -270,7 +300,6 @@ The default location of Minio server configuration file is ``~/.minio/config.json``
 
 An example of Redis configuration is as follows:
 
-
 ```json
 "redis": {
     "1": {
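The documentation above notes that, in `namespace` format, the document ID is the bucket and object name (for example `images/myphoto.jpg`). Purely as an illustration (this is not part of the commit), such a document could be read back with the same `elastic.v5` client, using the `minio_events` index from the example configuration:

```go
// Illustration: fetch the namespace-format document for one object.
package example

import (
	"context"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func showEventDoc() error {
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"), // endpoint from the example config
		elastic.SetSniff(false),
	)
	if err != nil {
		return err
	}
	doc, err := client.Get().Index("minio_events").Type("event").
		Id("images/myphoto.jpg").Do(context.Background())
	if err != nil {
		return err
	}
	fmt.Println(doc.Id, doc.Found) // doc.Source holds the stored event JSON
	return nil
}
```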
74
vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
generated
vendored
Normal file
74
vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.7
|
||||||
|
|
||||||
|
// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
|
||||||
|
package ctxhttp // import "golang.org/x/net/context/ctxhttp"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Do sends an HTTP request with the provided http.Client and returns
|
||||||
|
// an HTTP response.
|
||||||
|
//
|
||||||
|
// If the client is nil, http.DefaultClient is used.
|
||||||
|
//
|
||||||
|
// The provided ctx must be non-nil. If it is canceled or times out,
|
||||||
|
// ctx.Err() will be returned.
|
||||||
|
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
|
||||||
|
if client == nil {
|
||||||
|
client = http.DefaultClient
|
||||||
|
}
|
||||||
|
resp, err := client.Do(req.WithContext(ctx))
|
||||||
|
// If we got an error, and the context has been canceled,
|
||||||
|
// the context's error is probably more useful.
|
||||||
|
if err != nil {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
err = ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get issues a GET request via the Do function.
|
||||||
|
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
|
||||||
|
req, err := http.NewRequest("GET", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return Do(ctx, client, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Head issues a HEAD request via the Do function.
|
||||||
|
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
|
||||||
|
req, err := http.NewRequest("HEAD", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return Do(ctx, client, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Post issues a POST request via the Do function.
|
||||||
|
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
|
||||||
|
req, err := http.NewRequest("POST", url, body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", bodyType)
|
||||||
|
return Do(ctx, client, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PostForm issues a POST request via the Do function.
|
||||||
|
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
|
||||||
|
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
|
||||||
|
}
|
147
vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
generated
vendored
Normal file
147
vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.7
|
||||||
|
|
||||||
|
package ctxhttp // import "golang.org/x/net/context/ctxhttp"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
func nop() {}
|
||||||
|
|
||||||
|
var (
|
||||||
|
testHookContextDoneBeforeHeaders = nop
|
||||||
|
testHookDoReturned = nop
|
||||||
|
testHookDidBodyClose = nop
|
||||||
|
)
|
||||||
|
|
||||||
|
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
|
||||||
|
// If the client is nil, http.DefaultClient is used.
|
||||||
|
// If the context is canceled or times out, ctx.Err() will be returned.
|
||||||
|
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
|
||||||
|
if client == nil {
|
||||||
|
client = http.DefaultClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(djd): Respect any existing value of req.Cancel.
|
||||||
|
cancel := make(chan struct{})
|
||||||
|
req.Cancel = cancel
|
||||||
|
|
||||||
|
type responseAndError struct {
|
||||||
|
resp *http.Response
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
result := make(chan responseAndError, 1)
|
||||||
|
|
||||||
|
// Make local copies of test hooks closed over by goroutines below.
|
||||||
|
// Prevents data races in tests.
|
||||||
|
testHookDoReturned := testHookDoReturned
|
||||||
|
testHookDidBodyClose := testHookDidBodyClose
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
testHookDoReturned()
|
||||||
|
result <- responseAndError{resp, err}
|
||||||
|
}()
|
||||||
|
|
||||||
|
var resp *http.Response
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
testHookContextDoneBeforeHeaders()
|
||||||
|
close(cancel)
|
||||||
|
// Clean up after the goroutine calling client.Do:
|
||||||
|
go func() {
|
||||||
|
if r := <-result; r.resp != nil {
|
||||||
|
testHookDidBodyClose()
|
||||||
|
r.resp.Body.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return nil, ctx.Err()
|
||||||
|
case r := <-result:
|
||||||
|
var err error
|
||||||
|
resp, err = r.resp, r.err
|
||||||
|
if err != nil {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
close(cancel)
|
||||||
|
case <-c:
|
||||||
|
// The response's Body is closed.
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
resp.Body = ¬ifyingReader{resp.Body, c}
|
||||||
|
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get issues a GET request via the Do function.
|
||||||
|
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
|
||||||
|
req, err := http.NewRequest("GET", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return Do(ctx, client, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Head issues a HEAD request via the Do function.
|
||||||
|
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
|
||||||
|
req, err := http.NewRequest("HEAD", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return Do(ctx, client, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Post issues a POST request via the Do function.
|
||||||
|
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
|
||||||
|
req, err := http.NewRequest("POST", url, body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", bodyType)
|
||||||
|
return Do(ctx, client, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PostForm issues a POST request via the Do function.
|
||||||
|
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
|
||||||
|
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// notifyingReader is an io.ReadCloser that closes the notify channel after
|
||||||
|
// Close is called or a Read fails on the underlying ReadCloser.
|
||||||
|
type notifyingReader struct {
|
||||||
|
io.ReadCloser
|
||||||
|
notify chan<- struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *notifyingReader) Read(p []byte) (int, error) {
|
||||||
|
n, err := r.ReadCloser.Read(p)
|
||||||
|
if err != nil && r.notify != nil {
|
||||||
|
close(r.notify)
|
||||||
|
r.notify = nil
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *notifyingReader) Close() error {
|
||||||
|
err := r.ReadCloser.Close()
|
||||||
|
if r.notify != nil {
|
||||||
|
close(r.notify)
|
||||||
|
r.notify = nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
22
vendor/gopkg.in/olivere/elastic.v3/backoff/LICENSE
generated
vendored
22
vendor/gopkg.in/olivere/elastic.v3/backoff/LICENSE
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
Portions of this code rely on this LICENSE:
|
|
||||||
|
|
||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2014 Cenk Altı
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|
||||||
this software and associated documentation files (the "Software"), to deal in
|
|
||||||
the Software without restriction, including without limitation the rights to
|
|
||||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
|
||||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
|
||||||
subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
|
||||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
|
||||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
|
||||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
||||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
159
vendor/gopkg.in/olivere/elastic.v3/backoff/backoff.go
generated
vendored
159
vendor/gopkg.in/olivere/elastic.v3/backoff/backoff.go
generated
vendored
@ -1,159 +0,0 @@
|
|||||||
// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT-license.
|
|
||||||
// See http://olivere.mit-license.org/license.txt for details.
|
|
||||||
|
|
||||||
package backoff
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"math/rand"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Backoff is an interface for different types of backoff algorithms.
|
|
||||||
type Backoff interface {
|
|
||||||
Next() time.Duration
|
|
||||||
Reset()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop is used as a signal to indicate that no more retries should be made.
|
|
||||||
const Stop time.Duration = -1
|
|
||||||
|
|
||||||
// -- Simple Backoff --
|
|
||||||
|
|
||||||
// SimpleBackoff takes a list of fixed values for backoff intervals.
|
|
||||||
// Each call to Next returns the next value from that fixed list.
|
|
||||||
// After each value is returned, subsequent calls to Next will only return
|
|
||||||
// the last element. The caller may specify if the values are "jittered".
|
|
||||||
type SimpleBackoff struct {
	sync.Mutex
	ticks  []int
	index  int
	jitter bool
	stop   bool
}

// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
// list of fixed intervals in milliseconds.
func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
	return &SimpleBackoff{
		ticks:  ticks,
		index:  0,
		jitter: false,
		stop:   false,
	}
}

// Jitter, when set, randomizes the return value to [0.5*value .. 1.5*value].
func (b *SimpleBackoff) Jitter(doJitter bool) *SimpleBackoff {
	b.Lock()
	defer b.Unlock()
	b.jitter = doJitter
	return b
}

// SendStop, when enabled, makes Next return Stop once
// the list of values is exhausted.
func (b *SimpleBackoff) SendStop(doStop bool) *SimpleBackoff {
	b.Lock()
	defer b.Unlock()
	b.stop = doStop
	return b
}

// Next returns the next wait interval.
func (b *SimpleBackoff) Next() time.Duration {
	b.Lock()
	defer b.Unlock()

	i := b.index
	if i >= len(b.ticks) {
		if b.stop {
			return Stop
		}
		i = len(b.ticks) - 1
		b.index = i
	} else {
		b.index++
	}

	ms := b.ticks[i]
	if b.jitter {
		ms = jitter(ms)
	}
	return time.Duration(ms) * time.Millisecond
}

// Reset resets SimpleBackoff.
func (b *SimpleBackoff) Reset() {
	b.Lock()
	b.index = 0
	b.Unlock()
}

// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis].
func jitter(millis int) int {
	if millis <= 0 {
		return 0
	}
	return millis/2 + rand.Intn(millis)
}

// -- Exponential --

// ExponentialBackoff implements the simple exponential backoff described by
// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
type ExponentialBackoff struct {
	sync.Mutex
	t    float64 // initial timeout (in msec)
	f    float64 // exponential factor (e.g. 2)
	m    float64 // maximum timeout (in msec)
	n    int64   // number of retries
	stop bool    // indicates whether Next should send "Stop" when the max timeout is reached
}

// NewExponentialBackoff returns an ExponentialBackoff backoff policy.
// Use initialTimeout to set the first/minimal interval
// and maxTimeout to set the maximum wait interval.
func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
	return &ExponentialBackoff{
		t:    float64(int64(initialTimeout / time.Millisecond)),
		f:    2.0,
		m:    float64(int64(maxTimeout / time.Millisecond)),
		n:    0,
		stop: false,
	}
}

// SendStop, when enabled, makes Next return Stop once
// the maximum timeout is reached.
func (b *ExponentialBackoff) SendStop(doStop bool) *ExponentialBackoff {
	b.Lock()
	defer b.Unlock()
	b.stop = doStop
	return b
}

// Next returns the next wait interval.
func (t *ExponentialBackoff) Next() time.Duration {
	t.Lock()
	defer t.Unlock()

	n := float64(atomic.AddInt64(&t.n, 1))
	r := 1.0 + rand.Float64() // random number in [1..2]
	m := math.Min(r*t.t*math.Pow(t.f, n), t.m)
	if t.stop && m >= t.m {
		return Stop
	}
	d := time.Duration(int64(m)) * time.Millisecond
	return d
}

// Reset resets the backoff policy so that it can be reused.
func (t *ExponentialBackoff) Reset() {
	t.Lock()
	t.n = 0
	t.Unlock()
}
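The two backoff policies above are consumed through their Next and Reset methods. Purely as illustration, the sketch below shows how a retry loop can be driven by an ExponentialBackoff; it assumes the package-level Stop sentinel that Next returns above, and the helper name retryWithBackoff is hypothetical, not part of this change.

// retryWithBackoff is a hypothetical helper (not part of this commit) that
// retries op until it succeeds or the backoff policy signals Stop.
func retryWithBackoff(op func() error, b *ExponentialBackoff) error {
	for {
		err := op()
		if err == nil {
			return nil
		}
		wait := b.Next()
		if wait == Stop {
			return err // give up and surface the last error
		}
		time.Sleep(wait)
	}
}

For example, with b := NewExponentialBackoff(10*time.Millisecond, 8*time.Second).SendStop(true), the wait grows roughly by a factor of two (with jitter) on each failure until the 8-second cap is reached, after which Next returns Stop.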
301 vendor/gopkg.in/olivere/elastic.v3/delete_by_query.go generated vendored
@@ -1,301 +0,0 @@
130 vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go generated vendored
@@ -1,130 +0,0 @@
193 vendor/gopkg.in/olivere/elastic.v3/indices_get_warmer.go generated vendored
@@ -1,193 +0,0 @@
110 vendor/gopkg.in/olivere/elastic.v3/indices_put_alias.go generated vendored
@@ -1,110 +0,0 @@
221 vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer.go generated vendored
@@ -1,221 +0,0 @@
93 vendor/gopkg.in/olivere/elastic.v3/indices_refresh.go generated vendored
@@ -1,93 +0,0 @@
129 vendor/gopkg.in/olivere/elastic.v3/optimize.go generated vendored
@@ -1,129 +0,0 @@
308 vendor/gopkg.in/olivere/elastic.v3/percolate.go generated vendored
@@ -1,308 +0,0 @@
270  vendor/gopkg.in/olivere/elastic.v3/reindexer.go  (generated, vendored)
@ -1,270 +0,0 @@
|
|||||||
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT-license.
|
|
||||||
// See http://olivere.mit-license.org/license.txt for details.
|
|
||||||
|
|
||||||
package elastic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reindexer simplifies the process of reindexing an index. You typically
|
|
||||||
// reindex a source index to a target index. However, you can also specify
|
|
||||||
// a query that filters out documents from the source index before bulk
|
|
||||||
// indexing them into the target index. The caller may also specify a
|
|
||||||
// different client for the target, e.g. when copying indices from one
|
|
||||||
// Elasticsearch cluster to another.
|
|
||||||
//
|
|
||||||
// Internally, the Reindex users a scan and scroll operation on the source
|
|
||||||
// index and bulk indexing to push data into the target index.
|
|
||||||
//
|
|
||||||
// By default the reindexer fetches the _source, _parent, and _routing
|
|
||||||
// attributes from the source index, using the provided CopyToTargetIndex
|
|
||||||
// will copy those attributes into the destinationIndex.
|
|
||||||
// This behaviour can be overridden by setting the ScanFields and providing a
|
|
||||||
// custom ReindexerFunc.
|
|
||||||
//
|
|
||||||
// The caller is responsible for setting up and/or clearing the target index
|
|
||||||
// before starting the reindex process.
|
|
||||||
//
|
|
||||||
// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
|
|
||||||
// for more information about reindexing.
|
|
||||||
type Reindexer struct {
|
|
||||||
sourceClient, targetClient *Client
|
|
||||||
sourceIndex string
|
|
||||||
query Query
|
|
||||||
scanFields []string
|
|
||||||
bulkSize int
|
|
||||||
size int
|
|
||||||
scroll string
|
|
||||||
reindexerFunc ReindexerFunc
|
|
||||||
progress ReindexerProgressFunc
|
|
||||||
statsOnly bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// A ReindexerFunc receives each hit from the sourceIndex.
|
|
||||||
// It can choose to add any number of BulkableRequests to the bulkService.
|
|
||||||
type ReindexerFunc func(hit *SearchHit, bulkService *BulkService) error
|
|
||||||
|
|
||||||
// CopyToTargetIndex returns a ReindexerFunc that copies the SearchHit's
|
|
||||||
// _source, _parent, and _routing attributes into the targetIndex
|
|
||||||
func CopyToTargetIndex(targetIndex string) ReindexerFunc {
|
|
||||||
return func(hit *SearchHit, bulkService *BulkService) error {
|
|
||||||
// TODO(oe) Do we need to deserialize here?
|
|
||||||
source := make(map[string]interface{})
|
|
||||||
if err := json.Unmarshal(*hit.Source, &source); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req := NewBulkIndexRequest().Index(targetIndex).Type(hit.Type).Id(hit.Id).Doc(source)
|
|
||||||
if hit.Parent != "" {
|
|
||||||
req = req.Parent(hit.Parent)
|
|
||||||
}
|
|
||||||
if hit.Routing != "" {
|
|
||||||
req = req.Routing(hit.Routing)
|
|
||||||
}
|
|
||||||
bulkService.Add(req)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReindexerProgressFunc is a callback that can be used with Reindexer
|
|
||||||
// to report progress while reindexing data.
|
|
||||||
type ReindexerProgressFunc func(current, total int64)
|
|
||||||
|
|
||||||
// ReindexerResponse is returned from the Do func in a Reindexer.
|
|
||||||
// By default, it returns the number of succeeded and failed bulk operations.
|
|
||||||
// To return details about all failed items, set StatsOnly to false in
|
|
||||||
// Reindexer.
|
|
||||||
type ReindexerResponse struct {
|
|
||||||
Success int64
|
|
||||||
Failed int64
|
|
||||||
Errors []*BulkResponseItem
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReindexer returns a new Reindexer.
|
|
||||||
func NewReindexer(client *Client, source string, reindexerFunc ReindexerFunc) *Reindexer {
|
|
||||||
return &Reindexer{
|
|
||||||
sourceClient: client,
|
|
||||||
sourceIndex: source,
|
|
||||||
reindexerFunc: reindexerFunc,
|
|
||||||
statsOnly: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TargetClient specifies a different client for the target. This is
|
|
||||||
// necessary when the target index is in a different Elasticsearch cluster.
|
|
||||||
// By default, the source and target clients are the same.
|
|
||||||
func (ix *Reindexer) TargetClient(c *Client) *Reindexer {
|
|
||||||
ix.targetClient = c
|
|
||||||
return ix
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query specifies the query to apply to the source. It filters out those
|
|
||||||
// documents to be indexed into target. A nil query does not filter out any
|
|
||||||
// documents.
|
|
||||||
func (ix *Reindexer) Query(q Query) *Reindexer {
|
|
||||||
ix.query = q
|
|
||||||
return ix
|
|
||||||
}
|
|
||||||
|
|
||||||
// ScanFields specifies the fields the scan query should load.
|
|
||||||
// The default fields are _source, _parent, _routing.
|
|
||||||
func (ix *Reindexer) ScanFields(scanFields ...string) *Reindexer {
|
|
||||||
ix.scanFields = scanFields
|
|
||||||
return ix
|
|
||||||
}
|
|
||||||
|
|
||||||
// BulkSize returns the number of documents to send to Elasticsearch per chunk.
|
|
||||||
// The default is 500.
|
|
||||||
func (ix *Reindexer) BulkSize(bulkSize int) *Reindexer {
|
|
||||||
ix.bulkSize = bulkSize
|
|
||||||
return ix
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size is the number of results to return per shard, not per request.
|
|
||||||
// So a size of 10 which hits 5 shards will return a maximum of 50 results
|
|
||||||
// per scan request.
|
|
||||||
func (ix *Reindexer) Size(size int) *Reindexer {
|
|
||||||
ix.size = size
|
|
||||||
return ix
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scroll specifies for how long the scroll operation on the source index
|
|
||||||
// should be maintained. The default is 5m.
|
|
||||||
func (ix *Reindexer) Scroll(timeout string) *Reindexer {
|
|
||||||
ix.scroll = timeout
|
|
||||||
return ix
|
|
||||||
}
|
|
||||||
|
|
||||||
// Progress indicates a callback that will be called while indexing.
|
|
||||||
func (ix *Reindexer) Progress(f ReindexerProgressFunc) *Reindexer {
|
|
||||||
ix.progress = f
|
|
||||||
return ix
|
|
||||||
}
|
|
||||||
|
|
||||||
// StatsOnly indicates whether the Do method should return details e.g. about
|
|
||||||
// the documents that failed while indexing. It is true by default, i.e. only
|
|
||||||
// the number of documents that succeeded/failed are returned. Set to false
|
|
||||||
// if you want all the details.
|
|
||||||
func (ix *Reindexer) StatsOnly(statsOnly bool) *Reindexer {
|
|
||||||
ix.statsOnly = statsOnly
|
|
||||||
return ix
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do starts the reindexing process.
|
|
||||||
func (ix *Reindexer) Do() (*ReindexerResponse, error) {
|
|
||||||
if ix.sourceClient == nil {
|
|
||||||
return nil, errors.New("no source client")
|
|
||||||
}
|
|
||||||
if ix.sourceIndex == "" {
|
|
||||||
return nil, errors.New("no source index")
|
|
||||||
}
|
|
||||||
if ix.targetClient == nil {
|
|
||||||
ix.targetClient = ix.sourceClient
|
|
||||||
}
|
|
||||||
if ix.scanFields == nil {
|
|
||||||
ix.scanFields = []string{"_source", "_parent", "_routing"}
|
|
||||||
}
|
|
||||||
if ix.bulkSize <= 0 {
|
|
||||||
ix.bulkSize = 500
|
|
||||||
}
|
|
||||||
if ix.scroll == "" {
|
|
||||||
ix.scroll = "5m"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count total to report progress (if necessary)
|
|
||||||
var err error
|
|
||||||
var current, total int64
|
|
||||||
if ix.progress != nil {
|
|
||||||
total, err = ix.count()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare scan and scroll to iterate through the source index
|
|
||||||
scanner := ix.sourceClient.Scan(ix.sourceIndex).Scroll(ix.scroll).Fields(ix.scanFields...)
|
|
||||||
if ix.query != nil {
|
|
||||||
scanner = scanner.Query(ix.query)
|
|
||||||
}
|
|
||||||
if ix.size > 0 {
|
|
||||||
scanner = scanner.Size(ix.size)
|
|
||||||
}
|
|
||||||
cursor, err := scanner.Do()
|
|
||||||
|
|
||||||
bulk := ix.targetClient.Bulk()
|
|
||||||
|
|
||||||
ret := &ReindexerResponse{
|
|
||||||
Errors: make([]*BulkResponseItem, 0),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Main loop iterates through the source index and bulk indexes into target.
|
|
||||||
for {
|
|
||||||
docs, err := cursor.Next()
|
|
||||||
if err == EOS {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return ret, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if docs.TotalHits() > 0 {
|
|
||||||
for _, hit := range docs.Hits.Hits {
|
|
||||||
if ix.progress != nil {
|
|
||||||
current++
|
|
||||||
ix.progress(current, total)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := ix.reindexerFunc(hit, bulk)
|
|
||||||
if err != nil {
|
|
||||||
return ret, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if bulk.NumberOfActions() >= ix.bulkSize {
|
|
||||||
bulk, err = ix.commit(bulk, ret)
|
|
||||||
if err != nil {
|
|
||||||
return ret, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Final flush
|
|
||||||
if bulk.NumberOfActions() > 0 {
|
|
||||||
bulk, err = ix.commit(bulk, ret)
|
|
||||||
if err != nil {
|
|
||||||
return ret, err
|
|
||||||
}
|
|
||||||
bulk = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// count returns the number of documents in the source index.
|
|
||||||
// The query is taken into account, if specified.
|
|
||||||
func (ix *Reindexer) count() (int64, error) {
|
|
||||||
service := ix.sourceClient.Count(ix.sourceIndex)
|
|
||||||
if ix.query != nil {
|
|
||||||
service = service.Query(ix.query)
|
|
||||||
}
|
|
||||||
return service.Do()
|
|
||||||
}
|
|
||||||
|
|
||||||
// commit commits a bulk, updates the stats, and returns a fresh bulk service.
|
|
||||||
func (ix *Reindexer) commit(bulk *BulkService, ret *ReindexerResponse) (*BulkService, error) {
|
|
||||||
bres, err := bulk.Do()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ret.Success += int64(len(bres.Succeeded()))
|
|
||||||
failed := bres.Failed()
|
|
||||||
ret.Failed += int64(len(failed))
|
|
||||||
if !ix.statsOnly {
|
|
||||||
ret.Errors = append(ret.Errors, failed...)
|
|
||||||
}
|
|
||||||
bulk = ix.targetClient.Bulk()
|
|
||||||
return bulk, nil
|
|
||||||
}
|
|
358  vendor/gopkg.in/olivere/elastic.v3/scan.go  (generated, vendored)
@ -1,358 +0,0 @@
|
|||||||
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT-license.
|
|
||||||
// See http://olivere.mit-license.org/license.txt for details.
|
|
||||||
|
|
||||||
package elastic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"gopkg.in/olivere/elastic.v3/uritemplates"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultKeepAlive = "5m"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// End of stream (or scan)
|
|
||||||
EOS = errors.New("EOS")
|
|
||||||
|
|
||||||
// No ScrollId
|
|
||||||
ErrNoScrollId = errors.New("no scrollId")
|
|
||||||
)
|
|
||||||
|
|
||||||
// ScanService manages a cursor through documents in Elasticsearch.
|
|
||||||
type ScanService struct {
|
|
||||||
client *Client
|
|
||||||
indices []string
|
|
||||||
types []string
|
|
||||||
keepAlive string
|
|
||||||
searchSource *SearchSource
|
|
||||||
pretty bool
|
|
||||||
routing string
|
|
||||||
preference string
|
|
||||||
size *int
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewScanService creates a new service to iterate through the results
|
|
||||||
// of a query.
|
|
||||||
func NewScanService(client *Client) *ScanService {
|
|
||||||
builder := &ScanService{
|
|
||||||
client: client,
|
|
||||||
searchSource: NewSearchSource().Query(NewMatchAllQuery()),
|
|
||||||
}
|
|
||||||
return builder
|
|
||||||
}
|
|
||||||
|
|
||||||
// Index sets the name(s) of the index to use for scan.
|
|
||||||
func (s *ScanService) Index(indices ...string) *ScanService {
|
|
||||||
if s.indices == nil {
|
|
||||||
s.indices = make([]string, 0)
|
|
||||||
}
|
|
||||||
s.indices = append(s.indices, indices...)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Types allows to restrict the scan to a list of types.
|
|
||||||
func (s *ScanService) Type(types ...string) *ScanService {
|
|
||||||
if s.types == nil {
|
|
||||||
s.types = make([]string, 0)
|
|
||||||
}
|
|
||||||
s.types = append(s.types, types...)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scroll is an alias for KeepAlive, the time to keep
|
|
||||||
// the cursor alive (e.g. "5m" for 5 minutes).
|
|
||||||
func (s *ScanService) Scroll(keepAlive string) *ScanService {
|
|
||||||
s.keepAlive = keepAlive
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeepAlive sets the maximum time the cursor will be
|
|
||||||
// available before expiration (e.g. "5m" for 5 minutes).
|
|
||||||
func (s *ScanService) KeepAlive(keepAlive string) *ScanService {
|
|
||||||
s.keepAlive = keepAlive
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fields tells Elasticsearch to only load specific fields from a search hit.
|
|
||||||
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html.
|
|
||||||
func (s *ScanService) Fields(fields ...string) *ScanService {
|
|
||||||
s.searchSource = s.searchSource.Fields(fields...)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// SearchSource sets the search source builder to use with this service.
|
|
||||||
func (s *ScanService) SearchSource(searchSource *SearchSource) *ScanService {
|
|
||||||
s.searchSource = searchSource
|
|
||||||
if s.searchSource == nil {
|
|
||||||
s.searchSource = NewSearchSource().Query(NewMatchAllQuery())
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Routing allows for (a comma-separated) list of specific routing values.
|
|
||||||
func (s *ScanService) Routing(routings ...string) *ScanService {
|
|
||||||
s.routing = strings.Join(routings, ",")
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Preference specifies the node or shard the operation should be
|
|
||||||
// performed on (default: "random").
|
|
||||||
func (s *ScanService) Preference(preference string) *ScanService {
|
|
||||||
s.preference = preference
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query sets the query to perform, e.g. MatchAllQuery.
|
|
||||||
func (s *ScanService) Query(query Query) *ScanService {
|
|
||||||
s.searchSource = s.searchSource.Query(query)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// PostFilter is executed as the last filter. It only affects the
|
|
||||||
// search hits but not facets. See
|
|
||||||
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html
|
|
||||||
// for details.
|
|
||||||
func (s *ScanService) PostFilter(postFilter Query) *ScanService {
|
|
||||||
s.searchSource = s.searchSource.PostFilter(postFilter)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchSource indicates whether the response should contain the stored
|
|
||||||
// _source for every hit.
|
|
||||||
func (s *ScanService) FetchSource(fetchSource bool) *ScanService {
|
|
||||||
s.searchSource = s.searchSource.FetchSource(fetchSource)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchSourceContext indicates how the _source should be fetched.
|
|
||||||
func (s *ScanService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScanService {
|
|
||||||
s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Version can be set to true to return a version for each search hit.
|
|
||||||
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html.
|
|
||||||
func (s *ScanService) Version(version bool) *ScanService {
|
|
||||||
s.searchSource = s.searchSource.Version(version)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort the results by the given field, in the given order.
|
|
||||||
// Use the alternative SortWithInfo to use a struct to define the sorting.
|
|
||||||
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
|
|
||||||
// for detailed documentation of sorting.
|
|
||||||
func (s *ScanService) Sort(field string, ascending bool) *ScanService {
|
|
||||||
s.searchSource = s.searchSource.Sort(field, ascending)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// SortWithInfo defines how to sort results.
|
|
||||||
// Use the Sort func for a shortcut.
|
|
||||||
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
|
|
||||||
// for detailed documentation of sorting.
|
|
||||||
func (s *ScanService) SortWithInfo(info SortInfo) *ScanService {
|
|
||||||
s.searchSource = s.searchSource.SortWithInfo(info)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// SortBy defines how to sort results.
|
|
||||||
// Use the Sort func for a shortcut.
|
|
||||||
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
|
|
||||||
// for detailed documentation of sorting.
|
|
||||||
func (s *ScanService) SortBy(sorter ...Sorter) *ScanService {
|
|
||||||
s.searchSource = s.searchSource.SortBy(sorter...)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pretty enables the caller to indent the JSON output.
|
|
||||||
func (s *ScanService) Pretty(pretty bool) *ScanService {
|
|
||||||
s.pretty = pretty
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size is the number of results to return per shard, not per request.
|
|
||||||
// So a size of 10 which hits 5 shards will return a maximum of 50 results
|
|
||||||
// per scan request.
|
|
||||||
func (s *ScanService) Size(size int) *ScanService {
|
|
||||||
s.size = &size
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do executes the query and returns a "server-side cursor".
|
|
||||||
func (s *ScanService) Do() (*ScanCursor, error) {
|
|
||||||
// Build url
|
|
||||||
path := "/"
|
|
||||||
|
|
||||||
// Indices part
|
|
||||||
indexPart := make([]string, 0)
|
|
||||||
for _, index := range s.indices {
|
|
||||||
index, err := uritemplates.Expand("{index}", map[string]string{
|
|
||||||
"index": index,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
indexPart = append(indexPart, index)
|
|
||||||
}
|
|
||||||
if len(indexPart) > 0 {
|
|
||||||
path += strings.Join(indexPart, ",")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Types
|
|
||||||
typesPart := make([]string, 0)
|
|
||||||
for _, typ := range s.types {
|
|
||||||
typ, err := uritemplates.Expand("{type}", map[string]string{
|
|
||||||
"type": typ,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
typesPart = append(typesPart, typ)
|
|
||||||
}
|
|
||||||
if len(typesPart) > 0 {
|
|
||||||
path += "/" + strings.Join(typesPart, ",")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Search
|
|
||||||
path += "/_search"
|
|
||||||
|
|
||||||
// Parameters
|
|
||||||
params := make(url.Values)
|
|
||||||
if !s.searchSource.hasSort() {
|
|
||||||
// TODO: ES 2.1 deprecates search_type=scan. See https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated.
|
|
||||||
params.Set("search_type", "scan")
|
|
||||||
}
|
|
||||||
if s.pretty {
|
|
||||||
params.Set("pretty", fmt.Sprintf("%v", s.pretty))
|
|
||||||
}
|
|
||||||
if s.keepAlive != "" {
|
|
||||||
params.Set("scroll", s.keepAlive)
|
|
||||||
} else {
|
|
||||||
params.Set("scroll", defaultKeepAlive)
|
|
||||||
}
|
|
||||||
if s.size != nil && *s.size > 0 {
|
|
||||||
params.Set("size", fmt.Sprintf("%d", *s.size))
|
|
||||||
}
|
|
||||||
if s.routing != "" {
|
|
||||||
params.Set("routing", s.routing)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get response
|
|
||||||
body, err := s.searchSource.Source()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
res, err := s.client.PerformRequest("POST", path, params, body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return result
|
|
||||||
searchResult := new(SearchResult)
|
|
||||||
if err := s.client.decoder.Decode(res.Body, searchResult); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cursor := NewScanCursor(s.client, s.keepAlive, s.pretty, searchResult)
|
|
||||||
|
|
||||||
return cursor, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanCursor represents a single page of results from
|
|
||||||
// an Elasticsearch Scan operation.
|
|
||||||
type ScanCursor struct {
|
|
||||||
Results *SearchResult
|
|
||||||
|
|
||||||
client *Client
|
|
||||||
keepAlive string
|
|
||||||
pretty bool
|
|
||||||
currentPage int
|
|
||||||
}
|
|
||||||
|
|
||||||
// newScanCursor returns a new initialized instance
|
|
||||||
// of scanCursor.
|
|
||||||
func NewScanCursor(client *Client, keepAlive string, pretty bool, searchResult *SearchResult) *ScanCursor {
|
|
||||||
return &ScanCursor{
|
|
||||||
client: client,
|
|
||||||
keepAlive: keepAlive,
|
|
||||||
pretty: pretty,
|
|
||||||
Results: searchResult,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TotalHits is a convenience method that returns the number
|
|
||||||
// of hits the cursor will iterate through.
|
|
||||||
func (c *ScanCursor) TotalHits() int64 {
|
|
||||||
if c.Results.Hits == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return c.Results.Hits.TotalHits
|
|
||||||
}
|
|
||||||
|
|
||||||
// Next returns the next search result or nil when all
|
|
||||||
// documents have been scanned.
|
|
||||||
//
|
|
||||||
// Usage:
|
|
||||||
//
|
|
||||||
// for {
|
|
||||||
// res, err := cursor.Next()
|
|
||||||
// if err == elastic.EOS {
|
|
||||||
// // End of stream (or scan)
|
|
||||||
// break
|
|
||||||
// }
|
|
||||||
// if err != nil {
|
|
||||||
// // Handle error
|
|
||||||
// }
|
|
||||||
// // Work with res
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
func (c *ScanCursor) Next() (*SearchResult, error) {
|
|
||||||
if c.currentPage > 0 {
|
|
||||||
if c.Results.Hits == nil || len(c.Results.Hits.Hits) == 0 || c.Results.Hits.TotalHits == 0 {
|
|
||||||
return nil, EOS
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if c.Results.ScrollId == "" {
|
|
||||||
return nil, EOS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build url
|
|
||||||
path := "/_search/scroll"
|
|
||||||
|
|
||||||
// Parameters
|
|
||||||
params := make(url.Values)
|
|
||||||
if c.pretty {
|
|
||||||
params.Set("pretty", fmt.Sprintf("%v", c.pretty))
|
|
||||||
}
|
|
||||||
if c.keepAlive != "" {
|
|
||||||
params.Set("scroll", c.keepAlive)
|
|
||||||
} else {
|
|
||||||
params.Set("scroll", defaultKeepAlive)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set body
|
|
||||||
body := c.Results.ScrollId
|
|
||||||
|
|
||||||
// Get response
|
|
||||||
res, err := c.client.PerformRequest("POST", path, params, body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return result
|
|
||||||
c.Results = &SearchResult{ScrollId: body}
|
|
||||||
if err := c.client.decoder.Decode(res.Body, c.Results); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
c.currentPage += 1
|
|
||||||
|
|
||||||
return c.Results, nil
|
|
||||||
}
|
|
205  vendor/gopkg.in/olivere/elastic.v3/scroll.go  (generated, vendored)
@ -1,205 +0,0 @@
|
|||||||
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT-license.
|
|
||||||
// See http://olivere.mit-license.org/license.txt for details.
|
|
||||||
|
|
||||||
package elastic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"gopkg.in/olivere/elastic.v3/uritemplates"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ScrollService manages a cursor through documents in Elasticsearch.
|
|
||||||
type ScrollService struct {
|
|
||||||
client *Client
|
|
||||||
indices []string
|
|
||||||
types []string
|
|
||||||
keepAlive string
|
|
||||||
query Query
|
|
||||||
size *int
|
|
||||||
pretty bool
|
|
||||||
scrollId string
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewScrollService(client *Client) *ScrollService {
|
|
||||||
builder := &ScrollService{
|
|
||||||
client: client,
|
|
||||||
query: NewMatchAllQuery(),
|
|
||||||
}
|
|
||||||
return builder
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ScrollService) Index(indices ...string) *ScrollService {
|
|
||||||
if s.indices == nil {
|
|
||||||
s.indices = make([]string, 0)
|
|
||||||
}
|
|
||||||
s.indices = append(s.indices, indices...)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ScrollService) Type(types ...string) *ScrollService {
|
|
||||||
if s.types == nil {
|
|
||||||
s.types = make([]string, 0)
|
|
||||||
}
|
|
||||||
s.types = append(s.types, types...)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scroll is an alias for KeepAlive, the time to keep
|
|
||||||
// the cursor alive (e.g. "5m" for 5 minutes).
|
|
||||||
func (s *ScrollService) Scroll(keepAlive string) *ScrollService {
|
|
||||||
s.keepAlive = keepAlive
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeepAlive sets the maximum time the cursor will be
|
|
||||||
// available before expiration (e.g. "5m" for 5 minutes).
|
|
||||||
func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService {
|
|
||||||
s.keepAlive = keepAlive
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ScrollService) Query(query Query) *ScrollService {
|
|
||||||
s.query = query
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ScrollService) Pretty(pretty bool) *ScrollService {
|
|
||||||
s.pretty = pretty
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ScrollService) Size(size int) *ScrollService {
|
|
||||||
s.size = &size
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ScrollService) ScrollId(scrollId string) *ScrollService {
|
|
||||||
s.scrollId = scrollId
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ScrollService) Do() (*SearchResult, error) {
|
|
||||||
if s.scrollId == "" {
|
|
||||||
return s.GetFirstPage()
|
|
||||||
}
|
|
||||||
return s.GetNextPage()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ScrollService) GetFirstPage() (*SearchResult, error) {
|
|
||||||
// Build url
|
|
||||||
path := "/"
|
|
||||||
|
|
||||||
// Indices part
|
|
||||||
indexPart := make([]string, 0)
|
|
||||||
for _, index := range s.indices {
|
|
||||||
index, err := uritemplates.Expand("{index}", map[string]string{
|
|
||||||
"index": index,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
indexPart = append(indexPart, index)
|
|
||||||
}
|
|
||||||
if len(indexPart) > 0 {
|
|
||||||
path += strings.Join(indexPart, ",")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Types
|
|
||||||
typesPart := make([]string, 0)
|
|
||||||
for _, typ := range s.types {
|
|
||||||
typ, err := uritemplates.Expand("{type}", map[string]string{
|
|
||||||
"type": typ,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
typesPart = append(typesPart, typ)
|
|
||||||
}
|
|
||||||
if len(typesPart) > 0 {
|
|
||||||
path += "/" + strings.Join(typesPart, ",")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Search
|
|
||||||
path += "/_search"
|
|
||||||
|
|
||||||
// Parameters
|
|
||||||
params := make(url.Values)
|
|
||||||
if s.pretty {
|
|
||||||
params.Set("pretty", fmt.Sprintf("%v", s.pretty))
|
|
||||||
}
|
|
||||||
if s.keepAlive != "" {
|
|
||||||
params.Set("scroll", s.keepAlive)
|
|
||||||
} else {
|
|
||||||
params.Set("scroll", defaultKeepAlive)
|
|
||||||
}
|
|
||||||
if s.size != nil && *s.size > 0 {
|
|
||||||
params.Set("size", fmt.Sprintf("%d", *s.size))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set body
|
|
||||||
body := make(map[string]interface{})
|
|
||||||
if s.query != nil {
|
|
||||||
src, err := s.query.Source()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
body["query"] = src
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get response
|
|
||||||
res, err := s.client.PerformRequest("POST", path, params, body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return result
|
|
||||||
searchResult := new(SearchResult)
|
|
||||||
if err := s.client.decoder.Decode(res.Body, searchResult); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return searchResult, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ScrollService) GetNextPage() (*SearchResult, error) {
|
|
||||||
if s.scrollId == "" {
|
|
||||||
return nil, EOS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build url
|
|
||||||
path := "/_search/scroll"
|
|
||||||
|
|
||||||
// Parameters
|
|
||||||
params := make(url.Values)
|
|
||||||
if s.pretty {
|
|
||||||
params.Set("pretty", fmt.Sprintf("%v", s.pretty))
|
|
||||||
}
|
|
||||||
if s.keepAlive != "" {
|
|
||||||
params.Set("scroll", s.keepAlive)
|
|
||||||
} else {
|
|
||||||
params.Set("scroll", defaultKeepAlive)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get response
|
|
||||||
res, err := s.client.PerformRequest("POST", path, params, s.scrollId)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return result
|
|
||||||
searchResult := new(SearchResult)
|
|
||||||
if err := s.client.decoder.Decode(res.Body, searchResult); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine last page
|
|
||||||
if searchResult == nil || searchResult.Hits == nil || len(searchResult.Hits.Hits) == 0 || searchResult.Hits.TotalHits == 0 {
|
|
||||||
return nil, EOS
|
|
||||||
}
|
|
||||||
|
|
||||||
return searchResult, nil
|
|
||||||
}
|
|
67  vendor/gopkg.in/olivere/elastic.v3/search_queries_missing.go  (generated, vendored)
@ -1,67 +0,0 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

// MissingQuery returns documents that have only null values or no value
// in the original field.
//
// For details, see:
// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-missing-query.html
type MissingQuery struct {
	name      string
	queryName string
	nullValue *bool
	existence *bool
}

// NewMissingQuery creates and initializes a new MissingQuery.
func NewMissingQuery(name string) *MissingQuery {
	return &MissingQuery{name: name}
}

// QueryName sets the query name for the query that can be used when
// searching for matched filters hit.
func (q *MissingQuery) QueryName(queryName string) *MissingQuery {
	q.queryName = queryName
	return q
}

// NullValue indicates whether the missing filter automatically includes
// fields with null value configured in the mappings. Defaults to false.
func (q *MissingQuery) NullValue(nullValue bool) *MissingQuery {
	q.nullValue = &nullValue
	return q
}

// Existence indicates whether the missing filter includes documents where
// the field doesn't exist in the docs.
func (q *MissingQuery) Existence(existence bool) *MissingQuery {
	q.existence = &existence
	return q
}

// Source returns JSON for the query.
func (q *MissingQuery) Source() (interface{}, error) {
	// {
	//   "missing" : {
	//     "field" : "..."
	//   }
	// }

	source := make(map[string]interface{})
	params := make(map[string]interface{})
	source["missing"] = params
	params["field"] = q.name
	if q.nullValue != nil {
		params["null_value"] = *q.nullValue
	}
	if q.existence != nil {
		params["existence"] = *q.existence
	}
	if q.queryName != "" {
		params["_name"] = q.queryName
	}
	return source, nil
}
45  vendor/gopkg.in/olivere/elastic.v3/search_queries_not.go  (generated, vendored)
@ -1,45 +0,0 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

// NotQuery filters out matched documents using a query.
//
// For details, see
// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-not-query.html
type NotQuery struct {
	filter    Query
	queryName string
}

// NewNotQuery creates and initializes a new NotQuery.
func NewNotQuery(filter Query) *NotQuery {
	return &NotQuery{
		filter: filter,
	}
}

// QueryName sets the query name for the filter that can be used
// when searching for matched_filters per hit
func (q *NotQuery) QueryName(queryName string) *NotQuery {
	q.queryName = queryName
	return q
}

// Source returns JSON for the query.
func (q *NotQuery) Source() (interface{}, error) {
	source := make(map[string]interface{})
	params := make(map[string]interface{})
	source["not"] = params

	src, err := q.filter.Source()
	if err != nil {
		return nil, err
	}
	params["query"] = src
	if q.queryName != "" {
		params["_name"] = q.queryName
	}
	return source, nil
}
84  vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query.go  (generated, vendored)
@ -1,84 +0,0 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

// TemplateQuery is a query that accepts a query template and a
// map of key/value pairs to fill in template parameters.
//
// For more details, see
// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
type TemplateQuery struct {
	template     string
	templateType string
	vars         map[string]interface{}
}

// NewTemplateQuery creates and initializes a new TemplateQuery.
func NewTemplateQuery(name string) *TemplateQuery {
	return &TemplateQuery{
		template: name,
		vars:     make(map[string]interface{}),
	}
}

// Template specifies the name of the template.
func (q *TemplateQuery) Template(name string) *TemplateQuery {
	q.template = name
	return q
}

// TemplateType defines which kind of query we use. The values can be:
// inline, indexed, or file. If undefined, inline is used.
func (q *TemplateQuery) TemplateType(typ string) *TemplateQuery {
	q.templateType = typ
	return q
}

// Var sets a single parameter pair.
func (q *TemplateQuery) Var(name string, value interface{}) *TemplateQuery {
	q.vars[name] = value
	return q
}

// Vars sets parameters for the template query.
func (q *TemplateQuery) Vars(vars map[string]interface{}) *TemplateQuery {
	q.vars = vars
	return q
}

// Source returns the JSON serializable content for the search.
func (q *TemplateQuery) Source() (interface{}, error) {
	// {
	//   "template" : {
	//     "query" : {"match_{{template}}": {}},
	//     "params" : {
	//       "template": "all"
	//     }
	//   }
	// }

	query := make(map[string]interface{})

	tmpl := make(map[string]interface{})
	query["template"] = tmpl

	// TODO(oe): Implementation differs from online documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
	var fieldname string
	switch q.templateType {
	case "file": // file
		fieldname = "file"
	case "indexed", "id": // indexed
		fieldname = "id"
	default: // inline
		fieldname = "query"
	}

	tmpl[fieldname] = q.template
	if len(q.vars) > 0 {
		tmpl["params"] = q.vars
	}

	return query, nil
}
195  vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-5.0.md  (generated, vendored, new file)
@ -0,0 +1,195 @@
# Changes in Elastic 5.0

## Enforce context.Context in PerformRequest and Do

We enforce the usage of `context.Context` everywhere you execute a request.
You need to change all your `Do()` calls to pass a context: `Do(ctx)`.
This enables automatic request cancelation and many other patterns.

If you don't need this, simply pass `context.TODO()` or `context.Background()`.
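As an illustration of the new calling convention, here is a minimal sketch against `gopkg.in/olivere/elastic.v5`; the node URL, index name, and query are assumptions for the example and are not part of the vendored changelog.

```go
package main

import (
	"context"
	"log"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Create a client; the URL points at an assumed local test node.
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Every request now takes a context.Context; pass context.Background()
	// (or context.TODO()) when no cancelation or deadline is needed.
	ctx := context.Background()
	res, err := client.Search().
		Index("minio-events"). // assumed index name
		Query(elastic.NewMatchAllQuery()).
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("search took %d ms", res.TookInMillis)
}
```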
## Warmers removed

Warmers are no longer necessary and have been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_index_apis.html#_warmers).

## Optimize removed

Optimize was deprecated in ES 2.0 and has been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_rest_api_changes.html#_literal__optimize_literal_endpoint_removed).
Use [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html) instead.

## Missing Query removed

The `missing` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-exists-query.html#_literal_missing_literal_query).
Use `exists` query with `must_not` in `bool` query instead.
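A sketch of that replacement, reusing the client and imports from the earlier example; the field and index names are assumptions:

```go
// Matches documents where the "key" field is absent; this is the rough
// equivalent of the old MissingQuery from elastic.v3.
q := elastic.NewBoolQuery().
	MustNot(elastic.NewExistsQuery("key"))

searchResult, err := client.Search().
	Index("minio-events"). // assumed index name
	Query(q).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
_ = searchResult
```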
## And Query removed

The `and` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
Use `must` clauses in a `bool` query instead.

## Not Query removed

TODO Is it removed?

## Or Query removed

The `or` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
Use `should` clauses in a `bool` query instead.

## Filtered Query removed

The `filtered` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
Use `bool` query instead, which supports `filter` clauses too.

## Limit Query removed

The `limit` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
Use the `terminate_after` parameter instead.

## Template Query removed

The `template` query has been [deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-template-query.html). You should use
Search Templates instead.

We remove it from Elastic 5.0 as the 5.0 update is already a good opportunity
to get rid of old stuff.

## `_timestamp` and `_ttl` removed

Both of these fields were deprecated and are now [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_mapping_changes.html#_literal__timestamp_literal_and_literal__ttl_literal).

## Search template Put/Delete API returns `acknowledged` only

The response type for Put/Delete search templates has changed.
It only returns a single `acknowledged` flag now.

## Fields has been renamed to Stored Fields

The `fields` parameter has been renamed to `stored_fields`.
See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fields_literal_parameter).

## Fielddatafields has been renamed to Docvaluefields

The `fielddata_fields` parameter [has been renamed](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fielddata_fields_literal_parameter)
to `docvalue_fields`.

## Type exists endpoint changed

The endpoint for checking whether a type exists has been changed from
`HEAD {index}/{type}` to `HEAD {index}/_mapping/{type}`.
See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking_50_rest_api_changes.html#_literal_head_index_type_literal_replaced_with_literal_head_index__mapping_type_literal).

## Refresh parameter changed

The `?refresh` parameter previously could be a boolean value. It indicated
whether changes made by a request (e.g. by the Bulk API) should be immediately
visible in search, or not. Using `refresh=true` had the positive effect of
immediately seeing the changes when searching; the negative effect is that
it is a rather big performance hit.

With 5.0, you now have the choice between these 3 values.

* `"true"` - Refresh immediately
* `"false"` - Do not refresh (the default value)
* `"wait_for"` - Wait until ES made the document visible in search

See [?refresh](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-refresh.html) in the documentation.

Notice that `true` and `false` (the boolean values) are no longer available
now in Elastic. You must use a string instead, with one of the above values.
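For example, an index request that should be visible to the next search could pass the new string value; this is a sketch reusing the v5 client from the earlier example, and the index, type, and document body are assumptions:

```go
// "wait_for" blocks until the change is visible to search; "true" and
// "false" must now be passed as strings, not booleans.
_, err := client.Index().
	Index("minio-events"). // assumed index name
	Type("event").         // assumed document type
	BodyJson(map[string]interface{}{"key": "images/dog.jpg"}).
	Refresh("wait_for").
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
```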
## ReindexerService removed

The `ReindexerService` was a custom solution that was started in the ES 1.x era
to automate reindexing data, from one index to another or even between clusters.

ES 2.3 introduced its own [Reindex API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html)
so we're going to remove our custom solution and ask you to use the native reindexer.

The `ReindexService` is available via `client.Reindex()` (which used to point
to the custom reindexer).

## Delete By Query back in core

The [Delete By Query API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html)
was moved into a plugin in 2.0. Now it's back in core with a complete rewrite based on the Bulk API.

It has its own endpoint at `/_delete_by_query`.

Delete By Query, Reindex, and Update By Query are very similar under the hood.

## Reindex, Delete By Query, and Update By Query response changed

The response from the above APIs changed a bit. E.g. the `retries` value
used to be an `int64`; it now returns separate values for `bulk` and `search`:

```
// Old
{
  ...
  "retries": 123,
  ...
}
```

```
// New
{
  ...
  "retries": {
    "bulk": 123,
    "search": 0
  },
  ...
}
```

## ScanService removed

The `ScanService` is removed. Use the (new) `ScrollService` instead.

## New ScrollService

There was confusion around `ScanService` and `ScrollService` doing basically
the same. One was returning slices and didn't support all query details, the
other returned one document after another and wasn't safe for concurrent use.
So we merged the two into a new `ScrollService` that
removes all the problems with the older services.

In other words:
If you used `ScanService`, switch to `ScrollService`.
If you used the old `ScrollService`, you might need to fix some things but
overall it should just work.

Changes:
- We replaced `elastic.EOS` with `io.EOF` to indicate the "end of scroll".

TODO Not implemented yet
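A rough sketch of the merged scroll loop follows, reusing the v5 client and imports from the earlier example plus `io`; the index name and page size are assumptions, and note the changelog's TODO above - depending on the exact v5 snapshot the end-of-stream sentinel may still be `elastic.EOS` rather than `io.EOF`.

```go
// Iterate through all documents with the merged ScrollService.
scroll := client.Scroll("minio-events").Size(100) // assumed index name and page size
for {
	page, err := scroll.Do(context.Background())
	if err == io.EOF {
		break // end of scroll reached
	}
	if err != nil {
		log.Fatal(err)
	}
	for _, hit := range page.Hits.Hits {
		log.Printf("got document %s", hit.Id)
	}
}
```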
## Suggesters

They have been [completely rewritten in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html).

Some changes:
- Suggesters no longer have an [output](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html#_simpler_completion_indexing).

TODO Fix all structural changes in suggesters

## Percolator

Percolator has [changed considerably](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_percolator.html).

Elastic 5.0 adds the new
[Percolator Query](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html)
which can be used in combination with the new
[Percolator type](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/percolator.html).

The Percolate service is removed from Elastic 5.0.

## Remove Consistency, add WaitForActiveShards

The `consistency` parameter has been removed in a lot of places, e.g. the Bulk,
Index, Delete, Delete-by-Query, Reindex, Update, and Update-by-Query API.

It has been replaced by a somewhat similar `wait_for_active_shards` parameter.
See https://github.com/elastic/elasticsearch/pull/19454.
@ -6,12 +6,20 @@
#
# Please keep this list sorted.

0x6875790d0a [@huydx](https://github.com/huydx)
Adam Alix [@adamalix](https://github.com/adamalix)
Adam Weiner [@adamweiner](https://github.com/adamweiner)
Adrian Lungu [@AdrianLungu](https://github.com/AdrianLungu)
Alex [@akotlar](https://github.com/akotlar)
Alexandre Olivier [@aliphen](https://github.com/aliphen)
Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
AndreKR [@AndreKR](https://github.com/AndreKR)
Andrew Gaul [@andrewgaul](https://github.com/andrewgaul)
Benjamin Fernandes [@LotharSee](https://github.com/LotharSee)
Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux)
Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va)
Brady Love [@bradylove](https://github.com/bradylove)
Bryan Conklin [@bmconklin](https://github.com/bmconklin)
Bruce Zhou [@brucez-isell](https://github.com/brucez-isell)
Chris M [@tebriel](https://github.com/tebriel)
Christophe Courtaut [@kri5](https://github.com/kri5)
@ -21,25 +29,39 @@ Daniel Barrett [@shendaras](https://github.com/shendaras)
Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath)
Daniel Imfeld [@dimfeld](https://github.com/dimfeld)
Dwayne Schultz [@myshkin5](https://github.com/myshkin5)
Ellison Leão [@ellisonleao](https://github.com/ellisonleao)
Eugene Egorov [@EugeneEgorov](https://github.com/EugeneEgorov)
Faolan C-P [@fcheslack](https://github.com/fcheslack)
Gerhard Häring [@ghaering](https://github.com/ghaering)
Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
Guillaume J. Charmes [@creack](https://github.com/creack)
Guiseppe [@gm42](https://github.com/gm42)
Han Yu [@MoonighT](https://github.com/MoonighT)
Harrison Wright [@wright8191](https://github.com/wright8191)
Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy)
initialcontext [@initialcontext](https://github.com/initialcontext)
Isaac Saldana [@isaldana](https://github.com/isaldana)
Jack Lindamood [@cep21](https://github.com/cep21)
Jacob [@jdelgad](https://github.com/jdelgad)
Jayme Rotsaert [@jrots](https://github.com/jrots)
Jeremy Canady [@jrmycanady](https://github.com/jrmycanady)
Joe Buck [@four2five](https://github.com/four2five)
John Barker [@j16r](https://github.com/j16r)
John Goodall [@jgoodall](https://github.com/jgoodall)
John Stanford [@jxstanford](https://github.com/jxstanford)
jun [@coseyo](https://github.com/coseyo)
Junpei Tsuji [@jun06t](https://github.com/jun06t)
Kenta SUZUKI [@suzuken](https://github.com/suzuken)
Kyle Brandt [@kylebrandt](https://github.com/kylebrandt)
Leandro Piccilli [@lpic10](https://github.com/lpic10)
Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh)
Mara Kim [@autochthe](https://github.com/autochthe)
Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato)
Mark Costello [@mcos](https://github.com/mcos)
Medhi Bechina [@mdzor](https://github.com/mdzor)
mosa [@mosasiru](https://github.com/mosasiru)
naimulhaider [@naimulhaider](https://github.com/naimulhaider)
Naoya Yoshizawa [@azihsoyn](https://github.com/azihsoyn)
navins [@ishare](https://github.com/ishare)
Naoya Tsutsumi [@tutuming](https://github.com/tutuming)
Nicholas Wolff [@nwolff](https://github.com/nwolff)
@ -52,9 +74,15 @@ Sean DuBois [@Sean-Der](https://github.com/Sean-Der)
Shalin LK [@shalinlk](https://github.com/shalinlk)
Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic)
Stuart Warren [@Woz](https://github.com/stuart-warren)
Sulaiman [@salajlan](https://github.com/salajlan)
Sundar [@sundarv85](https://github.com/sundarv85)
Take [ww24](https://github.com/ww24)
Tetsuya Morimoto [@t2y](https://github.com/t2y)
TimeEmit [@TimeEmit](https://github.com/timeemit)
TusharM [@tusharm](https://github.com/tusharm)
wangtuo [@wangtuo](https://github.com/wangtuo)
wolfkdy [@wolfkdy](https://github.com/wolfkdy)
Wyndham Blanton [@wyndhblb](https://github.com/wyndhblb)
Yarden Bar [@ayashjorden](https://github.com/ayashjorden)
zakthomas [@zakthomas](https://github.com/zakthomas)
singham [@zhaochenxiao90](https://github.com/zhaochenxiao90)
@ -5,6 +5,7 @@ your issue/question without further inquiry. Thank you.

[ ] elastic.v2 (for Elasticsearch 1.x)
[ ] elastic.v3 (for Elasticsearch 2.x)
[ ] elastic.v5 (for Elasticsearch 5.x)

### Please describe the expected behavior
106  vendor/gopkg.in/olivere/elastic.v3/README.md → vendor/gopkg.in/olivere/elastic.v5/README.md  (generated, vendored)
@ -3,8 +3,8 @@
|
|||||||
Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
|
Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
|
||||||
[Go](http://www.golang.org/) programming language.
|
[Go](http://www.golang.org/) programming language.
|
||||||
|
|
||||||
[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v3)](https://travis-ci.org/olivere/elastic)
|
[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v5)](https://travis-ci.org/olivere/elastic)
|
||||||
[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v3)
|
[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v5)
|
||||||
[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
|
[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
|
||||||
|
|
||||||
See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
|
See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
|
||||||
@@ -12,40 +12,58 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for additional informati

 ## Releases

-**The release branches (e.g. [`release-branch.v3`](https://github.com/olivere/elastic/tree/release-branch.v3)) are actively being worked on and can break at any time. If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).**
+**The release branches (e.g. [`release-branch.v5`](https://github.com/olivere/elastic/tree/release-branch.v5))
+are actively being worked on and can break at any time.
+If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).**

 Here's the version matrix:

 Elasticsearch version | Elastic version | Package URL
 ----------------------|------------------|------------
+5.x | 5.0 | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5))
 2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3))
 1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
 0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))

 **Example:**

-You have installed Elasticsearch 2.1.1 and want to use Elastic. As listed above, you should use Elastic 3.0. So you first install the stable release of Elastic 3.0 from gopkg.in.
+You have installed Elasticsearch 5.0.0 and want to use Elastic.
+As listed above, you should use Elastic 5.0.
+So you first install the stable release of Elastic 5.0 from gopkg.in.

 ```sh
-$ go get gopkg.in/olivere/elastic.v3
+$ go get gopkg.in/olivere/elastic.v5
 ```

 You then import it with this import path:

 ```go
-import "gopkg.in/olivere/elastic.v3"
+import elastic "gopkg.in/olivere/elastic.v5"
 ```

+### Elastic 5.0
+
+Elastic 5.0 targets Elasticsearch 5.0.0 and later. Elasticsearch 5.0.0 was
+[released on 26th October 2016](https://www.elastic.co/blog/elasticsearch-5-0-0-released).
+
+Notice that there will be a lot of [breaking changes in Elasticsearch 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking-changes-5.0.html)
+and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v5/CHANGELOG-5.0.md)
+as we did in the transition from Elastic 2.0 (for Elasticsearch 1.x) to Elastic 3.0 (for Elasticsearch 2.x).
+
+Furthermore, the jump in version numbers will give us a chance to be in sync with the Elastic Stack.

 ### Elastic 3.0

-Elastic 3.0 targets Elasticsearch 2.0 and later. Elasticsearch 2.0.0 was [released on 28th October 2015](https://www.elastic.co/blog/elasticsearch-2-0-0-released).
+Elastic 3.0 targets Elasticsearch 2.x and is published via [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3).

-Notice that there are a lot of [breaking changes in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html) and we used this as an opportunity to [clean up and refactor Elastic as well](https://github.com/olivere/elastic/blob/release-branch.v3/CHANGELOG-3.0.md).
+Elastic 3.0 will only get critical bug fixes. You should update to a recent version.

 ### Elastic 2.0

 Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).

+Elastic 2.0 will only get critical bug fixes. You should update to a recent version.

 ### Elastic 1.0

 Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
@@ -68,7 +86,7 @@ to rewrite your application big time. More often than not it's renaming APIs
 and adding/removing features so that Elastic is in sync with Elasticsearch.

 Elastic has been used in production with the following Elasticsearch versions:
-0.90, 1.0-1.7. Furthermore, we use [Travis CI](https://travis-ci.org/)
+0.90, 1.0-1.7, and 2.0-2.4.1. Furthermore, we use [Travis CI](https://travis-ci.org/)
 to test Elastic with the most recent versions of Elasticsearch and Go.
 See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
 file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
@@ -83,20 +101,25 @@ Having said that, I hope you find the project useful.

 ## Getting Started

-The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go). The client connects to Elasticsearch on `http://127.0.0.1:9200` by default.
+The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go).
+The client connects to Elasticsearch on `http://127.0.0.1:9200` by default.

 You typically create one client for your app. Here's a complete example of
 creating a client, creating an index, adding a document, executing a search etc.

 ```go
+// Create a context
+ctx := context.Background()
+
 // Create a client
 client, err := elastic.NewClient()
 if err != nil {
 	// Handle error
+	panic(err)
 }

 // Create an index
-_, err = client.CreateIndex("twitter").Do()
+_, err = client.CreateIndex("twitter").Do(ctx)
 if err != nil {
 	// Handle error
 	panic(err)
@@ -109,8 +132,8 @@ _, err = client.Index().
 	Type("tweet").
 	Id("1").
 	BodyJson(tweet).
-	Refresh(true).
+	Refresh("true").
-	Do()
+	Do(ctx)
 if err != nil {
 	// Handle error
 	panic(err)
@@ -124,7 +147,7 @@ searchResult, err := client.Search().
 	Sort("user", true). // sort by "user" field, ascending
 	From(0).Size(10).   // take documents 0-9
 	Pretty(true).       // pretty print request and response JSON
-	Do()                // execute
+	Do(ctx)             // execute
 if err != nil {
 	// Handle error
 	panic(err)
@@ -171,7 +194,7 @@ if searchResult.Hits.TotalHits > 0 {
 }

 // Delete the index again
-_, err = client.DeleteIndex("twitter").Do()
+_, err = client.DeleteIndex("twitter").Do(ctx)
 if err != nil {
 	// Handle error
 	panic(err)
@@ -203,6 +226,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.

 - [x] Search
 - [x] Search Template
+- [ ] Multi Search Template
 - [ ] Search Shards API
 - [x] Suggesters
 - [x] Term Suggester
@@ -214,7 +238,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
 - [ ] Search Exists API
 - [ ] Validate API
 - [x] Explain API
-- [x] Percolator API
+- [ ] Profile API
 - [x] Field Stats API

 ### Aggregations
@@ -224,6 +248,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
 - [x] Cardinality
 - [x] Extended Stats
 - [x] Geo Bounds
+- [ ] Geo Centroid
 - [x] Max
 - [x] Min
 - [x] Percentiles
@@ -243,7 +268,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
 - [ ] GeoHash Grid
 - [x] Global
 - [x] Histogram
-- [x] IPv4 Range
+- [x] IP Range
 - [x] Missing
 - [x] Nested
 - [x] Range
@@ -257,11 +282,16 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
 - [x] Max Bucket
 - [x] Min Bucket
 - [x] Sum Bucket
+- [x] Stats Bucket
+- [ ] Extended Stats Bucket
+- [ ] Percentiles Bucket
 - [x] Moving Average
 - [x] Cumulative Sum
 - [x] Bucket Script
 - [x] Bucket Selector
 - [x] Serial Differencing
+- [ ] Matrix Aggregations
+- [ ] Matrix Stats
 - [x] Aggregation Metadata

 ### Indices APIs
@@ -271,6 +301,8 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
 - [x] Get Index
 - [x] Indices Exists
 - [x] Open / Close Index
+- [x] Shrink Index
+- [ ] Rollover Index
 - [x] Put Mapping
 - [x] Get Mapping
 - [ ] Get Field Mapping
@@ -278,17 +310,17 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
 - [x] Index Aliases
 - [x] Update Indices Settings
 - [x] Get Settings
-- [ ] Analyze
+- [x] Analyze
 - [x] Index Templates
-- [x] Warmers
+- [ ] Shadow Replica Indices
 - [x] Indices Stats
 - [ ] Indices Segments
 - [ ] Indices Recovery
+- [ ] Indices Shard Stores
 - [ ] Clear Cache
 - [x] Flush
 - [x] Refresh
-- [x] Optimize
+- [x] Force Merge
-- [ ] Shadow Replica Indices
 - [ ] Upgrade

 ### cat APIs
|
|||||||
- [ ] cat health
|
- [ ] cat health
|
||||||
- [ ] cat indices
|
- [ ] cat indices
|
||||||
- [ ] cat master
|
- [ ] cat master
|
||||||
|
- [ ] cat nodeattrs
|
||||||
- [ ] cat nodes
|
- [ ] cat nodes
|
||||||
- [ ] cat pending tasks
|
- [ ] cat pending tasks
|
||||||
- [ ] cat plugins
|
- [ ] cat plugins
|
||||||
- [ ] cat recovery
|
- [ ] cat recovery
|
||||||
|
- [ ] cat repositories
|
||||||
- [ ] cat thread pool
|
- [ ] cat thread pool
|
||||||
- [ ] cat shards
|
- [ ] cat shards
|
||||||
- [ ] cat segments
|
- [ ] cat segments
|
||||||
|
- [ ] cat snapshots
|
||||||
|
|
||||||
### Cluster APIs
|
### Cluster APIs
|
||||||
|
|
||||||
@ -318,10 +353,11 @@ The cat APIs are not implemented as of now. We think they are better suited for
|
|||||||
- [ ] Pending Cluster Tasks
|
- [ ] Pending Cluster Tasks
|
||||||
- [ ] Cluster Reroute
|
- [ ] Cluster Reroute
|
||||||
- [ ] Cluster Update Settings
|
- [ ] Cluster Update Settings
|
||||||
- [ ] Nodes Stats
|
- [x] Nodes Stats
|
||||||
- [x] Nodes Info
|
- [x] Nodes Info
|
||||||
- [x] Task Management API
|
- [x] Task Management API
|
||||||
- [ ] Nodes hot_threads
|
- [ ] Nodes hot_threads
|
||||||
|
- [ ] Cluster Allocation Explain API
|
||||||
|
|
||||||
### Query DSL
|
### Query DSL
|
||||||
|
|
||||||
@ -329,6 +365,8 @@ The cat APIs are not implemented as of now. We think they are better suited for
|
|||||||
- [x] Inner hits
|
- [x] Inner hits
|
||||||
- Full text queries
|
- Full text queries
|
||||||
- [x] Match Query
|
- [x] Match Query
|
||||||
|
- [x] Match Phrase Query
|
||||||
|
- [x] Match Phrase Prefix Query
|
||||||
- [x] Multi Match Query
|
- [x] Multi Match Query
|
||||||
- [x] Common Terms Query
|
- [x] Common Terms Query
|
||||||
- [x] Query String Query
|
- [x] Query String Query
|
||||||
@ -338,7 +376,6 @@ The cat APIs are not implemented as of now. We think they are better suited for
|
|||||||
- [x] Terms Query
|
- [x] Terms Query
|
||||||
- [x] Range Query
|
- [x] Range Query
|
||||||
- [x] Exists Query
|
- [x] Exists Query
|
||||||
- [x] Missing Query
|
|
||||||
- [x] Prefix Query
|
- [x] Prefix Query
|
||||||
- [x] Wildcard Query
|
- [x] Wildcard Query
|
||||||
- [x] Regexp Query
|
- [x] Regexp Query
|
||||||
@ -352,15 +389,11 @@ The cat APIs are not implemented as of now. We think they are better suited for
|
|||||||
- [x] Function Score Query
|
- [x] Function Score Query
|
||||||
- [x] Boosting Query
|
- [x] Boosting Query
|
||||||
- [x] Indices Query
|
- [x] Indices Query
|
||||||
- [x] And Query (deprecated)
|
|
||||||
- [x] Not Query
|
|
||||||
- [x] Or Query (deprecated)
|
|
||||||
- [ ] Filtered Query (deprecated)
|
|
||||||
- [ ] Limit Query (deprecated)
|
|
||||||
- Joining queries
|
- Joining queries
|
||||||
- [x] Nested Query
|
- [x] Nested Query
|
||||||
- [x] Has Child Query
|
- [x] Has Child Query
|
||||||
- [x] Has Parent Query
|
- [x] Has Parent Query
|
||||||
|
- [x] Parent Id Query
|
||||||
- Geo queries
|
- Geo queries
|
||||||
- [ ] GeoShape Query
|
- [ ] GeoShape Query
|
||||||
- [x] Geo Bounding Box Query
|
- [x] Geo Bounding Box Query
|
||||||
@ -372,6 +405,7 @@ The cat APIs are not implemented as of now. We think they are better suited for
|
|||||||
- [x] More Like This Query
|
- [x] More Like This Query
|
||||||
- [x] Template Query
|
- [x] Template Query
|
||||||
- [x] Script Query
|
- [x] Script Query
|
||||||
|
- [x] Percolate Query
|
||||||
- Span queries
|
- Span queries
|
||||||
- [ ] Span Term Query
|
- [ ] Span Term Query
|
||||||
- [ ] Span Multi Term Query
|
- [ ] Span Multi Term Query
|
||||||
@ -381,6 +415,9 @@ The cat APIs are not implemented as of now. We think they are better suited for
|
|||||||
- [ ] Span Not Query
|
- [ ] Span Not Query
|
||||||
- [ ] Span Containing Query
|
- [ ] Span Containing Query
|
||||||
- [ ] Span Within Query
|
- [ ] Span Within Query
|
||||||
|
- [ ] Span Field Masking Query
|
||||||
|
- [ ] Minimum Should Match
|
||||||
|
- [ ] Multi Term Query Rewrite
|
||||||
|
|
||||||
### Modules
|
### Modules
|
||||||
|
|
||||||
@ -392,12 +429,15 @@ The cat APIs are not implemented as of now. We think they are better suited for
|
|||||||
- [x] Sort by field
|
- [x] Sort by field
|
||||||
- [x] Sort by geo distance
|
- [x] Sort by geo distance
|
||||||
- [x] Sort by script
|
- [x] Sort by script
|
||||||
|
- [x] Sort by doc
|
||||||
|
|
||||||
### Scan
|
### Scrolling
|
||||||
|
|
||||||
Scrolling through documents (e.g. `search_type=scan`) are implemented via
|
Scrolling is supported via a `ScrollService`. It supports an iterator-like interface.
|
||||||
the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well.
|
The `ClearScroll` API is implemented as well.
|
||||||
|
|
||||||
|
A pattern for [efficiently scrolling in parallel](https://github.com/olivere/elastic/wiki/ScrollParallel)
|
||||||
|
is described in the [Wiki](https://github.com/olivere/elastic/wiki).
|
||||||
|
|
||||||
## How to contribute
|
## How to contribute
|
||||||
|
|
||||||
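The `### Scrolling` entry in the hunk above only says that a `ScrollService` with an iterator-like interface replaces the old Scan/Scroll pair. As a hedged illustration (not part of the diff), here is a minimal sketch of that iteration pattern, assuming the v5 API of this vendored client (`client.Scroll(...)`, one `Do(ctx)` call per page, and `io.EOF` marking the end of the cursor); `client` and `ctx` are the values from the Getting Started example and `io` is assumed to be imported:

```go
// Sketch only: page through all documents of the "twitter" index.
svc := client.Scroll("twitter").Size(100)
for {
	page, err := svc.Do(ctx)
	if err == io.EOF {
		break // cursor exhausted, no more pages
	}
	if err != nil {
		panic(err) // handle error
	}
	for _, hit := range page.Hits.Hits {
		_ = hit.Source // process the raw JSON of each hit here
	}
}
```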
@@ -406,9 +446,9 @@ Read [the contribution guidelines](https://github.com/olivere/elastic/blob/maste
 ## Credits

 Thanks a lot for the great folks working hard on
-[Elasticsearch](http://www.elasticsearch.org/)
+[Elasticsearch](https://www.elastic.co/products/elasticsearch)
 and
-[Go](http://www.golang.org/).
+[Go](https://golang.org/).

 Elastic uses portions of the
 [uritemplates](https://github.com/jtacoma/uritemplates) library
11 vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go (generated, vendored, new file)
@@ -0,0 +1,11 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

// AcknowledgedResponse is returned from various APIs. It simply indicates
// whether the operation is ack'd or not.
type AcknowledgedResponse struct {
	Acknowledged bool `json:"acknowledged"`
}
152 vendor/gopkg.in/olivere/elastic.v5/backoff.go (generated, vendored, new file)
@@ -0,0 +1,152 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"math"
	"math/rand"
	"sync"
	"time"
)

// BackoffFunc specifies the signature of a function that returns the
// time to wait before the next call to a resource. To stop retrying
// return false in the 2nd return value.
type BackoffFunc func(retry int) (time.Duration, bool)

// Backoff allows callers to implement their own Backoff strategy.
type Backoff interface {
	// Next implements a BackoffFunc.
	Next(retry int) (time.Duration, bool)
}

// -- ZeroBackoff --

// ZeroBackoff is a fixed backoff policy whose backoff time is always zero,
// meaning that the operation is retried immediately without waiting,
// indefinitely.
type ZeroBackoff struct{}

// Next implements BackoffFunc for ZeroBackoff.
func (b ZeroBackoff) Next(retry int) (time.Duration, bool) {
	return 0, true
}

// -- StopBackoff --

// StopBackoff is a fixed backoff policy that always returns false for
// Next(), meaning that the operation should never be retried.
type StopBackoff struct{}

// Next implements BackoffFunc for StopBackoff.
func (b StopBackoff) Next(retry int) (time.Duration, bool) {
	return 0, false
}

// -- ConstantBackoff --

// ConstantBackoff is a backoff policy that always returns the same delay.
type ConstantBackoff struct {
	interval time.Duration
}

// NewConstantBackoff returns a new ConstantBackoff.
func NewConstantBackoff(interval time.Duration) *ConstantBackoff {
	return &ConstantBackoff{interval: interval}
}

// Next implements BackoffFunc for ConstantBackoff.
func (b *ConstantBackoff) Next(retry int) (time.Duration, bool) {
	return b.interval, true
}

// -- Exponential --

// ExponentialBackoff implements the simple exponential backoff described by
// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
type ExponentialBackoff struct {
	sync.Mutex
	t float64 // initial timeout (in msec)
	f float64 // exponential factor (e.g. 2)
	m float64 // maximum timeout (in msec)
}

// NewExponentialBackoff returns a ExponentialBackoff backoff policy.
// Use initialTimeout to set the first/minimal interval
// and maxTimeout to set the maximum wait interval.
func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
	return &ExponentialBackoff{
		t: float64(int64(initialTimeout / time.Millisecond)),
		f: 2.0,
		m: float64(int64(maxTimeout / time.Millisecond)),
	}
}

// Next implements BackoffFunc for ExponentialBackoff.
func (b *ExponentialBackoff) Next(retry int) (time.Duration, bool) {
	b.Lock()
	defer b.Unlock()

	r := 1.0 + rand.Float64() // random number in [1..2]
	m := math.Min(r*b.t*math.Pow(b.f, float64(retry)), b.m)
	if m >= b.m {
		return 0, false
	}
	d := time.Duration(int64(m)) * time.Millisecond
	return d, true
}

// -- Simple Backoff --

// SimpleBackoff takes a list of fixed values for backoff intervals.
// Each call to Next returns the next value from that fixed list.
// After each value is returned, subsequent calls to Next will only return
// the last element. The values are optionally "jittered" (off by default).
type SimpleBackoff struct {
	sync.Mutex
	ticks  []int
	jitter bool
}

// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
// list of fixed intervals in milliseconds.
func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
	return &SimpleBackoff{
		ticks:  ticks,
		jitter: false,
	}
}

// Jitter enables or disables jittering values.
func (b *SimpleBackoff) Jitter(flag bool) *SimpleBackoff {
	b.Lock()
	b.jitter = flag
	b.Unlock()
	return b
}

// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis].
func jitter(millis int) int {
	if millis <= 0 {
		return 0
	}
	return millis/2 + rand.Intn(millis)
}

// Next implements BackoffFunc for SimpleBackoff.
func (b *SimpleBackoff) Next(retry int) (time.Duration, bool) {
	b.Lock()
	defer b.Unlock()

	if retry >= len(b.ticks) {
		return 0, false
	}

	ms := b.ticks[retry]
	if b.jitter {
		ms = jitter(ms)
	}
	return time.Duration(ms) * time.Millisecond, true
}

96 vendor/gopkg.in/olivere/elastic.v3/bulk.go → vendor/gopkg.in/olivere/elastic.v5/bulk.go (generated, vendored)

@@ -10,7 +10,9 @@ import (
 	"fmt"
 	"net/url"

-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )

 // BulkService allows for batching bulk requests and sending them to
@@ -22,7 +24,7 @@ import (
 // reuse BulkService to send many batches. You do not have to create a new
 // BulkService for each batch.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
 // for more details.
 type BulkService struct {
 	client *Client
@@ -30,18 +32,22 @@ type BulkService struct {
 	index    string
 	typ      string
 	requests []BulkableRequest
+	pipeline string
 	timeout  string
-	refresh  *bool
+	refresh  string
+	routing  string
+	waitForActiveShards string
 	pretty   bool

+	// estimated bulk size in bytes, up to the request index sizeInBytesCursor
 	sizeInBytes int64
+	sizeInBytesCursor int
 }

 // NewBulkService initializes a new BulkService.
 func NewBulkService(client *Client) *BulkService {
 	builder := &BulkService{
 		client: client,
-		requests: make([]BulkableRequest, 0),
 	}
 	return builder
 }
@@ -49,6 +55,7 @@ func NewBulkService(client *Client) *BulkService {
 func (s *BulkService) reset() {
 	s.requests = make([]BulkableRequest, 0)
 	s.sizeInBytes = 0
+	s.sizeInBytesCursor = 0
 }

 // Index specifies the index to use for all batches. You may also leave
@@ -73,11 +80,35 @@ func (s *BulkService) Timeout(timeout string) *BulkService {
 	return s
 }

-// Refresh, when set to true, tells Elasticsearch to make the bulk requests
-// available to search immediately after being processed. Normally, this
-// only happens after a specified refresh interval.
-func (s *BulkService) Refresh(refresh bool) *BulkService {
-	s.refresh = &refresh
+// Refresh controls when changes made by this request are made visible
+// to search. The allowed values are: "true" (refresh the relevant
+// primary and replica shards immediately), "wait_for" (wait for the
+// changes to be made visible by a refresh before applying), or "false"
+// (no refresh related actions).
+func (s *BulkService) Refresh(refresh string) *BulkService {
+	s.refresh = refresh
+	return s
+}
+
+// Routing specifies the routing value.
+func (s *BulkService) Routing(routing string) *BulkService {
+	s.routing = routing
+	return s
+}
+
+// Pipeline specifies the pipeline id to preprocess incoming documents with.
+func (s *BulkService) Pipeline(pipeline string) *BulkService {
+	s.pipeline = pipeline
+	return s
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active
+// before proceeding with the bulk operation. Defaults to 1, meaning the
+// primary shard only. Set to `all` for all shard copies, otherwise set to
+// any non-negative value less than or equal to the total number of copies
+// for the shard (number of replicas + 1).
+func (s *BulkService) WaitForActiveShards(waitForActiveShards string) *BulkService {
+	s.waitForActiveShards = waitForActiveShards
 	return s
 }

@@ -92,7 +123,6 @@ func (s *BulkService) Pretty(pretty bool) *BulkService {
 func (s *BulkService) Add(requests ...BulkableRequest) *BulkService {
 	for _, r := range requests {
 		s.requests = append(s.requests, r)
-		s.sizeInBytes += s.estimateSizeInBytes(r)
 	}
 	return s
 }
@@ -100,6 +130,13 @@ func (s *BulkService) Add(requests ...BulkableRequest) *BulkService {
 // EstimatedSizeInBytes returns the estimated size of all bulkable
 // requests added via Add.
 func (s *BulkService) EstimatedSizeInBytes() int64 {
+	if s.sizeInBytesCursor == len(s.requests) {
+		return s.sizeInBytes
+	}
+	for _, r := range s.requests[s.sizeInBytesCursor:] {
+		s.sizeInBytes += s.estimateSizeInBytes(r)
+		s.sizeInBytesCursor++
+	}
 	return s.sizeInBytes
 }

@@ -123,7 +160,7 @@ func (s *BulkService) NumberOfActions() int {
 }

 func (s *BulkService) bodyAsString() (string, error) {
-	buf := bytes.NewBufferString("")
+	var buf bytes.Buffer

 	for _, req := range s.requests {
 		source, err := req.Source()
@@ -131,10 +168,8 @@ func (s *BulkService) bodyAsString() (string, error) {
 			return "", err
 		}
 		for _, line := range source {
-			_, err := buf.WriteString(fmt.Sprintf("%s\n", line))
-			if err != nil {
-				return "", nil
-			}
+			buf.WriteString(line)
+			buf.WriteByte('\n')
 		}
 	}

@@ -144,7 +179,7 @@ func (s *BulkService) bodyAsString() (string, error) {
 // Do sends the batched requests to Elasticsearch. Note that, when successful,
 // you can reuse the BulkService for the next batch as the list of bulk
 // requests is cleared on success.
-func (s *BulkService) Do() (*BulkResponse, error) {
+func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) {
 	// No actions?
 	if s.NumberOfActions() == 0 {
 		return nil, errors.New("elastic: No bulk actions to commit")
@@ -158,7 +193,7 @@ func (s *BulkService) Do() (*BulkResponse, error) {

 	// Build url
 	path := "/"
-	if s.index != "" {
+	if len(s.index) > 0 {
 		index, err := uritemplates.Expand("{index}", map[string]string{
 			"index": s.index,
 		})
@@ -167,7 +202,7 @@ func (s *BulkService) Do() (*BulkResponse, error) {
 		}
 		path += index + "/"
 	}
-	if s.typ != "" {
+	if len(s.typ) > 0 {
 		typ, err := uritemplates.Expand("{type}", map[string]string{
 			"type": s.typ,
 		})
@@ -183,15 +218,24 @@ func (s *BulkService) Do() (*BulkResponse, error) {
 	if s.pretty {
 		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
 	}
-	if s.refresh != nil {
-		params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+	if s.pipeline != "" {
+		params.Set("pipeline", s.pipeline)
+	}
+	if s.refresh != "" {
+		params.Set("refresh", s.refresh)
+	}
+	if s.routing != "" {
+		params.Set("routing", s.routing)
 	}
 	if s.timeout != "" {
 		params.Set("timeout", s.timeout)
 	}
+	if s.waitForActiveShards != "" {
+		params.Set("wait_for_active_shards", s.waitForActiveShards)
+	}

 	// Get response
-	res, err := s.client.PerformRequest("POST", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
 	if err != nil {
 		return nil, err
 	}
@@ -260,7 +304,7 @@ type BulkResponseItem struct {
 	Index   string        `json:"_index,omitempty"`
 	Type    string        `json:"_type,omitempty"`
 	Id      string        `json:"_id,omitempty"`
-	Version int           `json:"_version,omitempty"`
+	Version int64         `json:"_version,omitempty"`
 	Status  int           `json:"status,omitempty"`
 	Found   bool          `json:"found,omitempty"`
 	Error   *ErrorDetails `json:"error,omitempty"`
@@ -292,7 +336,7 @@ func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
 	if r.Items == nil {
 		return nil
 	}
-	items := make([]*BulkResponseItem, 0)
+	var items []*BulkResponseItem
 	for _, item := range r.Items {
 		if result, found := item[action]; found {
 			items = append(items, result)
@@ -307,7 +351,7 @@ func (r *BulkResponse) ById(id string) []*BulkResponseItem {
 	if r.Items == nil {
 		return nil
 	}
-	items := make([]*BulkResponseItem, 0)
+	var items []*BulkResponseItem
 	for _, item := range r.Items {
 		for _, result := range item {
 			if result.Id == id {
@@ -324,7 +368,7 @@ func (r *BulkResponse) Failed() []*BulkResponseItem {
 	if r.Items == nil {
 		return nil
 	}
-	errors := make([]*BulkResponseItem, 0)
+	var errors []*BulkResponseItem
 	for _, item := range r.Items {
 		for _, result := range item {
 			if !(result.Status >= 200 && result.Status <= 299) {
@@ -341,7 +385,7 @@ func (r *BulkResponse) Succeeded() []*BulkResponseItem {
 	if r.Items == nil {
 		return nil
 	}
-	succeeded := make([]*BulkResponseItem, 0)
+	var succeeded []*BulkResponseItem
 	for _, item := range r.Items {
 		for _, result := range item {
 			if result.Status >= 200 && result.Status <= 299 {
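The bulk.go hunks above replace the boolean `Refresh` flag with a string value, add `Pipeline`, `Routing` and `WaitForActiveShards`, and thread a `context.Context` through `Do`. The following sketch (not part of the commit) shows the resulting call pattern; `client.Bulk()`, `ctx` and the `tweet` document are assumed from the surrounding README example, and error handling is kept minimal:

```go
// Sketch only: batch an index and a delete action with the v5 BulkService.
bulk := client.Bulk().
	Index("twitter").
	Type("tweet").
	Refresh("wait_for") // "true", "wait_for" or "false", per the new Refresh doc

bulk = bulk.Add(
	elastic.NewBulkIndexRequest().Id("1").Doc(tweet),
	elastic.NewBulkDeleteRequest().Id("2"),
)

res, err := bulk.Do(ctx) // a context is now required
if err != nil {
	panic(err) // handle error
}
for _, item := range res.Failed() {
	_ = item // inspect items whose HTTP status fell outside 2xx
}
```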
@@ -12,9 +12,9 @@ import (

 // -- Bulk delete request --

-// Bulk request to remove a document from Elasticsearch.
+// BulkDeleteRequest is a request to remove a document from Elasticsearch.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
 // for details.
 type BulkDeleteRequest struct {
 	BulkableRequest
@@ -23,7 +23,6 @@ type BulkDeleteRequest struct {
 	id          string
 	parent      string
 	routing     string
-	refresh     *bool
 	version     int64  // default is MATCH_ANY
 	versionType string // default is "internal"

@@ -73,15 +72,6 @@ func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
 	return r
 }

-// Refresh indicates whether to update the shards immediately after
-// the delete has been processed. Deleted documents will disappear
-// in search immediately at the cost of slower bulk performance.
-func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest {
-	r.refresh = &refresh
-	r.source = nil
-	return r
-}
-
 // Version indicates the version to be deleted as part of an optimistic
 // concurrency model.
 func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
@@ -110,7 +100,7 @@ func (r *BulkDeleteRequest) String() string {

 // Source returns the on-wire representation of the delete request,
 // split into an action-and-meta-data line and an (optional) source line.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
 // for details.
 func (r *BulkDeleteRequest) Source() ([]string, error) {
 	if r.source != nil {
@@ -141,9 +131,6 @@ func (r *BulkDeleteRequest) Source() ([]string, error) {
 	if r.versionType != "" {
 		deleteCommand["_version_type"] = r.versionType
 	}
-	if r.refresh != nil {
-		deleteCommand["refresh"] = *r.refresh
-	}
 	source["delete"] = deleteCommand

 	body, err := json.Marshal(source)
@@ -10,9 +10,9 @@ import (
 	"strings"
 )

-// Bulk request to add a document to Elasticsearch.
+// BulkIndexRequest is a request to add a document to Elasticsearch.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
 // for details.
 type BulkIndexRequest struct {
 	BulkableRequest
@@ -22,12 +22,12 @@ type BulkIndexRequest struct {
 	opType      string
 	routing     string
 	parent      string
-	timestamp   string
-	ttl         int64
-	refresh     *bool
 	version     int64  // default is MATCH_ANY
 	versionType string // default is "internal"
 	doc         interface{}
+	pipeline    string
+	retryOnConflict *int
+	ttl         string

 	source []string
 }
@@ -65,7 +65,7 @@ func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {

 // OpType specifies if this request should follow create-only or upsert
 // behavior. This follows the OpType of the standard document index API.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#operation-type
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#operation-type
 // for details.
 func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
 	r.opType = opType
@@ -87,34 +87,6 @@ func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
 	return r
 }

-// Timestamp can be used to index a document with a timestamp.
-// This is deprecated as of 2.0.0-beta2; you should use a normal date field
-// and set its value explicitly.
-func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest {
-	r.timestamp = timestamp
-	r.source = nil
-	return r
-}
-
-// Ttl (time to live) sets an expiration date for the document. Expired
-// documents will be expunged automatically.
-// This is deprecated as of 2.0.0-beta2 and will be replaced by a different
-// implementation in a future version.
-func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest {
-	r.ttl = ttl
-	r.source = nil
-	return r
-}
-
-// Refresh indicates whether to update the shards immediately after
-// the request has been processed. Newly added documents will appear
-// in search immediately at the cost of slower bulk performance.
-func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest {
-	r.refresh = &refresh
-	r.source = nil
-	return r
-}
-
 // Version indicates the version of the document as part of an optimistic
 // concurrency model.
 func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
@@ -126,7 +98,7 @@ func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
 // VersionType specifies how versions are created. It can be e.g. internal,
 // external, external_gte, or force.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#index-versioning
 // for details.
 func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
 	r.versionType = versionType
@@ -141,6 +113,27 @@ func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
 	return r
 }

+// RetryOnConflict specifies how often to retry in case of a version conflict.
+func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexRequest {
+	r.retryOnConflict = &retryOnConflict
+	r.source = nil
+	return r
+}
+
+// TTL is an expiration time for the document.
+func (r *BulkIndexRequest) TTL(ttl string) *BulkIndexRequest {
+	r.ttl = ttl
+	r.source = nil
+	return r
+}
+
+// Pipeline to use while processing the request.
+func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest {
+	r.pipeline = pipeline
+	r.source = nil
+	return r
+}
+
 // String returns the on-wire representation of the index request,
 // concatenated as a single string.
 func (r *BulkIndexRequest) String() string {
@@ -153,7 +146,7 @@ func (r *BulkIndexRequest) String() string {

 // Source returns the on-wire representation of the index request,
 // split into an action-and-meta-data line and an (optional) source line.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
 // for details.
 func (r *BulkIndexRequest) Source() ([]string, error) {
 	// { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
@@ -183,20 +176,20 @@ func (r *BulkIndexRequest) Source() ([]string, error) {
 	if r.parent != "" {
 		indexCommand["_parent"] = r.parent
 	}
-	if r.timestamp != "" {
-		indexCommand["_timestamp"] = r.timestamp
-	}
-	if r.ttl > 0 {
-		indexCommand["_ttl"] = r.ttl
-	}
 	if r.version > 0 {
 		indexCommand["_version"] = r.version
 	}
 	if r.versionType != "" {
 		indexCommand["_version_type"] = r.versionType
 	}
-	if r.refresh != nil {
-		indexCommand["refresh"] = *r.refresh
+	if r.retryOnConflict != nil {
+		indexCommand["_retry_on_conflict"] = *r.retryOnConflict
+	}
+	if r.ttl != "" {
+		indexCommand["_ttl"] = r.ttl
+	}
+	if r.pipeline != "" {
+		indexCommand["pipeline"] = r.pipeline
 	}
 	command[r.opType] = indexCommand
 	line, err := json.Marshal(command)
|
|||||||
// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
// Use of this source code is governed by a MIT-license.
|
// Use of this source code is governed by a MIT-license.
|
||||||
// See http://olivere.mit-license.org/license.txt for details.
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
@ -9,7 +9,7 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"gopkg.in/olivere/elastic.v3/backoff"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BulkProcessorService allows to easily process bulk requests. It allows setting
|
// BulkProcessorService allows to easily process bulk requests. It allows setting
|
||||||
@ -129,9 +129,14 @@ func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService {
|
|||||||
// You can interoperate with the BulkProcessor returned by Do, e.g. Start and
|
// You can interoperate with the BulkProcessor returned by Do, e.g. Start and
|
||||||
// Stop (or Close) it.
|
// Stop (or Close) it.
|
||||||
//
|
//
|
||||||
|
// Context is an optional context that is passed into the bulk request
|
||||||
|
// service calls. In contrast to other operations, this context is used in
|
||||||
|
// a long running process. You could use it to pass e.g. loggers, but you
|
||||||
|
// shouldn't use it for cancellation.
|
||||||
|
//
|
||||||
// Calling Do several times returns new BulkProcessors. You probably don't
|
// Calling Do several times returns new BulkProcessors. You probably don't
|
||||||
// want to do this. BulkProcessorService implements just a builder pattern.
|
// want to do this. BulkProcessorService implements just a builder pattern.
|
||||||
func (s *BulkProcessorService) Do() (*BulkProcessor, error) {
|
func (s *BulkProcessorService) Do(ctx context.Context) (*BulkProcessor, error) {
|
||||||
p := newBulkProcessor(
|
p := newBulkProcessor(
|
||||||
s.c,
|
s.c,
|
||||||
s.beforeFn,
|
s.beforeFn,
|
||||||
@ -145,7 +150,7 @@ func (s *BulkProcessorService) Do() (*BulkProcessor, error) {
|
|||||||
s.initialTimeout,
|
s.initialTimeout,
|
||||||
s.maxTimeout)
|
s.maxTimeout)
|
||||||
|
|
||||||
err := p.Start()
|
err := p.Start(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -270,7 +275,7 @@ func newBulkProcessor(
|
|||||||
|
|
||||||
// Start starts the bulk processor. If the processor is already started,
|
// Start starts the bulk processor. If the processor is already started,
|
||||||
// nil is returned.
|
// nil is returned.
|
||||||
func (p *BulkProcessor) Start() error {
|
func (p *BulkProcessor) Start(ctx context.Context) error {
|
||||||
p.startedMu.Lock()
|
p.startedMu.Lock()
|
||||||
defer p.startedMu.Unlock()
|
defer p.startedMu.Unlock()
|
||||||
|
|
||||||
@ -292,7 +297,7 @@ func (p *BulkProcessor) Start() error {
|
|||||||
for i := 0; i < p.numWorkers; i++ {
|
for i := 0; i < p.numWorkers; i++ {
|
||||||
p.workerWg.Add(1)
|
p.workerWg.Add(1)
|
||||||
p.workers[i] = newBulkWorker(p, i)
|
p.workers[i] = newBulkWorker(p, i)
|
||||||
go p.workers[i].work()
|
go p.workers[i].work(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start the ticker for flush (if enabled)
|
// Start the ticker for flush (if enabled)
|
||||||
@ -420,7 +425,7 @@ func newBulkWorker(p *BulkProcessor, i int) *bulkWorker {
|
|||||||
|
|
||||||
// work waits for bulk requests and manual flush calls on the respective
|
// work waits for bulk requests and manual flush calls on the respective
|
||||||
// channels and is invoked as a goroutine when the bulk processor is started.
|
// channels and is invoked as a goroutine when the bulk processor is started.
|
||||||
func (w *bulkWorker) work() {
|
func (w *bulkWorker) work(ctx context.Context) {
|
||||||
defer func() {
|
defer func() {
|
||||||
w.p.workerWg.Done()
|
w.p.workerWg.Done()
|
||||||
close(w.flushAckC)
|
close(w.flushAckC)
|
||||||
@ -435,20 +440,20 @@ func (w *bulkWorker) work() {
|
|||||||
// Received a new request
|
// Received a new request
|
||||||
w.service.Add(req)
|
w.service.Add(req)
|
||||||
if w.commitRequired() {
|
if w.commitRequired() {
|
||||||
w.commit() // TODO swallow errors here?
|
w.commit(ctx) // TODO swallow errors here?
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Channel closed: Stop.
|
// Channel closed: Stop.
|
||||||
stop = true
|
stop = true
|
||||||
if w.service.NumberOfActions() > 0 {
|
if w.service.NumberOfActions() > 0 {
|
||||||
w.commit() // TODO swallow errors here?
|
w.commit(ctx) // TODO swallow errors here?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-w.flushC:
|
case <-w.flushC:
|
||||||
// Commit outstanding requests
|
// Commit outstanding requests
|
||||||
if w.service.NumberOfActions() > 0 {
|
if w.service.NumberOfActions() > 0 {
|
||||||
w.commit() // TODO swallow errors here?
|
w.commit(ctx) // TODO swallow errors here?
|
||||||
}
|
}
|
||||||
w.flushAckC <- struct{}{}
|
w.flushAckC <- struct{}{}
|
||||||
}
|
}
|
||||||
@@ -457,19 +462,19 @@ func (w *bulkWorker) work() {
 
 // commit commits the bulk requests in the given service,
 // invoking callbacks as specified.
-func (w *bulkWorker) commit() error {
+func (w *bulkWorker) commit(ctx context.Context) error {
 	var res *BulkResponse
 
 	// commitFunc will commit bulk requests and, on failure, be retried
 	// via exponential backoff
 	commitFunc := func() error {
 		var err error
-		res, err = w.service.Do()
+		res, err = w.service.Do(ctx)
 		return err
 	}
 	// notifyFunc will be called if retry fails
-	notifyFunc := func(err error, d time.Duration) {
-		w.p.c.errorf("elastic: bulk processor %q failed but will retry in %v: %v", w.p.name, d, err)
+	notifyFunc := func(err error) {
+		w.p.c.errorf("elastic: bulk processor %q failed but will retry: %v", w.p.name, err)
 	}
 
 	id := atomic.AddInt64(&w.p.executionId, 1)
@@ -490,8 +495,8 @@ func (w *bulkWorker) commit() error {
 	}
 
 	// Commit bulk requests
-	policy := backoff.NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout).SendStop(true)
-	err := backoff.RetryNotify(commitFunc, policy, notifyFunc)
+	policy := NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout)
+	err := RetryNotify(commitFunc, policy, notifyFunc)
 	w.updateStats(res)
 	if err != nil {
 		w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err)
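
For reference, the backoff helpers used above now live in the elastic package itself rather than an external backoff package. A minimal sketch of how they compose from a caller's point of view, assuming the usual imports (log, time); the durations and the callback bodies are illustrative only, not the library's actual values:

    commitFunc := func() error {
        // Send the buffered bulk request here; a non-nil error triggers a retry.
        return nil
    }
    notifyFunc := func(err error) {
        log.Printf("bulk commit failed, will retry: %v", err)
    }
    policy := elastic.NewExponentialBackoff(100*time.Millisecond, 8*time.Second)
    err := elastic.RetryNotify(commitFunc, policy, notifyFunc)
    _ = err
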
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -10,7 +10,7 @@ import (
 
 // -- Bulkable request (index/update/delete) --
 
-// Generic interface to bulkable requests.
+// BulkableRequest is a generic interface to bulkable requests.
 type BulkableRequest interface {
 	fmt.Stringer
 	Source() ([]string, error)
@@ -10,9 +10,9 @@ import (
 	"strings"
 )
 
-// Bulk request to update a document in Elasticsearch.
+// BulkUpdateRequest is a request to update a document in Elasticsearch.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
 // for details.
 type BulkUpdateRequest struct {
 	BulkableRequest
@@ -23,15 +23,14 @@ type BulkUpdateRequest struct {
 	routing string
 	parent  string
 	script  *Script
+	scriptedUpsert *bool
 	version int64  // default is MATCH_ANY
 	versionType string // default is "internal"
 	retryOnConflict *int
-	refresh *bool
 	upsert      interface{}
 	docAsUpsert *bool
+	detectNoop  *bool
 	doc         interface{}
-	ttl         int64
-	timestamp   string
 
 	source []string
 }
@@ -79,8 +78,8 @@ func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
 }
 
 // Script specifies an update script.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-bulk.html#bulk-update
-// and https://www.elastic.co/guide/en/elasticsearch/reference/2.x/modules-scripting.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html#bulk-update
+// and https://www.elastic.co/guide/en/elasticsearch/reference/5.2/modules-scripting.html
 // for details.
 func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest {
 	r.script = script
@@ -88,6 +87,16 @@ func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest {
 	return r
 }
 
+// ScripedUpsert specifies if your script will run regardless of
+// whether the document exists or not.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-update.html#_literal_scripted_upsert_literal
+func (r *BulkUpdateRequest) ScriptedUpsert(upsert bool) *BulkUpdateRequest {
+	r.scriptedUpsert = &upsert
+	r.source = nil
+	return r
+}
+
 // RetryOnConflict specifies how often to retry in case of a version conflict.
 func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
 	r.retryOnConflict = &retryOnConflict
@@ -111,15 +120,6 @@ func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
 	return r
 }
 
-// Refresh indicates whether to update the shards immediately after
-// the request has been processed. Updated documents will appear
-// in search immediately at the cost of slower bulk performance.
-func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest {
-	r.refresh = &refresh
-	r.source = nil
-	return r
-}
-
 // Doc specifies the updated document.
 func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
 	r.doc = doc
@@ -130,7 +130,7 @@ func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
 // DocAsUpsert indicates whether the contents of Doc should be used as
 // the Upsert value.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-update.html#_literal_doc_as_upsert_literal
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-update.html#_literal_doc_as_upsert_literal
 // for details.
 func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
 	r.docAsUpsert = &docAsUpsert
@@ -138,6 +138,15 @@ func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
 	return r
 }
 
+// DetectNoop specifies whether changes that don't affect the document
+// should be ignored (true) or unignored (false). This is enabled by default
+// in Elasticsearch.
+func (r *BulkUpdateRequest) DetectNoop(detectNoop bool) *BulkUpdateRequest {
+	r.detectNoop = &detectNoop
+	r.source = nil
+	return r
+}
+
 // Upsert specifies the document to use for upserts. It will be used for
 // create if the original document does not exist.
 func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
@@ -146,22 +155,6 @@ func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
 	return r
 }
 
-// Ttl specifies the time to live, and optional expiry time.
-// This is deprecated as of 2.0.0-beta2.
-func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest {
-	r.ttl = ttl
-	r.source = nil
-	return r
-}
-
-// Timestamp specifies a timestamp for the document.
-// This is deprecated as of 2.0.0-beta2.
-func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest {
-	r.timestamp = timestamp
-	r.source = nil
-	return r
-}
-
 // String returns the on-wire representation of the update request,
 // concatenated as a single string.
 func (r *BulkUpdateRequest) String() string {
@@ -193,9 +186,9 @@ func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error)
 
 // Source returns the on-wire representation of the update request,
 // split into an action-and-meta-data line and an (optional) source line.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
 // for details.
-func (r BulkUpdateRequest) Source() ([]string, error) {
+func (r *BulkUpdateRequest) Source() ([]string, error) {
 	// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
 	// { "doc" : { "field1" : "value1", ... } }
 	// or
@@ -226,21 +219,12 @@ func (r BulkUpdateRequest) Source() ([]string, error) {
 	if r.parent != "" {
 		updateCommand["_parent"] = r.parent
 	}
-	if r.timestamp != "" {
-		updateCommand["_timestamp"] = r.timestamp
-	}
-	if r.ttl > 0 {
-		updateCommand["_ttl"] = r.ttl
-	}
 	if r.version > 0 {
 		updateCommand["_version"] = r.version
 	}
 	if r.versionType != "" {
 		updateCommand["_version_type"] = r.versionType
 	}
-	if r.refresh != nil {
-		updateCommand["refresh"] = *r.refresh
-	}
 	if r.retryOnConflict != nil {
 		updateCommand["_retry_on_conflict"] = *r.retryOnConflict
 	}
@@ -256,9 +240,15 @@ func (r BulkUpdateRequest) Source() ([]string, error) {
 	if r.docAsUpsert != nil {
 		source["doc_as_upsert"] = *r.docAsUpsert
 	}
+	if r.detectNoop != nil {
+		source["detect_noop"] = *r.detectNoop
+	}
 	if r.upsert != nil {
 		source["upsert"] = r.upsert
 	}
+	if r.scriptedUpsert != nil {
+		source["scripted_upsert"] = *r.scriptedUpsert
+	}
 	if r.doc != nil {
 		// {"doc":{...}}
 		source["doc"] = r.doc
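
As a quick illustration of the updated bulk-update options, a minimal sketch assuming the package's NewBulkUpdateRequest constructor and the usual Index/Type/Id setters; the index, type, id and document fields are placeholders:

    req := elastic.NewBulkUpdateRequest().
        Index("events").Type("event").Id("bucket/object.png").
        Doc(map[string]interface{}{"size": 1024}).
        DocAsUpsert(true).
        DetectNoop(true)
    lines, err := req.Source() // action-and-meta-data line plus source line
    _, _ = lines, err
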
@@ -8,21 +8,17 @@ import "net/url"
 
 // canonicalize takes a list of URLs and returns its canonicalized form, i.e.
 // remove anything but scheme, userinfo, host, path, and port.
-// It also removes all trailing slashes. It also skips invalid URLs or
-// URLs that do not use protocol http or https.
+// It also removes all trailing slashes. Invalid URLs or URLs that do not
+// use protocol http or https are skipped.
 //
 // Example:
 // http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200
 // http://127.0.0.1:9200/db1/ -> http://127.0.0.1:9200/db1
-// 127.0.0.1:9200 -> http://127.0.0.1:9200
 func canonicalize(rawurls ...string) []string {
 	var canonicalized []string
 	for _, rawurl := range rawurls {
 		u, err := url.Parse(rawurl)
 		if err == nil {
-			if len(u.Scheme) == 0 {
-				u.Scheme = DefaultScheme
-			}
 			if u.Scheme == "http" || u.Scheme == "https" {
 				// Trim trailing slashes
 				for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' {
@@ -8,11 +8,13 @@ import (
 	"fmt"
 	"net/url"
 	"strings"
 
+	"golang.org/x/net/context"
 )
 
 // ClearScrollService clears one or more scroll contexts by their ids.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#_clear_scroll_api
 // for details.
 type ClearScrollService struct {
 	client *Client
@@ -67,7 +69,7 @@ func (s *ClearScrollService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *ClearScrollService) Do() (*ClearScrollResponse, error) {
+func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -83,7 +85,7 @@ func (s *ClearScrollService) Do() (*ClearScrollResponse, error) {
 	body := strings.Join(s.scrollId, ",")
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("DELETE", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "DELETE", path, params, body)
 	if err != nil {
 		return nil, err
 	}
345 vendor/gopkg.in/olivere/elastic.v3/client.go → vendor/gopkg.in/olivere/elastic.v5/client.go (generated, vendored)
@@ -9,7 +9,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"math/rand"
 	"net/http"
 	"net/http/httputil"
 	"net/url"
@@ -17,13 +16,16 @@ import (
 	"strings"
 	"sync"
 	"time"
 
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
 )
 
 const (
 	// Version is the current version of Elastic.
-	Version = "3.0.43"
+	Version = "5.0.31"
 
-	// DefaultUrl is the default endpoint of Elasticsearch on the local machine.
+	// DefaultURL is the default endpoint of Elasticsearch on the local machine.
 	// It is used e.g. when initializing a new Client without a specific URL.
 	DefaultURL = "http://127.0.0.1:9200"
 
@@ -67,11 +69,6 @@ const (
 	// process, DefaultSnifferTimeoutStartup is used.
 	DefaultSnifferTimeout = 2 * time.Second
 
-	// DefaultMaxRetries is the number of retries for a single request after
-	// Elastic will give up and return an error. It is zero by default, so
-	// retry is disabled by default.
-	DefaultMaxRetries = 0
-
 	// DefaultSendGetBodyAs is the HTTP method to use when elastic is sending
 	// a GET request with a body.
 	DefaultSendGetBodyAs = "GET"
@@ -94,6 +91,9 @@ var (
 	// ErrTimeout is raised when a request timed out, e.g. when WaitForStatus
 	// didn't return in time.
 	ErrTimeout = errors.New("timeout")
+
+	// noRetries is a retrier that does not retry.
+	noRetries = NewStopRetrier()
 )
 
 // ClientOptionFunc is a function that configures a Client.
@@ -114,7 +114,6 @@ type Client struct {
 	errorlog Logger // error log for critical messages
 	infolog  Logger // information log for e.g. response times
 	tracelog Logger // trace log for debugging
-	maxRetries int // max. number of retries
 	scheme string // http or https
 	healthcheckEnabled bool // healthchecks enabled or disabled
 	healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup
@@ -125,6 +124,7 @@ type Client struct {
 	snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup
 	snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API
 	snifferInterval time.Duration // interval between sniffing
+	snifferCallback SnifferCallback // callback to modify the sniffing decision
 	snifferStop chan bool // notify sniffer to stop, and notify back
 	decoder Decoder // used to decode data sent from Elasticsearch
 	basicAuth bool // indicates whether to send HTTP Basic Auth credentials
@@ -133,6 +133,7 @@ type Client struct {
 	sendGetBodyAs string // override for when sending a GET with a body
 	requiredPlugins []string // list of required plugins
 	gzipEnabled bool // gzip compression enabled or disabled (default)
+	retrier Retrier // strategy for retries
 }
 
 // NewClient creates a new client to work with Elasticsearch.
@@ -148,14 +149,13 @@ type Client struct {
 //
 //   client, err := elastic.NewClient(
 //     elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"),
-//     elastic.SetMaxRetries(10),
 //     elastic.SetBasicAuth("user", "secret"))
 //
 // If no URL is configured, Elastic uses DefaultURL by default.
 //
 // If the sniffer is enabled (the default), the new client then sniffes
 // the cluster via the Nodes Info API
-// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info).
+// (see https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-nodes-info.html#cluster-nodes-info).
 // It uses the URLs specified by the caller. The caller is responsible
 // to only pass a list of URLs of nodes that belong to the same cluster.
 // This sniffing process is run on startup and periodically.
@@ -174,7 +174,10 @@ type Client struct {
 //
 // Connections are automatically marked as dead or healthy while
 // making requests to Elasticsearch. When a request fails, Elastic will
-// retry up to a maximum number of retries configured with SetMaxRetries.
+// call into the Retry strategy which can be specified with SetRetry.
+// The Retry strategy is also responsible for handling backoff i.e. the time
+// to wait before starting the next request. There are various standard
+// backoff implementations, e.g. ExponentialBackoff or SimpleBackoff.
 // Retries are disabled by default.
 //
 // If no HttpClient is configured, then http.DefaultClient is used.
@@ -191,7 +194,6 @@ func NewClient(options ...ClientOptionFunc) (*Client, error) {
 		cindex: -1,
 		scheme: DefaultScheme,
 		decoder: &DefaultDecoder{},
-		maxRetries: DefaultMaxRetries,
 		healthcheckEnabled: DefaultHealthcheckEnabled,
 		healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup,
 		healthcheckTimeout: DefaultHealthcheckTimeout,
@@ -201,9 +203,11 @@ func NewClient(options ...ClientOptionFunc) (*Client, error) {
 		snifferTimeoutStartup: DefaultSnifferTimeoutStartup,
 		snifferTimeout: DefaultSnifferTimeout,
 		snifferInterval: DefaultSnifferInterval,
+		snifferCallback: nopSnifferCallback,
 		snifferStop: make(chan bool),
 		sendGetBodyAs: DefaultSendGetBodyAs,
 		gzipEnabled: DefaultGzipEnabled,
+		retrier: noRetries, // no retries by default
 	}
 
 	// Run the options on it
@@ -213,11 +217,25 @@ func NewClient(options ...ClientOptionFunc) (*Client, error) {
 		}
 	}
 
+	// Use a default URL and normalize them
 	if len(c.urls) == 0 {
 		c.urls = []string{DefaultURL}
 	}
 	c.urls = canonicalize(c.urls...)
 
+	// If the URLs have auth info, use them here as an alternative to SetBasicAuth
+	if !c.basicAuth {
+		for _, urlStr := range c.urls {
+			u, err := url.Parse(urlStr)
+			if err == nil && u.User != nil {
+				c.basicAuth = true
+				c.basicAuthUsername = u.User.Username()
+				c.basicAuthPassword, _ = u.User.Password()
+				break
+			}
+		}
+	}
+
 	// Check if we can make a request to any of the specified URLs
 	if c.healthcheckEnabled {
 		if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil {
@@ -293,7 +311,6 @@ func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) {
 		cindex: -1,
 		scheme: DefaultScheme,
 		decoder: &DefaultDecoder{},
-		maxRetries: 1,
 		healthcheckEnabled: false,
 		healthcheckTimeoutStartup: off,
 		healthcheckTimeout: off,
@@ -303,9 +320,11 @@ func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) {
 		snifferTimeoutStartup: off,
 		snifferTimeout: off,
 		snifferInterval: off,
+		snifferCallback: nopSnifferCallback,
 		snifferStop: make(chan bool),
 		sendGetBodyAs: DefaultSendGetBodyAs,
 		gzipEnabled: DefaultGzipEnabled,
+		retrier: noRetries, // no retries by default
 	}
 
 	// Run the options on it
@@ -315,11 +334,25 @@ func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) {
 		}
 	}
 
+	// Use a default URL and normalize them
 	if len(c.urls) == 0 {
 		c.urls = []string{DefaultURL}
 	}
 	c.urls = canonicalize(c.urls...)
 
+	// If the URLs have auth info, use them here as an alternative to SetBasicAuth
+	if !c.basicAuth {
+		for _, urlStr := range c.urls {
+			u, err := url.Parse(urlStr)
+			if err == nil && u.User != nil {
+				c.basicAuth = true
+				c.basicAuthUsername = u.User.Username()
+				c.basicAuthPassword, _ = u.User.Password()
+				break
+			}
+		}
+	}
+
 	for _, url := range c.urls {
 		c.conns = append(c.conns, newConn(url, url))
 	}
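
With this change, credentials embedded in the endpoint URL are picked up automatically as an alternative to SetBasicAuth. A minimal sketch, assuming the usual imports (log); host, user and password are placeholders:

    client, err := elastic.NewClient(
        elastic.SetURL("http://user:secret@127.0.0.1:9200"),
    )
    if err != nil {
        log.Fatal(err)
    }
    _ = client
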
@@ -434,6 +467,27 @@ func SetSnifferInterval(interval time.Duration) ClientOptionFunc {
 	}
 }
 
+// SnifferCallback defines the protocol for sniffing decisions.
+type SnifferCallback func(*NodesInfoNode) bool
+
+// nopSnifferCallback is the default sniffer callback: It accepts
+// all nodes the sniffer finds.
+var nopSnifferCallback = func(*NodesInfoNode) bool { return true }
+
+// SetSnifferCallback allows the caller to modify sniffer decisions.
+// When setting the callback, the given SnifferCallback is called for
+// each (healthy) node found during the sniffing process.
+// If the callback returns false, the node is ignored: No requests
+// are routed to it.
+func SetSnifferCallback(f SnifferCallback) ClientOptionFunc {
+	return func(c *Client) error {
+		if f != nil {
+			c.snifferCallback = f
+		}
+		return nil
+	}
+}
+
 // SetHealthcheck enables or disables healthchecks (enabled by default).
 func SetHealthcheck(enabled bool) ClientOptionFunc {
 	return func(c *Client) error {
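
A minimal sketch of wiring the new sniffer callback into a client; the filter body is a placeholder — inspect whatever *NodesInfoNode fields matter for your cluster and return false to exclude a node:

    client, err := elastic.NewClient(
        elastic.SetSnifferCallback(func(node *elastic.NodesInfoNode) bool {
            // Returning false keeps this node out of the connection pool.
            return node != nil
        }),
    )
    _, _ = client, err
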
@@ -476,12 +530,24 @@ func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc {
 
 // SetMaxRetries sets the maximum number of retries before giving up when
 // performing a HTTP request to Elasticsearch.
+//
+// Deprecated: Replace with a Retry implementation.
 func SetMaxRetries(maxRetries int) ClientOptionFunc {
 	return func(c *Client) error {
 		if maxRetries < 0 {
 			return errors.New("MaxRetries must be greater than or equal to 0")
+		} else if maxRetries == 0 {
+			c.retrier = noRetries
+		} else {
+			// Create a Retrier that will wait for 100ms (+/- jitter) between requests.
+			// This resembles the old behavior with maxRetries.
+			ticks := make([]int, maxRetries)
+			for i := 0; i < len(ticks); i++ {
+				ticks[i] = 100
+			}
+			backoff := NewSimpleBackoff(ticks...)
+			c.retrier = NewBackoffRetrier(backoff)
 		}
-		c.maxRetries = maxRetries
 		return nil
 	}
 }
@@ -546,7 +612,7 @@ func SetTraceLog(logger Logger) ClientOptionFunc {
 	}
 }
 
-// SendGetBodyAs specifies the HTTP method to use when sending a GET request
+// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request
 // with a body. It is GET by default.
 func SetSendGetBodyAs(httpMethod string) ClientOptionFunc {
 	return func(c *Client) error {
@@ -555,6 +621,18 @@ func SetSendGetBodyAs(httpMethod string) ClientOptionFunc {
 	}
 }
 
+// SetRetrier specifies the retry strategy that handles errors during
+// HTTP request/response with Elasticsearch.
+func SetRetrier(retrier Retrier) ClientOptionFunc {
+	return func(c *Client) error {
+		if retrier == nil {
+			retrier = noRetries // no retries by default
+		}
+		c.retrier = retrier
+		return nil
+	}
+}
+
 // String returns a string representation of the client status.
 func (c *Client) String() string {
 	c.connsMu.Lock()
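
Instead of the now-deprecated SetMaxRetries, a retry strategy can be supplied explicitly. A minimal sketch; the backoff ticks (in milliseconds) are illustrative only:

    retrier := elastic.NewBackoffRetrier(elastic.NewSimpleBackoff(100, 200, 400))
    client, err := elastic.NewClient(elastic.SetRetrier(retrier))
    _, _ = client, err
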
@@ -681,9 +759,10 @@ func (c *Client) dumpResponse(resp *http.Response) {
 func (c *Client) sniffer() {
 	c.mu.RLock()
 	timeout := c.snifferTimeout
+	interval := c.snifferInterval
 	c.mu.RUnlock()
 
-	ticker := time.NewTicker(timeout)
+	ticker := time.NewTicker(interval)
 	defer ticker.Stop()
 
 	for {
@@ -711,8 +790,8 @@ func (c *Client) sniff(timeout time.Duration) error {
 	}
 
 	// Use all available URLs provided to sniff the cluster.
+	var urls []string
 	urlsMap := make(map[string]bool)
-	urls := make([]string, 0)
 
 	// Add all URLs provided on startup
 	for _, url := range c.urls {
@@ -763,7 +842,7 @@ func (c *Client) sniff(timeout time.Duration) error {
 // from the result of calling Nodes Info API. Otherwise, an empty array
 // is returned.
 func (c *Client) sniffNode(url string) []*conn {
-	nodes := make([]*conn, 0)
+	var nodes []*conn
 
 	// Call the Nodes Info API at /_nodes/http
 	req, err := NewRequest("GET", url+"/_nodes/http")
@@ -792,20 +871,14 @@ func (c *Client) sniffNode(url string) []*conn {
 	var info NodesInfoResponse
 	if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
 		if len(info.Nodes) > 0 {
-			switch c.scheme {
-			case "https":
-				for nodeID, node := range info.Nodes {
-					url := c.extractHostname("https", node.HTTPSAddress)
-					if url != "" {
-						nodes = append(nodes, newConn(nodeID, url))
-					}
-				}
-			default:
-				for nodeID, node := range info.Nodes {
-					url := c.extractHostname("http", node.HTTPAddress)
-					if url != "" {
-						nodes = append(nodes, newConn(nodeID, url))
-					}
-				}
-			}
+			for nodeID, node := range info.Nodes {
+				if c.snifferCallback(node) {
+					if node.HTTP != nil && len(node.HTTP.PublishAddress) > 0 {
+						url := c.extractHostname(c.scheme, node.HTTP.PublishAddress)
+						if url != "" {
+							nodes = append(nodes, newConn(nodeID, url))
+						}
+					}
+				}
+			}
 		}
 	}
@@ -839,11 +912,10 @@ func (c *Client) extractHostname(scheme, address string) string {
 func (c *Client) updateConns(conns []*conn) {
 	c.connsMu.Lock()
 
-	newConns := make([]*conn, 0)
-
 	// Build up new connections:
 	// If we find an existing connection, use that (including no. of failures etc.).
 	// If we find a new connection, add it.
+	var newConns []*conn
 	for _, conn := range conns {
 		var found bool
 		for _, oldConn := range c.conns {
@@ -870,9 +942,10 @@ func (c *Client) updateConns(conns []*conn) {
 func (c *Client) healthchecker() {
 	c.mu.RLock()
 	timeout := c.healthcheckTimeout
+	interval := c.healthcheckInterval
 	c.mu.RUnlock()
 
-	ticker := time.NewTicker(timeout)
+	ticker := time.NewTicker(interval)
 	defer ticker.Stop()
 
 	for {
@@ -906,34 +979,52 @@ func (c *Client) healthcheck(timeout time.Duration, force bool) {
 	conns := c.conns
 	c.connsMu.RUnlock()
 
-	timeoutInMillis := int64(timeout / time.Millisecond)
-
 	for _, conn := range conns {
-		params := make(url.Values)
-		params.Set("timeout", fmt.Sprintf("%dms", timeoutInMillis))
-		req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode())
-		if err == nil {
-			if basicAuth {
-				req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
-			}
-			res, err := c.c.Do((*http.Request)(req))
-			if err == nil {
-				if res.Body != nil {
-					defer res.Body.Close()
-				}
-				if res.StatusCode >= 200 && res.StatusCode < 300 {
-					conn.MarkAsAlive()
-				} else {
-					conn.MarkAsDead()
-					c.errorf("elastic: %s is dead [status=%d]", conn.URL(), res.StatusCode)
-				}
-			} else {
-				c.errorf("elastic: %s is dead", conn.URL())
-				conn.MarkAsDead()
-			}
-		} else {
-			c.errorf("elastic: %s is dead", conn.URL())
-			conn.MarkAsDead()
-		}
+		// Run the HEAD request against ES with a timeout
+		ctx, cancel := context.WithTimeout(context.Background(), timeout)
+		defer cancel()
+
+		// Goroutine executes the HTTP request, returns an error and sets status
+		var status int
+		errc := make(chan error, 1)
+		go func(url string) {
+			req, err := NewRequest("HEAD", url)
+			if err != nil {
+				errc <- err
+				return
+			}
+			if basicAuth {
+				req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+			}
+			res, err := c.c.Do((*http.Request)(req))
+			if res != nil {
+				status = res.StatusCode
+				if res.Body != nil {
+					res.Body.Close()
+				}
+			}
+			errc <- err
+		}(conn.URL())
+
+		// Wait for the Goroutine (or its timeout)
+		select {
+		case <-ctx.Done(): // timeout
+			c.errorf("elastic: %s is dead", conn.URL())
+			conn.MarkAsDead()
+			break
+		case err := <-errc:
+			if err != nil {
+				c.errorf("elastic: %s is dead", conn.URL())
+				conn.MarkAsDead()
+				break
+			}
+			if status >= 200 && status < 300 {
+				conn.MarkAsAlive()
+			} else {
+				conn.MarkAsDead()
+				c.errorf("elastic: %s is dead [status=%d]", conn.URL(), status)
+			}
+			break
+		}
 	}
 }
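
The healthcheck above now bounds each probe with a context deadline instead of a timeout query parameter. The same pattern in isolation, using only the standard library (net/http, context, time); URL and timeout are placeholders:

    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    req, _ := http.NewRequest("HEAD", "http://127.0.0.1:9200/", nil)
    if resp, err := http.DefaultClient.Do(req.WithContext(ctx)); err == nil {
        resp.Body.Close() // a 2xx here would mark the node alive
    }
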
@@ -987,11 +1078,11 @@ func (c *Client) next() (*conn, error) {
 	i := 0
 	numConns := len(c.conns)
 	for {
-		i += 1
+		i++
 		if i > numConns {
 			break // we visited all conns: they all seem to be dead
 		}
-		c.cindex += 1
+		c.cindex++
 		if c.cindex >= numConns {
 			c.cindex = 0
 		}
@@ -1031,17 +1122,16 @@ func (c *Client) mustActiveConn() error {
 }
 
 // PerformRequest does a HTTP request to Elasticsearch.
-// It returns a response and an error on failure.
+// It returns a response (which might be nil) and an error on failure.
 //
 // Optionally, a list of HTTP error codes to ignore can be passed.
 // This is necessary for services that expect e.g. HTTP status 404 as a
 // valid outcome (Exists, IndicesExists, IndicesTypeExists).
-func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) {
+func (c *Client) PerformRequest(ctx context.Context, method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) {
 	start := time.Now().UTC()
 
 	c.mu.RLock()
 	timeout := c.healthcheckTimeout
-	retries := c.maxRetries
 	basicAuth := c.basicAuth
 	basicAuthUsername := c.basicAuthUsername
 	basicAuthPassword := c.basicAuthPassword
@@ -1054,10 +1144,7 @@ func (c *Client) PerformRequest(method, path string, params url.Values, body int
 	var req *Request
 	var resp *Response
 	var retried bool
-	// We wait between retries, using simple exponential back-off.
-	// TODO: Make this configurable, including the jitter.
-	retryWaitMsec := int64(100 + (rand.Intn(20) - 10))
+	var n int
 
 	// Change method if sendGetBodyAs is specified.
 	if method == "GET" && body != nil && sendGetBodyAs != "GET" {
@@ -1073,17 +1160,20 @@ func (c *Client) PerformRequest(method, path string, params url.Values, body int
 	// Get a connection
 	conn, err = c.next()
 	if err == ErrNoClient {
+		n++
 		if !retried {
 			// Force a healtcheck as all connections seem to be dead.
 			c.healthcheck(timeout, false)
 		}
-		retries -= 1
-		if retries <= 0 {
+		wait, ok, rerr := c.retrier.Retry(ctx, n, nil, nil, err)
+		if rerr != nil {
+			return nil, rerr
+		}
+		if !ok {
 			return nil, err
 		}
 		retried = true
-		time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
-		retryWaitMsec += retryWaitMsec
+		time.Sleep(wait)
 		continue // try again
 	}
 	if err != nil {
@@ -1114,17 +1204,26 @@ func (c *Client) PerformRequest(method, path string, params url.Values, body int
 	c.dumpRequest((*http.Request)(req))
 
 	// Get response
-	res, err := c.c.Do((*http.Request)(req))
+	res, err := ctxhttp.Do(ctx, c.c, (*http.Request)(req))
+	if err == context.Canceled || err == context.DeadlineExceeded {
+		// Proceed, but don't mark the node as dead
+		return nil, err
+	}
 	if err != nil {
-		retries -= 1
-		if retries <= 0 {
+		n++
+		wait, ok, rerr := c.retrier.Retry(ctx, n, (*http.Request)(req), res, err)
+		if rerr != nil {
+			c.errorf("elastic: %s is dead", conn.URL())
+			conn.MarkAsDead()
+			return nil, rerr
+		}
+		if !ok {
 			c.errorf("elastic: %s is dead", conn.URL())
 			conn.MarkAsDead()
 			return nil, err
 		}
 		retried = true
-		time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
-		retryWaitMsec += retryWaitMsec
+		time.Sleep(wait)
 		continue // try again
 	}
 	if res.Body != nil {
@@ -1134,7 +1233,9 @@ func (c *Client) PerformRequest(method, path string, params url.Values, body int
 	// Check for errors
 	if err := checkResponse((*http.Request)(req), res, ignoreErrors...); err != nil {
 		// No retry if request succeeded
-		return nil, err
+		// We still try to return a response.
+		resp, _ = c.newResponse(res)
+		return resp, err
 	}
 
 	// Tracing
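
Because every request now carries a context, callers can attach deadlines or cancelation. A minimal sketch against the new PerformRequest signature, assuming the usual imports (context, net/url, time); the path and timeout are placeholders. Note that a canceled or expired context is returned as-is and does not mark the node dead:

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    res, err := client.PerformRequest(ctx, "GET", "/", url.Values{}, nil)
    _, _ = res, err
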
@@ -1213,28 +1314,11 @@ func (c *Client) BulkProcessor() *BulkProcessorService {
 	return NewBulkProcessorService(c)
 }
 
-// Reindex returns a service that will reindex documents from a source
-// index into a target index.
+// Reindex copies data from a source index into a destination index.
 //
-// Notice that this Reindexer is an Elastic-specific solution that pre-dated
-// the Reindex API introduced in Elasticsearch 2.3.0 (see ReindexTask).
-//
-// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
-// for more information about reindexing.
-func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer {
-	return NewReindexer(c, sourceIndex, CopyToTargetIndex(targetIndex))
-}
-
-// ReindexTask copies data from a source index into a destination index.
-//
-// The Reindex API has been introduced in Elasticsearch 2.3.0. Notice that
-// there is a Elastic-specific Reindexer that pre-dates the Reindex API from
-// Elasticsearch. If you rely on that, use the ReindexerService via
-// Client.Reindex.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-reindex.html
 // for details on the Reindex API.
-func (c *Client) ReindexTask() *ReindexService {
+func (c *Client) Reindex() *ReindexService {
 	return NewReindexService(c)
 }
 
@@ -1279,12 +1363,6 @@ func (c *Client) Explain(index, typ, id string) *ExplainService {
 	return NewExplainService(c).Index(index).Type(typ).Id(id)
 }
 
-// Percolate allows to send a document and return matching queries.
-// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html.
-func (c *Client) Percolate() *PercolateService {
-	return NewPercolateService(c)
-}
-
 // TODO Search Template
 // TODO Search Shards API
 // TODO Search Exists API
@@ -1300,15 +1378,8 @@ func (c *Client) Exists() *ExistsService {
 	return NewExistsService(c)
 }
 
-// Scan through documents. Use this to iterate inside a server process
-// where the results will be processed without returning them to a client.
-func (c *Client) Scan(indices ...string) *ScanService {
-	return NewScanService(c).Index(indices...)
-}
-
 // Scroll through documents. Use this to efficiently scroll through results
-// while returning the results to a client. Use Scan when you don't need
-// to return requests to a client (i.e. not paginating via request/response).
+// while returning the results to a client.
 func (c *Client) Scroll(indices ...string) *ScrollService {
 	return NewScrollService(c).Index(indices...)
 }
@@ -1335,6 +1406,17 @@ func (c *Client) IndexExists(indices ...string) *IndicesExistsService {
 	return NewIndicesExistsService(c).Index(indices)
 }
 
+// ShrinkIndex returns a service to shrink one index into another.
+func (c *Client) ShrinkIndex(source, target string) *IndicesShrinkService {
+	return NewIndicesShrinkService(c).Source(source).Target(target)
+}
+
+// RolloverIndex rolls an alias over to a new index when the existing index
+// is considered to be too large or too old.
+func (c *Client) RolloverIndex(alias string) *IndicesRolloverService {
+	return NewIndicesRolloverService(c).Alias(alias)
+}
+
 // TypeExists allows to check if one or more types exist in one or more indices.
 func (c *Client) TypeExists() *IndicesExistsTypeService {
 	return NewIndicesExistsTypeService(c)
@@ -1372,10 +1454,10 @@ func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService
 	return NewIndicesPutSettingsService(c).Index(indices...)
 }
 
-// Optimize asks Elasticsearch to optimize one or more indices.
-// Optimize is deprecated as of Elasticsearch 2.1 and replaced by Forcemerge.
-func (c *Client) Optimize(indices ...string) *OptimizeService {
-	return NewOptimizeService(c).Index(indices...)
+// IndexAnalyze performs the analysis process on a text and returns the
+// token breakdown of the text.
+func (c *Client) IndexAnalyze() *IndicesAnalyzeService {
+	return NewIndicesAnalyzeService(c)
 }
 
 // Forcemerge optimizes one or more indices.
@@ -1457,19 +1539,9 @@ func (c *Client) PutMapping() *IndicesPutMappingService {
 	return NewIndicesPutMappingService(c)
 }
 
-// GetWarmer gets one or more warmers by name.
-func (c *Client) GetWarmer() *IndicesGetWarmerService {
-	return NewIndicesGetWarmerService(c)
-}
-
-// PutWarmer registers a warmer.
-func (c *Client) PutWarmer() *IndicesPutWarmerService {
-	return NewIndicesPutWarmerService(c)
-}
-
-// DeleteWarmer deletes one or more warmers.
-func (c *Client) DeleteWarmer() *IndicesDeleteWarmerService {
-	return NewIndicesDeleteWarmerService(c)
+// GetFieldMapping gets mapping for fields.
+func (c *Client) GetFieldMapping() *IndicesGetFieldMappingService {
+	return NewIndicesGetFieldMappingService(c)
 }
 
 // -- cat APIs --
@@ -1489,6 +1561,30 @@ func (c *Client) DeleteWarmer() *IndicesDeleteWarmerService {
 // TODO cat shards
 // TODO cat segments
 
+// -- Ingest APIs --
+
+// IngestPutPipeline adds pipelines and updates existing pipelines in
+// the cluster.
+func (c *Client) IngestPutPipeline(id string) *IngestPutPipelineService {
+	return NewIngestPutPipelineService(c).Id(id)
+}
+
+// IngestGetPipeline returns pipelines based on ID.
+func (c *Client) IngestGetPipeline(ids ...string) *IngestGetPipelineService {
+	return NewIngestGetPipelineService(c).Id(ids...)
+}
+
+// IngestDeletePipeline deletes a pipeline by ID.
+func (c *Client) IngestDeletePipeline(id string) *IngestDeletePipelineService {
+	return NewIngestDeletePipelineService(c).Id(id)
+}
+
+// IngestSimulatePipeline executes a specific pipeline against the set of
+// documents provided in the body of the request.
+func (c *Client) IngestSimulatePipeline() *IngestSimulatePipelineService {
+	return NewIngestSimulatePipelineService(c)
+}
+
 // -- Cluster APIs --
 
 // ClusterHealth retrieves the health of the cluster.
@@ -1511,6 +1607,11 @@ func (c *Client) NodesInfo() *NodesInfoService {
 	return NewNodesInfoService(c)
 }
 
+// NodesStats retrieves one or more or all of the cluster nodes statistics.
+func (c *Client) NodesStats() *NodesStatsService {
+	return NewNodesStatsService(c)
+}
+
 // TasksCancel cancels tasks running on the specified nodes.
 func (c *Client) TasksCancel() *TasksCancelService {
 	return NewTasksCancelService(c)
@@ -1544,7 +1645,7 @@ func (c *Client) TasksList() *TasksListService {
 // ElasticsearchVersion returns the version number of Elasticsearch
 // running on the given URL.
 func (c *Client) ElasticsearchVersion(url string) (string, error) {
-	res, _, err := c.Ping(url).Do()
+	res, _, err := c.Ping(url).Do(context.Background())
 	if err != nil {
 		return "", err
 	}
@@ -1553,12 +1654,12 @@ func (c *Client) ElasticsearchVersion(url string) (string, error) {
 
 // IndexNames returns the names of all indices in the cluster.
 func (c *Client) IndexNames() ([]string, error) {
-	res, err := c.IndexGetSettings().Index("_all").Do()
+	res, err := c.IndexGetSettings().Index("_all").Do(context.Background())
 	if err != nil {
 		return nil, err
 	}
 	var names []string
-	for name, _ := range res {
+	for name := range res {
 		names = append(names, name)
 	}
 	return names, nil
@@ -1580,7 +1681,7 @@ func (c *Client) Ping(url string) *PingService {
 // If the cluster will have the given state within the timeout, nil is returned.
 // If the request timed out, ErrTimeout is returned.
 func (c *Client) WaitForStatus(status string, timeout string) error {
-	health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do()
+	health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do(context.Background())
 	if err != nil {
 		return err
 	}
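
Convenience helpers such as ElasticsearchVersion and IndexNames now run their underlying requests with context.Background() internally. A minimal caller-side sketch, assuming the usual imports (log); the URL is a placeholder:

    version, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
    if err == nil {
        names, _ := client.IndexNames()
        log.Printf("Elasticsearch %s, indices: %v", version, names)
    }
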
@@ -9,12 +9,14 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // ClusterHealthService allows to get a very simple status on the health of the cluster.
 //
-// See http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-health.html
 // for details.
 type ClusterHealthService struct {
 	client *Client
@@ -26,7 +28,7 @@ type ClusterHealthService struct {
 	timeout                   string
 	waitForActiveShards       *int
 	waitForNodes              string
-	waitForRelocatingShards   *int
+	waitForNoRelocatingShards *bool
 	waitForStatus             string
 }
 
@@ -82,9 +84,9 @@ func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthS
 	return s
 }
 
-// WaitForRelocatingShards can be used to wait until the specified number of relocating shards is finished.
-func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
-	s.waitForRelocatingShards = &waitForRelocatingShards
+// WaitForNoRelocatingShards can be used to wait until all shard relocations are finished.
+func (s *ClusterHealthService) WaitForNoRelocatingShards(waitForNoRelocatingShards bool) *ClusterHealthService {
+	s.waitForNoRelocatingShards = &waitForNoRelocatingShards
 	return s
 }
 
@@ -150,8 +152,8 @@ func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
 	if s.waitForNodes != "" {
 		params.Set("wait_for_nodes", s.waitForNodes)
 	}
-	if s.waitForRelocatingShards != nil {
-		params.Set("wait_for_relocating_shards", fmt.Sprintf("%v", s.waitForRelocatingShards))
+	if s.waitForNoRelocatingShards != nil {
+		params.Set("wait_for_no_relocating_shards", fmt.Sprintf("%v", *s.waitForNoRelocatingShards))
 	}
 	if s.waitForStatus != "" {
 		params.Set("wait_for_status", s.waitForStatus)
@@ -165,7 +167,7 @@ func (s *ClusterHealthService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) {
+func (s *ClusterHealthService) Do(ctx context.Context) (*ClusterHealthResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -178,7 +180,7 @@ func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
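With WaitForRelocatingShards replaced by WaitForNoRelocatingShards above, a v5 caller waits for relocations to settle with a boolean rather than a shard count. A minimal caller-side sketch, assuming an already connected client and imports of context, log and gopkg.in/olivere/elastic.v5; the "30s" timeout is illustrative only:

	// waitForStableCluster blocks until no shards are relocating and reports the cluster status.
	func waitForStableCluster(ctx context.Context, client *elastic.Client) error {
		res, err := client.ClusterHealth().
			WaitForNoRelocatingShards(true). // was WaitForRelocatingShards(0) in elastic.v3
			Timeout("30s").
			Do(ctx) // Do now requires a context
		if err != nil {
			return err
		}
		log.Printf("cluster status: %s", res.Status)
		return nil
	}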
@@ -9,12 +9,14 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // ClusterStateService allows to get a comprehensive state information of the whole cluster.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-state.html
 // for details.
 type ClusterStateService struct {
 	client *Client
@@ -151,7 +153,7 @@ func (s *ClusterStateService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *ClusterStateService) Do() (*ClusterStateResponse, error) {
+func (s *ClusterStateService) Do(ctx context.Context) (*ClusterStateResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -164,7 +166,7 @@ func (s *ClusterStateService) Do() (*ClusterStateResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -258,7 +260,7 @@ type shardRouting struct {
 	RelocatingNode string          `json:"relocating_node"`
 	Shard          int             `json:"shard"`
 	Index          string          `json:"index"`
-	Version        int64           `json:"state"`
+	Version        int64           `json:"version"`
 	RestoreSource  *RestoreSource  `json:"restore_source"`
 	AllocationId   *allocationId   `json:"allocation_id"`
 	UnassignedInfo *unassignedInfo `json:"unassigned_info"`
@@ -9,10 +9,13 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
-// ClusterStatsService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-stats.html.
+// ClusterStatsService is documented at
+// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-stats.html.
 type ClusterStatsService struct {
 	client *Client
 	pretty bool
@@ -93,7 +96,7 @@ func (s *ClusterStatsService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *ClusterStatsService) Do() (*ClusterStatsResponse, error) {
+func (s *ClusterStatsService) Do(ctx context.Context) (*ClusterStatsResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -106,7 +109,7 @@ func (s *ClusterStatsService) Do() (*ClusterStatsResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -9,7 +9,9 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // CountService is a convenient service for determining the
@@ -256,7 +258,7 @@ func (s *CountService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *CountService) Do() (int64, error) {
+func (s *CountService) Do(ctx context.Context) (int64, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return 0, err
@@ -285,7 +287,7 @@ func (s *CountService) Do() (int64, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("POST", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
 	if err != nil {
 		return 0, err
 	}
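CountService.Do now also takes a context and still returns a plain int64. A hedged sketch, assuming an existing client and the same imports as above; the index name and term query are hypothetical:

	// countUserDocs counts documents that match a simple term query.
	func countUserDocs(ctx context.Context, client *elastic.Client) (int64, error) {
		return client.Count().
			Index("testindex").
			Query(elastic.NewTermQuery("user", "olivere")).
			Do(ctx)
	}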
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -8,13 +8,15 @@ import (
 	"fmt"
 	"net/url"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // DeleteService allows to delete a typed JSON document from a specified
 // index based on its id.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-delete.html
 // for details.
 type DeleteService struct {
 	client *Client
@@ -26,10 +28,9 @@ type DeleteService struct {
 	timeout             string
 	version             interface{}
 	versionType         string
-	consistency         string
+	waitForActiveShards string
 	parent              string
-	refresh             *bool
-	replication         string
+	refresh             string
 }
 
 // NewDeleteService creates a new DeleteService.
@@ -57,12 +58,6 @@ func (s *DeleteService) Index(index string) *DeleteService {
 	return s
 }
 
-// Replication specifies a replication type.
-func (s *DeleteService) Replication(replication string) *DeleteService {
-	s.replication = replication
-	return s
-}
-
 // Routing is a specific routing value.
 func (s *DeleteService) Routing(routing string) *DeleteService {
 	s.routing = routing
@@ -87,9 +82,13 @@ func (s *DeleteService) VersionType(versionType string) *DeleteService {
 	return s
 }
 
-// Consistency defines a specific write consistency setting for the operation.
-func (s *DeleteService) Consistency(consistency string) *DeleteService {
-	s.consistency = consistency
+// WaitForActiveShards sets the number of shard copies that must be active
+// before proceeding with the delete operation. Defaults to 1, meaning the
+// primary shard only. Set to `all` for all shard copies, otherwise set to
+// any non-negative value less than or equal to the total number of copies
+// for the shard (number of replicas + 1).
+func (s *DeleteService) WaitForActiveShards(waitForActiveShards string) *DeleteService {
+	s.waitForActiveShards = waitForActiveShards
 	return s
 }
 
@@ -100,8 +99,8 @@ func (s *DeleteService) Parent(parent string) *DeleteService {
 }
 
 // Refresh the index after performing the operation.
-func (s *DeleteService) Refresh(refresh bool) *DeleteService {
-	s.refresh = &refresh
+func (s *DeleteService) Refresh(refresh string) *DeleteService {
+	s.refresh = refresh
 	return s
 }
 
@@ -128,11 +127,8 @@ func (s *DeleteService) buildURL() (string, url.Values, error) {
 	if s.pretty {
 		params.Set("pretty", "1")
 	}
-	if s.refresh != nil {
-		params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
-	}
-	if s.replication != "" {
-		params.Set("replication", s.replication)
+	if s.refresh != "" {
+		params.Set("refresh", s.refresh)
 	}
 	if s.routing != "" {
 		params.Set("routing", s.routing)
@@ -146,8 +142,8 @@ func (s *DeleteService) buildURL() (string, url.Values, error) {
 	if s.versionType != "" {
 		params.Set("version_type", s.versionType)
 	}
-	if s.consistency != "" {
-		params.Set("consistency", s.consistency)
+	if s.waitForActiveShards != "" {
+		params.Set("wait_for_active_shards", s.waitForActiveShards)
 	}
 	if s.parent != "" {
 		params.Set("parent", s.parent)
@@ -174,7 +170,7 @@ func (s *DeleteService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *DeleteService) Do() (*DeleteResponse, error) {
+func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -187,7 +183,7 @@ func (s *DeleteService) Do() (*DeleteResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
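Under the v5 DeleteService shown above, Refresh takes a string ("true", "false" or "wait_for") instead of a bool, and write consistency is expressed through WaitForActiveShards rather than Consistency. A sketch with hypothetical index, type and id values, assuming an existing client:

	// deleteDoc removes a single document and waits until all shard copies acknowledge it.
	func deleteDoc(ctx context.Context, client *elastic.Client) error {
		_, err := client.Delete().
			Index("testindex").
			Type("doc").
			Id("1").
			Refresh("true").            // was Refresh(true) in elastic.v3
			WaitForActiveShards("all"). // replaces Consistency("...")
			Do(ctx)
		return err
	}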
649 vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go generated vendored Normal file
@@ -0,0 +1,649 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// DeleteByQueryService deletes documents that match a query.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-delete-by-query.html.
+type DeleteByQueryService struct {
+	client                 *Client
+	index                  []string
+	typ                    []string
+	query                  Query
+	body                   interface{}
+	xSource                []string
+	xSourceExclude         []string
+	xSourceInclude         []string
+	analyzer               string
+	analyzeWildcard        *bool
+	allowNoIndices         *bool
+	conflicts              string
+	defaultOperator        string
+	df                     string
+	docvalueFields         []string
+	expandWildcards        string
+	explain                *bool
+	from                   *int
+	ignoreUnavailable      *bool
+	lenient                *bool
+	lowercaseExpandedTerms *bool
+	preference             string
+	q                      string
+	refresh                string
+	requestCache           *bool
+	requestsPerSecond      *int
+	routing                []string
+	scroll                 string
+	scrollSize             *int
+	searchTimeout          string
+	searchType             string
+	size                   *int
+	sort                   []string
+	stats                  []string
+	storedFields           []string
+	suggestField           string
+	suggestMode            string
+	suggestSize            *int
+	suggestText            string
+	terminateAfter         *int
+	timeout                string
+	trackScores            *bool
+	version                *bool
+	waitForActiveShards    string
+	waitForCompletion      *bool
+	pretty                 bool
+}
+
+// NewDeleteByQueryService creates a new DeleteByQueryService.
+// You typically use the client's DeleteByQuery to get a reference to
+// the service.
+func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
+	builder := &DeleteByQueryService{
+		client: client,
+	}
+	return builder
+}
+
+// Index sets the indices on which to perform the delete operation.
+func (s *DeleteByQueryService) Index(index ...string) *DeleteByQueryService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Type limits the delete operation to the given types.
+func (s *DeleteByQueryService) Type(typ ...string) *DeleteByQueryService {
+	s.typ = append(s.typ, typ...)
+	return s
+}
+
+// XSource is true or false to return the _source field or not,
+// or a list of fields to return.
+func (s *DeleteByQueryService) XSource(xSource ...string) *DeleteByQueryService {
+	s.xSource = append(s.xSource, xSource...)
+	return s
+}
+
+// XSourceExclude represents a list of fields to exclude from the returned _source field.
+func (s *DeleteByQueryService) XSourceExclude(xSourceExclude ...string) *DeleteByQueryService {
+	s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
+	return s
+}
+
+// XSourceInclude represents a list of fields to extract and return from the _source field.
+func (s *DeleteByQueryService) XSourceInclude(xSourceInclude ...string) *DeleteByQueryService {
+	s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
+	return s
+}
+
+// Analyzer to use for the query string.
+func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
+	s.analyzer = analyzer
+	return s
+}
+
+// AnalyzeWildcard specifies whether wildcard and prefix queries should be
+// analyzed (default: false).
+func (s *DeleteByQueryService) AnalyzeWildcard(analyzeWildcard bool) *DeleteByQueryService {
+	s.analyzeWildcard = &analyzeWildcard
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices (including the _all string
+// or when no indices have been specified).
+func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
+	s.allowNoIndices = &allow
+	return s
+}
+
+// Conflicts indicates what to do when the process detects version conflicts.
+// Possible values are "proceed" and "abort".
+func (s *DeleteByQueryService) Conflicts(conflicts string) *DeleteByQueryService {
+	s.conflicts = conflicts
+	return s
+}
+
+// AbortOnVersionConflict aborts the request on version conflicts.
+// It is an alias to setting Conflicts("abort").
+func (s *DeleteByQueryService) AbortOnVersionConflict() *DeleteByQueryService {
+	s.conflicts = "abort"
+	return s
+}
+
+// ProceedOnVersionConflict aborts the request on version conflicts.
+// It is an alias to setting Conflicts("proceed").
+func (s *DeleteByQueryService) ProceedOnVersionConflict() *DeleteByQueryService {
+	s.conflicts = "proceed"
+	return s
+}
+
+// DefaultOperator for query string query (AND or OR).
+func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
+	s.defaultOperator = defaultOperator
+	return s
+}
+
+// DF is the field to use as default where no field prefix is given in the query string.
+func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
+	s.df = defaultField
+	return s
+}
+
+// DefaultField is the field to use as default where no field prefix is given in the query string.
+// It is an alias to the DF func.
+func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
+	s.df = defaultField
+	return s
+}
+
+// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit.
+func (s *DeleteByQueryService) DocvalueFields(docvalueFields ...string) *DeleteByQueryService {
+	s.docvalueFields = docvalueFields
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both. It can be "open" or "closed".
+func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
+	s.expandWildcards = expand
+	return s
+}
+
+// Explain specifies whether to return detailed information about score
+// computation as part of a hit.
+func (s *DeleteByQueryService) Explain(explain bool) *DeleteByQueryService {
+	s.explain = &explain
+	return s
+}
+
+// From is the starting offset (default: 0).
+func (s *DeleteByQueryService) From(from int) *DeleteByQueryService {
+	s.from = &from
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
+	s.ignoreUnavailable = &ignore
+	return s
+}
+
+// Lenient specifies whether format-based query failures
+// (such as providing text to a numeric field) should be ignored.
+func (s *DeleteByQueryService) Lenient(lenient bool) *DeleteByQueryService {
+	s.lenient = &lenient
+	return s
+}
+
+// LowercaseExpandedTerms specifies whether query terms should be lowercased.
+func (s *DeleteByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *DeleteByQueryService {
+	s.lowercaseExpandedTerms = &lowercaseExpandedTerms
+	return s
+}
+
+// Preference specifies the node or shard the operation should be performed on
+// (default: random).
+func (s *DeleteByQueryService) Preference(preference string) *DeleteByQueryService {
+	s.preference = preference
+	return s
+}
+
+// Q specifies the query in Lucene query string syntax. You can also use
+// Query to programmatically specify the query.
+func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
+	s.q = query
+	return s
+}
+
+// QueryString is an alias to Q. Notice that you can also use Query to
+// programmatically set the query.
+func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
+	s.q = query
+	return s
+}
+
+// Query sets the query programmatically.
+func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
+	s.query = query
+	return s
+}
+
+// Refresh indicates whether the effected indexes should be refreshed.
+func (s *DeleteByQueryService) Refresh(refresh string) *DeleteByQueryService {
+	s.refresh = refresh
+	return s
+}
+
+// RequestCache specifies if request cache should be used for this request
+// or not, defaults to index level setting.
+func (s *DeleteByQueryService) RequestCache(requestCache bool) *DeleteByQueryService {
+	s.requestCache = &requestCache
+	return s
+}
+
+// RequestsPerSecond sets the throttle on this request in sub-requests per second.
+// -1 means set no throttle as does "unlimited" which is the only non-float this accepts.
+func (s *DeleteByQueryService) RequestsPerSecond(requestsPerSecond int) *DeleteByQueryService {
+	s.requestsPerSecond = &requestsPerSecond
+	return s
+}
+
+// Routing is a list of specific routing values.
+func (s *DeleteByQueryService) Routing(routing ...string) *DeleteByQueryService {
+	s.routing = append(s.routing, routing...)
+	return s
+}
+
+// Scroll specifies how long a consistent view of the index should be maintained
+// for scrolled search.
+func (s *DeleteByQueryService) Scroll(scroll string) *DeleteByQueryService {
+	s.scroll = scroll
+	return s
+}
+
+// ScrollSize is the size on the scroll request powering the update_by_query.
+func (s *DeleteByQueryService) ScrollSize(scrollSize int) *DeleteByQueryService {
+	s.scrollSize = &scrollSize
+	return s
+}
+
+// SearchTimeout defines an explicit timeout for each search request.
+// Defaults to no timeout.
+func (s *DeleteByQueryService) SearchTimeout(searchTimeout string) *DeleteByQueryService {
+	s.searchTimeout = searchTimeout
+	return s
+}
+
+// SearchType is the search operation type. Possible values are
+// "query_then_fetch" and "dfs_query_then_fetch".
+func (s *DeleteByQueryService) SearchType(searchType string) *DeleteByQueryService {
+	s.searchType = searchType
+	return s
+}
+
+// Size represents the number of hits to return (default: 10).
+func (s *DeleteByQueryService) Size(size int) *DeleteByQueryService {
+	s.size = &size
+	return s
+}
+
+// Sort is a list of <field>:<direction> pairs.
+func (s *DeleteByQueryService) Sort(sort ...string) *DeleteByQueryService {
+	s.sort = append(s.sort, sort...)
+	return s
+}
+
+// SortByField adds a sort order.
+func (s *DeleteByQueryService) SortByField(field string, ascending bool) *DeleteByQueryService {
+	if ascending {
+		s.sort = append(s.sort, fmt.Sprintf("%s:asc", field))
+	} else {
+		s.sort = append(s.sort, fmt.Sprintf("%s:desc", field))
+	}
+	return s
+}
+
+// Stats specifies specific tag(s) of the request for logging and statistical purposes.
+func (s *DeleteByQueryService) Stats(stats ...string) *DeleteByQueryService {
+	s.stats = append(s.stats, stats...)
+	return s
+}
+
+// StoredFields specifies the list of stored fields to return as part of a hit.
+func (s *DeleteByQueryService) StoredFields(storedFields ...string) *DeleteByQueryService {
+	s.storedFields = storedFields
+	return s
+}
+
+// SuggestField specifies which field to use for suggestions.
+func (s *DeleteByQueryService) SuggestField(suggestField string) *DeleteByQueryService {
+	s.suggestField = suggestField
+	return s
+}
+
+// SuggestMode specifies the suggest mode. Possible values are
+// "missing", "popular", and "always".
+func (s *DeleteByQueryService) SuggestMode(suggestMode string) *DeleteByQueryService {
+	s.suggestMode = suggestMode
+	return s
+}
+
+// SuggestSize specifies how many suggestions to return in response.
+func (s *DeleteByQueryService) SuggestSize(suggestSize int) *DeleteByQueryService {
+	s.suggestSize = &suggestSize
+	return s
+}
+
+// SuggestText specifies the source text for which the suggestions should be returned.
+func (s *DeleteByQueryService) SuggestText(suggestText string) *DeleteByQueryService {
+	s.suggestText = suggestText
+	return s
+}
+
+// TerminateAfter indicates the maximum number of documents to collect
+// for each shard, upon reaching which the query execution will terminate early.
+func (s *DeleteByQueryService) TerminateAfter(terminateAfter int) *DeleteByQueryService {
+	s.terminateAfter = &terminateAfter
+	return s
+}
+
+// Timeout is the time each individual bulk request should wait for shards
+// that are unavailable.
+func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
+	s.timeout = timeout
+	return s
+}
+
+// TimeoutInMillis sets the timeout in milliseconds.
+func (s *DeleteByQueryService) TimeoutInMillis(timeoutInMillis int) *DeleteByQueryService {
+	s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
+	return s
+}
+
+// TrackScores indicates whether to calculate and return scores even if
+// they are not used for sorting.
+func (s *DeleteByQueryService) TrackScores(trackScores bool) *DeleteByQueryService {
+	s.trackScores = &trackScores
+	return s
+}
+
+// Version specifies whether to return document version as part of a hit.
+func (s *DeleteByQueryService) Version(version bool) *DeleteByQueryService {
+	s.version = &version
+	return s
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active before proceeding
+// with the update by query operation. Defaults to 1, meaning the primary shard only.
+// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal
+// to the total number of copies for the shard (number of replicas + 1).
+func (s *DeleteByQueryService) WaitForActiveShards(waitForActiveShards string) *DeleteByQueryService {
+	s.waitForActiveShards = waitForActiveShards
+	return s
+}
+
+// WaitForCompletion indicates if the request should block until the reindex is complete.
+func (s *DeleteByQueryService) WaitForCompletion(waitForCompletion bool) *DeleteByQueryService {
+	s.waitForCompletion = &waitForCompletion
+	return s
+}
+
+// Pretty indents the JSON output from Elasticsearch.
+func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
+	s.pretty = pretty
+	return s
+}
+
+// Body specifies the body of the request. It overrides data being specified via SearchService.
+func (s *DeleteByQueryService) Body(body string) *DeleteByQueryService {
+	s.body = body
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteByQueryService) buildURL() (string, url.Values, error) {
+	// Build URL
+	var err error
+	var path string
+	if len(s.typ) > 0 {
+		path, err = uritemplates.Expand("/{index}/{type}/_delete_by_query", map[string]string{
+			"index": strings.Join(s.index, ","),
+			"type":  strings.Join(s.typ, ","),
+		})
+	} else {
+		path, err = uritemplates.Expand("/{index}/_delete_by_query", map[string]string{
+			"index": strings.Join(s.index, ","),
+		})
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if len(s.xSource) > 0 {
+		params.Set("_source", strings.Join(s.xSource, ","))
+	}
+	if len(s.xSourceExclude) > 0 {
+		params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
+	}
+	if len(s.xSourceInclude) > 0 {
+		params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
+	}
+	if s.analyzer != "" {
+		params.Set("analyzer", s.analyzer)
+	}
+	if s.analyzeWildcard != nil {
+		params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
+	}
+	if s.defaultOperator != "" {
+		params.Set("default_operator", s.defaultOperator)
+	}
+	if s.df != "" {
+		params.Set("df", s.df)
+	}
+	if s.explain != nil {
+		params.Set("explain", fmt.Sprintf("%v", *s.explain))
+	}
+	if len(s.storedFields) > 0 {
+		params.Set("stored_fields", strings.Join(s.storedFields, ","))
+	}
+	if len(s.docvalueFields) > 0 {
+		params.Set("docvalue_fields", strings.Join(s.docvalueFields, ","))
+	}
+	if s.from != nil {
+		params.Set("from", fmt.Sprintf("%d", *s.from))
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.conflicts != "" {
+		params.Set("conflicts", s.conflicts)
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.lenient != nil {
+		params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
+	}
+	if s.lowercaseExpandedTerms != nil {
+		params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
+	}
+	if s.preference != "" {
+		params.Set("preference", s.preference)
+	}
+	if s.q != "" {
+		params.Set("q", s.q)
+	}
+	if len(s.routing) > 0 {
+		params.Set("routing", strings.Join(s.routing, ","))
+	}
+	if s.scroll != "" {
+		params.Set("scroll", s.scroll)
+	}
+	if s.searchType != "" {
+		params.Set("search_type", s.searchType)
+	}
+	if s.searchTimeout != "" {
+		params.Set("search_timeout", s.searchTimeout)
+	}
+	if s.size != nil {
+		params.Set("size", fmt.Sprintf("%d", *s.size))
+	}
+	if len(s.sort) > 0 {
+		params.Set("sort", strings.Join(s.sort, ","))
+	}
+	if s.terminateAfter != nil {
+		params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter))
+	}
+	if len(s.stats) > 0 {
+		params.Set("stats", strings.Join(s.stats, ","))
+	}
+	if s.suggestField != "" {
+		params.Set("suggest_field", s.suggestField)
+	}
+	if s.suggestMode != "" {
+		params.Set("suggest_mode", s.suggestMode)
+	}
+	if s.suggestSize != nil {
+		params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize))
+	}
+	if s.suggestText != "" {
+		params.Set("suggest_text", s.suggestText)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.trackScores != nil {
+		params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores))
+	}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%v", *s.version))
+	}
+	if s.requestCache != nil {
+		params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache))
+	}
+	if s.refresh != "" {
+		params.Set("refresh", s.refresh)
+	}
+	if s.waitForActiveShards != "" {
+		params.Set("wait_for_active_shards", s.waitForActiveShards)
+	}
+	if s.scrollSize != nil {
+		params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize))
+	}
+	if s.waitForCompletion != nil {
+		params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
+	}
+	if s.requestsPerSecond != nil {
+		params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond))
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteByQueryService) Validate() error {
+	var invalid []string
+	if len(s.index) == 0 {
+		invalid = append(invalid, "Index")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the delete-by-query operation.
+func (s *DeleteByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body if there is a query set
+	var body interface{}
+	if s.body != nil {
+		body = s.body
+	} else if s.query != nil {
+		src, err := s.query.Source()
+		if err != nil {
+			return nil, err
+		}
+		body = map[string]interface{}{
+			"query": src,
+		}
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(BulkIndexByScrollResponse)
+	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// BulkIndexByScrollResponse is the outcome of executing Do with
+// DeleteByQueryService and UpdateByQueryService.
+type BulkIndexByScrollResponse struct {
+	Took             int64 `json:"took"`
+	TimedOut         bool  `json:"timed_out"`
+	Total            int64 `json:"total"`
+	Updated          int64 `json:"updated"`
+	Created          int64 `json:"created"`
+	Deleted          int64 `json:"deleted"`
+	Batches          int64 `json:"batches"`
+	VersionConflicts int64 `json:"version_conflicts"`
+	Noops            int64 `json:"noops"`
+	Retries          struct {
+		Bulk   int64 `json:"bulk"`
+		Search int64 `json:"search"`
+	} `json:"retries"`
+	Throttled            string  `json:"throttled"`
+	ThrottledMillis      int64   `json:"throttled_millis"`
+	RequestsPerSecond    float64 `json:"requests_per_second"`
+	Canceled             string  `json:"canceled"`
+	ThrottledUntil       string  `json:"throttled_until"`
+	ThrottledUntilMillis int64   `json:"throttled_until_millis"`
+	Failures             []bulkIndexByScrollResponseFailure `json:"failures"`
+}
+
+type bulkIndexByScrollResponseFailure struct {
+	Index  string `json:"index,omitempty"`
+	Type   string `json:"type,omitempty"`
+	Id     string `json:"id,omitempty"`
+	Status int    `json:"status,omitempty"`
+	Shard  int    `json:"shard,omitempty"`
+	Node   int    `json:"node,omitempty"`
+	// TOOD "cause" contains exception details
+	// TOOD "reason" contains exception details
+}
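A caller-side sketch of the new delete-by-query service, assuming a connected client and an illustrative index and term query; the response is the BulkIndexByScrollResponse defined above:

	// deleteByUser removes all documents for a given user and proceeds on version conflicts.
	func deleteByUser(ctx context.Context, client *elastic.Client, user string) (int64, error) {
		out, err := client.DeleteByQuery().
			Index("testindex").
			Query(elastic.NewTermQuery("user", user)).
			ProceedOnVersionConflict().
			Do(ctx)
		if err != nil {
			return 0, err
		}
		return out.Deleted, nil
	}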
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -8,11 +8,13 @@ import (
 	"fmt"
 	"net/url"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // DeleteTemplateService deletes a search template. More information can
-// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+// be found at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-template.html.
 type DeleteTemplateService struct {
 	client *Client
 	pretty bool
@@ -81,7 +83,7 @@ func (s *DeleteTemplateService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) {
+func (s *DeleteTemplateService) Do(ctx context.Context) (*AcknowledgedResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -94,24 +96,15 @@ func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
 
 	// Return operation response
-	ret := new(DeleteTemplateResponse)
+	ret := new(AcknowledgedResponse)
 	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
 		return nil, err
 	}
 	return ret, nil
 }
-
-// DeleteTemplateResponse is the response of DeleteTemplateService.Do.
-type DeleteTemplateResponse struct {
-	Found   bool   `json:"found"`
-	Index   string `json:"_index"`
-	Type    string `json:"_type"`
-	Id      string `json:"_id"`
-	Version int    `json:"_version"`
-}
@@ -1,10 +1,10 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
 /*
 Package elastic provides an interface to the Elasticsearch server
-(http://www.elasticsearch.org/).
+(https://www.elastic.co/products/elasticsearch).
 
 The first thing you do is to create a Client. If you have Elasticsearch
 installed and running with its default settings
@@ -35,7 +35,7 @@ methods to prepare the query and a Do function to execute it against the
 Elasticsearch REST interface and return a response. Here is an example
 of the IndexExists service that checks if a given index already exists.
 
-	exists, err := client.IndexExists("twitter").Do()
+	exists, err := client.IndexExists("twitter").Do(context.Background())
 	if err != nil {
 		// Handle error
 	}
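The package example above now needs a context. A self-contained version of that snippet, assuming Elasticsearch is running on its default local address, might look like:

	package main

	import (
		"context"
		"fmt"
		"log"

		elastic "gopkg.in/olivere/elastic.v5"
	)

	func main() {
		// NewClient with no options targets http://127.0.0.1:9200.
		client, err := elastic.NewClient()
		if err != nil {
			log.Fatal(err)
		}

		// Every service call in v5 takes a context.
		exists, err := client.IndexExists("twitter").Do(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("twitter index exists:", exists)
	}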
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -9,12 +9,14 @@ import (
 	"net/http"
 	"net/url"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // ExistsService checks for the existence of a document using HEAD.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-get.html
 // for details.
 type ExistsService struct {
 	client *Client
@@ -24,7 +26,7 @@ type ExistsService struct {
 	typ        string
 	preference string
 	realtime   *bool
-	refresh    *bool
+	refresh    string
 	routing    string
 	parent     string
 }
@@ -68,8 +70,8 @@ func (s *ExistsService) Realtime(realtime bool) *ExistsService {
 }
 
 // Refresh the shard containing the document before performing the operation.
-func (s *ExistsService) Refresh(refresh bool) *ExistsService {
-	s.refresh = &refresh
+func (s *ExistsService) Refresh(refresh string) *ExistsService {
+	s.refresh = refresh
 	return s
 }
 
@@ -111,8 +113,8 @@ func (s *ExistsService) buildURL() (string, url.Values, error) {
 	if s.realtime != nil {
 		params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
 	}
-	if s.refresh != nil {
-		params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+	if s.refresh != "" {
+		params.Set("refresh", s.refresh)
 	}
 	if s.routing != "" {
 		params.Set("routing", s.routing)
@@ -145,7 +147,7 @@ func (s *ExistsService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *ExistsService) Do() (bool, error) {
+func (s *ExistsService) Do(ctx context.Context) (bool, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return false, err
@@ -158,7 +160,7 @@ func (s *ExistsService) Do() (bool, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("HEAD", path, params, nil, 404)
+	res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
 	if err != nil {
 		return false, err
 	}
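ExistsService follows the same pattern: Do takes a context and Refresh is now a string. A hedged sketch with made-up identifiers, assuming an existing client:

	// docExists reports whether a document is present without fetching its body.
	func docExists(ctx context.Context, client *elastic.Client) (bool, error) {
		return client.Exists().
			Index("testindex").
			Type("doc").
			Id("1").
			Do(ctx)
	}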
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -6,24 +6,17 @@ package elastic
 
 import (
 	"fmt"
-	"log"
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
-)
-
-var (
-	_ = fmt.Print
-	_ = log.Print
-	_ = strings.Index
-	_ = uritemplates.Expand
-	_ = url.Parse
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // ExplainService computes a score explanation for a query and
 // a specific document.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-explain.html.
 type ExplainService struct {
 	client *Client
 	pretty bool
@@ -285,7 +278,7 @@ func (s *ExplainService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *ExplainService) Do() (*ExplainResponse, error) {
+func (s *ExplainService) Do(ctx context.Context) (*ExplainResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -306,7 +299,7 @@ func (s *ExplainService) Do() (*ExplainResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
 	if err != nil {
 		return nil, err
 	}
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -10,7 +10,9 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 const (
@@ -21,7 +23,7 @@ const (
 // FieldStatsService allows finding statistical properties of a field without executing a search,
 // but looking up measurements that are natively available in the Lucene index.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-stats.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-field-stats.html
 // for details
 type FieldStatsService struct {
 	client *Client
@@ -166,7 +168,7 @@ func (s *FieldStatsService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *FieldStatsService) Do() (*FieldStatsResponse, error) {
+func (s *FieldStatsService) Do(ctx context.Context) (*FieldStatsResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -187,7 +189,7 @@ func (s *FieldStatsService) Do() (*FieldStatsResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("POST", path, params, body, http.StatusNotFound)
+	res, err := s.client.PerformRequest(ctx, "POST", path, params, body, http.StatusNotFound)
 	if err != nil {
 		return nil, err
 	}
@@ -243,11 +245,14 @@ type IndexFieldStats struct {
 
 // FieldStats contains stats of an individual field
 type FieldStats struct {
+	Type                  string      `json:"type"`
 	MaxDoc                int64       `json:"max_doc"`
 	DocCount              int64       `json:"doc_count"`
 	Density               int64       `json:"density"`
 	SumDocFrequeny        int64       `json:"sum_doc_freq"`
 	SumTotalTermFrequency int64       `json:"sum_total_term_freq"`
+	Searchable            bool        `json:"searchable"`
+	Aggregatable          bool        `json:"aggregatable"`
 	MinValue              interface{} `json:"min_value"`
 	MinValueAsString      string      `json:"min_value_as_string"`
 	MaxValue              interface{} `json:"max_value"`
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
50 vendor/gopkg.in/olivere/elastic.v3/get.go → vendor/gopkg.in/olivere/elastic.v5/get.go generated vendored
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -10,13 +10,15 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // GetService allows to get a typed JSON document from the index based
 // on its id.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-get.html
 // for details.
 type GetService struct {
 	client *Client
@@ -26,8 +28,8 @@ type GetService struct {
 	id           string
 	routing      string
 	preference   string
-	fields       []string
-	refresh      *bool
+	storedFields []string
+	refresh      string
 	realtime     *bool
 	fsc          *FetchSourceContext
 	version      interface{}
@@ -44,17 +46,6 @@ func NewGetService(client *Client) *GetService {
 	}
 }
 
-/*
-// String returns a string representation of the GetService request.
-func (s *GetService) String() string {
-	return fmt.Sprintf("[%v][%v][%v]: routing [%v]",
-		s.index,
-		s.typ,
-		s.id,
-		s.routing)
-}
-*/
-
 // Index is the name of the index.
 func (s *GetService) Index(index string) *GetService {
 	s.index = index
@@ -92,12 +83,9 @@ func (s *GetService) Preference(preference string) *GetService {
 	return s
 }
 
-// Fields is a list of fields to return in the response.
-func (s *GetService) Fields(fields ...string) *GetService {
-	if s.fields == nil {
-		s.fields = make([]string, 0)
-	}
-	s.fields = append(s.fields, fields...)
+// StoredFields is a list of fields to return in the response.
+func (s *GetService) StoredFields(storedFields ...string) *GetService {
+	s.storedFields = append(s.storedFields, storedFields...)
 	return s
 }
 
@@ -116,8 +104,8 @@ func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext)
 }
 
 // Refresh the shard containing the document before performing the operation.
-func (s *GetService) Refresh(refresh bool) *GetService {
-	s.refresh = &refresh
+func (s *GetService) Refresh(refresh string) *GetService {
+	s.refresh = refresh
 	return s
 }
 
@@ -196,11 +184,11 @@ func (s *GetService) buildURL() (string, url.Values, error) {
 	if s.preference != "" {
|
if s.preference != "" {
|
||||||
params.Set("preference", s.preference)
|
params.Set("preference", s.preference)
|
||||||
}
|
}
|
||||||
if len(s.fields) > 0 {
|
if len(s.storedFields) > 0 {
|
||||||
params.Set("fields", strings.Join(s.fields, ","))
|
params.Set("stored_fields", strings.Join(s.storedFields, ","))
|
||||||
}
|
}
|
||||||
if s.refresh != nil {
|
if s.refresh != "" {
|
||||||
params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
|
params.Set("refresh", s.refresh)
|
||||||
}
|
}
|
||||||
if s.version != nil {
|
if s.version != nil {
|
||||||
params.Set("version", fmt.Sprintf("%v", s.version))
|
params.Set("version", fmt.Sprintf("%v", s.version))
|
||||||
@ -223,7 +211,7 @@ func (s *GetService) buildURL() (string, url.Values, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Do executes the operation.
|
// Do executes the operation.
|
||||||
func (s *GetService) Do() (*GetResult, error) {
|
func (s *GetService) Do(ctx context.Context) (*GetResult, error) {
|
||||||
// Check pre-conditions
|
// Check pre-conditions
|
||||||
if err := s.Validate(); err != nil {
|
if err := s.Validate(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -236,7 +224,7 @@ func (s *GetService) Do() (*GetResult, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get HTTP response
|
// Get HTTP response
|
||||||
res, err := s.client.PerformRequest("GET", path, params, nil)
|
res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -257,8 +245,6 @@ type GetResult struct {
|
|||||||
Type string `json:"_type"` // type meta field
|
Type string `json:"_type"` // type meta field
|
||||||
Id string `json:"_id"` // id meta field
|
Id string `json:"_id"` // id meta field
|
||||||
Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields)
|
Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields)
|
||||||
Timestamp int64 `json:"_timestamp"` // timestamp meta field
|
|
||||||
TTL int64 `json:"_ttl"` // ttl meta field
|
|
||||||
Routing string `json:"_routing"` // routing meta field
|
Routing string `json:"_routing"` // routing meta field
|
||||||
Parent string `json:"_parent"` // parent meta field
|
Parent string `json:"_parent"` // parent meta field
|
||||||
Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService
|
Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService
|
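To make the GetService changes above concrete for callers (Do now takes a context, Fields became StoredFields, Refresh is string-valued), here is a minimal, hypothetical lookup sketch. It is not part of the vendored diff; the endpoint, index, type and document id are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"gopkg.in/olivere/elastic.v5"
)

func main() {
	// Assumed single-node dev endpoint; sniffing disabled so the client
	// talks to it directly.
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"),
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Every service call in elastic.v5 takes a context.Context.
	res, err := client.Get().
		Index("minioevents"). // placeholder index name
		Type("event").        // placeholder type name
		Id("bucket/object.txt").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("found:", res.Found)
}
```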
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -8,11 +8,13 @@ import (
  "fmt"
  "net/url"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// GetTemplateService reads a search template.
-// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-template.html.
type GetTemplateService struct {
  client *Client
  pretty bool
@@ -81,7 +83,7 @@ func (s *GetTemplateService) Validate() error {
}

// Do executes the operation and returns the template.
-func (s *GetTemplateService) Do() (*GetTemplateResponse, error) {
+func (s *GetTemplateService) Do(ctx context.Context) (*GetTemplateResponse, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return nil, err
@@ -94,7 +96,7 @@ func (s *GetTemplateService) Do() (*GetTemplateResponse, error) {
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest("GET", path, params, nil)
+  res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
  if err != nil {
    return nil, err
  }
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -6,7 +6,7 @@ package elastic

// Highlight allows highlighting search results on one or more fields.
// For details, see:
-// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-highlighting.html
type Highlight struct {
  fields []*HighlighterField
  tagsSchema *string
@@ -19,7 +19,7 @@ type Highlight struct {
  encoder *string
  requireFieldMatch *bool
  boundaryMaxScan *int
-  boundaryChars []rune
+  boundaryChars *string
  highlighterType *string
  fragmenter *string
  highlightQuery Query
@@ -32,10 +32,6 @@ type Highlight struct {

func NewHighlight() *Highlight {
  hl := &Highlight{
-    fields: make([]*HighlighterField, 0),
-    preTags: make([]string, 0),
-    postTags: make([]string, 0),
-    boundaryChars: make([]rune, 0),
    options: make(map[string]interface{}),
  }
  return hl
@@ -102,8 +98,8 @@ func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
  return hl
}

-func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight {
-  hl.boundaryChars = append(hl.boundaryChars, boundaryChars...)
+func (hl *Highlight) BoundaryChars(boundaryChars string) *Highlight {
+  hl.boundaryChars = &boundaryChars
  return hl
}

@@ -179,8 +175,8 @@ func (hl *Highlight) Source() (interface{}, error) {
  if hl.boundaryMaxScan != nil {
    source["boundary_max_scan"] = *hl.boundaryMaxScan
  }
-  if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 {
-    source["boundary_chars"] = hl.boundaryChars
+  if hl.boundaryChars != nil {
+    source["boundary_chars"] = *hl.boundaryChars
  }
  if hl.highlighterType != nil {
    source["type"] = *hl.highlighterType
@@ -211,7 +207,7 @@ func (hl *Highlight) Source() (interface{}, error) {
  if hl.fields != nil && len(hl.fields) > 0 {
    if hl.useExplicitFieldOrder {
      // Use a slice for the fields
-      fields := make([]map[string]interface{}, 0)
+      var fields []map[string]interface{}
      for _, field := range hl.fields {
        src, err := field.Source()
        if err != nil {
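As a hedged illustration of the BoundaryChars signature change above (a rune slice in v3, a single string in v5), a search with highlighting could now be written as follows; the index name, field name and query text are invented for the example.

```go
package main

import (
	"context"

	"gopkg.in/olivere/elastic.v5"
)

// searchWithHighlight highlights matches on one field, passing the boundary
// characters as a single string as the v5 Highlight API expects.
func searchWithHighlight(ctx context.Context, client *elastic.Client) (*elastic.SearchResult, error) {
	hl := elastic.NewHighlight().
		Field("key").              // placeholder field name
		BoundaryChars(".,!? \t\n") // one string instead of ...rune
	return client.Search("minioevents"). // placeholder index name
		Query(elastic.NewMatchQuery("key", "object")).
		Highlight(hl).
		Do(ctx)
}
```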
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -8,13 +8,15 @@ import (
  "fmt"
  "net/url"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndexService adds or updates a typed JSON document in a specified index,
// making it searchable.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html
// for details.
type IndexService struct {
  client *Client
@@ -23,7 +25,6 @@ type IndexService struct {
  index string
  typ string
  parent string
-  replication string
  routing string
  timeout string
  timestamp string
@@ -31,8 +32,9 @@ type IndexService struct {
  version interface{}
  opType string
  versionType string
-  refresh *bool
-  consistency string
+  refresh string
+  waitForActiveShards string
+  pipeline string
  bodyJson interface{}
  bodyString string
}
@@ -62,15 +64,25 @@ func (s *IndexService) Type(typ string) *IndexService {
  return s
}

-// Consistency is an explicit write consistency setting for the operation.
-func (s *IndexService) Consistency(consistency string) *IndexService {
-  s.consistency = consistency
+// WaitForActiveShards sets the number of shard copies that must be active
+// before proceeding with the index operation. Defaults to 1, meaning the
+// primary shard only. Set to `all` for all shard copies, otherwise set to
+// any non-negative value less than or equal to the total number of copies
+// for the shard (number of replicas + 1).
+func (s *IndexService) WaitForActiveShards(waitForActiveShards string) *IndexService {
+  s.waitForActiveShards = waitForActiveShards
+  return s
+}
+
+// Pipeline specifies the pipeline id to preprocess incoming documents with.
+func (s *IndexService) Pipeline(pipeline string) *IndexService {
+  s.pipeline = pipeline
  return s
}

// Refresh the index after performing the operation.
-func (s *IndexService) Refresh(refresh bool) *IndexService {
-  s.refresh = &refresh
+func (s *IndexService) Refresh(refresh string) *IndexService {
+  s.refresh = refresh
  return s
}

@@ -104,12 +116,6 @@ func (s *IndexService) Parent(parent string) *IndexService {
  return s
}

-// Replication is a specific replication type.
-func (s *IndexService) Replication(replication string) *IndexService {
-  s.replication = replication
-  return s
-}
-
// Routing is a specific routing value.
func (s *IndexService) Routing(routing string) *IndexService {
  s.routing = routing
@@ -167,7 +173,7 @@ func (s *IndexService) buildURL() (string, string, url.Values, error) {
    })
  } else {
    // Automatic ID generation
-    // See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation
+    // See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#index-creation
    method = "POST"
    path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{
      "index": s.index,
@@ -183,11 +189,11 @@ func (s *IndexService) buildURL() (string, string, url.Values, error) {
  if s.pretty {
    params.Set("pretty", "1")
  }
-  if s.consistency != "" {
-    params.Set("consistency", s.consistency)
+  if s.waitForActiveShards != "" {
+    params.Set("wait_for_active_shards", s.waitForActiveShards)
  }
-  if s.refresh != nil {
-    params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+  if s.refresh != "" {
+    params.Set("refresh", s.refresh)
  }
  if s.opType != "" {
    params.Set("op_type", s.opType)
@@ -195,8 +201,8 @@ func (s *IndexService) buildURL() (string, string, url.Values, error) {
  if s.parent != "" {
    params.Set("parent", s.parent)
  }
-  if s.replication != "" {
-    params.Set("replication", s.replication)
+  if s.pipeline != "" {
+    params.Set("pipeline", s.pipeline)
  }
  if s.routing != "" {
    params.Set("routing", s.routing)
@@ -238,7 +244,7 @@ func (s *IndexService) Validate() error {
}

// Do executes the operation.
-func (s *IndexService) Do() (*IndexResponse, error) {
+func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return nil, err
@@ -259,7 +265,7 @@ func (s *IndexService) Do() (*IndexResponse, error) {
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest(method, path, params, body)
+  res, err := s.client.PerformRequest(ctx, method, path, params, body)
  if err != nil {
    return nil, err
  }
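For callers, the IndexService changes above replace Consistency() with WaitForActiveShards() and make Refresh() string-valued. A hypothetical write, with placeholder index and type names and a made-up document, would look roughly like this sketch.

```go
package main

import (
	"context"

	"gopkg.in/olivere/elastic.v5"
)

// indexEvent writes one document using the 5.x-style options shown in the
// diff above; every value passed here is only illustrative.
func indexEvent(ctx context.Context, client *elastic.Client, doc map[string]interface{}) error {
	_, err := client.Index().
		Index("minioevents").     // placeholder index name
		Type("event").            // placeholder type name
		BodyJson(doc).
		Refresh("wait_for").      // "true", "false" or "wait_for" in 5.x
		WaitForActiveShards("1"). // replaces the removed Consistency() option
		Do(ctx)
	return err
}
```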
280 vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go (generated, vendored, new file)
@@ -0,0 +1,280 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+  "fmt"
+  "net/url"
+
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IndicesAnalyzeService performs the analysis process on a text and returns
+// the tokens breakdown of the text.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-analyze.html
+// for detail.
+type IndicesAnalyzeService struct {
+  client *Client
+  pretty bool
+  index string
+  request *IndicesAnalyzeRequest
+  format string
+  preferLocal *bool
+  bodyJson interface{}
+  bodyString string
+}
+
+// NewIndicesAnalyzeService creates a new IndicesAnalyzeService.
+func NewIndicesAnalyzeService(client *Client) *IndicesAnalyzeService {
+  return &IndicesAnalyzeService{
+    client: client,
+    request: new(IndicesAnalyzeRequest),
+  }
+}
+
+// Index is the name of the index to scope the operation.
+func (s *IndicesAnalyzeService) Index(index string) *IndicesAnalyzeService {
+  s.index = index
+  return s
+}
+
+// Format of the output.
+func (s *IndicesAnalyzeService) Format(format string) *IndicesAnalyzeService {
+  s.format = format
+  return s
+}
+
+// PreferLocal, when true, specifies that a local shard should be used
+// if available. When false, a random shard is used (default: true).
+func (s *IndicesAnalyzeService) PreferLocal(preferLocal bool) *IndicesAnalyzeService {
+  s.preferLocal = &preferLocal
+  return s
+}
+
+// Request passes the analyze request to use.
+func (s *IndicesAnalyzeService) Request(request *IndicesAnalyzeRequest) *IndicesAnalyzeService {
+  if request == nil {
+    s.request = new(IndicesAnalyzeRequest)
+  } else {
+    s.request = request
+  }
+  return s
+}
+
+// Analyzer is the name of the analyzer to use.
+func (s *IndicesAnalyzeService) Analyzer(analyzer string) *IndicesAnalyzeService {
+  s.request.Analyzer = analyzer
+  return s
+}
+
+// Attributes is a list of token attributes to output; this parameter works
+// only with explain=true.
+func (s *IndicesAnalyzeService) Attributes(attributes ...string) *IndicesAnalyzeService {
+  s.request.Attributes = attributes
+  return s
+}
+
+// CharFilter is a list of character filters to use for the analysis.
+func (s *IndicesAnalyzeService) CharFilter(charFilter ...string) *IndicesAnalyzeService {
+  s.request.CharFilter = charFilter
+  return s
+}
+
+// Explain, when true, outputs more advanced details (default: false).
+func (s *IndicesAnalyzeService) Explain(explain bool) *IndicesAnalyzeService {
+  s.request.Explain = explain
+  return s
+}
+
+// Field specifies to use a specific analyzer configured for this field (instead of passing the analyzer name).
+func (s *IndicesAnalyzeService) Field(field string) *IndicesAnalyzeService {
+  s.request.Field = field
+  return s
+}
+
+// Filter is a list of filters to use for the analysis.
+func (s *IndicesAnalyzeService) Filter(filter ...string) *IndicesAnalyzeService {
+  s.request.Filter = filter
+  return s
+}
+
+// Text is the text on which the analysis should be performed (when request body is not used).
+func (s *IndicesAnalyzeService) Text(text ...string) *IndicesAnalyzeService {
+  s.request.Text = text
+  return s
+}
+
+// Tokenizer is the name of the tokenizer to use for the analysis.
+func (s *IndicesAnalyzeService) Tokenizer(tokenizer string) *IndicesAnalyzeService {
+  s.request.Tokenizer = tokenizer
+  return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesAnalyzeService) Pretty(pretty bool) *IndicesAnalyzeService {
+  s.pretty = pretty
+  return s
+}
+
+// BodyJson is the text on which the analysis should be performed.
+func (s *IndicesAnalyzeService) BodyJson(body interface{}) *IndicesAnalyzeService {
+  s.bodyJson = body
+  return s
+}
+
+// BodyString is the text on which the analysis should be performed.
+func (s *IndicesAnalyzeService) BodyString(body string) *IndicesAnalyzeService {
+  s.bodyString = body
+  return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesAnalyzeService) buildURL() (string, url.Values, error) {
+  // Build URL
+  var err error
+  var path string
+
+  if s.index == "" {
+    path = "/_analyze"
+  } else {
+    path, err = uritemplates.Expand("/{index}/_analyze", map[string]string{
+      "index": s.index,
+    })
+  }
+  if err != nil {
+    return "", url.Values{}, err
+  }
+
+  // Add query string parameters
+  params := url.Values{}
+  if s.pretty {
+    params.Set("pretty", "1")
+  }
+  if s.format != "" {
+    params.Set("format", s.format)
+  }
+  if s.preferLocal != nil {
+    params.Set("prefer_local", fmt.Sprintf("%v", *s.preferLocal))
+  }
+
+  return path, params, nil
+}
+
+// Do will execute the request with the given context.
+func (s *IndicesAnalyzeService) Do(ctx context.Context) (*IndicesAnalyzeResponse, error) {
+  // Check pre-conditions
+  if err := s.Validate(); err != nil {
+    return nil, err
+  }
+
+  path, params, err := s.buildURL()
+  if err != nil {
+    return nil, err
+  }
+
+  // Setup HTTP request body
+  var body interface{}
+  if s.bodyJson != nil {
+    body = s.bodyJson
+  } else if s.bodyString != "" {
+    body = s.bodyString
+  } else {
+    // Request parameters are deprecated in 5.1.1, and we must use a JSON
+    // structure in the body to pass the parameters.
+    // See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-analyze.html
+    body = s.request
+  }
+
+  res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+  if err != nil {
+    return nil, err
+  }
+
+  ret := new(IndicesAnalyzeResponse)
+  if err = s.client.decoder.Decode(res.Body, ret); err != nil {
+    return nil, err
+  }
+
+  return ret, nil
+}
+
+func (s *IndicesAnalyzeService) Validate() error {
+  var invalid []string
+  if s.bodyJson == nil && s.bodyString == "" {
+    if len(s.request.Text) == 0 {
+      invalid = append(invalid, "Text")
+    }
+  }
+  if len(invalid) > 0 {
+    return fmt.Errorf("missing required fields: %v", invalid)
+  }
+  return nil
+}
+
+// IndicesAnalyzeRequest specifies the parameters of the analyze request.
+type IndicesAnalyzeRequest struct {
+  Text []string `json:"text,omitempty"`
+  Analyzer string `json:"analyzer,omitempty"`
+  Tokenizer string `json:"tokenizer,omitempty"`
+  Filter []string `json:"filter,omitempty"`
+  CharFilter []string `json:"char_filter,omitempty"`
+  Field string `json:"field,omitempty"`
+  Explain bool `json:"explain,omitempty"`
+  Attributes []string `json:"attributes,omitempty"`
+}
+
+type IndicesAnalyzeResponse struct {
+  Tokens []IndicesAnalyzeResponseToken `json:"tokens"` // json part for normal message
+  Detail IndicesAnalyzeResponseDetail `json:"detail"` // json part for verbose message of explain request
+}
+
+type IndicesAnalyzeResponseToken struct {
+  Token string `json:"token"`
+  StartOffset int `json:"start_offset"`
+  EndOffset int `json:"end_offset"`
+  Type string `json:"type"`
+  Position int `json:"position"`
+}
+
+type IndicesAnalyzeResponseDetail struct {
+  CustomAnalyzer bool `json:"custom_analyzer"`
+  Charfilters []interface{} `json:"charfilters"`
+  Analyzer struct {
+    Name string `json:"name"`
+    Tokens []struct {
+      Token string `json:"token"`
+      StartOffset int `json:"start_offset"`
+      EndOffset int `json:"end_offset"`
+      Type string `json:"type"`
+      Position int `json:"position"`
+      Bytes string `json:"bytes"`
+      PositionLength int `json:"positionLength"`
+    } `json:"tokens"`
+  } `json:"analyzer"`
+  Tokenizer struct {
+    Name string `json:"name"`
+    Tokens []struct {
+      Token string `json:"token"`
+      StartOffset int `json:"start_offset"`
+      EndOffset int `json:"end_offset"`
+      Type string `json:"type"`
+      Position int `json:"position"`
+    } `json:"tokens"`
+  } `json:"tokenizer"`
+  Tokenfilters []struct {
+    Name string `json:"name"`
+    Tokens []struct {
+      Token string `json:"token"`
+      StartOffset int `json:"start_offset"`
+      EndOffset int `json:"end_offset"`
+      Type string `json:"type"`
+      Position int `json:"position"`
+      Keyword bool `json:"keyword"`
+    } `json:"tokens"`
+  } `json:"tokenfilters"`
+}
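The new IndicesAnalyzeService above is driven through a fluent request builder and the usual Do(ctx) call. A small usage sketch follows; the analyzer name and sample text are arbitrary, and the snippet is not part of the vendored code.

```go
package main

import (
	"context"
	"fmt"

	"gopkg.in/olivere/elastic.v5"
)

// analyzeSample runs the standard analyzer over a sample string and prints
// the tokens returned in IndicesAnalyzeResponse.Tokens.
func analyzeSample(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewIndicesAnalyzeService(client).
		Analyzer("standard").
		Text("The quick brown fox").
		Do(ctx)
	if err != nil {
		return err
	}
	for _, tok := range res.Tokens {
		fmt.Printf("%s (position %d)\n", tok.Token, tok.Position)
	}
	return nil
}
```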
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -8,12 +8,14 @@ import (
  "fmt"
  "net/url"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndicesCloseService closes an index.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-open-close.html
// for details.
type IndicesCloseService struct {
  client *Client
@@ -120,7 +122,7 @@ func (s *IndicesCloseService) Validate() error {
}

// Do executes the operation.
-func (s *IndicesCloseService) Do() (*IndicesCloseResponse, error) {
+func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return nil, err
@@ -133,7 +135,7 @@ func (s *IndicesCloseService) Do() (*IndicesCloseResponse, error) {
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest("POST", path, params, nil)
+  res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
  if err != nil {
    return nil, err
  }
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -8,12 +8,14 @@ import (
  "errors"
  "net/url"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndicesCreateService creates a new index.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-create-index.html
// for details.
type IndicesCreateService struct {
  client *Client
@@ -75,7 +77,7 @@ func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService {
}

// Do executes the operation.
-func (b *IndicesCreateService) Do() (*IndicesCreateResult, error) {
+func (b *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, error) {
  if b.index == "" {
    return nil, errors.New("missing index name")
  }
@@ -108,7 +110,7 @@ func (b *IndicesCreateService) Do() (*IndicesCreateResult, error) {
  }

  // Get response
-  res, err := b.client.PerformRequest("PUT", path, params, body)
+  res, err := b.client.PerformRequest(ctx, "PUT", path, params, body)
  if err != nil {
    return nil, err
  }
@@ -125,4 +127,5 @@ func (b *IndicesCreateService) Do() (*IndicesCreateResult, error) {
// IndicesCreateResult is the outcome of creating a new index.
type IndicesCreateResult struct {
  Acknowledged bool `json:"acknowledged"`
+  ShardsAcknowledged bool `json:"shards_acknowledged"`
}
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -9,12 +9,14 @@ import (
  "net/url"
  "strings"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndicesDeleteService allows to delete existing indices.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-delete-index.html
// for details.
type IndicesDeleteService struct {
  client *Client
@@ -94,7 +96,7 @@ func (s *IndicesDeleteService) Validate() error {
}

// Do executes the operation.
-func (s *IndicesDeleteService) Do() (*IndicesDeleteResponse, error) {
+func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return nil, err
@@ -107,7 +109,7 @@ func (s *IndicesDeleteService) Do() (*IndicesDeleteResponse, error) {
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest("DELETE", path, params, nil)
+  res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
  if err != nil {
    return nil, err
  }
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -8,11 +8,13 @@ import (
  "fmt"
  "net/url"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndicesDeleteTemplateService deletes index templates.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html.
type IndicesDeleteTemplateService struct {
  client *Client
  pretty bool
@@ -89,7 +91,7 @@ func (s *IndicesDeleteTemplateService) Validate() error {
}

// Do executes the operation.
-func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) {
+func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTemplateResponse, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return nil, err
@@ -102,7 +104,7 @@ func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, err
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest("DELETE", path, params, nil)
+  res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
  if err != nil {
    return nil, err
  }
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -10,12 +10,14 @@ import (
  "net/url"
  "strings"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndicesExistsService checks if an index or indices exist or not.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-exists.html
// for details.
type IndicesExistsService struct {
  client *Client
@@ -119,7 +121,7 @@ func (s *IndicesExistsService) Validate() error {
}

// Do executes the operation.
-func (s *IndicesExistsService) Do() (bool, error) {
+func (s *IndicesExistsService) Do(ctx context.Context) (bool, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return false, err
@@ -132,7 +134,7 @@ func (s *IndicesExistsService) Do() (bool, error) {
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest("HEAD", path, params, nil, 404)
+  res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
  if err != nil {
    return false, err
  }
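With IndicesCreateService and IndicesExistsService both context-aware now, the ensure-the-index-exists step that a notification target would typically perform before writing documents looks roughly like the sketch below; the helper name and index value are placeholders, not code from this commit.

```go
package main

import (
	"context"

	"gopkg.in/olivere/elastic.v5"
)

// ensureIndex creates the target index only if it does not exist yet.
func ensureIndex(ctx context.Context, client *elastic.Client, index string) error {
	exists, err := client.IndexExists(index).Do(ctx)
	if err != nil {
		return err
	}
	if exists {
		return nil
	}
	_, err = client.CreateIndex(index).Do(ctx)
	return err
}
```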
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -9,11 +9,13 @@ import (
  "net/http"
  "net/url"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndicesExistsTemplateService checks if a given template exists.
-// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html#indices-templates-exists
// for documentation.
type IndicesExistsTemplateService struct {
  client *Client
@@ -82,7 +84,7 @@ func (s *IndicesExistsTemplateService) Validate() error {
}

// Do executes the operation.
-func (s *IndicesExistsTemplateService) Do() (bool, error) {
+func (s *IndicesExistsTemplateService) Do(ctx context.Context) (bool, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return false, err
@@ -95,7 +97,7 @@ func (s *IndicesExistsTemplateService) Do() (bool, error) {
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest("HEAD", path, params, nil, 404)
+  res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
  if err != nil {
    return false, err
  }
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -10,12 +10,14 @@ import (
  "net/url"
  "strings"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndicesExistsTypeService checks if one or more types exist in one or more indices.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-types-exists.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-types-exists.html
// for details.
type IndicesExistsTypeService struct {
  client *Client
@@ -32,8 +34,6 @@ type IndicesExistsTypeService struct {
func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService {
  return &IndicesExistsTypeService{
    client: client,
-    index: make([]string, 0),
-    typ: make([]string, 0),
  }
}

@@ -87,7 +87,7 @@ func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService
// buildURL builds the URL for the operation.
func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
  // Build URL
-  path, err := uritemplates.Expand("/{index}/{type}", map[string]string{
+  path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
    "index": strings.Join(s.index, ","),
    "type": strings.Join(s.typ, ","),
  })
@@ -131,7 +131,7 @@ func (s *IndicesExistsTypeService) Validate() error {
}

// Do executes the operation.
-func (s *IndicesExistsTypeService) Do() (bool, error) {
+func (s *IndicesExistsTypeService) Do(ctx context.Context) (bool, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return false, err
@@ -144,7 +144,7 @@ func (s *IndicesExistsTypeService) Do() (bool, error) {
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest("HEAD", path, params, nil, 404)
+  res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
  if err != nil {
    return false, err
  }
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -9,14 +9,16 @@ import (
  "net/url"
  "strings"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// Flush allows to flush one or more indices. The flush process of an index
// basically frees memory from the index by flushing data to the index
// storage and clearing the internal transaction log.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-flush.html
// for details.
type IndicesFlushService struct {
  client *Client
@@ -135,7 +137,7 @@ func (s *IndicesFlushService) Validate() error {
}

// Do executes the service.
-func (s *IndicesFlushService) Do() (*IndicesFlushResponse, error) {
+func (s *IndicesFlushService) Do(ctx context.Context) (*IndicesFlushResponse, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return nil, err
@@ -148,7 +150,7 @@ func (s *IndicesFlushService) Do() (*IndicesFlushResponse, error) {
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest("POST", path, params, nil)
+  res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
  if err != nil {
    return nil, err
  }
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -9,7 +9,9 @@ import (
  "net/url"
  "strings"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndicesForcemergeService allows to force merging of one or more indices.
@@ -17,7 +19,7 @@ import (
// within each shard. The force merge operation allows to reduce the number
// of segments by merging them.
//
-// See http://www.elastic.co/guide/en/elasticsearch/reference/2.1/indices-forcemerge.html
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-forcemerge.html
// for more information.
type IndicesForcemergeService struct {
  client *Client
@@ -30,7 +32,6 @@ type IndicesForcemergeService struct {
  maxNumSegments interface{}
  onlyExpungeDeletes *bool
  operationThreading interface{}
-  waitForMerge *bool
}

// NewIndicesForcemergeService creates a new IndicesForcemergeService.
@@ -99,13 +100,6 @@ func (s *IndicesForcemergeService) OperationThreading(operationThreading interfa
  return s
}

-// WaitForMerge specifies whether the request should block until the
-// merge process is finished (default: true).
-func (s *IndicesForcemergeService) WaitForMerge(waitForMerge bool) *IndicesForcemergeService {
-  s.waitForMerge = &waitForMerge
-  return s
-}
-
// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService {
  s.pretty = pretty
@@ -155,9 +149,6 @@ func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) {
  if s.operationThreading != nil {
    params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
  }
-  if s.waitForMerge != nil {
-    params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge))
-  }
  return path, params, nil
}

@@ -167,7 +158,7 @@ func (s *IndicesForcemergeService) Validate() error {
}

// Do executes the operation.
-func (s *IndicesForcemergeService) Do() (*IndicesForcemergeResponse, error) {
+func (s *IndicesForcemergeService) Do(ctx context.Context) (*IndicesForcemergeResponse, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return nil, err
@@ -180,7 +171,7 @@ func (s *IndicesForcemergeService) Do() (*IndicesForcemergeResponse, error) {
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest("POST", path, params, nil)
+  res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
  if err != nil {
    return nil, err
  }
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

@@ -9,12 +9,14 @@ import (
  "net/url"
  "strings"

-  "gopkg.in/olivere/elastic.v3/uritemplates"
+  "golang.org/x/net/context"
+
+  "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndicesGetService retrieves information about one or more indices.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-index.html
// for more details.
type IndicesGetService struct {
  client *Client
@@ -166,7 +168,7 @@ func (s *IndicesGetService) Validate() error {
}

// Do executes the operation.
-func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) {
+func (s *IndicesGetService) Do(ctx context.Context) (map[string]*IndicesGetResponse, error) {
  // Check pre-conditions
  if err := s.Validate(); err != nil {
    return nil, err
@@ -179,7 +181,7 @@ func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) {
  }

  // Get HTTP response
-  res, err := s.client.PerformRequest("GET", path, params, nil)
+  res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
  if err != nil {
    return nil, err
  }
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -9,65 +9,71 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
+// AliasesService returns the aliases associated with one or more indices.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-aliases.html.
 type AliasesService struct {
 	client  *Client
-	indices []string
+	index   []string
 	pretty  bool
 }
 
+// NewAliasesService instantiates a new AliasesService.
 func NewAliasesService(client *Client) *AliasesService {
 	builder := &AliasesService{
 		client:  client,
-		indices: make([]string, 0),
 	}
 	return builder
 }
 
+// Pretty asks Elasticsearch to indent the returned JSON.
 func (s *AliasesService) Pretty(pretty bool) *AliasesService {
 	s.pretty = pretty
 	return s
 }
 
-func (s *AliasesService) Index(indices ...string) *AliasesService {
-	s.indices = append(s.indices, indices...)
+// Index adds one or more indices.
+func (s *AliasesService) Index(index ...string) *AliasesService {
+	s.index = append(s.index, index...)
 	return s
 }
 
-func (s *AliasesService) Do() (*AliasesResult, error) {
+// buildURL builds the URL for the operation.
+func (s *AliasesService) buildURL() (string, url.Values, error) {
 	var err error
+	var path string
 
-	// Build url
-	path := "/"
-
-	// Indices part
-	indexPart := make([]string, 0)
-	for _, index := range s.indices {
-		index, err = uritemplates.Expand("{index}", map[string]string{
-			"index": index,
-		})
-		if err != nil {
-			return nil, err
-		}
-		indexPart = append(indexPart, index)
-	}
-	path += strings.Join(indexPart, ",")
-
-	// TODO Add types here
-
-	// Search
-	path += "/_aliases"
-
-	// Parameters
-	params := make(url.Values)
+	if len(s.index) > 0 {
+		path, err = uritemplates.Expand("/{index}/_aliases", map[string]string{
+			"index": strings.Join(s.index, ","),
+		})
+	} else {
+		path = "/_aliases"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
 	if s.pretty {
 		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
 	}
+	return path, params, nil
+}
+
+func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error) {
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
 
 	// Get response
-	res, err := s.client.PerformRequest("GET", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -131,8 +137,7 @@ type aliasResult struct {
 }
 
 func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
-	indices := make([]string, 0)
-
+	var indices []string
 	for indexName, indexInfo := range ar.Indices {
 		for _, aliasInfo := range indexInfo.Aliases {
 			if aliasInfo.AliasName == aliasName {
@@ -140,7 +145,6 @@ func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
 			}
 		}
 	}
-
 	return indices
 }
 
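Illustrative sketch, not part of the commit: resolving the indices behind an alias with the reworked AliasesService, using only the NewAliasesService, Pretty, Do(ctx) and IndicesByAlias identifiers shown in the hunks above; the client is assumed to exist already.

package example

import (
	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

// indicesForAlias returns the indices currently behind the given alias.
func indicesForAlias(client *elastic.Client, alias string) ([]string, error) {
	res, err := elastic.NewAliasesService(client).Pretty(true).Do(context.Background())
	if err != nil {
		return nil, err
	}
	return res.IndicesByAlias(alias), nil
}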
184 vendor/gopkg.in/olivere/elastic.v5/indices_get_field_mapping.go generated vendored Normal file
@@ -0,0 +1,184 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"fmt"
	"net/url"
	"strings"

	"golang.org/x/net/context"

	"gopkg.in/olivere/elastic.v5/uritemplates"
)

// IndicesGetFieldMappingService retrieves the mapping definitions for the fields in an index
// or index/type.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-field-mapping.html
// for details.
type IndicesGetFieldMappingService struct {
	client            *Client
	pretty            bool
	index             []string
	typ               []string
	field             []string
	local             *bool
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
}

// NewGetFieldMappingService is an alias for NewIndicesGetFieldMappingService.
// Use NewIndicesGetFieldMappingService.
func NewGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
	return NewIndicesGetFieldMappingService(client)
}

// NewIndicesGetFieldMappingService creates a new IndicesGetFieldMappingService.
func NewIndicesGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
	return &IndicesGetFieldMappingService{
		client: client,
	}
}

// Index is a list of index names.
func (s *IndicesGetFieldMappingService) Index(indices ...string) *IndicesGetFieldMappingService {
	s.index = append(s.index, indices...)
	return s
}

// Type is a list of document types.
func (s *IndicesGetFieldMappingService) Type(types ...string) *IndicesGetFieldMappingService {
	s.typ = append(s.typ, types...)
	return s
}

// Field is a list of fields.
func (s *IndicesGetFieldMappingService) Field(fields ...string) *IndicesGetFieldMappingService {
	s.field = append(s.field, fields...)
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// This includes `_all` string or when no indices have been specified.
func (s *IndicesGetFieldMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetFieldMappingService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both..
func (s *IndicesGetFieldMappingService) ExpandWildcards(expandWildcards string) *IndicesGetFieldMappingService {
	s.expandWildcards = expandWildcards
	return s
}

// Local indicates whether to return local information, do not retrieve
// the state from master node (default: false).
func (s *IndicesGetFieldMappingService) Local(local bool) *IndicesGetFieldMappingService {
	s.local = &local
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesGetFieldMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetFieldMappingService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesGetFieldMappingService) Pretty(pretty bool) *IndicesGetFieldMappingService {
	s.pretty = pretty
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesGetFieldMappingService) buildURL() (string, url.Values, error) {
	var index, typ, field []string

	if len(s.index) > 0 {
		index = s.index
	} else {
		index = []string{"_all"}
	}

	if len(s.typ) > 0 {
		typ = s.typ
	} else {
		typ = []string{"_all"}
	}

	if len(s.field) > 0 {
		field = s.field
	} else {
		field = []string{"*"}
	}

	// Build URL
	path, err := uritemplates.Expand("/{index}/_mapping/{type}/field/{field}", map[string]string{
		"index": strings.Join(index, ","),
		"type":  strings.Join(typ, ","),
		"field": strings.Join(field, ","),
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesGetFieldMappingService) Validate() error {
	return nil
}

// Do executes the operation. It returns mapping definitions for an index
// or index/type.
func (s *IndicesGetFieldMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
	var ret map[string]interface{}

	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}
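Illustrative sketch, not part of the commit: fetching a single field's mapping with the new IndicesGetFieldMappingService shown above; the index, type and field names are placeholders.

package example

import (
	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

// messageFieldMapping fetches the mapping of the "message" field.
func messageFieldMapping(client *elastic.Client) (map[string]interface{}, error) {
	return elastic.NewIndicesGetFieldMappingService(client).
		Index("myindex").
		Type("mytype").
		Field("message").
		Do(context.Background())
}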
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -9,13 +9,15 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // IndicesGetMappingService retrieves the mapping definitions for an index or
 // index/type.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-mapping.html
 // for details.
 type IndicesGetMappingService struct {
 	client *Client
@@ -142,7 +144,7 @@ func (s *IndicesGetMappingService) Validate() error {
 
 // Do executes the operation. It returns mapping definitions for an index
 // or index/type.
-func (s *IndicesGetMappingService) Do() (map[string]interface{}, error) {
+func (s *IndicesGetMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -155,7 +157,7 @@ func (s *IndicesGetMappingService) Do() (map[string]interface{}, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -9,13 +9,15 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // IndicesGetSettingsService allows to retrieve settings of one
 // or more indices.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-settings.html
 // for more details.
 type IndicesGetSettingsService struct {
 	client *Client
@@ -150,7 +152,7 @@ func (s *IndicesGetSettingsService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) {
+func (s *IndicesGetSettingsService) Do(ctx context.Context) (map[string]*IndicesGetSettingsResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -163,7 +165,7 @@ func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -9,11 +9,13 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // IndicesGetTemplateService returns an index template.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html.
 type IndicesGetTemplateService struct {
 	client *Client
 	pretty bool
@@ -91,7 +93,7 @@ func (s *IndicesGetTemplateService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) {
+func (s *IndicesGetTemplateService) Do(ctx context.Context) (map[string]*IndicesGetTemplateResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -104,7 +106,7 @@ func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -8,12 +8,14 @@ import (
 	"fmt"
 	"net/url"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // IndicesOpenService opens an index.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-open-close.html
 // for details.
 type IndicesOpenService struct {
 	client *Client
@@ -124,7 +126,7 @@ func (s *IndicesOpenService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *IndicesOpenService) Do() (*IndicesOpenResponse, error) {
+func (s *IndicesOpenService) Do(ctx context.Context) (*IndicesOpenResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -137,7 +139,7 @@ func (s *IndicesOpenService) Do() (*IndicesOpenResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("POST", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
296
vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go
generated
vendored
Normal file
296
vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go
generated
vendored
Normal file
@ -0,0 +1,296 @@
|
|||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// -- Actions --
|
||||||
|
|
||||||
|
// AliasAction is an action to apply to an alias, e.g. "add" or "remove".
|
||||||
|
type AliasAction interface {
|
||||||
|
Source() (interface{}, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AliasAddAction is an action to add to an alias.
|
||||||
|
type AliasAddAction struct {
|
||||||
|
index []string // index name(s)
|
||||||
|
alias string // alias name
|
||||||
|
filter Query
|
||||||
|
routing string
|
||||||
|
searchRouting string
|
||||||
|
indexRouting string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAliasAddAction returns an action to add an alias.
|
||||||
|
func NewAliasAddAction(alias string) *AliasAddAction {
|
||||||
|
return &AliasAddAction{
|
||||||
|
alias: alias,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index associates one or more indices to the alias.
|
||||||
|
func (a *AliasAddAction) Index(index ...string) *AliasAddAction {
|
||||||
|
a.index = append(a.index, index...)
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AliasAddAction) removeBlankIndexNames() {
|
||||||
|
var indices []string
|
||||||
|
for _, index := range a.index {
|
||||||
|
if len(index) > 0 {
|
||||||
|
indices = append(indices, index)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
a.index = indices
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter associates a filter to the alias.
|
||||||
|
func (a *AliasAddAction) Filter(filter Query) *AliasAddAction {
|
||||||
|
a.filter = filter
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
// Routing associates a routing value to the alias.
|
||||||
|
// This basically sets index and search routing to the same value.
|
||||||
|
func (a *AliasAddAction) Routing(routing string) *AliasAddAction {
|
||||||
|
a.routing = routing
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndexRouting associates an index routing value to the alias.
|
||||||
|
func (a *AliasAddAction) IndexRouting(routing string) *AliasAddAction {
|
||||||
|
a.indexRouting = routing
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
// SearchRouting associates a search routing value to the alias.
|
||||||
|
func (a *AliasAddAction) SearchRouting(routing ...string) *AliasAddAction {
|
||||||
|
a.searchRouting = strings.Join(routing, ",")
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (a *AliasAddAction) Validate() error {
|
||||||
|
var invalid []string
|
||||||
|
if len(a.alias) == 0 {
|
||||||
|
invalid = append(invalid, "Alias")
|
||||||
|
}
|
||||||
|
if len(a.index) == 0 {
|
||||||
|
invalid = append(invalid, "Index")
|
||||||
|
}
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
return fmt.Errorf("missing required fields: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Source returns the JSON-serializable data.
|
||||||
|
func (a *AliasAddAction) Source() (interface{}, error) {
|
||||||
|
a.removeBlankIndexNames()
|
||||||
|
if err := a.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
src := make(map[string]interface{})
|
||||||
|
act := make(map[string]interface{})
|
||||||
|
src["add"] = act
|
||||||
|
act["alias"] = a.alias
|
||||||
|
switch len(a.index) {
|
||||||
|
case 1:
|
||||||
|
act["index"] = a.index[0]
|
||||||
|
default:
|
||||||
|
act["indices"] = a.index
|
||||||
|
}
|
||||||
|
if a.filter != nil {
|
||||||
|
f, err := a.filter.Source()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
act["filter"] = f
|
||||||
|
}
|
||||||
|
if len(a.routing) > 0 {
|
||||||
|
act["routing"] = a.routing
|
||||||
|
}
|
||||||
|
if len(a.indexRouting) > 0 {
|
||||||
|
act["index_routing"] = a.indexRouting
|
||||||
|
}
|
||||||
|
if len(a.searchRouting) > 0 {
|
||||||
|
act["search_routing"] = a.searchRouting
|
||||||
|
}
|
||||||
|
return src, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AliasRemoveAction is an action to remove an alias.
|
||||||
|
type AliasRemoveAction struct {
|
||||||
|
index []string // index name(s)
|
||||||
|
alias string // alias name
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAliasRemoveAction returns an action to remove an alias.
|
||||||
|
func NewAliasRemoveAction(alias string) *AliasRemoveAction {
|
||||||
|
return &AliasRemoveAction{
|
||||||
|
alias: alias,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index associates one or more indices to the alias.
|
||||||
|
func (a *AliasRemoveAction) Index(index ...string) *AliasRemoveAction {
|
||||||
|
a.index = append(a.index, index...)
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AliasRemoveAction) removeBlankIndexNames() {
|
||||||
|
var indices []string
|
||||||
|
for _, index := range a.index {
|
||||||
|
if len(index) > 0 {
|
||||||
|
indices = append(indices, index)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
a.index = indices
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (a *AliasRemoveAction) Validate() error {
|
||||||
|
var invalid []string
|
||||||
|
if len(a.alias) == 0 {
|
||||||
|
invalid = append(invalid, "Alias")
|
||||||
|
}
|
||||||
|
if len(a.index) == 0 {
|
||||||
|
invalid = append(invalid, "Index")
|
||||||
|
}
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
return fmt.Errorf("missing required fields: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Source returns the JSON-serializable data.
|
||||||
|
func (a *AliasRemoveAction) Source() (interface{}, error) {
|
||||||
|
a.removeBlankIndexNames()
|
||||||
|
if err := a.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
src := make(map[string]interface{})
|
||||||
|
act := make(map[string]interface{})
|
||||||
|
src["remove"] = act
|
||||||
|
act["alias"] = a.alias
|
||||||
|
switch len(a.index) {
|
||||||
|
case 1:
|
||||||
|
act["index"] = a.index[0]
|
||||||
|
default:
|
||||||
|
act["indices"] = a.index
|
||||||
|
}
|
||||||
|
return src, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- Service --
|
||||||
|
|
||||||
|
// AliasService enables users to add or remove an alias.
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-aliases.html
|
||||||
|
// for details.
|
||||||
|
type AliasService struct {
|
||||||
|
client *Client
|
||||||
|
actions []AliasAction
|
||||||
|
pretty bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAliasService implements a service to manage aliases.
|
||||||
|
func NewAliasService(client *Client) *AliasService {
|
||||||
|
builder := &AliasService{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
return builder
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty asks Elasticsearch to indent the HTTP response.
|
||||||
|
func (s *AliasService) Pretty(pretty bool) *AliasService {
|
||||||
|
s.pretty = pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds an alias to an index.
|
||||||
|
func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
|
||||||
|
action := NewAliasAddAction(aliasName).Index(indexName)
|
||||||
|
s.actions = append(s.actions, action)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds an alias to an index and associates a filter to the alias.
|
||||||
|
func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService {
|
||||||
|
action := NewAliasAddAction(aliasName).Index(indexName).Filter(filter)
|
||||||
|
s.actions = append(s.actions, action)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes an alias.
|
||||||
|
func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
|
||||||
|
action := NewAliasRemoveAction(aliasName).Index(indexName)
|
||||||
|
s.actions = append(s.actions, action)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Action accepts one or more AliasAction instances which can be
|
||||||
|
// of type AliasAddAction or AliasRemoveAction.
|
||||||
|
func (s *AliasService) Action(action ...AliasAction) *AliasService {
|
||||||
|
s.actions = append(s.actions, action...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *AliasService) buildURL() (string, url.Values, error) {
|
||||||
|
path := "/_aliases"
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if s.pretty {
|
||||||
|
params.Set("pretty", fmt.Sprintf("%v", s.pretty))
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the command.
|
||||||
|
func (s *AliasService) Do(ctx context.Context) (*AliasResult, error) {
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Body with actions
|
||||||
|
body := make(map[string]interface{})
|
||||||
|
var actions []interface{}
|
||||||
|
for _, action := range s.actions {
|
||||||
|
src, err := action.Source()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
actions = append(actions, src)
|
||||||
|
}
|
||||||
|
body["actions"] = actions
|
||||||
|
|
||||||
|
// Get response
|
||||||
|
res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return results
|
||||||
|
ret := new(AliasResult)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- Result of an alias request.
|
||||||
|
|
||||||
|
// AliasResult is the outcome of calling Do on AliasService.
|
||||||
|
type AliasResult struct {
|
||||||
|
Acknowledged bool `json:"acknowledged"`
|
||||||
|
}
|
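Illustrative sketch, not part of the commit: atomically swapping an alias between two indices with the new AliasService and its AliasAddAction/AliasRemoveAction types from the file above; the alias and index names are placeholders.

package example

import (
	"fmt"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

// swapAlias removes an alias from oldIndex and adds it to newIndex in a single
// _aliases request, using the action types defined in the new file above.
func swapAlias(client *elastic.Client, alias, oldIndex, newIndex string) error {
	res, err := elastic.NewAliasService(client).
		Action(
			elastic.NewAliasRemoveAction(alias).Index(oldIndex),
			elastic.NewAliasAddAction(alias).Index(newIndex),
		).
		Do(context.Background())
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("alias swap for %q was not acknowledged", alias)
	}
	return nil
}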
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -9,13 +9,15 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // IndicesPutMappingService allows to register specific mapping definition
 // for a specific type.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-put-mapping.html
 // for details.
 type IndicesPutMappingService struct {
 	client *Client
@@ -180,7 +182,7 @@ func (s *IndicesPutMappingService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *IndicesPutMappingService) Do() (*PutMappingResponse, error) {
+func (s *IndicesPutMappingService) Do(ctx context.Context) (*PutMappingResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -201,7 +203,7 @@ func (s *IndicesPutMappingService) Do() (*PutMappingResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("PUT", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
 	if err != nil {
 		return nil, err
 	}
@@ -1,4 +1,4 @@
-// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -9,14 +9,16 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // IndicesPutSettingsService changes specific index level settings in
 // real time.
 //
 // See the documentation at
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html.
+// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-update-settings.html.
 type IndicesPutSettingsService struct {
 	client *Client
 	pretty bool
@@ -143,7 +145,7 @@ func (s *IndicesPutSettingsService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *IndicesPutSettingsService) Do() (*IndicesPutSettingsResponse, error) {
+func (s *IndicesPutSettingsService) Do(ctx context.Context) (*IndicesPutSettingsResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -164,7 +166,7 @@ func (s *IndicesPutSettingsService) Do() (*IndicesPutSettingsResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("PUT", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
 	if err != nil {
 		return nil, err
 	}
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -8,11 +8,13 @@ import (
 	"fmt"
 	"net/url"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // IndicesPutTemplateService creates or updates index mappings.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html.
 type IndicesPutTemplateService struct {
 	client *Client
 	pretty bool
@@ -138,7 +140,7 @@ func (s *IndicesPutTemplateService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) {
+func (s *IndicesPutTemplateService) Do(ctx context.Context) (*IndicesPutTemplateResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -159,7 +161,7 @@ func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("PUT", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
 	if err != nil {
 		return nil, err
 	}
105 vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go generated vendored Normal file
@@ -0,0 +1,105 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"fmt"
	"net/url"
	"strings"

	"golang.org/x/net/context"

	"gopkg.in/olivere/elastic.v5/uritemplates"
)

// RefreshService explicitly refreshes one or more indices.
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-refresh.html.
type RefreshService struct {
	client *Client
	index  []string
	force  *bool
	pretty bool
}

// NewRefreshService creates a new instance of RefreshService.
func NewRefreshService(client *Client) *RefreshService {
	builder := &RefreshService{
		client: client,
	}
	return builder
}

// Index specifies the indices to refresh.
func (s *RefreshService) Index(index ...string) *RefreshService {
	s.index = append(s.index, index...)
	return s
}

// Force forces a refresh.
func (s *RefreshService) Force(force bool) *RefreshService {
	s.force = &force
	return s
}

// Pretty asks Elasticsearch to return indented JSON.
func (s *RefreshService) Pretty(pretty bool) *RefreshService {
	s.pretty = pretty
	return s
}

// buildURL builds the URL for the operation.
func (s *RefreshService) buildURL() (string, url.Values, error) {
	var err error
	var path string

	if len(s.index) > 0 {
		path, err = uritemplates.Expand("/{index}/_refresh", map[string]string{
			"index": strings.Join(s.index, ","),
		})
	} else {
		path = "/_refresh"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.force != nil {
		params.Set("force", fmt.Sprintf("%v", *s.force))
	}
	if s.pretty {
		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
	}
	return path, params, nil
}

// Do executes the request.
func (s *RefreshService) Do(ctx context.Context) (*RefreshResult, error) {
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get response
	res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return result
	ret := new(RefreshResult)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// -- Result of a refresh request.

// RefreshResult is the outcome of RefreshService.Do.
type RefreshResult struct {
	Shards shardsInfo `json:"_shards,omitempty"`
}
268
vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go
generated
vendored
Normal file
268
vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go
generated
vendored
Normal file
@ -0,0 +1,268 @@
|
|||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
|
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IndicesRolloverService rolls an alias over to a new index when the
|
||||||
|
// existing index is considered to be too large or too old.
|
||||||
|
//
|
||||||
|
// It is documented at
|
||||||
|
// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-rollover-index.html.
|
||||||
|
type IndicesRolloverService struct {
|
||||||
|
client *Client
|
||||||
|
pretty bool
|
||||||
|
dryRun bool
|
||||||
|
newIndex string
|
||||||
|
alias string
|
||||||
|
masterTimeout string
|
||||||
|
timeout string
|
||||||
|
waitForActiveShards string
|
||||||
|
conditions map[string]interface{}
|
||||||
|
settings map[string]interface{}
|
||||||
|
mappings map[string]interface{}
|
||||||
|
bodyJson interface{}
|
||||||
|
bodyString string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesRolloverService creates a new IndicesRolloverService.
|
||||||
|
func NewIndicesRolloverService(client *Client) *IndicesRolloverService {
|
||||||
|
return &IndicesRolloverService{
|
||||||
|
client: client,
|
||||||
|
conditions: make(map[string]interface{}),
|
||||||
|
settings: make(map[string]interface{}),
|
||||||
|
mappings: make(map[string]interface{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Alias is the name of the alias to rollover.
|
||||||
|
func (s *IndicesRolloverService) Alias(alias string) *IndicesRolloverService {
|
||||||
|
s.alias = alias
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndex is the name of the rollover index.
|
||||||
|
func (s *IndicesRolloverService) NewIndex(newIndex string) *IndicesRolloverService {
|
||||||
|
s.newIndex = newIndex
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// MasterTimeout specifies the timeout for connection to master.
|
||||||
|
func (s *IndicesRolloverService) MasterTimeout(masterTimeout string) *IndicesRolloverService {
|
||||||
|
s.masterTimeout = masterTimeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timeout sets an explicit operation timeout.
|
||||||
|
func (s *IndicesRolloverService) Timeout(timeout string) *IndicesRolloverService {
|
||||||
|
s.timeout = timeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForActiveShards sets the number of active shards to wait for on the
|
||||||
|
// newly created rollover index before the operation returns.
|
||||||
|
func (s *IndicesRolloverService) WaitForActiveShards(waitForActiveShards string) *IndicesRolloverService {
|
||||||
|
s.waitForActiveShards = waitForActiveShards
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty indicates that the JSON response be indented and human readable.
|
||||||
|
func (s *IndicesRolloverService) Pretty(pretty bool) *IndicesRolloverService {
|
||||||
|
s.pretty = pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// DryRun, when set, specifies that only conditions are checked without
|
||||||
|
// performing the actual rollover.
|
||||||
|
func (s *IndicesRolloverService) DryRun(dryRun bool) *IndicesRolloverService {
|
||||||
|
s.dryRun = dryRun
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Conditions allows to specify all conditions as a dictionary.
|
||||||
|
func (s *IndicesRolloverService) Conditions(conditions map[string]interface{}) *IndicesRolloverService {
|
||||||
|
s.conditions = conditions
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddCondition adds a condition to the rollover decision.
|
||||||
|
func (s *IndicesRolloverService) AddCondition(name string, value interface{}) *IndicesRolloverService {
|
||||||
|
s.conditions[name] = value
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMaxIndexAgeCondition adds a condition to set the max index age.
|
||||||
|
func (s *IndicesRolloverService) AddMaxIndexAgeCondition(time string) *IndicesRolloverService {
|
||||||
|
s.conditions["max_age"] = time
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMaxIndexDocsCondition adds a condition to set the max documents in the index.
|
||||||
|
func (s *IndicesRolloverService) AddMaxIndexDocsCondition(docs int64) *IndicesRolloverService {
|
||||||
|
s.conditions["max_docs"] = docs
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Settings adds the index settings.
|
||||||
|
func (s *IndicesRolloverService) Settings(settings map[string]interface{}) *IndicesRolloverService {
|
||||||
|
s.settings = settings
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSetting adds an index setting.
|
||||||
|
func (s *IndicesRolloverService) AddSetting(name string, value interface{}) *IndicesRolloverService {
|
||||||
|
s.settings[name] = value
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mappings adds the index mappings.
|
||||||
|
func (s *IndicesRolloverService) Mappings(mappings map[string]interface{}) *IndicesRolloverService {
|
||||||
|
s.mappings = mappings
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMapping adds a mapping for the given type.
|
||||||
|
func (s *IndicesRolloverService) AddMapping(typ string, mapping interface{}) *IndicesRolloverService {
|
||||||
|
s.mappings[typ] = mapping
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyJson sets the conditions that needs to be met for executing rollover,
|
||||||
|
// specified as a serializable JSON instance which is sent as the body of
|
||||||
|
// the request.
|
||||||
|
func (s *IndicesRolloverService) BodyJson(body interface{}) *IndicesRolloverService {
|
||||||
|
s.bodyJson = body
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyString sets the conditions that needs to be met for executing rollover,
|
||||||
|
// specified as a string which is sent as the body of the request.
|
||||||
|
func (s *IndicesRolloverService) BodyString(body string) *IndicesRolloverService {
|
||||||
|
s.bodyString = body
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// getBody returns the body of the request, if not explicitly set via
|
||||||
|
// BodyJson or BodyString.
|
||||||
|
func (s *IndicesRolloverService) getBody() interface{} {
|
||||||
|
body := make(map[string]interface{})
|
||||||
|
if len(s.conditions) > 0 {
|
||||||
|
body["conditions"] = s.conditions
|
||||||
|
}
|
||||||
|
if len(s.settings) > 0 {
|
||||||
|
body["settings"] = s.settings
|
||||||
|
}
|
||||||
|
if len(s.mappings) > 0 {
|
||||||
|
body["mappings"] = s.mappings
|
||||||
|
}
|
||||||
|
return body
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *IndicesRolloverService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
var err error
|
||||||
|
var path string
|
||||||
|
if s.newIndex != "" {
|
||||||
|
path, err = uritemplates.Expand("/{alias}/_rollover/{new_index}", map[string]string{
|
||||||
|
"alias": s.alias,
|
||||||
|
"new_index": s.newIndex,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
path, err = uritemplates.Expand("/{alias}/_rollover", map[string]string{
|
||||||
|
"alias": s.alias,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if s.pretty {
|
||||||
|
params.Set("pretty", "1")
|
||||||
|
}
|
||||||
|
if s.dryRun {
|
||||||
|
params.Set("dry_run", "1")
|
||||||
|
}
|
||||||
|
if s.masterTimeout != "" {
|
||||||
|
params.Set("master_timeout", s.masterTimeout)
|
||||||
|
}
|
||||||
|
if s.timeout != "" {
|
||||||
|
params.Set("timeout", s.timeout)
|
||||||
|
}
|
||||||
|
if s.waitForActiveShards != "" {
|
||||||
|
params.Set("wait_for_active_shards", s.waitForActiveShards)
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *IndicesRolloverService) Validate() error {
|
||||||
|
var invalid []string
|
||||||
|
if s.alias == "" {
|
||||||
|
invalid = append(invalid, "Alias")
|
||||||
|
}
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
return fmt.Errorf("missing required fields: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *IndicesRolloverService) Do(ctx context.Context) (*IndicesRolloverResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup HTTP request body
|
||||||
|
var body interface{}
|
||||||
|
if s.bodyJson != nil {
|
||||||
|
body = s.bodyJson
|
||||||
|
} else if s.bodyString != "" {
|
||||||
|
body = s.bodyString
|
||||||
|
} else {
|
||||||
|
body = s.getBody()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(IndicesRolloverResponse)
|
||||||
|
if err := json.Unmarshal(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndicesRolloverResponse is the response of IndicesRolloverService.Do.
|
||||||
|
type IndicesRolloverResponse struct {
|
||||||
|
OldIndex string `json:"old_index"`
|
||||||
|
NewIndex string `json:"new_index"`
|
||||||
|
RolledOver bool `json:"rolled_over"`
|
||||||
|
DryRun bool `json:"dry_run"`
|
||||||
|
Acknowledged bool `json:"acknowledged"`
|
||||||
|
ShardsAcknowledged bool `json:"shards_acknowledged"`
|
||||||
|
Conditions map[string]bool `json:"conditions"`
|
||||||
|
}
|
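Illustrative sketch, not part of the commit: rolling an alias over to a new index with the IndicesRolloverService added above; the alias name and the age/document thresholds are example values only.

package example

import (
	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

// rolloverLogs rolls the "logs-write" alias over to a new index once the current
// index is 7 days old or holds 1,000,000 documents (example thresholds).
func rolloverLogs(client *elastic.Client) (*elastic.IndicesRolloverResponse, error) {
	return elastic.NewIndicesRolloverService(client).
		Alias("logs-write").
		AddMaxIndexAgeCondition("7d").
		AddMaxIndexDocsCondition(1000000).
		Do(context.Background())
}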
174
vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go
generated
vendored
Normal file
174
vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
|
|||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
|
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IndicesShrinkService allows you to shrink an existing index into a
|
||||||
|
// new index with fewer primary shards.
|
||||||
|
//
|
||||||
|
// For further details, see
|
||||||
|
// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-shrink-index.html.
|
||||||
|
type IndicesShrinkService struct {
|
||||||
|
client *Client
|
||||||
|
pretty bool
|
||||||
|
source string
|
||||||
|
target string
|
||||||
|
masterTimeout string
|
||||||
|
timeout string
|
||||||
|
waitForActiveShards string
|
||||||
|
bodyJson interface{}
|
||||||
|
bodyString string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesShrinkService creates a new IndicesShrinkService.
|
||||||
|
func NewIndicesShrinkService(client *Client) *IndicesShrinkService {
|
||||||
|
return &IndicesShrinkService{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Source is the name of the source index to shrink.
|
||||||
|
func (s *IndicesShrinkService) Source(source string) *IndicesShrinkService {
|
||||||
|
s.source = source
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Target is the name of the target index to shrink into.
|
||||||
|
func (s *IndicesShrinkService) Target(target string) *IndicesShrinkService {
    s.target = target
    return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesShrinkService) MasterTimeout(masterTimeout string) *IndicesShrinkService {
    s.masterTimeout = masterTimeout
    return s
}

// Timeout is an explicit operation timeout.
func (s *IndicesShrinkService) Timeout(timeout string) *IndicesShrinkService {
    s.timeout = timeout
    return s
}

// WaitForActiveShards sets the number of active shards to wait for on
// the shrunken index before the operation returns.
func (s *IndicesShrinkService) WaitForActiveShards(waitForActiveShards string) *IndicesShrinkService {
    s.waitForActiveShards = waitForActiveShards
    return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesShrinkService) Pretty(pretty bool) *IndicesShrinkService {
    s.pretty = pretty
    return s
}

// BodyJson is the configuration for the target index (`settings` and `aliases`)
// defined as a JSON-serializable instance to be sent as the request body.
func (s *IndicesShrinkService) BodyJson(body interface{}) *IndicesShrinkService {
    s.bodyJson = body
    return s
}

// BodyString is the configuration for the target index (`settings` and `aliases`)
// defined as a string to send as the request body.
func (s *IndicesShrinkService) BodyString(body string) *IndicesShrinkService {
    s.bodyString = body
    return s
}

// buildURL builds the URL for the operation.
func (s *IndicesShrinkService) buildURL() (string, url.Values, error) {
    // Build URL
    path, err := uritemplates.Expand("/{source}/_shrink/{target}", map[string]string{
        "source": s.source,
        "target": s.target,
    })
    if err != nil {
        return "", url.Values{}, err
    }

    // Add query string parameters
    params := url.Values{}
    if s.pretty {
        params.Set("pretty", "1")
    }
    if s.masterTimeout != "" {
        params.Set("master_timeout", s.masterTimeout)
    }
    if s.timeout != "" {
        params.Set("timeout", s.timeout)
    }
    if s.waitForActiveShards != "" {
        params.Set("wait_for_active_shards", s.waitForActiveShards)
    }
    return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesShrinkService) Validate() error {
    var invalid []string
    if s.source == "" {
        invalid = append(invalid, "Source")
    }
    if s.target == "" {
        invalid = append(invalid, "Target")
    }
    if len(invalid) > 0 {
        return fmt.Errorf("missing required fields: %v", invalid)
    }
    return nil
}

// Do executes the operation.
func (s *IndicesShrinkService) Do(ctx context.Context) (*IndicesShrinkResponse, error) {
    // Check pre-conditions
    if err := s.Validate(); err != nil {
        return nil, err
    }

    // Get URL for request
    path, params, err := s.buildURL()
    if err != nil {
        return nil, err
    }

    // Setup HTTP request body
    var body interface{}
    if s.bodyJson != nil {
        body = s.bodyJson
    } else if s.bodyString != "" {
        body = s.bodyString
    }

    // Get HTTP response
    res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
    if err != nil {
        return nil, err
    }

    // Return operation response
    ret := new(IndicesShrinkResponse)
    if err := json.Unmarshal(res.Body, ret); err != nil {
        return nil, err
    }
    return ret, nil
}

// IndicesShrinkResponse is the response of IndicesShrinkService.Do.
type IndicesShrinkResponse struct {
    Acknowledged       bool `json:"acknowledged"`
    ShardsAcknowledged bool `json:"shards_acknowledged"`
}
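For readers of this patch, a minimal usage sketch of the shrink service above. It is not part of the diff: it assumes the usual imports ("golang.org/x/net/context", "gopkg.in/olivere/elastic.v5"), a NewIndicesShrinkService constructor and Source setter defined earlier in this file, and hypothetical index names.

func shrinkExample(ctx context.Context, client *elastic.Client) error {
    // Hypothetical index names; the Shrink API requires the source index
    // to be read-only before shrinking.
    resp, err := elastic.NewIndicesShrinkService(client).
        Source("logs-2017").
        Target("logs-2017-shrunk").
        WaitForActiveShards("1").
        BodyJson(map[string]interface{}{
            "settings": map[string]interface{}{"index.number_of_shards": 1},
        }).
        Do(ctx)
    if err != nil {
        return err
    }
    _ = resp.Acknowledged
    return nil
}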
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -9,11 +9,13 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // IndicesStatsService provides stats on various metrics of one or more
-// indices. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-stats.html.
+// indices. See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-stats.html.
 type IndicesStatsService struct {
 	client *Client
 	pretty bool
@@ -166,7 +168,7 @@ func (s *IndicesStatsService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) {
+func (s *IndicesStatsService) Do(ctx context.Context) (*IndicesStatsResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -179,7 +181,7 @@ func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
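The change above is the pattern applied throughout the v5 client: every Do method now takes a context.Context, so callers decide about timeouts and cancellation. A minimal sketch of caller code after the upgrade, not part of the diff; it assumes the "time" import, the client's IndexStats helper, and an example index name.

func statsExample(client *elastic.Client) error {
    // Callers now own the timeout via the context instead of a client-wide setting.
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    // v3: client.IndexStats("testindex").Do()
    stats, err := client.IndexStats("testindex").Do(ctx)
    if err != nil {
        return err // a context deadline now surfaces here as an error
    }
    _ = stats
    return nil
}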
124 vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go (generated, vendored, new file)
@@ -0,0 +1,124 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
    "encoding/json"
    "fmt"
    "net/url"

    "golang.org/x/net/context"

    "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IngestDeletePipelineService deletes pipelines by ID.
// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/delete-pipeline-api.html.
type IngestDeletePipelineService struct {
    client        *Client
    pretty        bool
    id            string
    masterTimeout string
    timeout       string
}

// NewIngestDeletePipelineService creates a new IngestDeletePipelineService.
func NewIngestDeletePipelineService(client *Client) *IngestDeletePipelineService {
    return &IngestDeletePipelineService{
        client: client,
    }
}

// Id is documented as: Pipeline ID.
func (s *IngestDeletePipelineService) Id(id string) *IngestDeletePipelineService {
    s.id = id
    return s
}

// MasterTimeout is documented as: Explicit operation timeout for connection to master node.
func (s *IngestDeletePipelineService) MasterTimeout(masterTimeout string) *IngestDeletePipelineService {
    s.masterTimeout = masterTimeout
    return s
}

// Timeout is documented as: Explicit operation timeout.
func (s *IngestDeletePipelineService) Timeout(timeout string) *IngestDeletePipelineService {
    s.timeout = timeout
    return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IngestDeletePipelineService) Pretty(pretty bool) *IngestDeletePipelineService {
    s.pretty = pretty
    return s
}

// buildURL builds the URL for the operation.
func (s *IngestDeletePipelineService) buildURL() (string, url.Values, error) {
    // Build URL
    path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{
        "id": s.id,
    })
    if err != nil {
        return "", url.Values{}, err
    }

    // Add query string parameters
    params := url.Values{}
    if s.pretty {
        params.Set("pretty", "1")
    }
    if s.masterTimeout != "" {
        params.Set("master_timeout", s.masterTimeout)
    }
    if s.timeout != "" {
        params.Set("timeout", s.timeout)
    }
    return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IngestDeletePipelineService) Validate() error {
    var invalid []string
    if s.id == "" {
        invalid = append(invalid, "Id")
    }
    if len(invalid) > 0 {
        return fmt.Errorf("missing required fields: %v", invalid)
    }
    return nil
}

// Do executes the operation.
func (s *IngestDeletePipelineService) Do(ctx context.Context) (*IngestDeletePipelineResponse, error) {
    // Check pre-conditions
    if err := s.Validate(); err != nil {
        return nil, err
    }

    // Get URL for request
    path, params, err := s.buildURL()
    if err != nil {
        return nil, err
    }

    // Get HTTP response
    res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
    if err != nil {
        return nil, err
    }

    // Return operation response
    ret := new(IngestDeletePipelineResponse)
    if err := json.Unmarshal(res.Body, ret); err != nil {
        return nil, err
    }
    return ret, nil
}

// IngestDeletePipelineResponse is the response of IngestDeletePipelineService.Do.
type IngestDeletePipelineResponse struct {
    Acknowledged bool `json:"acknowledged"`
}
118 vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go (generated, vendored, new file)
@@ -0,0 +1,118 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
    "encoding/json"
    "net/url"
    "strings"

    "golang.org/x/net/context"

    "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IngestGetPipelineService returns pipelines based on ID.
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/get-pipeline-api.html
// for documentation.
type IngestGetPipelineService struct {
    client        *Client
    pretty        bool
    id            []string
    masterTimeout string
}

// NewIngestGetPipelineService creates a new IngestGetPipelineService.
func NewIngestGetPipelineService(client *Client) *IngestGetPipelineService {
    return &IngestGetPipelineService{
        client: client,
    }
}

// Id is a list of pipeline ids. Wildcards supported.
func (s *IngestGetPipelineService) Id(id ...string) *IngestGetPipelineService {
    s.id = append(s.id, id...)
    return s
}

// MasterTimeout is an explicit operation timeout for connection to master node.
func (s *IngestGetPipelineService) MasterTimeout(masterTimeout string) *IngestGetPipelineService {
    s.masterTimeout = masterTimeout
    return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IngestGetPipelineService) Pretty(pretty bool) *IngestGetPipelineService {
    s.pretty = pretty
    return s
}

// buildURL builds the URL for the operation.
func (s *IngestGetPipelineService) buildURL() (string, url.Values, error) {
    var err error
    var path string

    // Build URL
    if len(s.id) > 0 {
        path, err = uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{
            "id": strings.Join(s.id, ","),
        })
    } else {
        path = "/_ingest/pipeline"
    }
    if err != nil {
        return "", url.Values{}, err
    }

    // Add query string parameters
    params := url.Values{}
    if s.pretty {
        params.Set("pretty", "1")
    }
    if s.masterTimeout != "" {
        params.Set("master_timeout", s.masterTimeout)
    }
    return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IngestGetPipelineService) Validate() error {
    return nil
}

// Do executes the operation.
func (s *IngestGetPipelineService) Do(ctx context.Context) (IngestGetPipelineResponse, error) {
    // Check pre-conditions
    if err := s.Validate(); err != nil {
        return nil, err
    }

    // Get URL for request
    path, params, err := s.buildURL()
    if err != nil {
        return nil, err
    }

    // Get HTTP response
    res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
    if err != nil {
        return nil, err
    }

    // Return operation response
    var ret IngestGetPipelineResponse
    if err := json.Unmarshal(res.Body, &ret); err != nil {
        return nil, err
    }
    return ret, nil
}

// IngestGetPipelineResponse is the response of IngestGetPipelineService.Do.
type IngestGetPipelineResponse map[string]*IngestGetPipeline

type IngestGetPipeline struct {
    ID     string                 `json:"id"`
    Config map[string]interface{} `json:"config"`
}
152 vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go (generated, vendored, new file)
@@ -0,0 +1,152 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
    "encoding/json"
    "fmt"
    "net/url"

    "golang.org/x/net/context"

    "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IngestPutPipelineService adds pipelines and updates existing pipelines in
// the cluster.
//
// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/put-pipeline-api.html.
type IngestPutPipelineService struct {
    client        *Client
    pretty        bool
    id            string
    masterTimeout string
    timeout       string
    bodyJson      interface{}
    bodyString    string
}

// NewIngestPutPipelineService creates a new IngestPutPipelineService.
func NewIngestPutPipelineService(client *Client) *IngestPutPipelineService {
    return &IngestPutPipelineService{
        client: client,
    }
}

// Id is the pipeline ID.
func (s *IngestPutPipelineService) Id(id string) *IngestPutPipelineService {
    s.id = id
    return s
}

// MasterTimeout is an explicit operation timeout for connection to master node.
func (s *IngestPutPipelineService) MasterTimeout(masterTimeout string) *IngestPutPipelineService {
    s.masterTimeout = masterTimeout
    return s
}

// Timeout specifies an explicit operation timeout.
func (s *IngestPutPipelineService) Timeout(timeout string) *IngestPutPipelineService {
    s.timeout = timeout
    return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IngestPutPipelineService) Pretty(pretty bool) *IngestPutPipelineService {
    s.pretty = pretty
    return s
}

// BodyJson is the ingest definition, defined as a JSON-serializable document.
// Use e.g. a map[string]interface{} here.
func (s *IngestPutPipelineService) BodyJson(body interface{}) *IngestPutPipelineService {
    s.bodyJson = body
    return s
}

// BodyString is the ingest definition, specified as a string.
func (s *IngestPutPipelineService) BodyString(body string) *IngestPutPipelineService {
    s.bodyString = body
    return s
}

// buildURL builds the URL for the operation.
func (s *IngestPutPipelineService) buildURL() (string, url.Values, error) {
    // Build URL
    path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{
        "id": s.id,
    })
    if err != nil {
        return "", url.Values{}, err
    }

    // Add query string parameters
    params := url.Values{}
    if s.pretty {
        params.Set("pretty", "1")
    }
    if s.masterTimeout != "" {
        params.Set("master_timeout", s.masterTimeout)
    }
    if s.timeout != "" {
        params.Set("timeout", s.timeout)
    }
    return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IngestPutPipelineService) Validate() error {
    var invalid []string
    if s.id == "" {
        invalid = append(invalid, "Id")
    }
    if s.bodyString == "" && s.bodyJson == nil {
        invalid = append(invalid, "BodyJson")
    }
    if len(invalid) > 0 {
        return fmt.Errorf("missing required fields: %v", invalid)
    }
    return nil
}

// Do executes the operation.
func (s *IngestPutPipelineService) Do(ctx context.Context) (*IngestPutPipelineResponse, error) {
    // Check pre-conditions
    if err := s.Validate(); err != nil {
        return nil, err
    }

    // Get URL for request
    path, params, err := s.buildURL()
    if err != nil {
        return nil, err
    }

    // Setup HTTP request body
    var body interface{}
    if s.bodyJson != nil {
        body = s.bodyJson
    } else {
        body = s.bodyString
    }

    // Get HTTP response
    res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
    if err != nil {
        return nil, err
    }

    // Return operation response
    ret := new(IngestPutPipelineResponse)
    if err := json.Unmarshal(res.Body, ret); err != nil {
        return nil, err
    }
    return ret, nil
}

// IngestPutPipelineResponse is the response of IngestPutPipelineService.Do.
type IngestPutPipelineResponse struct {
    Acknowledged bool `json:"acknowledged"`
}
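Taken together, the delete, get, and put services above cover pipeline management. A sketch of how they might be combined, not part of the diff; it assumes the usual imports, an existing *elastic.Client, and an example pipeline ID and body.

func pipelineExample(ctx context.Context, client *elastic.Client) error {
    // One possible pipeline body: a single rename processor.
    body := `{"description":"rename a field","processors":[{"rename":{"field":"src","target_field":"dst"}}]}`

    // Create or overwrite the pipeline.
    if _, err := elastic.NewIngestPutPipelineService(client).Id("rename-pipeline").BodyString(body).Do(ctx); err != nil {
        return err
    }

    // Read it back; the response is a map keyed by pipeline ID.
    pipelines, err := elastic.NewIngestGetPipelineService(client).Id("rename-pipeline").Do(ctx)
    if err != nil {
        return err
    }
    _ = pipelines["rename-pipeline"]

    // Remove it again.
    _, err = elastic.NewIngestDeletePipelineService(client).Id("rename-pipeline").Do(ctx)
    return err
}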
157 vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go (generated, vendored, new file)
@@ -0,0 +1,157 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
    "encoding/json"
    "fmt"
    "net/url"

    "golang.org/x/net/context"

    "gopkg.in/olivere/elastic.v5/uritemplates"
)

// IngestSimulatePipelineService executes a specific pipeline against the set of
// documents provided in the body of the request.
//
// The API is documented at
// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/simulate-pipeline-api.html.
type IngestSimulatePipelineService struct {
    client     *Client
    pretty     bool
    id         string
    verbose    *bool
    bodyJson   interface{}
    bodyString string
}

// NewIngestSimulatePipelineService creates a new IngestSimulatePipeline.
func NewIngestSimulatePipelineService(client *Client) *IngestSimulatePipelineService {
    return &IngestSimulatePipelineService{
        client: client,
    }
}

// Id specifies the pipeline ID.
func (s *IngestSimulatePipelineService) Id(id string) *IngestSimulatePipelineService {
    s.id = id
    return s
}

// Verbose mode. Display data output for each processor in executed pipeline.
func (s *IngestSimulatePipelineService) Verbose(verbose bool) *IngestSimulatePipelineService {
    s.verbose = &verbose
    return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IngestSimulatePipelineService) Pretty(pretty bool) *IngestSimulatePipelineService {
    s.pretty = pretty
    return s
}

// BodyJson is the ingest definition, defined as a JSON-serializable simulate
// definition. Use e.g. a map[string]interface{} here.
func (s *IngestSimulatePipelineService) BodyJson(body interface{}) *IngestSimulatePipelineService {
    s.bodyJson = body
    return s
}

// BodyString is the simulate definition, defined as a string.
func (s *IngestSimulatePipelineService) BodyString(body string) *IngestSimulatePipelineService {
    s.bodyString = body
    return s
}

// buildURL builds the URL for the operation.
func (s *IngestSimulatePipelineService) buildURL() (string, url.Values, error) {
    var err error
    var path string

    // Build URL
    if s.id != "" {
        path, err = uritemplates.Expand("/_ingest/pipeline/{id}/_simulate", map[string]string{
            "id": s.id,
        })
    } else {
        path = "/_ingest/pipeline/_simulate"
    }
    if err != nil {
        return "", url.Values{}, err
    }

    // Add query string parameters
    params := url.Values{}
    if s.pretty {
        params.Set("pretty", "1")
    }
    if s.verbose != nil {
        params.Set("verbose", fmt.Sprintf("%v", *s.verbose))
    }
    return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IngestSimulatePipelineService) Validate() error {
    var invalid []string
    if s.bodyString == "" && s.bodyJson == nil {
        invalid = append(invalid, "BodyJson")
    }
    if len(invalid) > 0 {
        return fmt.Errorf("missing required fields: %v", invalid)
    }
    return nil
}

// Do executes the operation.
func (s *IngestSimulatePipelineService) Do(ctx context.Context) (*IngestSimulatePipelineResponse, error) {
    // Check pre-conditions
    if err := s.Validate(); err != nil {
        return nil, err
    }

    // Get URL for request
    path, params, err := s.buildURL()
    if err != nil {
        return nil, err
    }

    // Setup HTTP request body
    var body interface{}
    if s.bodyJson != nil {
        body = s.bodyJson
    } else {
        body = s.bodyString
    }

    // Get HTTP response
    res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
    if err != nil {
        return nil, err
    }

    // Return operation response
    ret := new(IngestSimulatePipelineResponse)
    if err := json.Unmarshal(res.Body, ret); err != nil {
        return nil, err
    }
    return ret, nil
}

// IngestSimulatePipelineResponse is the response of IngestSimulatePipeline.Do.
type IngestSimulatePipelineResponse struct {
    Docs []*IngestSimulateDocumentResult `json:"docs"`
}

type IngestSimulateDocumentResult struct {
    Doc              map[string]interface{}           `json:"doc"`
    ProcessorResults []*IngestSimulateProcessorResult `json:"processor_results"`
}

type IngestSimulateProcessorResult struct {
    ProcessorTag string                 `json:"tag"`
    Doc          map[string]interface{} `json:"doc"`
}
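A sketch of simulating a pipeline without storing it, not part of the diff; it assumes the usual imports and an existing *elastic.Client, and the "uppercase" processor and field names are only examples.

func simulateExample(ctx context.Context, client *elastic.Client) error {
    simulation := map[string]interface{}{
        "pipeline": map[string]interface{}{
            "description": "uppercase a field",
            "processors": []interface{}{
                map[string]interface{}{"uppercase": map[string]interface{}{"field": "msg"}},
            },
        },
        "docs": []interface{}{
            map[string]interface{}{"_source": map[string]interface{}{"msg": "hello"}},
        },
    }
    resp, err := elastic.NewIngestSimulatePipelineService(client).
        Verbose(true). // request per-processor output
        BodyJson(simulation).
        Do(ctx)
    if err != nil {
        return err
    }
    for _, doc := range resp.Docs {
        _ = doc.ProcessorResults // populated in verbose mode
    }
    return nil
}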
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -7,7 +7,7 @@ package elastic
 // InnerHit implements a simple join for parent/child, nested, and even
 // top-level documents in Elasticsearch.
 // It is an experimental feature for Elasticsearch versions 1.5 (or greater).
-// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-inner-hits.html
 // for documentation.
 //
 // See the tests for SearchSource, HasChildFilter, HasChildQuery,
@@ -66,18 +66,18 @@ func (hit *InnerHit) Version(version bool) *InnerHit {
 	return hit
 }
 
-func (hit *InnerHit) Field(fieldName string) *InnerHit {
-	hit.source.Field(fieldName)
+func (hit *InnerHit) StoredField(storedFieldName string) *InnerHit {
+	hit.source.StoredField(storedFieldName)
 	return hit
 }
 
-func (hit *InnerHit) Fields(fieldNames ...string) *InnerHit {
-	hit.source.Fields(fieldNames...)
+func (hit *InnerHit) StoredFields(storedFieldNames ...string) *InnerHit {
+	hit.source.StoredFields(storedFieldNames...)
 	return hit
 }
 
-func (hit *InnerHit) NoFields() *InnerHit {
-	hit.source.NoFields()
+func (hit *InnerHit) NoStoredFields() *InnerHit {
+	hit.source.NoStoredFields()
 	return hit
 }
 
@@ -91,13 +91,13 @@ func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext)
 	return hit
 }
 
-func (hit *InnerHit) FieldDataFields(fieldDataFields ...string) *InnerHit {
-	hit.source.FieldDataFields(fieldDataFields...)
+func (hit *InnerHit) DocvalueFields(docvalueFields ...string) *InnerHit {
+	hit.source.DocvalueFields(docvalueFields...)
 	return hit
 }
 
-func (hit *InnerHit) FieldDataField(fieldDataField string) *InnerHit {
-	hit.source.FieldDataField(fieldDataField)
+func (hit *InnerHit) DocvalueField(docvalueField string) *InnerHit {
+	hit.source.DocvalueField(docvalueField)
 	return hit
 }
 
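At call sites the renames above look roughly like this; a sketch only, not part of the diff, and it assumes InnerHit's constructor and Name setter, which this patch does not touch.

func innerHitExample() *elastic.InnerHit {
    return elastic.NewInnerHit().
        Name("comments").
        StoredFields("title", "author"). // v3: Fields(...)
        DocvalueFields("published_on")   // v3: FieldDataFields(...)
}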
@@ -1,4 +1,4 @@
-// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
111 vendor/gopkg.in/olivere/elastic.v3/mget.go → vendor/gopkg.in/olivere/elastic.v5/mget.go (generated, vendored)
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -7,6 +7,9 @@ package elastic
 import (
 	"fmt"
 	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
 )
 
 // MgetService allows to get multiple documents based on an index,
@@ -14,38 +17,56 @@ import (
 // a docs array with all the fetched documents, each element similar
 // in structure to a document provided by the Get API.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-multi-get.html
 // for details.
 type MgetService struct {
 	client       *Client
 	pretty       bool
 	preference   string
 	realtime     *bool
-	refresh      *bool
+	refresh      string
+	routing      string
+	storedFields []string
 	items        []*MultiGetItem
 }
 
+// NewMgetService initializes a new Multi GET API request call.
 func NewMgetService(client *Client) *MgetService {
 	builder := &MgetService{
 		client: client,
-		items:  make([]*MultiGetItem, 0),
 	}
 	return builder
 }
 
-func (b *MgetService) Preference(preference string) *MgetService {
-	b.preference = preference
-	return b
+// Preference specifies the node or shard the operation should be performed
+// on (default: random).
+func (s *MgetService) Preference(preference string) *MgetService {
+	s.preference = preference
+	return s
 }
 
-func (b *MgetService) Refresh(refresh bool) *MgetService {
-	b.refresh = &refresh
-	return b
+// Refresh the shard containing the document before performing the operation.
+func (s *MgetService) Refresh(refresh string) *MgetService {
+	s.refresh = refresh
+	return s
 }
 
-func (b *MgetService) Realtime(realtime bool) *MgetService {
-	b.realtime = &realtime
-	return b
+// Realtime specifies whether to perform the operation in realtime or search mode.
+func (s *MgetService) Realtime(realtime bool) *MgetService {
+	s.realtime = &realtime
+	return s
+}
+
+// Routing is the specific routing value.
+func (s *MgetService) Routing(routing string) *MgetService {
+	s.routing = routing
+	return s
+}
+
+// StoredFields is a list of fields to return in the response.
+func (s *MgetService) StoredFields(storedFields ...string) *MgetService {
	s.storedFields = append(s.storedFields, storedFields...)
+	return s
 }
 
 // Pretty indicates that the JSON response be indented and human readable.
@@ -54,15 +75,17 @@ func (s *MgetService) Pretty(pretty bool) *MgetService {
 	return s
 }
 
-func (b *MgetService) Add(items ...*MultiGetItem) *MgetService {
-	b.items = append(b.items, items...)
-	return b
+// Add an item to the request.
+func (s *MgetService) Add(items ...*MultiGetItem) *MgetService {
+	s.items = append(s.items, items...)
+	return s
 }
 
-func (b *MgetService) Source() (interface{}, error) {
+// Source returns the request body, which will be serialized into JSON.
+func (s *MgetService) Source() (interface{}, error) {
 	source := make(map[string]interface{})
-	items := make([]interface{}, len(b.items))
-	for i, item := range b.items {
+	items := make([]interface{}, len(s.items))
+	for i, item := range s.items {
 		src, err := item.Source()
 		if err != nil {
 			return nil, err
@@ -73,36 +96,43 @@ func (b *MgetService) Source() (interface{}, error) {
 	return source, nil
 }
 
-func (b *MgetService) Do() (*MgetResponse, error) {
+// Do executes the request.
+func (s *MgetService) Do(ctx context.Context) (*MgetResponse, error) {
 	// Build url
 	path := "/_mget"
 
 	params := make(url.Values)
-	if b.realtime != nil {
-		params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+	if s.realtime != nil {
+		params.Add("realtime", fmt.Sprintf("%v", *s.realtime))
 	}
-	if b.preference != "" {
-		params.Add("preference", b.preference)
+	if s.preference != "" {
+		params.Add("preference", s.preference)
 	}
-	if b.refresh != nil {
-		params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+	if s.refresh != "" {
+		params.Add("refresh", s.refresh)
+	}
+	if s.routing != "" {
+		params.Set("routing", s.routing)
+	}
+	if len(s.storedFields) > 0 {
+		params.Set("stored_fields", strings.Join(s.storedFields, ","))
 	}
 
 	// Set body
-	body, err := b.Source()
+	body, err := s.Source()
 	if err != nil {
 		return nil, err
 	}
 
 	// Get response
-	res, err := b.client.PerformRequest("GET", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
 	if err != nil {
 		return nil, err
 	}
 
 	// Return result
 	ret := new(MgetResponse)
-	if err := b.client.decoder.Decode(res.Body, ret); err != nil {
+	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
 		return nil, err
 	}
 	return ret, nil
@@ -116,41 +146,44 @@ type MultiGetItem struct {
 	typ          string
 	id           string
 	routing      string
-	fields       []string
+	storedFields []string
 	version      *int64 // see org.elasticsearch.common.lucene.uid.Versions
 	versionType  string // see org.elasticsearch.index.VersionType
 	fsc          *FetchSourceContext
 }
 
+// NewMultiGetItem initializes a new, single item for a Multi GET request.
 func NewMultiGetItem() *MultiGetItem {
 	return &MultiGetItem{}
 }
 
+// Index specifies the index name.
 func (item *MultiGetItem) Index(index string) *MultiGetItem {
 	item.index = index
 	return item
 }
 
+// Type specifies the type name.
 func (item *MultiGetItem) Type(typ string) *MultiGetItem {
 	item.typ = typ
 	return item
 }
 
+// Id specifies the identifier of the document.
 func (item *MultiGetItem) Id(id string) *MultiGetItem {
 	item.id = id
 	return item
 }
 
+// Routing is the specific routing value.
 func (item *MultiGetItem) Routing(routing string) *MultiGetItem {
 	item.routing = routing
 	return item
 }
 
-func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem {
-	if item.fields == nil {
-		item.fields = make([]string, 0)
-	}
-	item.fields = append(item.fields, fields...)
+// StoredFields is a list of fields to return in the response.
+func (item *MultiGetItem) StoredFields(storedFields ...string) *MultiGetItem {
+	item.storedFields = append(item.storedFields, storedFields...)
 	return item
 }
 
@@ -170,6 +203,7 @@ func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
 	return item
 }
 
+// FetchSource allows to specify source filtering.
 func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem {
 	item.fsc = fetchSourceContext
 	return item
@@ -195,12 +229,12 @@ func (item *MultiGetItem) Source() (interface{}, error) {
 		}
 		source["_source"] = src
 	}
-	if item.fields != nil {
-		source["fields"] = item.fields
-	}
 	if item.routing != "" {
 		source["_routing"] = item.routing
 	}
+	if len(item.storedFields) > 0 {
+		source["stored_fields"] = strings.Join(item.storedFields, ",")
+	}
 	if item.version != nil {
 		source["version"] = fmt.Sprintf("%d", *item.version)
 	}
@@ -213,6 +247,7 @@ func (item *MultiGetItem) Source() (interface{}, error) {
 
 // -- Result of a Multi Get request.
 
+// MgetResponse is the outcome of a Multi GET API request.
 type MgetResponse struct {
 	Docs []*GetResult `json:"docs,omitempty"`
 }
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -9,10 +9,11 @@ import (
 	"fmt"
 	"net/url"
 	"strings"
 
+	"golang.org/x/net/context"
 )
 
 // MultiSearch executes one or more searches in one roundtrip.
-// See http://www.elasticsearch.org/guide/reference/api/multi-search/
 type MultiSearchService struct {
 	client   *Client
 	requests []*SearchRequest
@@ -46,7 +47,7 @@ func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService {
 	return s
 }
 
-func (s *MultiSearchService) Do() (*MultiSearchResult, error) {
+func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) {
 	// Build url
 	path := "/_msearch"
 
@@ -57,7 +58,7 @@ func (s *MultiSearchService) Do() (*MultiSearchResult, error) {
 	}
 
 	// Set body
-	lines := make([]string, 0)
+	var lines []string
 	for _, sr := range s.requests {
 		// Set default indices if not specified in the request
 		if !sr.HasIndices() && len(s.indices) > 0 {
@@ -78,7 +79,7 @@ func (s *MultiSearchService) Do() (*MultiSearchResult, error) {
 	body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n
 
 	// Get response
-	res, err := s.client.PerformRequest("GET", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
 	if err != nil {
 		return nil, err
 	}
@@ -10,14 +10,16 @@ import (
 	"net/url"
 	"strings"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // MultiTermvectorService returns information and statistics on terms in the
 // fields of a particular document. The document could be stored in the
 // index or artificially provided by the user.
 //
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-multi-termvectors.html
 // for documentation.
 type MultiTermvectorService struct {
 	client *Client
@@ -254,7 +256,7 @@ func (s *MultiTermvectorService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *MultiTermvectorService) Do() (*MultiTermvectorResponse, error) {
+func (s *MultiTermvectorService) Do(ctx context.Context) (*MultiTermvectorResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -277,7 +279,7 @@ func (s *MultiTermvectorService) Do() (*MultiTermvectorResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
 	if err != nil {
 		return nil, err
 	}
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
 
@@ -6,25 +6,18 @@ package elastic
 
 import (
 	"fmt"
-	"log"
 	"net/url"
 	"strings"
 	"time"
 
-	"gopkg.in/olivere/elastic.v3/uritemplates"
-)
-
-var (
-	_ = fmt.Print
-	_ = log.Print
-	_ = strings.Index
-	_ = uritemplates.Expand
-	_ = url.Parse
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
 )
 
 // NodesInfoService allows to retrieve one or more or all of the
 // cluster nodes information.
-// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html.
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-nodes-info.html.
 type NodesInfoService struct {
 	client *Client
 	pretty bool
@@ -108,7 +101,7 @@ func (s *NodesInfoService) Validate() error {
 }
 
 // Do executes the operation.
-func (s *NodesInfoService) Do() (*NodesInfoResponse, error) {
+func (s *NodesInfoService) Do(ctx context.Context) (*NodesInfoResponse, error) {
 	// Check pre-conditions
 	if err := s.Validate(); err != nil {
 		return nil, err
@@ -121,7 +114,7 @@ func (s *NodesInfoService) Do() (*NodesInfoResponse, error) {
 	}
 
 	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, nil)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -171,7 +164,7 @@ type NodesInfoNode struct {
 	Process *NodesInfoNodeProcess `json:"process"`
 
 	// JVM information, e.g. VM version.
-	JVM *NodesInfoNodeProcess `json:"jvm"`
+	JVM *NodesInfoNodeJVM `json:"jvm"`
 
 	// ThreadPool information.
 	ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
707 vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go (generated, vendored, new file)
@ -0,0 +1,707 @@
|
|||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
|
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NodesStatsService returns node statistics.
|
||||||
|
// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-nodes-stats.html
|
||||||
|
// for details.
|
||||||
|
type NodesStatsService struct {
|
||||||
|
client *Client
|
||||||
|
pretty bool
|
||||||
|
metric []string
|
||||||
|
indexMetric []string
|
||||||
|
nodeId []string
|
||||||
|
completionFields []string
|
||||||
|
fielddataFields []string
|
||||||
|
fields []string
|
||||||
|
groups *bool
|
||||||
|
human *bool
|
||||||
|
level string
|
||||||
|
timeout string
|
||||||
|
types []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNodesStatsService creates a new NodesStatsService.
|
||||||
|
func NewNodesStatsService(client *Client) *NodesStatsService {
|
||||||
|
return &NodesStatsService{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Metric limits the information returned to the specified metrics.
|
||||||
|
func (s *NodesStatsService) Metric(metric ...string) *NodesStatsService {
|
||||||
|
s.metric = append(s.metric, metric...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndexMetric limits the information returned for `indices` metric
|
||||||
|
// to the specific index metrics. Isn't used if `indices` (or `all`)
|
||||||
|
// metric isn't specified..
|
||||||
|
func (s *NodesStatsService) IndexMetric(indexMetric ...string) *NodesStatsService {
|
||||||
|
s.indexMetric = append(s.indexMetric, indexMetric...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeId is a list of node IDs or names to limit the returned information;
|
||||||
|
// use `_local` to return information from the node you're connecting to,
|
||||||
|
// leave empty to get information from all nodes.
|
||||||
|
func (s *NodesStatsService) NodeId(nodeId ...string) *NodesStatsService {
|
||||||
|
s.nodeId = append(s.nodeId, nodeId...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// CompletionFields is a list of fields for `fielddata` and `suggest`
|
||||||
|
// index metric (supports wildcards).
|
||||||
|
func (s *NodesStatsService) CompletionFields(completionFields ...string) *NodesStatsService {
|
||||||
|
s.completionFields = append(s.completionFields, completionFields...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
|
||||||
|
func (s *NodesStatsService) FielddataFields(fielddataFields ...string) *NodesStatsService {
|
||||||
|
s.fielddataFields = append(s.fielddataFields, fielddataFields...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fields is a list of fields for `fielddata` and `completion` index metric (supports wildcards).
|
||||||
|
func (s *NodesStatsService) Fields(fields ...string) *NodesStatsService {
|
||||||
|
s.fields = append(s.fields, fields...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Groups is a list of search groups for `search` index metric.
|
||||||
|
func (s *NodesStatsService) Groups(groups bool) *NodesStatsService {
|
||||||
|
s.groups = &groups
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human indicates whether to return time and byte values in human-readable format.
|
||||||
|
func (s *NodesStatsService) Human(human bool) *NodesStatsService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Level specifies whether to return indices stats aggregated at node, index or shard level.
|
||||||
|
func (s *NodesStatsService) Level(level string) *NodesStatsService {
|
||||||
|
s.level = level
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timeout specifies an explicit operation timeout.
|
||||||
|
func (s *NodesStatsService) Timeout(timeout string) *NodesStatsService {
|
||||||
|
s.timeout = timeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Types a list of document types for the `indexing` index metric.
|
||||||
|
func (s *NodesStatsService) Types(types ...string) *NodesStatsService {
|
||||||
|
s.types = append(s.types, types...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty indicates that the JSON response be indented and human readable.
|
||||||
|
func (s *NodesStatsService) Pretty(pretty bool) *NodesStatsService {
|
||||||
|
s.pretty = pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *NodesStatsService) buildURL() (string, url.Values, error) {
|
||||||
|
var err error
|
||||||
|
var path string
|
||||||
|
|
||||||
|
if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 {
|
||||||
|
path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}/{index_metric}", map[string]string{
|
||||||
|
"index_metric": strings.Join(s.indexMetric, ","),
|
||||||
|
"node_id": strings.Join(s.nodeId, ","),
|
||||||
|
"metric": strings.Join(s.metric, ","),
|
||||||
|
})
|
||||||
|
} else if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 {
|
||||||
|
path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}", map[string]string{
|
||||||
|
"node_id": strings.Join(s.nodeId, ","),
|
||||||
|
"metric": strings.Join(s.metric, ","),
|
||||||
|
})
|
||||||
|
} else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 {
|
||||||
|
path, err = uritemplates.Expand("/_nodes/{node_id}/stats/_all/{index_metric}", map[string]string{
|
||||||
|
"index_metric": strings.Join(s.indexMetric, ","),
|
||||||
|
"node_id": strings.Join(s.nodeId, ","),
|
||||||
|
})
|
||||||
|
} else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) == 0 {
|
||||||
|
path, err = uritemplates.Expand("/_nodes/{node_id}/stats", map[string]string{
|
||||||
|
"node_id": strings.Join(s.nodeId, ","),
|
||||||
|
})
|
||||||
|
} else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 {
|
||||||
|
path, err = uritemplates.Expand("/_nodes/stats/{metric}/{index_metric}", map[string]string{
|
||||||
|
"index_metric": strings.Join(s.indexMetric, ","),
|
||||||
|
"metric": strings.Join(s.metric, ","),
|
||||||
|
})
|
||||||
|
} else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 {
|
||||||
|
path, err = uritemplates.Expand("/_nodes/stats/{metric}", map[string]string{
|
||||||
|
"metric": strings.Join(s.metric, ","),
|
||||||
|
})
|
||||||
|
} else if len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 {
|
||||||
|
path, err = uritemplates.Expand("/_nodes/stats/_all/{index_metric}", map[string]string{
|
||||||
|
"index_metric": strings.Join(s.indexMetric, ","),
|
||||||
|
})
|
||||||
|
} else { // if len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) == 0 {
|
||||||
|
path = "/_nodes/stats"
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if s.pretty {
|
||||||
|
params.Set("pretty", "1")
|
||||||
|
}
|
||||||
|
if len(s.completionFields) > 0 {
|
||||||
|
params.Set("completion_fields", strings.Join(s.completionFields, ","))
|
||||||
|
}
|
||||||
|
if len(s.fielddataFields) > 0 {
|
||||||
|
params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
|
||||||
|
}
|
||||||
|
if len(s.fields) > 0 {
|
||||||
|
params.Set("fields", strings.Join(s.fields, ","))
|
||||||
|
}
|
||||||
|
if s.groups != nil {
|
||||||
|
params.Set("groups", fmt.Sprintf("%v", *s.groups))
|
||||||
|
}
|
||||||
|
if s.human != nil {
|
||||||
|
params.Set("human", fmt.Sprintf("%v", *s.human))
|
||||||
|
}
|
||||||
|
if s.level != "" {
|
||||||
|
params.Set("level", s.level)
|
||||||
|
}
|
||||||
|
if s.timeout != "" {
|
||||||
|
params.Set("timeout", s.timeout)
|
||||||
|
}
|
||||||
|
if len(s.types) > 0 {
|
||||||
|
params.Set("types", strings.Join(s.types, ","))
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *NodesStatsService) Validate() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *NodesStatsService) Do(ctx context.Context) (*NodesStatsResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(NodesStatsResponse)
|
||||||
|
if err := json.Unmarshal(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}

// NodesStatsResponse is the response of NodesStatsService.Do.
type NodesStatsResponse struct {
ClusterName string `json:"cluster_name"`
Nodes map[string]*NodesStatsNode `json:"nodes"`
}

type NodesStatsNode struct {
// Timestamp when these stats were gathered.
Timestamp int64 `json:"timestamp"`
// Name of the node, e.g. "Mister Fear"
Name string `json:"name"`
// TransportAddress, e.g. "127.0.0.1:9300"
TransportAddress string `json:"transport_address"`
// Host is the host name, e.g. "macbookair"
Host string `json:"host"`
// IP is an IP address, e.g. "192.168.1.2"
IP string `json:"ip"`
// Roles is a list of the roles of the node, e.g. master, data, ingest.
Roles []string `json:"roles"`

// Attributes of the node.
Attributes map[string]interface{} `json:"attributes"`

// Indices returns index information.
Indices *NodesStatsIndex `json:"indices"`

// OS information, e.g. CPU and memory.
OS *NodesStatsNodeOS `json:"os"`

// Process information, e.g. max file descriptors.
Process *NodesStatsNodeProcess `json:"process"`

// JVM information, e.g. VM version.
JVM *NodesStatsNodeJVM `json:"jvm"`

// ThreadPool information.
ThreadPool map[string]*NodesStatsNodeThreadPool `json:"thread_pool"`

// FS returns information about the filesystem.
FS *NodesStatsNodeFS `json:"fs"`

// Network information.
Transport *NodesStatsNodeTransport `json:"transport"`

// HTTP information.
HTTP *NodesStatsNodeHTTP `json:"http"`

// Breaker contains information about circuit breakers.
Breaker map[string]*NodesStatsBreaker `json:"breaker"`

// ScriptStats information.
ScriptStats *NodesStatsScriptStats `json:"script"`

// Discovery information.
Discovery *NodesStatsDiscovery `json:"discovery"`

// Ingest information.
Ingest *NodesStatsIngest `json:"ingest"`
}

type NodesStatsIndex struct {
Docs *NodesStatsDocsStats `json:"docs"`
Store *NodesStatsStoreStats `json:"store"`
Indexing *NodesStatsIndexingStats `json:"indexing"`
Get *NodesStatsGetStats `json:"get"`
Search *NodesStatsSearchStats `json:"search"`
Merges *NodesStatsMergeStats `json:"merges"`
Refresh *NodesStatsRefreshStats `json:"refresh"`
Flush *NodesStatsFlushStats `json:"flush"`
Warmer *NodesStatsWarmerStats `json:"warmer"`
QueryCache *NodesStatsQueryCacheStats `json:"query_cache"`
Fielddata *NodesStatsFielddataStats `json:"fielddata"`
Percolate *NodesStatsPercolateStats `json:"percolate"`
Completion *NodesStatsCompletionStats `json:"completion"`
Segments *NodesStatsSegmentsStats `json:"segments"`
Translog *NodesStatsTranslogStats `json:"translog"`
Suggest *NodesStatsSuggestStats `json:"suggest"`
RequestCache *NodesStatsRequestCacheStats `json:"request_cache"`
Recovery NodesStatsRecoveryStats `json:"recovery"`

Indices map[string]*NodesStatsIndex `json:"indices"` // for level=indices
Shards map[string]*NodesStatsIndex `json:"shards"` // for level=shards
}

type NodesStatsDocsStats struct {
Count int64 `json:"count"`
Deleted int64 `json:"deleted"`
}

type NodesStatsStoreStats struct {
Size string `json:"size"`
SizeInBytes int64 `json:"size_in_bytes"`
ThrottleTime string `json:"throttle_time"`
ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`
}

type NodesStatsIndexingStats struct {
IndexTotal int64 `json:"index_total"`
IndexTime string `json:"index_time"`
IndexTimeInMillis int64 `json:"index_time_in_millis"`
IndexCurrent int64 `json:"index_current"`
IndexFailed int64 `json:"index_failed"`
DeleteTotal int64 `json:"delete_total"`
DeleteTime string `json:"delete_time"`
DeleteTimeInMillis int64 `json:"delete_time_in_millis"`
DeleteCurrent int64 `json:"delete_current"`
NoopUpdateTotal int64 `json:"noop_update_total"`
IsThrottled bool `json:"is_throttled"`
ThrottleTime string `json:"throttle_time"`
ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`

Types map[string]*NodesStatsIndexingStats `json:"types"` // stats for individual types
}

type NodesStatsGetStats struct {
Total int64 `json:"total"`
Time string `json:"get_time"`
TimeInMillis int64 `json:"time_in_millis"`
Exists int64 `json:"exists"`
ExistsTime string `json:"exists_time"`
ExistsTimeInMillis int64 `json:"exists_in_millis"`
Missing int64 `json:"missing"`
MissingTime string `json:"missing_time"`
MissingTimeInMillis int64 `json:"missing_in_millis"`
Current int64 `json:"current"`
}

type NodesStatsSearchStats struct {
OpenContexts int64 `json:"open_contexts"`
QueryTotal int64 `json:"query_total"`
QueryTime string `json:"query_time"`
QueryTimeInMillis int64 `json:"query_time_in_millis"`
QueryCurrent int64 `json:"query_current"`
FetchTotal int64 `json:"fetch_total"`
FetchTime string `json:"fetch_time"`
FetchTimeInMillis int64 `json:"fetch_time_in_millis"`
FetchCurrent int64 `json:"fetch_current"`
ScrollTotal int64 `json:"scroll_total"`
ScrollTime string `json:"scroll_time"`
ScrollTimeInMillis int64 `json:"scroll_time_in_millis"`
ScrollCurrent int64 `json:"scroll_current"`

Groups map[string]*NodesStatsSearchStats `json:"groups"` // stats for individual groups
}

type NodesStatsMergeStats struct {
Current int64 `json:"current"`
CurrentDocs int64 `json:"current_docs"`
CurrentSize string `json:"current_size"`
CurrentSizeInBytes int64 `json:"current_size_in_bytes"`
Total int64 `json:"total"`
TotalTime string `json:"total_time"`
TotalTimeInMillis int64 `json:"total_time_in_millis"`
TotalDocs int64 `json:"total_docs"`
TotalSize string `json:"total_size"`
TotalSizeInBytes int64 `json:"total_size_in_bytes"`
TotalStoppedTime string `json:"total_stopped_time"`
TotalStoppedTimeInMillis int64 `json:"total_stopped_time_in_millis"`
TotalThrottledTime string `json:"total_throttled_time"`
TotalThrottledTimeInMillis int64 `json:"total_throttled_time_in_millis"`
TotalThrottleBytes string `json:"total_auto_throttle"`
TotalThrottleBytesInBytes int64 `json:"total_auto_throttle_in_bytes"`
}

type NodesStatsRefreshStats struct {
Total int64 `json:"total"`
TotalTime string `json:"total_time"`
TotalTimeInMillis int64 `json:"total_time_in_millis"`
}

type NodesStatsFlushStats struct {
Total int64 `json:"total"`
TotalTime string `json:"total_time"`
TotalTimeInMillis int64 `json:"total_time_in_millis"`
}

type NodesStatsWarmerStats struct {
Current int64 `json:"current"`
Total int64 `json:"total"`
TotalTime string `json:"total_time"`
TotalTimeInMillis int64 `json:"total_time_in_millis"`
}

type NodesStatsQueryCacheStats struct {
MemorySize string `json:"memory_size"`
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
TotalCount int64 `json:"total_count"`
HitCount int64 `json:"hit_count"`
MissCount int64 `json:"miss_count"`
CacheSize int64 `json:"cache_size"`
CacheCount int64 `json:"cache_count"`
Evictions int64 `json:"evictions"`
}

type NodesStatsFielddataStats struct {
MemorySize string `json:"memory_size"`
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
Evictions int64 `json:"evictions"`
Fields map[string]struct {
MemorySize string `json:"memory_size"`
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
} `json:"fields"`
}

type NodesStatsPercolateStats struct {
Total int64 `json:"total"`
Time string `json:"time"`
TimeInMillis int64 `json:"time_in_millis"`
Current int64 `json:"current"`
MemorySize string `json:"memory_size"`
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
Queries int64 `json:"queries"`
}

type NodesStatsCompletionStats struct {
Size string `json:"size"`
SizeInBytes int64 `json:"size_in_bytes"`
Fields map[string]struct {
Size string `json:"size"`
SizeInBytes int64 `json:"size_in_bytes"`
} `json:"fields"`
}

type NodesStatsSegmentsStats struct {
Count int64 `json:"count"`
Memory string `json:"memory"`
MemoryInBytes int64 `json:"memory_in_bytes"`
TermsMemory string `json:"terms_memory"`
TermsMemoryInBytes int64 `json:"terms_memory_in_bytes"`
StoredFieldsMemory string `json:"stored_fields_memory"`
StoredFieldsMemoryInBytes int64 `json:"stored_fields_memory_in_bytes"`
TermVectorsMemory string `json:"term_vectors_memory"`
TermVectorsMemoryInBytes int64 `json:"term_vectors_memory_in_bytes"`
NormsMemory string `json:"norms_memory"`
NormsMemoryInBytes int64 `json:"norms_memory_in_bytes"`
DocValuesMemory string `json:"doc_values_memory"`
DocValuesMemoryInBytes int64 `json:"doc_values_memory_in_bytes"`
IndexWriterMemory string `json:"index_writer_memory"`
IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"`
IndexWriterMaxMemory string `json:"index_writer_max_memory"`
IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"`
VersionMapMemory string `json:"version_map_memory"`
VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"`
FixedBitSetMemory string `json:"fixed_bit_set"` // not a typo
FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes"`
}

type NodesStatsTranslogStats struct {
Operations int64 `json:"operations"`
Size string `json:"size"`
SizeInBytes int64 `json:"size_in_bytes"`
}

type NodesStatsSuggestStats struct {
Total int64 `json:"total"`
TotalTime string `json:"total_time"`
TotalTimeInMillis int64 `json:"total_time_in_millis"`
Current int64 `json:"current"`
}

type NodesStatsRequestCacheStats struct {
MemorySize string `json:"memory_size"`
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
Evictions int64 `json:"evictions"`
HitCount int64 `json:"hit_count"`
MissCount int64 `json:"miss_count"`
}

type NodesStatsRecoveryStats struct {
CurrentAsSource int `json:"current_as_source"`
CurrentAsTarget int `json:"current_as_target"`
ThrottleTime string `json:"throttle_time"`
ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`
}

type NodesStatsNodeOS struct {
Timestamp int64 `json:"timestamp"`
CPU *NodesStatsNodeOSCPU `json:"cpu"`
Mem *NodesStatsNodeOSMem `json:"mem"`
Swap *NodesStatsNodeOSSwap `json:"swap"`
}

type NodesStatsNodeOSCPU struct {
Percent int `json:"percent"`
LoadAverage map[string]float64 `json:"load_average"` // keys are: 1m, 5m, and 15m
}

type NodesStatsNodeOSMem struct {
Total string `json:"total"`
TotalInBytes int64 `json:"total_in_bytes"`
Free string `json:"free"`
FreeInBytes int64 `json:"free_in_bytes"`
Used string `json:"used"`
UsedInBytes int64 `json:"used_in_bytes"`
FreePercent int `json:"free_percent"`
UsedPercent int `json:"used_percent"`
}

type NodesStatsNodeOSSwap struct {
Total string `json:"total"`
TotalInBytes int64 `json:"total_in_bytes"`
Free string `json:"free"`
FreeInBytes int64 `json:"free_in_bytes"`
Used string `json:"used"`
UsedInBytes int64 `json:"used_in_bytes"`
}

type NodesStatsNodeProcess struct {
Timestamp int64 `json:"timestamp"`
OpenFileDescriptors int64 `json:"open_file_descriptors"`
MaxFileDescriptors int64 `json:"max_file_descriptors"`
CPU struct {
Percent int `json:"percent"`
Total string `json:"total"`
TotalInMillis int64 `json:"total_in_millis"`
} `json:"cpu"`
Mem struct {
TotalVirtual string `json:"total_virtual"`
TotalVirtualInBytes int64 `json:"total_virtual_in_bytes"`
} `json:"mem"`
}

type NodesStatsNodeJVM struct {
Timestamp int64 `json:"timestamp"`
Uptime string `json:"uptime"`
UptimeInMillis int64 `json:"uptime_in_millis"`
Mem *NodesStatsNodeJVMMem `json:"mem"`
Threads *NodesStatsNodeJVMThreads `json:"threads"`
GC *NodesStatsNodeJVMGC `json:"gc"`
BufferPools map[string]*NodesStatsNodeJVMBufferPool `json:"buffer_pools"`
Classes *NodesStatsNodeJVMClasses `json:"classes"`
}

type NodesStatsNodeJVMMem struct {
HeapUsed string `json:"heap_used"`
HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
HeapUsedPercent int `json:"heap_used_percent"`
HeapCommitted string `json:"heap_committed"`
HeapCommittedInBytes int64 `json:"heap_committed_in_bytes"`
HeapMax string `json:"heap_max"`
HeapMaxInBytes int64 `json:"heap_max_in_bytes"`
NonHeapUsed string `json:"non_heap_used"`
NonHeapUsedInBytes int64 `json:"non_heap_used_in_bytes"`
NonHeapCommitted string `json:"non_heap_committed"`
NonHeapCommittedInBytes int64 `json:"non_heap_committed_in_bytes"`
Pools map[string]struct {
Used string `json:"used"`
UsedInBytes int64 `json:"used_in_bytes"`
Max string `json:"max"`
MaxInBytes int64 `json:"max_in_bytes"`
PeakUsed string `json:"peak_used"`
PeakUsedInBytes int64 `json:"peak_used_in_bytes"`
PeakMax string `json:"peak_max"`
PeakMaxInBytes int64 `json:"peak_max_in_bytes"`
} `json:"pools"`
}

type NodesStatsNodeJVMThreads struct {
Count int64 `json:"count"`
PeakCount int64 `json:"peak_count"`
}

type NodesStatsNodeJVMGC struct {
Collectors map[string]*NodesStatsNodeJVMGCCollector `json:"collectors"`
}

type NodesStatsNodeJVMGCCollector struct {
CollectionCount int64 `json:"collection_count"`
CollectionTime string `json:"collection_time"`
CollectionTimeInMillis int64 `json:"collection_time_in_millis"`
}

type NodesStatsNodeJVMBufferPool struct {
Count int64 `json:"count"`
TotalCapacity string `json:"total_capacity"`
TotalCapacityInBytes int64 `json:"total_capacity_in_bytes"`
}

type NodesStatsNodeJVMClasses struct {
CurrentLoadedCount int64 `json:"current_loaded_count"`
TotalLoadedCount int64 `json:"total_loaded_count"`
TotalUnloadedCount int64 `json:"total_unloaded_count"`
}

type NodesStatsNodeThreadPool struct {
Threads int `json:"threads"`
Queue int `json:"queue"`
Active int `json:"active"`
Rejected int64 `json:"rejected"`
Largest int `json:"largest"`
Completed int64 `json:"completed"`
}

type NodesStatsNodeFS struct {
Timestamp int64 `json:"timestamp"`
Total *NodesStatsNodeFSEntry `json:"total"`
Data []*NodesStatsNodeFSEntry `json:"data"`
IOStats *NodesStatsNodeFSIOStats `json:"io_stats"`
}

type NodesStatsNodeFSEntry struct {
Path string `json:"path"`
Mount string `json:"mount"`
Type string `json:"type"`
Total string `json:"total"`
TotalInBytes int64 `json:"total_in_bytes"`
Free string `json:"free"`
FreeInBytes int64 `json:"free_in_bytes"`
Available string `json:"available"`
AvailableInBytes int64 `json:"available_in_bytes"`
Spins string `json:"spins"`
}

type NodesStatsNodeFSIOStats struct {
Devices []*NodesStatsNodeFSIOStatsEntry `json:"devices"`
Total *NodesStatsNodeFSIOStatsEntry `json:"total"`
}

type NodesStatsNodeFSIOStatsEntry struct {
DeviceName string `json:"device_name"`
Operations int64 `json:"operations"`
ReadOperations int64 `json:"read_operations"`
WriteOperations int64 `json:"write_operations"`
ReadKilobytes int64 `json:"read_kilobytes"`
WriteKilobytes int64 `json:"write_kilobytes"`
}

type NodesStatsNodeTransport struct {
ServerOpen int `json:"server_open"`
RxCount int64 `json:"rx_count"`
RxSize string `json:"rx_size"`
RxSizeInBytes int64 `json:"rx_size_in_bytes"`
TxCount int64 `json:"tx_count"`
TxSize string `json:"tx_size"`
TxSizeInBytes int64 `json:"tx_size_in_bytes"`
}

type NodesStatsNodeHTTP struct {
CurrentOpen int `json:"current_open"`
TotalOpened int `json:"total_opened"`
}

type NodesStatsBreaker struct {
LimitSize string `json:"limit_size"`
LimitSizeInBytes int64 `json:"limit_size_in_bytes"`
EstimatedSize string `json:"estimated_size"`
EstimatedSizeInBytes int64 `json:"estimated_size_in_bytes"`
Overhead float64 `json:"overhead"`
Tripped int64 `json:"tripped"`
}

type NodesStatsScriptStats struct {
Compilations int64 `json:"compilations"`
CacheEvictions int64 `json:"cache_evictions"`
}

type NodesStatsDiscovery struct {
ClusterStateQueue *NodesStatsDiscoveryStats `json:"cluster_state_queue"`
}

type NodesStatsDiscoveryStats struct {
Total int64 `json:"total"`
Pending int64 `json:"pending"`
Committed int64 `json:"committed"`
}

type NodesStatsIngest struct {
Total *NodesStatsIngestStats `json:"total"`
Pipelines interface{} `json:"pipelines"`
}

type NodesStatsIngestStats struct {
Count int64 `json:"count"`
Time string `json:"time"`
TimeInMillis int64 `json:"time_in_millis"`
Current int64 `json:"current"`
Failed int64 `json:"failed"`
}

@ -1,4 +1,4 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
// See http://olivere.mit-license.org/license.txt for details.

@ -8,6 +8,9 @@ import (
"encoding/json"
"encoding/json"
"net/http"
"net/http"
"net/url"
"net/url"

"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
)

// PingService checks if an Elasticsearch server on a given URL is alive.
// PingService checks if an Elasticsearch server on a given URL is alive.
@ -71,7 +74,7 @@ func (s *PingService) Pretty(pretty bool) *PingService {

// Do returns the PingResult, the HTTP status code of the Elasticsearch
// Do returns the PingResult, the HTTP status code of the Elasticsearch
// server, and an error.
// server, and an error.
func (s *PingService) Do() (*PingResult, int, error) {
func (s *PingService) Do(ctx context.Context) (*PingResult, int, error) {
s.client.mu.RLock()
s.client.mu.RLock()
basicAuth := s.client.basicAuth
basicAuth := s.client.basicAuth
basicAuthUsername := s.client.basicAuthUsername
basicAuthUsername := s.client.basicAuthUsername
@ -108,7 +111,7 @@ func (s *PingService) Do() (*PingResult, int, error) {
req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
}
}

res, err := s.client.c.Do((*http.Request)(req))
res, err := ctxhttp.Do(ctx, s.client.c, (*http.Request)(req))
if err != nil {
if err != nil {
return nil, 0, err
return nil, 0, err
}
}

@ -1,9 +1,11 @@
// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
// See http://olivere.mit-license.org/license.txt for details.

package elastic
package elastic

import "golang.org/x/net/context"

// HasPlugin indicates whether the cluster has the named plugin.
// HasPlugin indicates whether the cluster has the named plugin.
func (c *Client) HasPlugin(name string) (bool, error) {
func (c *Client) HasPlugin(name string) (bool, error) {
plugins, err := c.Plugins()
plugins, err := c.Plugins()
@ -20,7 +22,7 @@ func (c *Client) HasPlugin(name string) (bool, error) {

// Plugins returns the list of all registered plugins.
// Plugins returns the list of all registered plugins.
func (c *Client) Plugins() ([]string, error) {
func (c *Client) Plugins() ([]string, error) {
stats, err := c.ClusterStats().Do()
stats, err := c.ClusterStats().Do(context.Background())
if err != nil {
if err != nil {
return nil, err
return nil, err
}
}
Some files were not shown because too many files have changed in this diff.