Enable event persistence in kafka (#7633)

Praveen raj Mani 2019-05-30 01:49:48 +05:30 committed by kannappanr
parent da8214845a
commit 763fce909b
8 changed files with 139 additions and 21 deletions


@@ -99,6 +99,8 @@ var (
     "enable": false,
     "brokers": null,
     "topic": "",
+    "queueDir": "",
+    "queueLimit": 0,
     "tls": {
         "enable": false,
         "skipVerify": false,


@@ -325,7 +325,7 @@ func (s *serverConfig) TestNotificationTargets() error {
         if !v.Enable {
             continue
         }
-        t, err := target.NewKafkaTarget(k, v)
+        t, err := target.NewKafkaTarget(k, v, GlobalServiceDoneCh)
         if err != nil {
             return fmt.Errorf("kafka(%s): %s", k, err.Error())
         }
@@ -667,7 +667,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
     for id, args := range config.Notify.Kafka {
         if args.Enable {
-            newTarget, err := target.NewKafkaTarget(id, args)
+            newTarget, err := target.NewKafkaTarget(id, args, GlobalServiceDoneCh)
             if err != nil {
                 logger.LogIf(context.Background(), err)
                 continue


@@ -200,7 +200,7 @@ func TestValidateConfig(t *testing.T) {
         {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
         // Test 16 - Test Kafka
-        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, false},
+        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "", "queueDir": "", "queueLimit": 0 } }}}`, false},
         // Test 17 - Test Webhook
         {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false},


@@ -884,7 +884,7 @@ type serverConfigV32 struct {
     } `json:"policy"`
 }
 
-// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit with MQTT.
+// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit in MQTT and kafka.
 type serverConfigV33 struct {
     quick.Config `json:"-"` // ignore interfaces


@@ -931,10 +931,23 @@ The MinIO server configuration file is stored on the backend in json format. Upd
     "1": {
         "enable": true,
         "brokers": ["localhost:9092"],
-        "topic": "bucketevents"
+        "topic": "bucketevents",
+        "queueDir": "",
+        "queueLimit": 0,
+        "tls": {
+            "enable": false,
+            "skipVerify": false,
+            "clientAuth": 0
+        },
+        "sasl": {
+            "enable": false,
+            "username": "",
+            "password": ""
+        }
     }
 }
 ```
+MinIO supports a persistent event store. The persistent store backs up events while the Kafka broker is offline and replays them once the broker comes back online. The event store is configured by setting a directory path in the `queueDir` field and the maximum number of events the store may hold in the `queueLimit` field. For example, `queueDir` can be `/home/events` and `queueLimit` can be `1000`. By default, `queueLimit` is set to 10000.
 
 To update the configuration, use `mc admin config get` command to get the current configuration file for the minio deployment in json format, and save it locally.
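
For illustration, here is how the `kafka` entry shown above might look with persistence enabled, using the example values from the new paragraph (`/home/events` and `1000`); the broker address and topic are the documentation's placeholder values:

```
"kafka": {
    "1": {
        "enable": true,
        "brokers": ["localhost:9092"],
        "topic": "bucketevents",
        "queueDir": "/home/events",
        "queueLimit": 1000,
        "tls": {
            "enable": false,
            "skipVerify": false,
            "clientAuth": 0
        },
        "sasl": {
            "enable": false,
            "username": "",
            "password": ""
        }
    }
}
```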


@@ -64,6 +64,8 @@
     "enable": false,
     "brokers": null,
     "topic": "",
+    "queueDir": "",
+    "queueLimit": 0,
     "tls": {
         "enable": false,
         "skipVerify": false,


@@ -20,7 +20,10 @@ import (
     "crypto/tls"
     "encoding/json"
     "errors"
+    "net"
     "net/url"
+    "os"
+    "path/filepath"

     "github.com/minio/minio/pkg/event"
     xnet "github.com/minio/minio/pkg/net"
@@ -30,10 +33,12 @@ import (
 // KafkaArgs - Kafka target arguments.
 type KafkaArgs struct {
     Enable bool `json:"enable"`
     Brokers []xnet.Host `json:"brokers"`
     Topic string `json:"topic"`
+    QueueDir string `json:"queueDir"`
+    QueueLimit uint16 `json:"queueLimit"`
     TLS struct {
         Enable bool `json:"enable"`
         SkipVerify bool `json:"skipVerify"`
         ClientAuth tls.ClientAuthType `json:"clientAuth"`
@@ -58,6 +63,14 @@ func (k KafkaArgs) Validate() error {
             return err
         }
     }
+    if k.QueueDir != "" {
+        if !filepath.IsAbs(k.QueueDir) {
+            return errors.New("queueDir path should be absolute")
+        }
+    }
+    if k.QueueLimit > 10000 {
+        return errors.New("queueLimit should not exceed 10000")
+    }
     return nil
 }
@@ -66,6 +79,8 @@ type KafkaTarget struct {
     id event.TargetID
     args KafkaArgs
     producer sarama.SyncProducer
+    config *sarama.Config
+    store Store
 }

 // ID - returns target ID.
@@ -73,11 +88,18 @@ func (target *KafkaTarget) ID() event.TargetID {
     return target.id
 }

-// Save - Sends event directly without persisting.
+// Save - saves the event to the store if persistence is enabled; stored events are replayed once the Kafka connection is active.
 func (target *KafkaTarget) Save(eventData event.Event) error {
+    if target.store != nil {
+        return target.store.Put(eventData)
+    }
+    if !target.args.pingBrokers() {
+        return errNotConnected
+    }
     return target.send(eventData)
 }

+// send - sends an event to Kafka.
 func (target *KafkaTarget) send(eventData event.Event) error {
     objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
     if err != nil {
@@ -95,23 +117,79 @@ func (target *KafkaTarget) send(eventData event.Event) error {
         Key: sarama.StringEncoder(key),
         Value: sarama.ByteEncoder(data),
     }
     _, _, err = target.producer.SendMessage(&msg)
     return err
 }

-// Send - interface compatible method does no-op.
+// Send - reads an event from the store and sends it to Kafka.
 func (target *KafkaTarget) Send(eventKey string) error {
-    return nil
+    var err error
+    if !target.args.pingBrokers() {
+        return errNotConnected
+    }
+    if target.producer == nil {
+        brokers := []string{}
+        for _, broker := range target.args.Brokers {
+            brokers = append(brokers, broker.String())
+        }
+        target.producer, err = sarama.NewSyncProducer(brokers, target.config)
+        if err != nil {
+            if err != sarama.ErrOutOfBrokers {
+                return err
+            }
+            return errNotConnected
+        }
+    }
+    eventData, eErr := target.store.Get(eventKey)
+    if eErr != nil {
+        // The last event key in a successful batch is sent over the channel at most once by replayEvents().
+        // Such an event no longer exists in the store because it has already been sent successfully.
+        if os.IsNotExist(eErr) {
+            return nil
+        }
+        return eErr
+    }
+    err = target.send(eventData)
+    if err != nil {
+        // Sarama opens the circuit breaker after 3 consecutive connection failures.
+        if err == sarama.ErrLeaderNotAvailable || err.Error() == "circuit breaker is open" {
+            return errNotConnected
+        }
+        return err
+    }
+    // Delete the event from the store once it has been sent successfully.
+    return target.store.Del(eventKey)
 }

 // Close - closes underneath kafka connection.
 func (target *KafkaTarget) Close() error {
-    return target.producer.Close()
+    if target.producer != nil {
+        return target.producer.Close()
+    }
+    return nil
+}
+
+// pingBrokers - checks if at least one broker in the cluster is reachable.
+func (k KafkaArgs) pingBrokers() bool {
+    for _, broker := range k.Brokers {
+        conn, dErr := net.Dial("tcp", broker.String())
+        if dErr == nil {
+            conn.Close()
+            return true
+        }
+    }
+    return false
 }

 // NewKafkaTarget - creates new Kafka target with auth credentials.
-func NewKafkaTarget(id string, args KafkaArgs) (*KafkaTarget, error) {
+func NewKafkaTarget(id string, args KafkaArgs, doneCh <-chan struct{}) (*KafkaTarget, error) {
     config := sarama.NewConfig()

     config.Net.SASL.User = args.SASL.User
@@ -132,14 +210,38 @@ func NewKafkaTarget(id string, args KafkaArgs) (*KafkaTarget, error) {
     for _, broker := range args.Brokers {
         brokers = append(brokers, broker.String())
     }

-    producer, err := sarama.NewSyncProducer(brokers, config)
-    if err != nil {
-        return nil, err
+    var store Store
+
+    if args.QueueDir != "" {
+        queueDir := filepath.Join(args.QueueDir, storePrefix+"-kafka-"+id)
+        store = NewQueueStore(queueDir, args.QueueLimit)
+        if oErr := store.Open(); oErr != nil {
+            return nil, oErr
+        }
     }

-    return &KafkaTarget{
+    producer, err := sarama.NewSyncProducer(brokers, config)
+    if err != nil {
+        if store == nil || err != sarama.ErrOutOfBrokers {
+            return nil, err
+        }
+    }
+
+    target := &KafkaTarget{
         id: event.TargetID{ID: id, Name: "kafka"},
         args: args,
         producer: producer,
-    }, nil
+        config: config,
+        store: store,
+    }
+
+    if target.store != nil {
+        // Replay events that were persisted in the store while the broker was offline.
+        eventKeyCh := replayEvents(target.store, doneCh)
+        // sendEvents picks up the replayed event keys and sends them to Kafka.
+        go sendEvents(target, eventKeyCh, doneCh)
+    }
+
+    return target, nil
 }
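
For context, here is a minimal sketch of how a caller could construct the target with the new `doneCh` parameter, mirroring the `NewKafkaTarget(id, args, GlobalServiceDoneCh)` call sites in the server configuration code above. The import paths and the `xnet.ParseHost` helper are assumptions about the surrounding MinIO tree, and the broker, topic, `queueDir`, and `queueLimit` values are illustrative only:

```go
package main

import (
    "log"

    "github.com/minio/minio/pkg/event/target"
    xnet "github.com/minio/minio/pkg/net"
)

func main() {
    // Assumed helper: parse "host:port" into the xnet.Host type used by KafkaArgs.Brokers.
    broker, err := xnet.ParseHost("localhost:9092")
    if err != nil {
        log.Fatalln(err)
    }

    args := target.KafkaArgs{
        Enable:     true,
        Brokers:    []xnet.Host{*broker},
        Topic:      "bucketevents",
        QueueDir:   "/home/events", // a non-empty queueDir enables the on-disk event store
        QueueLimit: 1000,
    }

    // doneCh is closed on shutdown; the server passes GlobalServiceDoneCh here.
    doneCh := make(chan struct{})
    defer close(doneCh)

    kafkaTarget, err := target.NewKafkaTarget("1", args, doneCh)
    if err != nil {
        log.Fatalln(err)
    }
    defer kafkaTarget.Close()

    // Events passed to kafkaTarget.Save() are persisted under queueDir while the
    // broker is unreachable and replayed by the goroutine started in NewKafkaTarget.
}
```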


@@ -111,7 +111,6 @@ func (target *MQTTTarget) send(eventData event.Event) error {
     if token.Error() != nil {
         return token.Error()
     }
     return nil
 }