Fix queueStore stops working with concurrent PUT/DELETE requests (#8381)

- This PR fixes a situation that could lead to an underflow, which is
  possible because of disconnected operations in replayEvents/sendEvents
- Hold the right locks when a Del() operation is performed inside Get()
- Remove panics from the code and use loggerOnce instead
- Remove Timer and use Ticker instead for proper ticks (a minimal sketch
  of this pattern follows the list)
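
For illustration, here is a minimal, stand-alone sketch of a Ticker-based replay loop in the spirit of the last bullet. It is not the MinIO replayEvents implementation; the listKeys callback, the 5-second interval, and the channel shapes are assumptions made only for this example.

```go
package main

import (
	"fmt"
	"time"
)

// replayLoop periodically lists queued event keys and forwards them for
// delivery. A time.Ticker keeps firing at a fixed interval, so there is no
// risk of forgetting to re-arm it the way a one-shot time.Timer must be
// reset after every use.
func replayLoop(listKeys func() []string, eventKeyCh chan<- string, doneCh <-chan struct{}) {
	retryTicker := time.NewTicker(5 * time.Second) // interval is an assumption
	defer retryTicker.Stop()

	for {
		for _, key := range listKeys() {
			select {
			case eventKeyCh <- key:
			case <-doneCh:
				return
			}
		}
		select {
		case <-retryTicker.C: // wait for the next tick before re-listing
		case <-doneCh:
			return
		}
	}
}

func main() {
	doneCh := make(chan struct{})
	eventKeyCh := make(chan string)
	go replayLoop(func() []string { return []string{"event-0001"} }, eventKeyCh, doneCh)
	fmt.Println("replaying:", <-eventKeyCh)
	close(doneCh)
}
```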
Harshavardhana (2019-10-11 17:46:03 -07:00), committed by GitHub
parent f16df2a4e7, commit 175b07d6e4
14 changed files with 231 additions and 185 deletions

@@ -17,6 +17,7 @@
 package target

 import (
+	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"encoding/json"
@@ -191,7 +192,7 @@ func (k KafkaArgs) pingBrokers() bool {
 }

 // NewKafkaTarget - creates new Kafka target with auth credentials.
-func NewKafkaTarget(id string, args KafkaArgs, doneCh <-chan struct{}) (*KafkaTarget, error) {
+func NewKafkaTarget(id string, args KafkaArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{})) (*KafkaTarget, error) {
 	config := sarama.NewConfig()

 	config.Net.SASL.User = args.SASL.User
@@ -242,9 +243,9 @@ func NewKafkaTarget(id string, args KafkaArgs, doneCh <-chan struct{}) (*KafkaTa
 	if target.store != nil {
 		// Replays the events from the store.
-		eventKeyCh := replayEvents(target.store, doneCh)
+		eventKeyCh := replayEvents(target.store, doneCh, loggerOnce, target.ID())

 		// Start replaying events from the store.
-		go sendEvents(target, eventKeyCh, doneCh)
+		go sendEvents(target, eventKeyCh, doneCh, loggerOnce)
 	}

 	return target, nil
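
To illustrate why the loggerOnce callback is threaded through the constructor, below is a minimal, self-contained sketch of a send loop that reports delivery failures through such a callback instead of panicking. This is not the actual sendEvents implementation from the MinIO tree; the Target interface and flakyTarget type are simplified stand-ins invented for this example.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Target is a trimmed-down stand-in for an event target; the real interface
// in the MinIO tree has more methods. It exists only for this sketch.
type Target interface {
	ID() string
	Send(eventKey string) error
}

// sendEvents drains queued event keys and reports delivery failures through
// the injected loggerOnce callback. The signature mirrors the shape added in
// this commit, but the body is illustrative only.
func sendEvents(target Target, eventKeyCh <-chan string, doneCh <-chan struct{},
	loggerOnce func(ctx context.Context, err error, id interface{})) {
	for {
		select {
		case key, ok := <-eventKeyCh:
			if !ok {
				return
			}
			if err := target.Send(key); err != nil {
				// Report the failure and keep the process alive instead of
				// panicking.
				loggerOnce(context.Background(), err, target.ID())
			}
		case <-doneCh:
			return
		}
	}
}

// flakyTarget always fails, just to exercise the logging path.
type flakyTarget struct{}

func (flakyTarget) ID() string { return "kafka-1" }
func (flakyTarget) Send(eventKey string) error {
	return errors.New("broker unreachable while sending " + eventKey)
}

func main() {
	eventKeyCh := make(chan string, 1)
	eventKeyCh <- "event-0001"
	close(eventKeyCh)

	loggerOnce := func(_ context.Context, err error, id interface{}) {
		fmt.Printf("target %v: %v\n", id, err)
	}
	sendEvents(flakyTarget{}, eventKeyCh, make(chan struct{}), loggerOnce)
}
```

Passing the logger in as a parameter, rather than calling a logging package directly, presumably keeps the target package decoupled from the server's logger, which is why the constructor signature grew the extra argument.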