// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package http

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	jsoniter "github.com/json-iterator/go"
	xhttp "github.com/minio/minio/internal/http"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/logger/target/types"
	"github.com/minio/minio/internal/once"
	"github.com/minio/minio/internal/store"
	xnet "github.com/minio/pkg/v3/net"
	"github.com/valyala/bytebufferpool"
)

const (
	// maxWorkers is the maximum number of concurrent http loggers
	maxWorkers = 16

	// maxWorkersWithBatchEvents is the maximum number of concurrent batch http loggers
	maxWorkersWithBatchEvents = 4

	// httpLoggerExtension is the suffix for the files in the configured queue dir where logs are persisted.
	httpLoggerExtension = ".http.log"
)

const (
	statusOffline = iota
	statusOnline
	statusClosed
)

var (
	logChBuffers = make(map[string]chan interface{})
	logChLock    = sync.Mutex{}
)

// Config defines the http logger target configuration.
type Config struct {
	Enabled     bool              `json:"enabled"`
	Name        string            `json:"name"`
	UserAgent   string            `json:"userAgent"`
	Endpoint    *xnet.URL         `json:"endpoint"`
	AuthToken   string            `json:"authToken"`
	ClientCert  string            `json:"clientCert"`
	ClientKey   string            `json:"clientKey"`
	BatchSize   int               `json:"batchSize"`
	QueueSize   int               `json:"queueSize"`
	QueueDir    string            `json:"queueDir"`
	MaxRetry    int               `json:"maxRetry"`
	RetryIntvl  time.Duration     `json:"retryInterval"`
	Proxy       string            `json:"proxy"`
	Transport   http.RoundTripper `json:"-"`
	HTTPTimeout time.Duration     `json:"httpTimeout"`

	// Custom logger
	LogOnceIf func(ctx context.Context, err error, id string, errKind ...interface{}) `json:"-"`
}
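
// An illustrative Config for a batching webhook target with an on-disk queue.
// The endpoint, token and directory below are placeholder assumptions, not
// values this package prescribes; xnet.ParseHTTPURL is assumed to be available
// from github.com/minio/pkg/v3/net.
//
//	endpoint, _ := xnet.ParseHTTPURL("https://collector.example.com:8088/services/collector")
//	cfg := Config{
//		Enabled:     true,
//		Name:        "audit-webhook",
//		Endpoint:    endpoint,
//		AuthToken:   "Bearer <token>", // sent verbatim in the Authorization header
//		BatchSize:   100,              // events per HTTP POST
//		QueueSize:   100000,           // in-memory channel capacity
//		QueueDir:    "/var/minio/log-queue", // persist entries while the endpoint is down
//		MaxRetry:    3,
//		RetryIntvl:  3 * time.Second,
//		HTTPTimeout: 5 * time.Second,
//		Transport:   http.DefaultTransport,
//		LogOnceIf:   func(ctx context.Context, err error, id string, _ ...interface{}) {}, // error sink used on send failures
//	}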

// Target implements logger.Target and sends the json
// format of a log entry to the configured http endpoint.
// An internal buffer of logs is maintained but when the
// buffer is full, new logs are just ignored and an error
// is returned to the caller.
type Target struct {
	totalMessages  atomic.Int64
	failedMessages atomic.Int64
	status         atomic.Int32

	// Worker control
	workers    atomic.Int64
	maxWorkers int64

	// workerStartMu sync.Mutex
	lastStarted time.Time

	wg sync.WaitGroup

	// Channel of log entries.
	// Reading logCh must hold read lock on logChMu (to avoid read race)
	// Sending a value on logCh must hold read lock on logChMu (to avoid closing)
	logCh   chan interface{}
	logChMu sync.RWMutex

	// If this webhook is being re-configured we will
	// assign the new webhook target to this field.
	// The Send() method will then redirect entries
	// to the new target once the current one
	// has been set to status "statusClosed".
	// Once the global target slice has been migrated
	// the current target will stop receiving entries.
	migrateTarget *Target

	// Number of events per HTTP send to the webhook target.
	// This is useful only if your endpoint can read multiple
	// events from a single request body, for example the
	// Splunk HTTP Event Collector. If you are unsure, set this to '1'.
	batchSize   int
	payloadType string

	// store to persist and replay the logs to the target
	// to avoid missing events when the target is down.
	store          store.Store[interface{}]
	storeCtxCancel context.CancelFunc

	initQueueOnce once.Init

	config      Config
	client      *http.Client
	httpTimeout time.Duration
}

// Name returns the name of the target
func (h *Target) Name() string {
	return "minio-http-" + h.config.Name
}

// Type - returns type of the target
func (h *Target) Type() types.TargetType {
	return types.TargetHTTP
}

// Endpoint returns the backend endpoint
func (h *Target) Endpoint() string {
	return h.config.Endpoint.String()
}

func (h *Target) String() string {
	return h.config.Name
}

// IsOnline returns true if the target is reachable using a cached value
func (h *Target) IsOnline(ctx context.Context) bool {
	return h.status.Load() == statusOnline
}

// Stats returns the target statistics.
func (h *Target) Stats() types.TargetStats {
	h.logChMu.RLock()
	queueLength := len(h.logCh)
	h.logChMu.RUnlock()
	stats := types.TargetStats{
		TotalMessages:  h.totalMessages.Load(),
		FailedMessages: h.failedMessages.Load(),
		QueueLength:    queueLength,
	}

	return stats
}

// AssignMigrateTarget assigns a target
// which will eventually replace the current target.
func (h *Target) AssignMigrateTarget(migrateTgt *Target) {
	h.migrateTarget = migrateTgt
}

// Init validates and initializes the http target
func (h *Target) Init(ctx context.Context) (err error) {
	if h.config.QueueDir != "" {
		return h.initQueueOnce.DoWithContext(ctx, h.initDiskStore)
	}
	return h.initQueueOnce.DoWithContext(ctx, h.initMemoryStore)
}

func (h *Target) initDiskStore(ctx context.Context) (err error) {
	ctx, cancel := context.WithCancel(ctx)
	h.storeCtxCancel = cancel
	h.lastStarted = time.Now()
	go h.startQueueProcessor(ctx, true)

	queueStore := store.NewQueueStore[interface{}](
		filepath.Join(h.config.QueueDir, h.Name()),
		uint64(h.config.QueueSize),
		httpLoggerExtension,
	)

	if err := queueStore.Open(); err != nil {
		return fmt.Errorf("unable to initialize the queue store of %s webhook: %w", h.Name(), err)
	}

	h.store = queueStore
	store.StreamItems(h.store, h, ctx.Done(), h.config.LogOnceIf)

	return nil
}

func (h *Target) initMemoryStore(ctx context.Context) (err error) {
	ctx, cancel := context.WithCancel(ctx)
	h.storeCtxCancel = cancel
	h.lastStarted = time.Now()
	go h.startQueueProcessor(ctx, true)
	return nil
}

func (h *Target) send(ctx context.Context, payload []byte, payloadCount int, payloadType string, timeout time.Duration) (err error) {
	defer func() {
		if err != nil {
			if xnet.IsNetworkOrHostDown(err, false) {
				h.status.Store(statusOffline)
			}
			h.failedMessages.Add(int64(payloadCount))
		} else {
			h.status.Store(statusOnline)
		}
	}()

	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
		h.Endpoint(), bytes.NewReader(payload))
	if err != nil {
		return fmt.Errorf("invalid configuration for '%s'; %v", h.Endpoint(), err)
	}
	if payloadType != "" {
		req.Header.Set(xhttp.ContentType, payloadType)
	}
	req.Header.Set(xhttp.WebhookEventPayloadCount, strconv.Itoa(payloadCount))
	req.Header.Set(xhttp.MinIOVersion, xhttp.GlobalMinIOVersion)
	req.Header.Set(xhttp.MinioDeploymentID, xhttp.GlobalDeploymentID)

	// Set user-agent to indicate MinIO release
	// version to the configured log endpoint
	req.Header.Set("User-Agent", h.config.UserAgent)

	if h.config.AuthToken != "" {
		req.Header.Set("Authorization", h.config.AuthToken)
	}

	resp, err := h.client.Do(req)
	if err != nil {
		return fmt.Errorf("%s returned '%w', please check your endpoint configuration", h.Endpoint(), err)
	}

	// Drain any response.
	xhttp.DrainBody(resp.Body)

	if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
		// accepted HTTP status codes.
		return nil
	} else if resp.StatusCode == http.StatusForbidden {
		return fmt.Errorf("%s returned '%s', please check if your auth token is correctly set", h.Endpoint(), resp.Status)
	}
	return fmt.Errorf("%s returned '%s', please check your endpoint configuration", h.Endpoint(), resp.Status)
}
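
// A sketch of a receiving endpoint, assumed to live in the operator's own
// service rather than in this package: when batching is enabled the POST body
// carries consecutive JSON documents (one per entry), so a json.Decoder can be
// read until io.EOF. The route and handler below are illustrative assumptions.
//
//	http.HandleFunc("/logs", func(w http.ResponseWriter, r *http.Request) {
//		dec := json.NewDecoder(r.Body)
//		for {
//			var entry map[string]interface{}
//			if err := dec.Decode(&entry); err == io.EOF {
//				break
//			} else if err != nil {
//				http.Error(w, err.Error(), http.StatusBadRequest)
//				return
//			}
//			// process entry...
//		}
//		w.WriteHeader(http.StatusOK)
//	})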

func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) {
	h.logChMu.RLock()
	if h.logCh == nil {
		h.logChMu.RUnlock()
		return
	}
	h.logChMu.RUnlock()

	h.workers.Add(1)
	defer h.workers.Add(-1)

	h.wg.Add(1)
	defer h.wg.Done()

	entries := make([]interface{}, 0)
	name := h.Name()

	defer func() {
		// re-load the global buffer pointer
		// in case it was modified by a new target.
		logChLock.Lock()
		currentGlobalBuffer, ok := logChBuffers[name]
		logChLock.Unlock()
		if !ok {
			return
		}

		for _, v := range entries {
			select {
			case currentGlobalBuffer <- v:
			default:
			}
		}

		if mainWorker {
		drain:
			for {
				select {
				case v, ok := <-h.logCh:
					if !ok {
						break drain
					}

					currentGlobalBuffer <- v
				default:
					break drain
				}
			}
		}
	}()

	lastBatchProcess := time.Now()

	buf := bytebufferpool.Get()
	enc := jsoniter.ConfigCompatibleWithStandardLibrary.NewEncoder(buf)
	defer bytebufferpool.Put(buf)

	isDirQueue := h.config.QueueDir != ""

	// globalBuffer is always created or adjusted
	// before this method is launched.
	logChLock.Lock()
	globalBuffer := logChBuffers[name]
	logChLock.Unlock()

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	var count int
	for {
		var (
			ok    bool
			entry any
		)

		if count < h.batchSize {
			tickered := false
			select {
			case <-ticker.C:
				tickered = true
			case entry = <-globalBuffer:
			case entry, ok = <-h.logCh:
				if !ok {
					return
				}
			case <-ctx.Done():
				return
			}

			if !tickered {
				h.totalMessages.Add(1)
				if !isDirQueue {
					if err := enc.Encode(&entry); err != nil {
						h.config.LogOnceIf(
							ctx,
							fmt.Errorf("unable to encode webhook log entry, err '%w' entry: %v", err, entry),
							h.Name(),
						)
						h.failedMessages.Add(1)
						continue
					}
				} else {
					entries = append(entries, entry)
				}
				count++
			}

			if len(h.logCh) > 0 || len(globalBuffer) > 0 || count == 0 {
				// There is something in the log queue that we should
				// process first, even if the ticker fired; or we have
				// not received any events yet and should keep waiting.
				continue
			}

			// If we are doing batching, we should wait
			// at least for a second, before sending.
			// Even if there is nothing in the queue.
			if h.batchSize > 1 && time.Since(lastBatchProcess) < time.Second {
				continue
			}
		}

		// If we have reached the batch count, or more than a second
		// has passed since the last batch was sent, send it now.
		lastBatchProcess = time.Now()

		var retries int
		retryIntvl := h.config.RetryIntvl
		if retryIntvl <= 0 {
			retryIntvl = 3 * time.Second
		}

		maxRetries := h.config.MaxRetry

	retry:
		// If the channel reaches above half capacity
		// we spawn more workers. The workers spawned
		// from this main worker routine will exit
		// once the channel drops below half capacity
		// and when it's been at least 30 seconds since
		// we launched a new worker.
		if mainWorker && len(h.logCh) > cap(h.logCh)/2 {
			nWorkers := h.workers.Load()
			if nWorkers < h.maxWorkers {
				if time.Since(h.lastStarted).Milliseconds() > 10 {
					h.lastStarted = time.Now()
					go h.startQueueProcessor(ctx, false)
				}
			}
		}

		var err error
		if !isDirQueue {
			err = h.send(ctx, buf.Bytes(), count, h.payloadType, h.httpTimeout)
		} else {
			_, err = h.store.PutMultiple(entries)
		}

		if err != nil {
			if errors.Is(err, context.Canceled) {
				return
			}

			h.config.LogOnceIf(
				context.Background(),
				fmt.Errorf("unable to send audit/log entry(s) to '%s' err '%w': %d", name, err, count),
				name,
			)

			time.Sleep(retryIntvl)
			if maxRetries == 0 {
				goto retry
			}
			retries++
			if retries <= maxRetries {
				goto retry
			}
		}

		entries = make([]interface{}, 0)
		count = 0
		if !isDirQueue {
			buf.Reset()
		}

		if !mainWorker && len(h.logCh) < cap(h.logCh)/2 {
			if time.Since(h.lastStarted).Seconds() > 30 {
				return
			}
		}
	}
}

// CreateOrAdjustGlobalBuffer will create or adjust the global log entry buffers
// which are used to migrate log entries between old and new targets.
func CreateOrAdjustGlobalBuffer(currentTgt *Target, newTgt *Target) {
	logChLock.Lock()
	defer logChLock.Unlock()

	requiredCap := currentTgt.config.QueueSize + (currentTgt.config.BatchSize * int(currentTgt.maxWorkers))
	currentCap := 0
	name := newTgt.Name()

	currentBuff, ok := logChBuffers[name]
	if !ok {
		logChBuffers[name] = make(chan interface{}, requiredCap)
		currentCap = requiredCap
	} else {
		currentCap = cap(currentBuff)
		requiredCap += len(currentBuff)
	}

	if requiredCap > currentCap {
		logChBuffers[name] = make(chan interface{}, requiredCap)

		if len(currentBuff) > 0 {
		drain:
			for {
				select {
				case v, ok := <-currentBuff:
					if !ok {
						break drain
					}
					logChBuffers[newTgt.Name()] <- v
				default:
					break drain
				}
			}
		}
	}
}
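
// A hedged sketch of how a caller outside this package might swap in a
// re-configured target; the exact orchestration is assumed here, not defined
// by this file. The shared buffer is grown first so queued entries survive,
// the old target is pointed at the new one, and only then is the old target
// cancelled and the new one initialized.
//
//	newTgt, err := New(newCfg)
//	if err != nil {
//		return err
//	}
//	CreateOrAdjustGlobalBuffer(oldTgt, newTgt)
//	oldTgt.AssignMigrateTarget(newTgt)
//	oldTgt.Cancel()
//	if err := newTgt.Init(ctx); err != nil {
//		return err
//	}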

// New initializes a new logger target which
// sends logs over HTTP to the specified endpoint
func New(config Config) (*Target, error) {
	maxWorkers := maxWorkers
	if config.BatchSize > 100 {
		maxWorkers = maxWorkersWithBatchEvents
	} else if config.BatchSize <= 0 {
		config.BatchSize = 1
	}

	h := &Target{
		logCh:       make(chan interface{}, config.QueueSize),
		config:      config,
		batchSize:   config.BatchSize,
		maxWorkers:  int64(maxWorkers),
		httpTimeout: config.HTTPTimeout,
	}
	h.status.Store(statusOffline)

	if config.BatchSize > 1 {
		h.payloadType = ""
	} else {
		h.payloadType = "application/json"
	}

	// If a proxy is configured, set it on the transport.
	if h.config.Proxy != "" {
		proxyURL, _ := url.Parse(h.config.Proxy)
		transport := h.config.Transport
		ctransport := transport.(*http.Transport).Clone()
		ctransport.Proxy = http.ProxyURL(proxyURL)
		h.config.Transport = ctransport
	}

	h.client = &http.Client{Transport: h.config.Transport}
	return h, nil
}
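
// A minimal lifecycle sketch for callers (illustrative only; the entry shape
// is an assumption, not something this package prescribes): create the target,
// initialize the queue processor, enqueue entries, and cancel on shutdown so
// queued messages are flushed.
//
//	tgt, err := New(cfg) // cfg as in the Config example above
//	if err != nil {
//		return err
//	}
//	if err := tgt.Init(context.Background()); err != nil {
//		return err
//	}
//	defer tgt.Cancel()
//	_ = tgt.Send(context.Background(), map[string]interface{}{"message": "hello"})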

// SendFromStore - reads the log from store and sends it to webhook.
func (h *Target) SendFromStore(key store.Key) (err error) {
	var eventData []byte
	eventData, err = h.store.GetRaw(key)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	count := 1
	v := strings.Split(key.Name, ":")
	if len(v) == 2 {
		count, err = strconv.Atoi(v[0])
		if err != nil {
			return err
		}
	}

	if err := h.send(context.Background(), eventData, count, h.payloadType, h.httpTimeout); err != nil {
		return err
	}

	// Delete the event from store.
	return h.store.Del(key)
}

// Send the log message 'entry' to the http target.
// Messages are queued on disk if the store is enabled.
// If Cancel has been called the message is ignored.
func (h *Target) Send(ctx context.Context, entry interface{}) error {
	if h.status.Load() == statusClosed {
		if h.migrateTarget != nil {
			return h.migrateTarget.Send(ctx, entry)
		}
		return nil
	}

	h.logChMu.RLock()
	defer h.logChMu.RUnlock()
	if h.logCh == nil {
		// We are closing...
		return nil
	}

	select {
	case h.logCh <- entry:
		h.totalMessages.Add(1)
	case <-ctx.Done():
		// Return an error only if the context deadline was exceeded.
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return ctx.Err()
		}
		return nil
	default:
		h.totalMessages.Add(1)
		h.failedMessages.Add(1)
		return errors.New("log buffer full")
	}

	return nil
}
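
// Send never blocks: the select above falls through to its default case when
// the buffer is full, so a caller sees the error immediately rather than after
// a wait. Illustrative caller (the dropped-entries counter is an assumption):
//
//	if err := tgt.Send(ctx, entry); err != nil {
//		droppedEntries.Inc() // entry was dropped, e.g. "log buffer full"
//	}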

// Cancel - cancels the target.
// All queued messages are flushed and the function returns afterwards.
// All messages sent to the target after this function has been called will be dropped.
func (h *Target) Cancel() {
	h.status.Store(statusClosed)
	h.storeCtxCancel()

	// Wait for messages to be sent...
	h.wg.Wait()

	// Close logCh and set it to nil.
	// This will block all Send operations,
	// and finish the existing ones.
	// All future ones will be discarded.
	h.logChMu.Lock()
	xioutil.SafeClose(h.logCh)
	h.logCh = nil
	h.logChMu.Unlock()
}