fix: regression in counting total requests (#17024)

Harshavardhana
2023-04-12 14:37:19 -07:00
committed by GitHub
parent b19620b324
commit a5835cecbf
8 changed files with 65 additions and 189 deletions

View File

@@ -36,7 +36,6 @@ import (
 	"github.com/minio/minio/internal/config/dns"
 	"github.com/minio/minio/internal/crypto"
 	xhttp "github.com/minio/minio/internal/http"
-	"github.com/minio/minio/internal/http/stats"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/minio/internal/mcontext"
 )
@@ -136,9 +135,8 @@ func setRequestLimitHandler(h http.Handler) http.Handler {
 // Reserved bucket.
 const (
-    minioReservedBucket              = "minio"
-    minioReservedBucketPath          = SlashSeparator + minioReservedBucket
-    minioReservedBucketPathWithSlash = SlashSeparator + minioReservedBucket + SlashSeparator
+    minioReservedBucket     = "minio"
+    minioReservedBucketPath = SlashSeparator + minioReservedBucket

     loginPathPrefix = SlashSeparator + "login"
 )
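With SlashSeparator being "/", minioReservedBucketPath evaluates to "/minio", the prefix under which reserved console/admin routes live and which the stats code keeps out of per-bucket S3 accounting. A tiny, self-contained illustration of that prefix test (the constant names below are stand-ins, not the cmd package's):

package main

import (
    "fmt"
    "strings"
)

const (
    slashSeparator     = "/"                      // stand-in for SlashSeparator
    reservedBucketPath = slashSeparator + "minio" // evaluates to "/minio"
)

func main() {
    // Requests under "/minio" are reserved traffic, everything else is S3 traffic.
    fmt.Println(strings.HasPrefix("/minio/admin/v3/info", reservedBucketPath)) // true
    fmt.Println(strings.HasPrefix("/mybucket/object.txt", reservedBucketPath)) // false
}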
@@ -280,60 +278,6 @@ func parseAmzDateHeader(req *http.Request) (time.Time, APIErrorCode) {
     return time.Time{}, ErrMissingDateHeader
 }

-// splitStr splits a string into n parts, empty strings are added
-// if we are not able to reach n elements
-func splitStr(path, sep string, n int) []string {
-    splits := strings.SplitN(path, sep, n)
-    // Add empty strings if we found elements less than nr
-    for i := n - len(splits); i > 0; i-- {
-        splits = append(splits, "")
-    }
-    return splits
-}
-
-func url2Bucket(p string) (bucket string) {
-    tokens := splitStr(p, SlashSeparator, 3)
-    return tokens[1]
-}
-
-// setHttpStatsHandler sets a http Stats handler to gather HTTP statistics
-func setHTTPStatsHandler(h http.Handler) http.Handler {
-    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-        // Meters s3 connection stats.
-        meteredRequest := &stats.IncomingTrafficMeter{ReadCloser: r.Body}
-        meteredResponse := &stats.OutgoingTrafficMeter{ResponseWriter: w}
-
-        // Execute the request
-        r.Body = meteredRequest
-        h.ServeHTTP(meteredResponse, r)
-
-        if strings.HasPrefix(r.URL.Path, storageRESTPrefix) ||
-            strings.HasPrefix(r.URL.Path, peerRESTPrefix) ||
-            strings.HasPrefix(r.URL.Path, peerS3Prefix) ||
-            strings.HasPrefix(r.URL.Path, lockRESTPrefix) {
-            globalConnStats.incInputBytes(meteredRequest.BytesRead())
-            globalConnStats.incOutputBytes(meteredResponse.BytesWritten())
-            return
-        }
-
-        if strings.HasPrefix(r.URL.Path, minioReservedBucketPath) {
-            globalConnStats.incAdminInputBytes(meteredRequest.BytesRead())
-            globalConnStats.incAdminOutputBytes(meteredResponse.BytesWritten())
-            return
-        }
-
-        globalConnStats.incS3InputBytes(meteredRequest.BytesRead())
-        globalConnStats.incS3OutputBytes(meteredResponse.BytesWritten())
-
-        if r.URL != nil {
-            bucket := url2Bucket(r.URL.Path)
-            if bucket != "" && bucket != minioReservedBucket {
-                globalBucketConnStats.incS3InputBytes(bucket, meteredRequest.BytesRead())
-                globalBucketConnStats.incS3OutputBytes(bucket, meteredResponse.BytesWritten())
-            }
-        }
-    })
-}
-
 // Bad path components to be rejected by the path validity handler.
 const (
     dotdotComponent = ".."

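The deleted setHTTPStatsHandler gathered byte counts by wrapping the request body and the response writer in metering types. A stdlib-only sketch of that metering idea, using illustrative names rather than the internal/http/stats types:

package main

import (
    "io"
    "log"
    "net/http"
)

// countingBody wraps a request body and counts the bytes the handler reads.
type countingBody struct {
    io.ReadCloser
    n int64
}

func (b *countingBody) Read(p []byte) (int, error) {
    n, err := b.ReadCloser.Read(p)
    b.n += int64(n)
    return n, err
}

// countingWriter wraps a ResponseWriter and counts the bytes written out.
type countingWriter struct {
    http.ResponseWriter
    n int64
}

func (w *countingWriter) Write(p []byte) (int, error) {
    n, err := w.ResponseWriter.Write(p)
    w.n += int64(n)
    return n, err
}

// meter is a middleware in the spirit of the removed handler: wrap the body
// and the writer, run the next handler, then record the observed byte counts.
func meter(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        in := &countingBody{ReadCloser: r.Body}
        out := &countingWriter{ResponseWriter: w}
        r.Body = in
        next.ServeHTTP(out, r)
        log.Printf("path=%s in=%dB out=%dB", r.URL.Path, in.n, out.n)
    })
}

func main() {
    h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok")) })
    log.Fatal(http.ListenAndServe(":8080", meter(h)))
}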
View File

@@ -34,6 +34,7 @@ import (
 	"github.com/minio/minio/internal/handlers"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
+	"github.com/minio/minio/internal/mcontext"
 	xnet "github.com/minio/pkg/net"
 )
@@ -357,8 +358,42 @@ func collectAPIStats(api string, f http.HandlerFunc) http.HandlerFunc {
         f.ServeHTTP(w, r)

-        if sw, ok := w.(*xhttp.ResponseRecorder); ok {
-            globalHTTPStats.updateStats(api, r, sw)
+        tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt)
+        if !ok {
+            return
+        }
+
+        if tc != nil {
+            if strings.HasPrefix(r.URL.Path, storageRESTPrefix) ||
+                strings.HasPrefix(r.URL.Path, peerRESTPrefix) ||
+                strings.HasPrefix(r.URL.Path, peerS3Prefix) ||
+                strings.HasPrefix(r.URL.Path, lockRESTPrefix) {
+                globalConnStats.incInputBytes(int64(tc.RequestRecorder.Size()))
+                globalConnStats.incOutputBytes(int64(tc.ResponseRecorder.Size()))
+                return
+            }
+
+            if strings.HasPrefix(r.URL.Path, minioReservedBucketPath) {
+                globalConnStats.incAdminInputBytes(int64(tc.RequestRecorder.Size()))
+                globalConnStats.incAdminOutputBytes(int64(tc.ResponseRecorder.Size()))
+                return
+            }
+
+            globalHTTPStats.updateStats(api, tc.ResponseRecorder)
+            globalConnStats.incS3InputBytes(int64(tc.RequestRecorder.Size()))
+            globalConnStats.incS3OutputBytes(int64(tc.ResponseRecorder.Size()))
+
+            resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
+            if err != nil {
+                logger.LogIf(r.Context(), fmt.Errorf("Unable to get the actual resource in the incoming request: %v", err))
+                return
+            }
+
+            bucket, _ := path2BucketObject(resource)
+            if bucket != "" && bucket != minioReservedBucket {
+                globalBucketConnStats.incS3InputBytes(bucket, int64(tc.RequestRecorder.Size()))
+                globalBucketConnStats.incS3OutputBytes(bucket, int64(tc.ResponseRecorder.Size()))
+            }
         }
     }
 }

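With the change above, request and response byte counts are read back from recorders that travel with the request context (attached once by the tracing middleware) instead of from a separate metering handler. A minimal, self-contained sketch of that context-value pattern, with a hypothetical traceCtx type standing in for mcontext.TraceCtxt:

package main

import (
    "context"
    "fmt"
    "net/http"
    "net/http/httptest"
)

// traceCtx is a hypothetical stand-in for the per-request trace context.
type traceCtx struct {
    ReqBytes  int
    RespBytes int
}

type ctxKey struct{}

// attach stores the trace context on the request; in this design that happens
// once, in the tracing middleware, before the rest of the handler chain runs.
func attach(r *http.Request, tc *traceCtx) *http.Request {
    return r.WithContext(context.WithValue(r.Context(), ctxKey{}, tc))
}

// collectStats mirrors the shape of the code above: run the handler, then read
// the recorders back out of the context with a guarded type assertion.
func collectStats(f http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        f(w, r)
        tc, ok := r.Context().Value(ctxKey{}).(*traceCtx)
        if !ok || tc == nil {
            return // request never went through the tracer
        }
        fmt.Printf("api bytes: in=%d out=%d\n", tc.ReqBytes, tc.RespBytes)
    }
}

func main() {
    h := collectStats(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok")) })
    r := attach(httptest.NewRequest(http.MethodGet, "/bucket/object", nil), &traceCtx{RespBytes: 2})
    h(httptest.NewRecorder(), r)
}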
View File

@@ -19,7 +19,6 @@ package cmd
 import (
     "net/http"
-    "strings"
     "sync"
     "sync/atomic"
@@ -299,12 +298,7 @@ func (st *HTTPStats) toServerHTTPStats() ServerHTTPStats {
 }

 // Update statistics from http request and response data
-func (st *HTTPStats) updateStats(api string, r *http.Request, w *xhttp.ResponseRecorder) {
-    // Ignore non S3 requests
-    if strings.HasSuffix(r.URL.Path, minioReservedBucketPathWithSlash) {
-        return
-    }
-
+func (st *HTTPStats) updateStats(api string, w *xhttp.ResponseRecorder) {
     st.totalS3Requests.Inc(api)

     // Increment the prometheus http request response histogram with appropriate label

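In updateStats, totalS3Requests.Inc(api) bumps a per-API request counter; the signature change only drops the request parameter that the removed reserved-bucket check needed, since that filtering now happens in the caller. As a rough analogue of such a concurrency-safe per-API counter (an illustrative sketch, not MinIO's HTTPAPIStats implementation):

package main

import (
    "fmt"
    "sync"
)

// apiStats counts requests per API name and is safe for concurrent use.
type apiStats struct {
    mu     sync.RWMutex
    counts map[string]int
}

func newAPIStats() *apiStats { return &apiStats{counts: make(map[string]int)} }

// Inc bumps the counter for one API, e.g. "GetObject".
func (s *apiStats) Inc(api string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.counts[api]++
}

// Load returns a copy of the counters for reporting.
func (s *apiStats) Load() map[string]int {
    s.mu.RLock()
    defer s.mu.RUnlock()
    out := make(map[string]int, len(s.counts))
    for k, v := range s.counts {
        out[k] = v
    }
    return out
}

func main() {
    st := newAPIStats()
    st.Inc("GetObject")
    st.Inc("GetObject")
    st.Inc("PutObject")
    fmt.Println(st.Load()) // map[GetObject:2 PutObject:1]
}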
View File

@@ -71,40 +71,33 @@ func httpTracer(h http.Handler) http.Handler {
         // http stats requests and audit if enabled.
         respRecorder := xhttp.NewResponseRecorder(w)

-        if globalTrace.NumSubscribers(madmin.TraceS3|madmin.TraceInternal) == 0 {
-            h.ServeHTTP(respRecorder, r)
-            return
-        }
+        // Setup a http request body recorder
+        reqRecorder := &xhttp.RequestRecorder{Reader: r.Body}
+        r.Body = reqRecorder

         // Create tracing data structure and associate it to the request context
         tc := mcontext.TraceCtxt{
-            AmzReqID: r.Header.Get(xhttp.AmzRequestID),
+            AmzReqID:         r.Header.Get(xhttp.AmzRequestID),
+            RequestRecorder:  reqRecorder,
+            ResponseRecorder: respRecorder,
         }
-        ctx := context.WithValue(r.Context(), mcontext.ContextTraceKey, &tc)
-        r = r.WithContext(ctx)
-
-        // Setup a http request body recorder
-        reqRecorder := &xhttp.RequestRecorder{Reader: r.Body}
-        tc.RequestRecorder = reqRecorder
-        tc.ResponseRecorder = respRecorder
-
-        // Execute call.
-        r.Body = reqRecorder
+
+        r = r.WithContext(context.WithValue(r.Context(), mcontext.ContextTraceKey, &tc))

         reqStartTime := time.Now().UTC()
         h.ServeHTTP(respRecorder, r)
         reqEndTime := time.Now().UTC()

+        if globalTrace.NumSubscribers(madmin.TraceS3|madmin.TraceInternal) == 0 {
+            // no subscribers nothing to trace.
+            return
+        }
+
         tt := madmin.TraceInternal
         if strings.HasPrefix(tc.FuncName, "s3.") {
             tt = madmin.TraceS3
         }

-        // No need to continue if no subscribers for actual type...
-        if globalTrace.NumSubscribers(tt) == 0 {
-            return
-        }
-
         // Calculate input body size with headers
         reqHeaders := r.Header.Clone()
         reqHeaders.Set("Host", r.Host)
@@ -113,7 +106,7 @@ func httpTracer(h http.Handler) http.Handler {
         } else {
             reqHeaders.Set("Transfer-Encoding", strings.Join(r.TransferEncoding, ","))
         }
-        inputBytes := reqRecorder.BodySize()
+        inputBytes := reqRecorder.Size()
         for k, v := range reqHeaders {
             inputBytes += len(k) + len(v)
         }

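The traced request size is an estimate: the recorded body size plus header sizes layered on top; the hunk above only switches which recorder method supplies the body size. A rough, self-contained illustration of that kind of estimate (the helper below is hypothetical and simply sums header key and value lengths on top of the body bytes, not the exact expression used above):

package main

import (
    "fmt"
    "net/http"
)

// sizeOf approximates a request's input size: the counted body bytes plus the
// lengths of all header keys and their value strings.
func sizeOf(bodyBytes int, h http.Header) int {
    total := bodyBytes
    for k, vals := range h {
        total += len(k)
        for _, v := range vals {
            total += len(v)
        }
    }
    return total
}

func main() {
    h := http.Header{}
    h.Set("Host", "localhost:9000")
    h.Set("Content-Type", "application/octet-stream")
    // 1024 body bytes plus the key/value lengths of the two headers above.
    fmt.Println(sizeOf(1024, h))
}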
View File

@@ -58,8 +58,6 @@ var globalHandlers = []mux.MiddlewareFunc{
     setCrossDomainPolicy,
     // Limits all body and header sizes to a maximum fixed limit
     setRequestLimitHandler,
-    // Network statistics
-    setHTTPStatsHandler,
     // Validate all the incoming requests.
     setRequestValidityHandler,
     // set x-amz-request-id header.
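globalHandlers is the ordered list of middleware wrappers applied to the router; with the metering handler removed from it, byte accounting rides on the tracing middleware elsewhere in the chain. A generic sketch of how such a wrapper chain composes, independent of the mux package used in this repository:

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

// middleware mirrors the shape of the entries in globalHandlers:
// each one wraps a handler and returns a new handler.
type middleware func(http.Handler) http.Handler

// chain applies the middlewares so that the first entry in the slice is the
// outermost wrapper, i.e. it sees the request first.
func chain(h http.Handler, mws ...middleware) http.Handler {
    for i := len(mws) - 1; i >= 0; i-- {
        h = mws[i](h)
    }
    return h
}

// tag builds a middleware that just announces itself, to show ordering.
func tag(name string) middleware {
    return func(next http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            fmt.Println("enter", name)
            next.ServeHTTP(w, r)
        })
    }
}

func main() {
    h := chain(
        http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok")) }),
        tag("limit"), tag("validate"), tag("trace"),
    )
    h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/", nil))
    // Prints: enter limit, enter validate, enter trace (in that order).
}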