2022-03-22 02:25:24 -04:00
|
|
|
// Copyright (c) 2015-2022 MinIO, Inc.
|
2021-04-18 15:41:13 -04:00
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2016-12-16 01:26:15 -05:00
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
2022-03-25 05:02:17 -04:00
|
|
|
"bytes"
|
2018-04-05 18:04:40 -04:00
|
|
|
"context"
|
2021-07-09 14:29:16 -04:00
|
|
|
crand "crypto/rand"
|
2022-11-02 16:36:38 -04:00
|
|
|
"crypto/rsa"
|
2019-09-04 16:19:44 -04:00
|
|
|
"crypto/subtle"
|
2022-11-02 16:36:38 -04:00
|
|
|
"crypto/x509"
|
|
|
|
"encoding/base64"
|
2016-12-16 01:26:15 -05:00
|
|
|
"encoding/json"
|
2022-11-02 16:36:38 -04:00
|
|
|
"encoding/pem"
|
2022-03-04 23:01:26 -05:00
|
|
|
"errors"
|
2019-08-27 14:37:47 -04:00
|
|
|
"fmt"
|
2021-07-14 03:23:22 -04:00
|
|
|
"hash/crc32"
|
2018-01-11 02:06:36 -05:00
|
|
|
"io"
|
2022-07-05 17:45:49 -04:00
|
|
|
"math"
|
2016-12-16 01:26:15 -05:00
|
|
|
"net/http"
|
2019-08-28 18:04:43 -04:00
|
|
|
"net/url"
|
2018-09-18 19:46:35 -04:00
|
|
|
"os"
|
2019-08-28 18:04:43 -04:00
|
|
|
"path"
|
2021-07-14 03:23:22 -04:00
|
|
|
"regexp"
|
2019-08-28 18:04:43 -04:00
|
|
|
"runtime"
|
2019-01-24 10:22:14 -05:00
|
|
|
"sort"
|
2018-09-06 11:03:18 -04:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
2017-01-04 02:39:22 -05:00
|
|
|
"time"
|
2017-10-31 14:54:32 -04:00
|
|
|
|
2021-10-01 14:50:00 -04:00
|
|
|
"github.com/dustin/go-humanize"
|
2021-07-09 14:29:16 -04:00
|
|
|
"github.com/klauspost/compress/zip"
|
2022-12-06 16:46:50 -05:00
|
|
|
"github.com/minio/madmin-go/v2"
|
|
|
|
"github.com/minio/madmin-go/v2/estream"
|
2023-01-19 21:05:44 -05:00
|
|
|
"github.com/minio/minio-go/v7/pkg/set"
|
2021-06-01 17:59:40 -04:00
|
|
|
"github.com/minio/minio/internal/dsync"
|
|
|
|
"github.com/minio/minio/internal/handlers"
|
|
|
|
xhttp "github.com/minio/minio/internal/http"
|
|
|
|
"github.com/minio/minio/internal/kms"
|
|
|
|
"github.com/minio/minio/internal/logger"
|
2023-01-23 06:12:47 -05:00
|
|
|
"github.com/minio/mux"
|
2021-05-30 00:16:42 -04:00
|
|
|
iampolicy "github.com/minio/pkg/iam/policy"
|
2023-02-22 00:21:17 -05:00
|
|
|
"github.com/minio/pkg/logger/message/log"
|
2021-06-14 17:54:37 -04:00
|
|
|
xnet "github.com/minio/pkg/net"
|
2021-07-09 14:29:16 -04:00
|
|
|
"github.com/secure-io/sio-go"
|
2016-12-16 01:26:15 -05:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// Maximum allowed size (in bytes) for an encrypted config JSON payload.
	maxEConfigJSONSize = 262272

	// Well-known in-cluster Kubernetes API endpoint used to detect/describe
	// the Kubernetes version the server is running under.
	kubernetesVersionEndpoint = "https://kubernetes.default.svc/version"
)
|
|
|
|
|
2017-03-16 03:15:06 -04:00
|
|
|
// Only valid query params for mgmt admin APIs.
const (
	mgmtBucket      = "bucket"      // target bucket name
	mgmtPrefix      = "prefix"      // object name prefix filter
	mgmtClientToken = "clientToken" // token to resume a previously started operation
	mgmtForceStart  = "forceStart"  // force a new operation even if one is in progress
	mgmtForceStop   = "forceStop"   // force-stop an in-progress operation
)
|
|
|
|
|
2020-04-07 22:30:59 -04:00
|
|
|
// ServerUpdateHandler - POST /minio/admin/v3/update?updateURL={updateURL}
// ----------
// updates all minio servers and restarts them gracefully.
//
// Flow: resolve release URL -> download+parse release metadata -> if a newer
// release exists, download the binary once, have every peer verify it, verify
// locally, have every peer commit it, commit locally, reply to the client,
// then signal a cluster-wide restart. Any failure before the restart signal
// aborts the update with a JSON error response.
func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ServerUpdate")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerUpdateAdminAction)
	if objectAPI == nil {
		return
	}

	if globalInplaceUpdateDisabled {
		// if MINIO_UPDATE=off - inplace update is disabled, mostly in containers.
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	}

	vars := mux.Vars(r)
	updateURL := vars["updateURL"]
	mode := getMinioMode()
	if updateURL == "" {
		// No explicit URL given: fall back to the official release endpoint
		// (Windows has a dedicated one).
		updateURL = minioReleaseInfoURL
		if runtime.GOOS == globalWindowsOSName {
			updateURL = minioReleaseWindowsInfoURL
		}
	}

	u, err := url.Parse(updateURL)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	content, err := downloadReleaseURL(u, updateTimeout, mode)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Extract checksum, release time and release file name from the metadata.
	sha256Sum, lrTime, releaseInfo, err := parseReleaseData(content)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Point the URL at the actual release binary next to the metadata file.
	u.Path = path.Dir(u.Path) + SlashSeparator + releaseInfo
	crTime, err := GetCurrentReleaseTime()
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if lrTime.Sub(crTime) <= 0 {
		// Already up to date: report current version as both fields.
		updateStatus := madmin.ServerUpdateStatus{
			CurrentVersion: Version,
			UpdatedVersion: Version,
		}

		// Marshal API response
		jsonBytes, err := json.Marshal(updateStatus)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		writeSuccessResponseJSON(w, jsonBytes)
		return
	}

	// Download Binary Once
	reader, err := downloadBinary(u, mode)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Push binary to other servers
	for _, nerr := range globalNotificationSys.VerifyBinary(ctx, u, sha256Sum, releaseInfo, reader) {
		if nerr.Err != nil {
			err := AdminError{
				Code:       AdminUpdateApplyFailure,
				Message:    nerr.Err.Error(),
				StatusCode: http.StatusInternalServerError,
			}
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}

	// Verify the downloaded binary on this node as well.
	err = verifyBinary(u, sha256Sum, releaseInfo, mode, reader)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Ask every peer to commit (swap in) the verified binary.
	for _, nerr := range globalNotificationSys.CommitBinary(ctx) {
		if nerr.Err != nil {
			err := AdminError{
				Code:       AdminUpdateApplyFailure,
				Message:    nerr.Err.Error(),
				StatusCode: http.StatusInternalServerError,
			}
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}

	// Commit the binary locally last.
	err = commitBinary()
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	updateStatus := madmin.ServerUpdateStatus{
		CurrentVersion: Version,
		UpdatedVersion: lrTime.Format(minioReleaseTagTimeLayout),
	}

	// Marshal API response
	jsonBytes, err := json.Marshal(updateStatus)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Reply to the client before restarting; the restart below tears the
	// connection down.
	writeSuccessResponseJSON(w, jsonBytes)

	// Notify all other MinIO peers signal service.
	for _, nerr := range globalNotificationSys.SignalService(serviceRestart) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	// Finally restart this node.
	globalServiceSignalCh <- serviceRestart
}
|
2017-01-17 17:25:59 -05:00
|
|
|
|
2020-05-11 13:34:08 -04:00
|
|
|
// ServiceHandler - POST /minio/admin/v3/service?action={action}
// ----------
// Supports following actions:
// - restart (restarts all the MinIO instances in a setup)
// - stop (stops all the MinIO instances in a setup)
// - freeze (freezes all incoming S3 API calls)
// - unfreeze (unfreezes previously frozen S3 API calls)
func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "Service")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	action := vars["action"]

	// Map the textual action onto an internal service signal; unknown
	// actions are rejected as malformed requests.
	var serviceSig serviceSignal
	switch madmin.ServiceAction(action) {
	case madmin.ServiceActionRestart:
		serviceSig = serviceRestart
	case madmin.ServiceActionStop:
		serviceSig = serviceStop
	case madmin.ServiceActionFreeze:
		serviceSig = serviceFreeze
	case madmin.ServiceActionUnfreeze:
		serviceSig = serviceUnFreeze
	default:
		logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.Application)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
		return
	}

	// Each signal requires its own admin policy action; validateAdminReq
	// writes the error response itself when authorization fails.
	var objectAPI ObjectLayer
	switch serviceSig {
	case serviceRestart:
		objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceRestartAdminAction)
	case serviceStop:
		objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceStopAdminAction)
	case serviceFreeze, serviceUnFreeze:
		objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceFreezeAdminAction)
	}
	if objectAPI == nil {
		return
	}

	// Notify all other MinIO peers signal service.
	for _, nerr := range globalNotificationSys.SignalService(serviceSig) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	// Reply to the client before restarting, stopping MinIO server.
	writeSuccessResponseHeadersOnly(w)

	// Apply the signal locally; freeze/unfreeze act in-process, while
	// restart/stop are delivered through the global signal channel.
	switch serviceSig {
	case serviceFreeze:
		freezeServices()
	case serviceUnFreeze:
		unfreezeServices()
	case serviceRestart, serviceStop:
		globalServiceSignalCh <- serviceSig
	}
}
|
|
|
|
|
2017-02-15 13:45:45 -05:00
|
|
|
// ServerProperties holds some server information such as, version, region
// uptime, etc..
type ServerProperties struct {
	Uptime       int64    `json:"uptime"`       // time the server has been up
	Version      string   `json:"version"`      // MinIO release version string
	CommitID     string   `json:"commitID"`     // git commit the binary was built from
	DeploymentID string   `json:"deploymentID"` // unique ID of this deployment
	Region       string   `json:"region"`       // configured server region
	SQSARN       []string `json:"sqsARN"`       // configured notification target ARNs
}
|
|
|
|
|
|
|
|
// ServerConnStats holds transferred bytes from/to the server
//
// NOTE(review): the json tag names pair input with "transferred" and output
// with "received"; this looks inverted but is the established wire format —
// do not change without an API version bump.
type ServerConnStats struct {
	TotalInputBytes  uint64 `json:"transferred"`
	TotalOutputBytes uint64 `json:"received"`
	Throughput       uint64 `json:"throughput,omitempty"`
	S3InputBytes     uint64 `json:"transferredS3"`
	S3OutputBytes    uint64 `json:"receivedS3"`
	AdminInputBytes  uint64 `json:"transferredAdmin"`
	AdminOutputBytes uint64 `json:"receivedAdmin"`
}
|
|
|
|
|
2019-10-23 00:01:14 -04:00
|
|
|
// ServerHTTPAPIStats holds total number of HTTP operations from/to the server,
// including the average duration the call was spent.
type ServerHTTPAPIStats struct {
	// APIStats maps API operation name to its call count.
	APIStats map[string]int `json:"apiStats"`
}
|
|
|
|
|
|
|
|
// ServerHTTPStats holds all type of http operations performed to/from the server
// including their average execution time.
type ServerHTTPStats struct {
	S3RequestsInQueue      int32              `json:"s3RequestsInQueue"`      // requests currently waiting to be served
	S3RequestsIncoming     uint64             `json:"s3RequestsIncoming"`     // requests received so far
	CurrentS3Requests      ServerHTTPAPIStats `json:"currentS3Requests"`      // requests in flight, per API
	TotalS3Requests        ServerHTTPAPIStats `json:"totalS3Requests"`        // served requests, per API
	TotalS3Errors          ServerHTTPAPIStats `json:"totalS3Errors"`          // errored requests, per API
	TotalS35xxErrors       ServerHTTPAPIStats `json:"totalS35xxErrors"`       // 5xx responses, per API
	TotalS34xxErrors       ServerHTTPAPIStats `json:"totalS34xxErrors"`       // 4xx responses, per API
	TotalS3Canceled        ServerHTTPAPIStats `json:"totalS3Canceled"`        // client-canceled requests, per API
	TotalS3RejectedAuth    uint64             `json:"totalS3RejectedAuth"`    // rejected: bad authentication
	TotalS3RejectedTime    uint64             `json:"totalS3RejectedTime"`    // rejected: clock skew / time issues
	TotalS3RejectedHeader  uint64             `json:"totalS3RejectedHeader"`  // rejected: invalid headers
	TotalS3RejectedInvalid uint64             `json:"totalS3RejectedInvalid"` // rejected: otherwise invalid requests
}
|
|
|
|
|
2020-04-07 22:30:59 -04:00
|
|
|
// StorageInfoHandler - GET /minio/admin/v3/storageinfo
// ----------
// Get server information
func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StorageInfo")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.StorageInfoAdminAction)
	if objectAPI == nil {
		return
	}

	storageInfo := objectAPI.StorageInfo(ctx)

	// Collect any disk healing.
	// The aggregated heal-state error is deliberately ignored; healing info
	// is best-effort decoration on top of the storage report.
	healing, _ := getAggregatedBackgroundHealState(ctx, nil)
	healDisks := make(map[string]struct{}, len(healing.HealDisks))
	for _, disk := range healing.HealDisks {
		healDisks[disk] = struct{}{}
	}

	// find all disks which belong to each respective endpoints
	for i, disk := range storageInfo.Disks {
		if _, ok := healDisks[disk.Endpoint]; ok {
			storageInfo.Disks[i].Healing = true
		}
	}

	// Marshal API response
	jsonBytes, err := json.Marshal(storageInfo)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Reply with storage information (across nodes in a
	// distributed setup) as json.
	writeSuccessResponseJSON(w, jsonBytes)
}
|
|
|
|
|
2022-07-05 17:45:49 -04:00
|
|
|
// MetricsHandler - GET /minio/admin/v3/metrics
// ----------
// Get realtime server metrics
//
// Streams up to `n` JSON-encoded madmin.RealtimeMetrics snapshots, one every
// `interval` (min/default 1s). Optional filters: `types` (metric bitmask),
// `hosts`, `disks`, `by-host`, `by-disk`, `by-jobID`, `by-depID`.
func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "Metrics")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction)
	if objectAPI == nil {
		return
	}
	const defaultMetricsInterval = time.Second

	// Clamp the poll interval: anything missing, malformed or below one
	// second falls back to the default.
	interval, err := time.ParseDuration(r.Form.Get("interval"))
	if err != nil || interval < time.Second {
		interval = defaultMetricsInterval
	}

	// Number of snapshots to emit; unset/invalid means "stream forever"
	// (practically bounded by MaxInt32).
	n, err := strconv.Atoi(r.Form.Get("n"))
	if err != nil || n <= 0 {
		n = math.MaxInt32
	}

	var types madmin.MetricType
	if t, _ := strconv.ParseUint(r.Form.Get("types"), 10, 64); t != 0 {
		types = madmin.MetricType(t)
	} else {
		types = madmin.MetricsAll
	}

	// Build the optional disk filter set from the comma-separated list.
	disks := strings.Split(r.Form.Get("disks"), ",")
	byDisk := strings.EqualFold(r.Form.Get("by-disk"), "true")
	var diskMap map[string]struct{}
	if len(disks) > 0 && disks[0] != "" {
		diskMap = make(map[string]struct{}, len(disks))
		for _, k := range disks {
			if k != "" {
				diskMap[k] = struct{}{}
			}
		}
	}
	jobID := r.Form.Get("by-jobID")

	// Build the optional host filter set from the comma-separated list.
	hosts := strings.Split(r.Form.Get("hosts"), ",")
	byHost := strings.EqualFold(r.Form.Get("by-host"), "true")
	var hostMap map[string]struct{}
	if len(hosts) > 0 && hosts[0] != "" {
		hostMap = make(map[string]struct{}, len(hosts))
		for _, k := range hosts {
			if k != "" {
				hostMap[k] = struct{}{}
			}
		}
	}
	dID := r.Form.Get("by-depID")
	done := ctx.Done()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	w.Header().Set(xhttp.ContentType, string(mimeJSON))

	enc := json.NewEncoder(w)
	for n > 0 {
		var m madmin.RealtimeMetrics
		mLocal := collectLocalMetrics(types, collectMetricsOpts{
			hosts: hostMap,
			disks: diskMap,
			jobID: jobID,
			depID: dID,
		})
		m.Merge(&mLocal)
		// Allow half the interval for collecting remote...
		cctx, cancel := context.WithTimeout(ctx, interval/2)
		mRemote := collectRemoteMetrics(cctx, types, collectMetricsOpts{
			hosts: hostMap,
			disks: diskMap,
			jobID: jobID,
			depID: dID,
		})
		cancel()
		m.Merge(&mRemote)
		// Strip per-host/per-disk breakdowns unless explicitly requested.
		if !byHost {
			m.ByHost = nil
		}
		if !byDisk {
			m.ByDisk = nil
		}

		m.Final = n <= 1

		// Marshal API response; an encode failure (client gone) ends the stream.
		if err := enc.Encode(&m); err != nil {
			n = 0
		}

		n--
		if n <= 0 {
			break
		}

		// Flush before waiting for next...
		w.(http.Flusher).Flush()

		select {
		case <-ticker.C:
		case <-done:
			return
		}
	}
}
|
|
|
|
|
2020-04-07 22:30:59 -04:00
|
|
|
// DataUsageInfoHandler - GET /minio/admin/v3/datausage
|
2019-12-12 09:02:37 -05:00
|
|
|
// ----------
|
|
|
|
// Get server/cluster data usage info
|
|
|
|
func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
ctx := newContext(r, w, "DataUsageInfo")
|
2020-05-11 13:34:08 -04:00
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2020-05-11 13:34:08 -04:00
|
|
|
|
2020-01-26 21:47:52 -05:00
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DataUsageInfoAdminAction)
|
2019-12-12 09:02:37 -05:00
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
|
|
|
|
if err != nil {
|
2020-01-21 17:07:49 -05:00
|
|
|
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
2019-12-12 09:02:37 -05:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
dataUsageInfoJSON, err := json.Marshal(dataUsageInfo)
|
|
|
|
if err != nil {
|
2020-01-21 17:07:49 -05:00
|
|
|
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
2019-12-12 09:02:37 -05:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
writeSuccessResponseJSON(w, dataUsageInfoJSON)
|
|
|
|
}
|
|
|
|
|
2022-11-15 10:57:52 -05:00
|
|
|
func lriToLockEntry(l lockRequesterInfo, now time.Time, resource, server string) *madmin.LockEntry {
|
2019-11-13 15:17:45 -05:00
|
|
|
entry := &madmin.LockEntry{
|
|
|
|
Timestamp: l.Timestamp,
|
2022-11-15 10:57:52 -05:00
|
|
|
Elapsed: now.Sub(l.Timestamp),
|
2019-11-13 15:17:45 -05:00
|
|
|
Resource: resource,
|
|
|
|
ServerList: []string{server},
|
|
|
|
Source: l.Source,
|
2020-09-25 22:21:52 -04:00
|
|
|
Owner: l.Owner,
|
2019-11-13 15:17:45 -05:00
|
|
|
ID: l.UID,
|
2020-10-24 16:23:16 -04:00
|
|
|
Quorum: l.Quorum,
|
2019-11-13 15:17:45 -05:00
|
|
|
}
|
2019-01-24 10:22:14 -05:00
|
|
|
if l.Writer {
|
2020-06-20 09:33:01 -04:00
|
|
|
entry.Type = "WRITE"
|
2019-01-24 10:22:14 -05:00
|
|
|
} else {
|
2020-06-20 09:33:01 -04:00
|
|
|
entry.Type = "READ"
|
2019-01-24 10:22:14 -05:00
|
|
|
}
|
|
|
|
return entry
|
|
|
|
}
|
|
|
|
|
2020-10-24 16:23:16 -04:00
|
|
|
func topLockEntries(peerLocks []*PeerLocks, stale bool) madmin.LockEntries {
|
2022-11-15 10:57:52 -05:00
|
|
|
now := time.Now().UTC()
|
2019-01-24 10:22:14 -05:00
|
|
|
entryMap := make(map[string]*madmin.LockEntry)
|
|
|
|
for _, peerLock := range peerLocks {
|
|
|
|
if peerLock == nil {
|
|
|
|
continue
|
|
|
|
}
|
2020-12-10 10:28:37 -05:00
|
|
|
for k, v := range peerLock.Locks {
|
|
|
|
for _, lockReqInfo := range v {
|
2021-08-27 16:07:55 -04:00
|
|
|
if val, ok := entryMap[lockReqInfo.Name]; ok {
|
2020-12-10 10:28:37 -05:00
|
|
|
val.ServerList = append(val.ServerList, peerLock.Addr)
|
|
|
|
} else {
|
2022-11-15 10:57:52 -05:00
|
|
|
entryMap[lockReqInfo.Name] = lriToLockEntry(lockReqInfo, now, k, peerLock.Addr)
|
2019-01-24 10:22:14 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-09-25 22:21:52 -04:00
|
|
|
var lockEntries madmin.LockEntries
|
2019-01-24 10:22:14 -05:00
|
|
|
for _, v := range entryMap {
|
2020-09-25 22:21:52 -04:00
|
|
|
if stale {
|
|
|
|
lockEntries = append(lockEntries, *v)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if len(v.ServerList) >= v.Quorum {
|
|
|
|
lockEntries = append(lockEntries, *v)
|
|
|
|
}
|
2019-01-24 10:22:14 -05:00
|
|
|
}
|
|
|
|
sort.Sort(lockEntries)
|
|
|
|
return lockEntries
|
|
|
|
}
|
|
|
|
|
|
|
|
// PeerLocks holds server information result of one node
type PeerLocks struct {
	// Addr is the address of the peer that reported these locks.
	Addr string
	// Locks maps a locked resource name to its current requester records.
	Locks map[string][]lockRequesterInfo
}
|
|
|
|
|
2021-01-25 13:01:27 -05:00
|
|
|
// ForceUnlockHandler force unlocks requested resource
//
// Accepts a comma-separated "paths" variable and issues ForceUnlock for the
// whole batch against every erasure locker of the first server pool.
func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ForceUnlock")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ForceUnlockAdminAction)
	if objectAPI == nil {
		return
	}

	// Force-unlock only makes sense for erasure-coded server pools.
	z, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)

	var args dsync.LockArgs
	var lockers []dsync.NetLocker
	// Collect the non-empty resource paths into a single unlock request.
	for _, path := range strings.Split(vars["paths"], ",") {
		if path == "" {
			continue
		}
		args.Resources = append(args.Resources, path)
	}

	// NOTE(review): only serverPools[0]'s lockers are used here — presumably
	// lockers are shared across pools; confirm before relying on this for
	// multi-pool deployments.
	for _, lks := range z.serverPools[0].erasureLockers {
		lockers = append(lockers, lks...)
	}

	// Best-effort: errors from individual lockers are not reported.
	for _, locker := range lockers {
		locker.ForceUnlock(ctx, args)
	}
}
|
|
|
|
|
2019-01-24 10:22:14 -05:00
|
|
|
// TopLocksHandler Get list of locks in use
//
// Query parameters: "count" limits the number of returned entries (default
// 10); "stale=true" also includes locks that have not reached quorum.
func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "TopLocks")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.TopLocksAdminAction)
	if objectAPI == nil {
		return
	}

	count := 10 // by default list only top 10 entries
	if countStr := r.Form.Get("count"); countStr != "" {
		var err error
		count, err = strconv.Atoi(countStr)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}
	stale := r.Form.Get("stale") == "true" // list also stale locks

	// Gather lock reports from every peer and merge them.
	peerLocks := globalNotificationSys.GetLocks(ctx, r)

	topLocks := topLockEntries(peerLocks, stale)

	// Marshal API response upto requested count.
	if len(topLocks) > count && count > 0 {
		topLocks = topLocks[:count]
	}

	jsonBytes, err := json.Marshal(topLocks)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Reply with storage information (across nodes in a
	// distributed setup) as json.
	writeSuccessResponseJSON(w, jsonBytes)
}
|
|
|
|
|
2018-09-18 19:46:35 -04:00
|
|
|
// StartProfilingResult contains the status of the starting
// profiling action in a given server - deprecated API
type StartProfilingResult struct {
	NodeName string `json:"nodeName"` // node the profiler was started on
	Success  bool   `json:"success"`  // whether the profiler started successfully
	Error    string `json:"error"`    // error message when Success is false
}
|
|
|
|
|
2020-04-07 22:30:59 -04:00
|
|
|
// StartProfilingHandler - POST /minio/admin/v3/profiling/start?profilerType={profilerType}
// ----------
// Enable server profiling
//
// Starts the requested (comma-separated) profiler types on every peer and on
// this node, restarting any of those profilers that are already running.
// Responds with a per-node []StartProfilingResult.
func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StartProfiling")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	// Validate request signature.
	_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ProfilingAdminAction, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return
	}

	if globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	vars := mux.Vars(r)
	profiles := strings.Split(vars["profilerType"], ",")
	thisAddr, err := xnet.ParseHost(globalLocalNodeName)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// globalProfiler is shared mutable state; hold the lock for the rest of
	// the handler while we stop/replace profilers.
	globalProfilerMu.Lock()
	defer globalProfilerMu.Unlock()

	if globalProfiler == nil {
		globalProfiler = make(map[string]minioProfiler, 10)
	}

	// Stop profiler of all types if already running
	for k, v := range globalProfiler {
		for _, p := range profiles {
			if p == k {
				v.Stop()
				delete(globalProfiler, k)
			}
		}
	}

	// Start profiling on remote servers.
	var hostErrs []NotificationPeerErr
	for _, profiler := range profiles {
		hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(profiler)...)

		// Start profiling locally as well.
		prof, err := startProfiler(profiler)
		if err != nil {
			hostErrs = append(hostErrs, NotificationPeerErr{
				Host: *thisAddr,
				Err:  err,
			})
		} else {
			globalProfiler[profiler] = prof
			hostErrs = append(hostErrs, NotificationPeerErr{
				Host: *thisAddr,
			})
		}
	}

	// Translate the per-host errors into the client-facing result list.
	var startProfilingResult []StartProfilingResult

	for _, nerr := range hostErrs {
		result := StartProfilingResult{NodeName: nerr.Host.String()}
		if nerr.Err != nil {
			result.Error = nerr.Err.Error()
		} else {
			result.Success = true
		}
		startProfilingResult = append(startProfilingResult, result)
	}

	// Create JSON result and send it to the client
	startProfilingResultInBytes, err := json.Marshal(startProfilingResult)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, startProfilingResultInBytes)
}
|
|
|
|
|
2022-04-08 15:44:35 -04:00
|
|
|
// ProfileHandler - POST /minio/admin/v3/profile/?profilerType={profilerType}
|
|
|
|
// ----------
|
|
|
|
// Enable server profiling
|
|
|
|
func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
ctx := newContext(r, w, "Profile")
|
|
|
|
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
|
|
|
|
|
|
|
// Validate request signature.
|
|
|
|
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ProfilingAdminAction, "")
|
|
|
|
if adminAPIErr != ErrNone {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if globalNotificationSys == nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
profileStr := r.Form.Get("profilerType")
|
|
|
|
profiles := strings.Split(profileStr, ",")
|
|
|
|
duration := time.Minute
|
|
|
|
if dstr := r.Form.Get("duration"); dstr != "" {
|
|
|
|
var err error
|
|
|
|
duration, err = time.ParseDuration(dstr)
|
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// read request body
|
2022-09-19 14:05:16 -04:00
|
|
|
io.CopyN(io.Discard, r.Body, 1)
|
2022-04-08 15:44:35 -04:00
|
|
|
|
|
|
|
globalProfilerMu.Lock()
|
|
|
|
|
|
|
|
if globalProfiler == nil {
|
|
|
|
globalProfiler = make(map[string]minioProfiler, 10)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Stop profiler of all types if already running
|
|
|
|
for k, v := range globalProfiler {
|
|
|
|
v.Stop()
|
|
|
|
delete(globalProfiler, k)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start profiling on remote servers.
|
|
|
|
for _, profiler := range profiles {
|
|
|
|
globalNotificationSys.StartProfiling(profiler)
|
|
|
|
|
|
|
|
// Start profiling locally as well.
|
|
|
|
prof, err := startProfiler(profiler)
|
|
|
|
if err == nil {
|
|
|
|
globalProfiler[profiler] = prof
|
|
|
|
}
|
|
|
|
}
|
|
|
|
globalProfilerMu.Unlock()
|
|
|
|
|
|
|
|
timer := time.NewTimer(duration)
|
2022-05-18 01:42:59 -04:00
|
|
|
defer timer.Stop()
|
2022-04-08 15:44:35 -04:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
for k, v := range globalProfiler {
|
|
|
|
v.Stop()
|
|
|
|
delete(globalProfiler, k)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
case <-timer.C:
|
|
|
|
if !globalNotificationSys.DownloadProfilingData(ctx, w) {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminProfilerNotEnabled), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-18 19:46:35 -04:00
|
|
|
// dummyFileInfo represents a dummy representation of a profile data file
// present only in memory, it helps to generate the zip stream.
//
// It satisfies the os.FileInfo interface so an in-memory blob can be
// handed to zip-writing code that expects file metadata.
type dummyFileInfo struct {
	name    string      // base name of the entry inside the zip
	size    int64       // length of the in-memory payload in bytes
	mode    os.FileMode // permission/mode bits to record
	modTime time.Time   // modification time to record
	isDir   bool        // whether the entry is a directory
	sys     interface{} // underlying data source; unused by zip writers
}

// The methods below are trivial accessors required by os.FileInfo.
func (f dummyFileInfo) Name() string       { return f.name }
func (f dummyFileInfo) Size() int64        { return f.size }
func (f dummyFileInfo) Mode() os.FileMode  { return f.mode }
func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
func (f dummyFileInfo) IsDir() bool        { return f.isDir }
func (f dummyFileInfo) Sys() interface{}   { return f.sys }
|
|
|
|
|
2020-04-07 22:30:59 -04:00
|
|
|
// DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
|
2018-09-18 19:46:35 -04:00
|
|
|
// ----------
|
2022-04-08 15:44:35 -04:00
|
|
|
// Download profiling information of all nodes in a zip format - deprecated API
|
2018-09-18 19:46:35 -04:00
|
|
|
func (a adminAPIHandlers) DownloadProfilingHandler(w http.ResponseWriter, r *http.Request) {
|
2018-11-12 14:07:43 -05:00
|
|
|
ctx := newContext(r, w, "DownloadProfiling")
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2020-05-11 13:34:08 -04:00
|
|
|
|
2020-12-18 14:51:15 -05:00
|
|
|
// Validate request signature.
|
|
|
|
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ProfilingAdminAction, "")
|
|
|
|
if adminAPIErr != ErrNone {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if globalNotificationSys == nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
2018-09-18 19:46:35 -04:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-01-14 01:44:20 -05:00
|
|
|
if !globalNotificationSys.DownloadProfilingData(ctx, w) {
|
2019-02-13 19:07:21 -05:00
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminProfilerNotEnabled), r.URL)
|
2018-09-27 13:34:37 -04:00
|
|
|
return
|
|
|
|
}
|
2018-09-18 19:46:35 -04:00
|
|
|
}
|
|
|
|
|
2019-08-29 16:53:27 -04:00
|
|
|
// healInitParams carries the validated inputs of the heal admin API:
// which path to heal, the heal options decoded from the request body,
// and the start/stop/continue mode flags.
type healInitParams struct {
	bucket, objPrefix string // target bucket and object prefix ("" heals everything)
	hs                madmin.HealOpts
	clientToken       string // non-empty when continuing an existing heal sequence
	forceStart, forceStop bool // mutually exclusive; incompatible with clientToken
}
|
2017-01-17 13:02:58 -05:00
|
|
|
|
2019-08-29 16:53:27 -04:00
|
|
|
// extractHealInitParams - Validates params for heal init API.
|
|
|
|
func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reader) (hip healInitParams, err APIErrorCode) {
|
2020-09-24 11:40:21 -04:00
|
|
|
hip.bucket = vars[mgmtBucket]
|
|
|
|
hip.objPrefix = vars[mgmtPrefix]
|
2017-01-17 13:02:58 -05:00
|
|
|
|
2019-08-29 16:53:27 -04:00
|
|
|
if hip.bucket == "" {
|
|
|
|
if hip.objPrefix != "" {
|
2018-01-22 17:54:55 -05:00
|
|
|
// Bucket is required if object-prefix is given
|
|
|
|
err = ErrHealMissingBucket
|
|
|
|
return
|
2017-03-16 03:15:06 -04:00
|
|
|
}
|
2019-08-29 16:53:27 -04:00
|
|
|
} else if isReservedOrInvalidBucket(hip.bucket, false) {
|
2018-01-22 17:54:55 -05:00
|
|
|
err = ErrInvalidBucketName
|
2017-01-17 13:02:58 -05:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
// empty prefix is valid.
|
2019-08-29 16:53:27 -04:00
|
|
|
if !IsValidObjectPrefix(hip.objPrefix) {
|
2018-01-22 17:54:55 -05:00
|
|
|
err = ErrInvalidObjectName
|
2018-01-11 13:21:41 -05:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-09-24 11:40:21 -04:00
|
|
|
if len(qParms[mgmtClientToken]) > 0 {
|
|
|
|
hip.clientToken = qParms[mgmtClientToken][0]
|
2017-01-17 13:02:58 -05:00
|
|
|
}
|
2020-09-24 11:40:21 -04:00
|
|
|
if _, ok := qParms[mgmtForceStart]; ok {
|
2019-08-29 16:53:27 -04:00
|
|
|
hip.forceStart = true
|
2017-01-17 13:02:58 -05:00
|
|
|
}
|
2020-09-24 11:40:21 -04:00
|
|
|
if _, ok := qParms[mgmtForceStop]; ok {
|
2019-08-29 16:53:27 -04:00
|
|
|
hip.forceStop = true
|
2018-11-04 22:24:16 -05:00
|
|
|
}
|
2019-08-29 16:53:27 -04:00
|
|
|
|
|
|
|
// Invalid request conditions:
|
|
|
|
//
|
|
|
|
// Cannot have both forceStart and forceStop in the same
|
|
|
|
// request; If clientToken is provided, request can only be
|
|
|
|
// to continue receiving logs, so it cannot be start or
|
|
|
|
// stop;
|
|
|
|
if (hip.forceStart && hip.forceStop) ||
|
|
|
|
(hip.clientToken != "" && (hip.forceStart || hip.forceStop)) {
|
|
|
|
err = ErrInvalidRequest
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
// ignore body if clientToken is provided
|
2019-08-29 16:53:27 -04:00
|
|
|
if hip.clientToken == "" {
|
|
|
|
jerr := json.NewDecoder(r).Decode(&hip.hs)
|
2018-01-22 17:54:55 -05:00
|
|
|
if jerr != nil {
|
2020-04-09 12:30:02 -04:00
|
|
|
logger.LogIf(GlobalContext, jerr, logger.Application)
|
2018-01-22 17:54:55 -05:00
|
|
|
err = ErrRequestBodyParse
|
|
|
|
return
|
|
|
|
}
|
2017-04-14 13:28:35 -04:00
|
|
|
}
|
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
err = ErrNone
|
|
|
|
return
|
2017-03-31 20:55:15 -04:00
|
|
|
}
|
|
|
|
|
2020-04-07 22:30:59 -04:00
|
|
|
// HealHandler - POST /minio/admin/v3/heal/
// -----------
// Start heal processing and return heal status items.
//
// On a successful heal sequence start, a unique client token is
// returned. Subsequent requests to this endpoint providing the client
// token will receive heal status records from the running heal
// sequence.
//
// If no client token is provided, and a heal sequence is in progress
// an error is returned with information about the running heal
// sequence. However, if the force-start flag is provided, the server
// aborts the running heal sequence and starts a new one.
func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "Heal")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	// validateAdminReq has already written the error response when it
	// returns a nil ObjectLayer.
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
	if objectAPI == nil {
		return
	}

	hip, errCode := extractHealInitParams(mux.Vars(r), r.Form, r.Body)
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
		return
	}

	// Analyze the heal token and route the request accordingly
	token, success := proxyRequestByToken(ctx, w, r, hip.clientToken)
	if success {
		return
	}
	hip.clientToken = token
	// if request was not successful, try this server locally if token
	// is not found the call will fail anyways. if token is empty
	// try this server to generate a new token.

	// healResp is the unit passed from the heal goroutines back to the
	// response-writing loop below.
	type healResp struct {
		respBytes []byte // JSON payload on success
		apiErr    APIError
		errBody   string // optional custom error message overriding apiErr's
	}

	// Define a closure to start sending whitespace to client
	// after 10s unless a response item comes in
	keepConnLive := func(w http.ResponseWriter, r *http.Request, respCh chan healResp) {
		ticker := time.NewTicker(time.Second * 10)
		defer ticker.Stop()
		// started tracks whether headers/status have already been sent;
		// once true we can only append to the body.
		started := false
	forLoop:
		for {
			select {
			case <-r.Context().Done():
				// Client disconnected; abandon the response.
				return
			case <-ticker.C:
				if !started {
					// Start writing response to client
					started = true
					setCommonHeaders(w)
					setEventStreamHeaders(w)
					// Set 200 OK status
					w.WriteHeader(200)
				}
				// Send whitespace and keep connection open
				if _, err := w.Write([]byte(" ")); err != nil {
					return
				}
				w.(http.Flusher).Flush()
			case hr := <-respCh:
				switch hr.apiErr {
				case noError:
					if started {
						// Headers already sent - append the payload raw.
						if _, err := w.Write(hr.respBytes); err != nil {
							return
						}
						w.(http.Flusher).Flush()
					} else {
						writeSuccessResponseJSON(w, hr.respBytes)
					}
				default:
					var errorRespJSON []byte
					if hr.errBody == "" {
						errorRespJSON = encodeResponseJSON(getAPIErrorResponse(ctx, hr.apiErr,
							r.URL.Path, w.Header().Get(xhttp.AmzRequestID),
							globalDeploymentID))
					} else {
						errorRespJSON = encodeResponseJSON(APIErrorResponse{
							Code:      hr.apiErr.Code,
							Message:   hr.errBody,
							Resource:  r.URL.Path,
							RequestID: w.Header().Get(xhttp.AmzRequestID),
							HostID:    globalDeploymentID,
						})
					}
					if !started {
						setCommonHeaders(w)
						w.Header().Set(xhttp.ContentType, string(mimeJSON))
						w.WriteHeader(hr.apiErr.HTTPStatusCode)
					}
					if _, err := w.Write(errorRespJSON); err != nil {
						return
					}
					w.(http.Flusher).Flush()
				}
				// One response item ends the keep-alive loop.
				break forLoop
			}
		}
	}

	healPath := pathJoin(hip.bucket, hip.objPrefix)
	if hip.clientToken == "" && !hip.forceStart && !hip.forceStop {
		// No token and no mode flags: if a heal sequence is already
		// running on this path, hand its token back instead of
		// starting a new one.
		nh, exists := globalAllHealState.getHealSequence(healPath)
		if exists && !nh.hasEnded() && len(nh.currentStatus.Items) > 0 {
			clientToken := nh.clientToken
			if globalIsDistErasure {
				// Embed the proxy endpoint index so follow-up requests
				// can be routed to the node running the sequence.
				clientToken = fmt.Sprintf("%s@%d", nh.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
			}
			b, err := json.Marshal(madmin.HealStartSuccess{
				ClientToken:   clientToken,
				ClientAddress: nh.clientAddress,
				StartTime:     nh.startTime,
			})
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			// Client token not specified but a heal sequence exists on a path,
			// Send the token back to client.
			writeSuccessResponseJSON(w, b)
			return
		}
	}

	if hip.clientToken != "" && !hip.forceStart && !hip.forceStop {
		// Since clientToken is given, fetch heal status from running
		// heal sequence.
		respBytes, errCode := globalAllHealState.PopHealStatusJSON(
			healPath, hip.clientToken)
		if errCode != ErrNone {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
		} else {
			writeSuccessResponseJSON(w, respBytes)
		}
		return
	}

	respCh := make(chan healResp)
	switch {
	case hip.forceStop:
		// Stop the running sequence asynchronously; result arrives on respCh.
		go func() {
			respBytes, apiErr := globalAllHealState.stopHealSequence(healPath)
			hr := healResp{respBytes: respBytes, apiErr: apiErr}
			respCh <- hr
		}()
	case hip.clientToken == "":
		// Launch a brand-new heal sequence (force-start aborts any
		// existing one first).
		nh := newHealSequence(GlobalContext, hip.bucket, hip.objPrefix, handlers.GetSourceIP(r), hip.hs, hip.forceStart)
		go func() {
			respBytes, apiErr, errMsg := globalAllHealState.LaunchNewHealSequence(nh, objectAPI)
			hr := healResp{respBytes, apiErr, errMsg}
			respCh <- hr
		}()
	}

	// Due to the force-starting functionality, the Launch
	// call above can take a long time - to keep the
	// connection alive, we start sending whitespace
	keepConnLive(w, r, respCh)
}
|
2017-02-20 15:58:50 -05:00
|
|
|
|
2021-03-04 17:36:23 -05:00
|
|
|
// getAggregatedBackgroundHealState returns the heal state of disks.
|
|
|
|
// If no ObjectLayer is provided no set status is returned.
|
|
|
|
func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmin.BgHealState, error) {
|
|
|
|
// Get local heal status first
|
2023-01-16 18:32:00 -05:00
|
|
|
bgHealStates, ok := getLocalBackgroundHealStatus(ctx, o)
|
2020-08-07 22:43:06 -04:00
|
|
|
if !ok {
|
2021-03-04 17:36:23 -05:00
|
|
|
return bgHealStates, errServerNotInitialized
|
2020-08-07 22:43:06 -04:00
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
if globalIsDistErasure {
|
2019-06-25 19:42:24 -04:00
|
|
|
// Get heal status from other peers
|
2020-08-07 16:22:53 -04:00
|
|
|
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
|
2020-09-03 01:54:56 -04:00
|
|
|
var errCount int
|
2020-08-07 16:22:53 -04:00
|
|
|
for _, nerr := range nerrs {
|
|
|
|
if nerr.Err != nil {
|
|
|
|
logger.LogIf(ctx, nerr.Err)
|
2020-09-03 01:54:56 -04:00
|
|
|
errCount++
|
2020-08-07 16:22:53 -04:00
|
|
|
}
|
|
|
|
}
|
2020-09-03 01:54:56 -04:00
|
|
|
if errCount == len(nerrs) {
|
|
|
|
return madmin.BgHealState{}, fmt.Errorf("all remote servers failed to report heal status, cluster is unhealthy")
|
|
|
|
}
|
2021-03-04 17:36:23 -05:00
|
|
|
bgHealStates.Merge(peersHealStates...)
|
2019-06-25 19:42:24 -04:00
|
|
|
}
|
|
|
|
|
2021-03-04 17:36:23 -05:00
|
|
|
return bgHealStates, nil
|
2020-08-07 16:22:53 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
ctx := newContext(r, w, "HealBackgroundStatus")
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2020-08-07 16:22:53 -04:00
|
|
|
|
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
|
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-03-04 17:36:23 -05:00
|
|
|
aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context(), objectAPI)
|
2020-09-03 01:54:56 -04:00
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-08-07 16:22:53 -04:00
|
|
|
if err := json.NewEncoder(w).Encode(aggregateHealStateResult); err != nil {
|
2019-06-25 19:42:24 -04:00
|
|
|
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-03-08 12:54:38 -05:00
|
|
|
// NetperfHandler - perform mesh style network throughput test
|
|
|
|
func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
ctx := newContext(r, w, "NetperfHandler")
|
|
|
|
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
|
|
|
|
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
|
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if !globalIsDistErasure {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
nsLock := objectAPI.NewNSLock(minioMetaBucket, "netperf")
|
|
|
|
lkctx, err := nsLock.GetLock(ctx, globalOperationTimeout)
|
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(toAPIErrorCode(ctx, err)), r.URL)
|
|
|
|
return
|
|
|
|
}
|
2022-12-23 22:49:07 -05:00
|
|
|
defer nsLock.Unlock(lkctx)
|
2022-03-08 12:54:38 -05:00
|
|
|
|
|
|
|
durationStr := r.Form.Get(peerRESTDuration)
|
|
|
|
duration, err := time.ParseDuration(durationStr)
|
|
|
|
if err != nil {
|
|
|
|
duration = globalNetPerfMinDuration
|
|
|
|
}
|
|
|
|
|
|
|
|
if duration < globalNetPerfMinDuration {
|
|
|
|
// We need sample size of minimum 10 secs.
|
|
|
|
duration = globalNetPerfMinDuration
|
|
|
|
}
|
|
|
|
|
|
|
|
duration = duration.Round(time.Second)
|
|
|
|
|
|
|
|
results := globalNotificationSys.Netperf(ctx, duration)
|
|
|
|
enc := json.NewEncoder(w)
|
|
|
|
if err := enc.Encode(madmin.NetperfResult{NodeResults: results}); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-07-12 13:12:47 -04:00
|
|
|
// SpeedtestHandler - Deprecated. See ObjectSpeedTestHandler
//
// Kept only for backward compatibility with older clients hitting the
// old route; it delegates directly to ObjectSpeedTestHandler.
func (a adminAPIHandlers) SpeedTestHandler(w http.ResponseWriter, r *http.Request) {
	a.ObjectSpeedTestHandler(w, r)
}
|
|
|
|
|
2022-07-12 13:12:47 -04:00
|
|
|
// ObjectSpeedTestHandler - reports maximum speed of a cluster by performing PUT and
// GET operations on the server, supports auto tuning by default by automatically
// increasing concurrency and stopping when we have reached the limits on the
// system.
//
// Results are streamed to the client as newline-delimited JSON; empty or
// repeated entries are sent periodically as keep-alives while a
// measurement round is still in flight.
func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ObjectSpeedTestHandler")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
	if objectAPI == nil {
		return
	}

	// Optional tuning knobs from the request form.
	sizeStr := r.Form.Get(peerRESTSize)
	durationStr := r.Form.Get(peerRESTDuration)
	concurrentStr := r.Form.Get(peerRESTConcurrent)
	storageClass := strings.TrimSpace(r.Form.Get(peerRESTStorageClass))
	customBucket := strings.TrimSpace(r.Form.Get(peerRESTBucket))
	autotune := r.Form.Get("autotune") == "true"
	noClear := r.Form.Get("noclear") == "true"

	// Each knob silently falls back to a default on parse failure.
	size, err := strconv.Atoi(sizeStr)
	if err != nil {
		size = 64 * humanize.MiByte
	}

	concurrent, err := strconv.Atoi(concurrentStr)
	if err != nil {
		concurrent = 32
	}

	duration, err := time.ParseDuration(durationStr)
	if err != nil {
		duration = time.Second * 10
	}

	storageInfo := objectAPI.StorageInfo(ctx)

	// Refuse to run if the cluster lacks free space for the workload;
	// autotune may also be vetoed if growth would exceed capacity.
	sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, storageInfo, concurrent, size, autotune)
	if !sufficientCapacity {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
			Code:       "XMinioSpeedtestInsufficientCapacity",
			Message:    capacityErrMsg,
			StatusCode: http.StatusInsufficientStorage,
		}), r.URL)
		return
	}

	if autotune && !canAutotune {
		autotune = false
	}

	if customBucket == "" {
		customBucket = globalObjectPerfBucket

		bucketExists, err := makeObjectPerfBucket(ctx, objectAPI, customBucket)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		// Only delete the bucket afterwards if we created it here.
		if !noClear && !bucketExists {
			defer deleteObjectPerfBucket(objectAPI)
		}
	}

	// Clean up the objects written by the test unless asked not to.
	if !noClear {
		defer objectAPI.DeleteObject(ctx, customBucket, speedTest+SlashSeparator, ObjectOptions{
			DeletePrefix: true,
		})
	}

	// Freeze all incoming S3 API calls before running speedtest.
	globalNotificationSys.ServiceFreeze(ctx, true)

	// unfreeze all incoming S3 API calls after speedtest.
	defer globalNotificationSys.ServiceFreeze(ctx, false)

	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()

	enc := json.NewEncoder(w)
	ch := objectSpeedTest(ctx, speedTestOpts{
		objectSize:       size,
		concurrencyStart: concurrent,
		duration:         duration,
		autotune:         autotune,
		storageClass:     storageClass,
		bucketName:       customBucket,
	})
	var prevResult madmin.SpeedTestResult
	for {
		select {
		case <-ctx.Done():
			return
		case <-keepAliveTicker.C:
			// if previous result is set keep writing the
			// previous result back to the client
			if prevResult.Version != "" {
				if err := enc.Encode(prevResult); err != nil {
					return
				}
			} else {
				// first result is not yet obtained, keep writing
				// empty entry to prevent client from disconnecting.
				if err := enc.Encode(madmin.SpeedTestResult{}); err != nil {
					return
				}
			}
			w.(http.Flusher).Flush()
		case result, ok := <-ch:
			// Channel closed means the test finished.
			if !ok {
				return
			}
			if err := enc.Encode(result); err != nil {
				return
			}
			prevResult = result
			w.(http.Flusher).Flush()
		}
	}
}
|
|
|
|
|
2022-07-12 13:12:47 -04:00
|
|
|
func makeObjectPerfBucket(ctx context.Context, objectAPI ObjectLayer, bucketName string) (bucketExists bool, err error) {
|
2022-12-23 10:46:00 -05:00
|
|
|
if err = objectAPI.MakeBucket(ctx, bucketName, MakeBucketOptions{}); err != nil {
|
2022-04-12 16:17:44 -04:00
|
|
|
if _, ok := err.(BucketExists); !ok {
|
|
|
|
// Only BucketExists error can be ignored.
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
bucketExists = true
|
|
|
|
}
|
|
|
|
return bucketExists, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// deleteObjectPerfBucket force-deletes the global speedtest bucket,
// propagating the delete through site replication when it is enabled.
// Best-effort cleanup: the error, if any, is intentionally ignored.
func deleteObjectPerfBucket(objectAPI ObjectLayer) {
	objectAPI.DeleteBucket(context.Background(), globalObjectPerfBucket, DeleteBucketOptions{
		Force:      true,
		SRDeleteOp: getSRBucketDeleteOp(globalSiteReplicationSys.isEnabled()),
	})
}
|
|
|
|
|
2022-07-27 17:41:59 -04:00
|
|
|
func validateObjPerfOptions(ctx context.Context, storageInfo madmin.StorageInfo, concurrent int, size int, autotune bool) (bool, bool, string) {
|
2022-04-12 16:17:44 -04:00
|
|
|
capacityNeeded := uint64(concurrent * size)
|
2022-07-06 16:29:49 -04:00
|
|
|
capacity := GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo)
|
2022-04-12 16:17:44 -04:00
|
|
|
|
|
|
|
if capacity < capacityNeeded {
|
|
|
|
return false, false, fmt.Sprintf("not enough usable space available to perform speedtest - expected %s, got %s",
|
|
|
|
humanize.IBytes(capacityNeeded), humanize.IBytes(capacity))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify if we can employ autotune without running out of capacity,
|
|
|
|
// if we do run out of capacity, make sure to turn-off autotuning
|
|
|
|
// in such situations.
|
|
|
|
if autotune {
|
|
|
|
newConcurrent := concurrent + (concurrent+1)/2
|
|
|
|
autoTunedCapacityNeeded := uint64(newConcurrent * size)
|
|
|
|
if capacity < autoTunedCapacityNeeded {
|
|
|
|
// Turn-off auto-tuning if next possible concurrency would reach beyond disk capacity.
|
|
|
|
return true, false, ""
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true, autotune, ""
|
|
|
|
}
|
|
|
|
|
2022-02-02 01:38:05 -05:00
|
|
|
// NetSpeedtestHandler - reports maximum network throughput
//
// This endpoint is intentionally unimplemented and always answers
// ErrNotImplemented; clients should use the mesh netperf API instead.
func (a adminAPIHandlers) NetSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "NetSpeedtestHandler")

	writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
}
|
|
|
|
|
|
|
|
// DriveSpeedtestHandler - reports throughput of drives available in the cluster
//
// Results from all nodes are streamed back as newline-delimited JSON;
// blank entries act as keep-alives while measurements are in flight.
func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DriveSpeedtestHandler")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
	if objectAPI == nil {
		return
	}

	// Freeze all incoming S3 API calls before running speedtest.
	globalNotificationSys.ServiceFreeze(ctx, true)

	// unfreeze all incoming S3 API calls after speedtest.
	defer globalNotificationSys.ServiceFreeze(ctx, false)

	// Test parameters; each falls back to a default on parse failure.
	serial := r.Form.Get("serial") == "true"
	blockSizeStr := r.Form.Get("blocksize")
	fileSizeStr := r.Form.Get("filesize")

	blockSize, err := strconv.ParseUint(blockSizeStr, 10, 64)
	if err != nil {
		blockSize = 4 * humanize.MiByte // default value
	}

	fileSize, err := strconv.ParseUint(fileSizeStr, 10, 64)
	if err != nil {
		fileSize = 1 * humanize.GiByte // default value
	}

	opts := madmin.DriveSpeedTestOpts{
		Serial:    serial,
		BlockSize: blockSize,
		FileSize:  fileSize,
	}

	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()

	ch := globalNotificationSys.DriveSpeedTest(ctx, opts)

	enc := json.NewEncoder(w)
	for {
		select {
		case <-ctx.Done():
			return
		case <-keepAliveTicker.C:
			// Write a blank entry to prevent client from disconnecting
			if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case result, ok := <-ch:
			// Channel closed means the test is complete.
			if !ok {
				return
			}
			if err := enc.Encode(result); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		}
	}
}
|
|
|
|
|
2019-08-27 14:37:47 -04:00
|
|
|
// Admin API errors
const (
	// AdminUpdateUnexpectedFailure is returned when a server update fails
	// for a reason not covered by a more specific code.
	AdminUpdateUnexpectedFailure = "XMinioAdminUpdateUnexpectedFailure"
	// AdminUpdateURLNotReachable is returned when the update URL cannot be
	// reached from the server.
	AdminUpdateURLNotReachable = "XMinioAdminUpdateURLNotReachable"
	// AdminUpdateApplyFailure is returned when the downloaded update could
	// not be applied.
	AdminUpdateApplyFailure = "XMinioAdminUpdateApplyFailure"
)
|
|
|
|
|
2021-05-06 11:52:02 -04:00
|
|
|
// Returns true if the madmin.TraceInfo should be traced,
|
2019-07-31 14:08:39 -04:00
|
|
|
// false if certain conditions are not met.
|
2021-05-06 11:52:02 -04:00
|
|
|
// - input entry is not of the type *madmin.TraceInfo*
|
2019-07-31 14:08:39 -04:00
|
|
|
// - errOnly entries are to be traced, not status code 2xx, 3xx.
|
2021-05-06 11:52:02 -04:00
|
|
|
// - madmin.TraceInfo type is asked by opts
|
2022-07-05 17:45:49 -04:00
|
|
|
func shouldTrace(trcInfo madmin.TraceInfo, opts madmin.ServiceTraceOpts) (shouldTrace bool) {
|
|
|
|
// Reject all unwanted types.
|
|
|
|
want := opts.TraceTypes()
|
|
|
|
if !want.Contains(trcInfo.TraceType) {
|
2019-07-31 14:08:39 -04:00
|
|
|
return false
|
|
|
|
}
|
2020-11-21 01:52:17 -05:00
|
|
|
|
2022-07-05 17:45:49 -04:00
|
|
|
isHTTP := trcInfo.TraceType.Overlaps(madmin.TraceInternal|madmin.TraceS3) && trcInfo.HTTP != nil
|
2021-03-27 02:24:07 -04:00
|
|
|
|
2022-07-05 17:45:49 -04:00
|
|
|
// Check latency...
|
|
|
|
if opts.Threshold > 0 && trcInfo.Duration < opts.Threshold {
|
|
|
|
return false
|
2021-03-27 02:24:07 -04:00
|
|
|
}
|
|
|
|
|
2022-07-05 17:45:49 -04:00
|
|
|
// Check internal path
|
|
|
|
isInternal := isHTTP && HasPrefix(trcInfo.HTTP.ReqInfo.Path, minioReservedBucketPath+SlashSeparator)
|
|
|
|
if isInternal && !opts.Internal {
|
|
|
|
return false
|
2021-03-27 02:24:07 -04:00
|
|
|
}
|
|
|
|
|
2022-07-05 17:45:49 -04:00
|
|
|
// Filter non-errors.
|
|
|
|
if isHTTP && opts.OnlyErrors && trcInfo.HTTP.RespInfo.StatusCode < http.StatusBadRequest {
|
|
|
|
return false
|
2020-11-21 01:52:17 -05:00
|
|
|
}
|
|
|
|
|
2022-07-05 17:45:49 -04:00
|
|
|
return true
|
2021-03-27 02:24:07 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err error) {
|
2022-07-05 17:45:49 -04:00
|
|
|
if err := opts.ParseParams(r); err != nil {
|
|
|
|
return opts, err
|
|
|
|
}
|
2021-03-27 02:24:07 -04:00
|
|
|
// Support deprecated 'all' query
|
2022-07-05 17:45:49 -04:00
|
|
|
if r.Form.Get("all") == "true" {
|
2021-03-27 02:24:07 -04:00
|
|
|
opts.S3 = true
|
|
|
|
opts.Internal = true
|
|
|
|
opts.Storage = true
|
|
|
|
opts.OS = true
|
2022-07-05 17:45:49 -04:00
|
|
|
// Older mc - cannot deal with more types...
|
2019-07-31 14:08:39 -04:00
|
|
|
}
|
2021-03-27 02:24:07 -04:00
|
|
|
return
|
2019-07-31 14:08:39 -04:00
|
|
|
}
|
|
|
|
|
2020-04-07 22:30:59 -04:00
|
|
|
// TraceHandler - POST /minio/admin/v3/trace
// ----------
// The handler sends http trace to the connected HTTP client.
// It streams newline-delimited JSON trace entries until the client
// disconnects or an encode/write error occurs.
func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HTTPTrace")

	// Validate request signature.
	_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.TraceAdminAction, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return
	}

	// Parse trace filtering options (types, threshold, errors-only, ...).
	traceOpts, err := extractTraceOptions(r)
	if err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}
	setEventStreamHeaders(w)

	// Trace Publisher and peer-trace-client uses nonblocking send and hence does not wait for slow receivers.
	// Use buffered channel to take care of burst sends or slow w.Write()
	traceCh := make(chan madmin.TraceInfo, 4000)

	// Remote peers also feed their trace entries into traceCh.
	peers, _ := newPeerRestClients(globalEndpoints)

	// Subscribe to local trace events, filtered via shouldTrace.
	err = globalTrace.Subscribe(traceOpts.TraceTypes(), traceCh, ctx.Done(), func(entry madmin.TraceInfo) bool {
		return shouldTrace(entry, traceOpts)
	})
	if err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrSlowDown), r.URL)
		return
	}

	// Start trace subscriptions on each reachable peer.
	for _, peer := range peers {
		if peer == nil {
			continue
		}
		peer.Trace(traceCh, ctx.Done(), traceOpts)
	}

	// Periodic keep-alive so idle connections are not dropped by proxies.
	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()

	enc := json.NewEncoder(w)
	for {
		select {
		case entry := <-traceCh:
			if err := enc.Encode(entry); err != nil {
				return
			}
			if len(traceCh) == 0 {
				// Flush if nothing is queued
				w.(http.Flusher).Flush()
			}
		case <-keepAliveTicker.C:
			// Skip the keep-alive if real entries are pending.
			if len(traceCh) > 0 {
				continue
			}
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-ctx.Done():
			return
		}
	}
}
|
2019-09-03 14:10:48 -04:00
|
|
|
|
2022-07-05 17:45:49 -04:00
|
|
|
// The ConsoleLogHandler handler sends console logs to the connected HTTP client.
// It streams newline-delimited JSON log entries, optionally limited to a
// single node and filtered by log type, until the client disconnects.
func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ConsoleLog")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConsoleLogAdminAction)
	if objectAPI == nil {
		return
	}
	// Optional node name to restrict the stream to a single server.
	node := r.Form.Get("node")
	// limit buffered console entries if client requested it.
	limitStr := r.Form.Get("limit")
	limitLines, err := strconv.Atoi(limitStr)
	if err != nil {
		// Missing/invalid limit falls back to the last 10 lines.
		limitLines = 10
	}

	// Resolve the requested log type into a mask; zero means "all".
	logKind := madmin.LogKind(strings.ToUpper(r.Form.Get("logType"))).LogMask()
	if logKind == 0 {
		logKind = madmin.LogMaskAll
	}

	// Avoid reusing tcp connection if read timeout is hit
	// This is needed to make r.Context().Done() work as
	// expected in case of read timeout
	w.Header().Set("Connection", "close")

	setEventStreamHeaders(w)

	// Buffered to absorb bursts from local and peer log producers.
	logCh := make(chan log.Info, 4000)

	peers, _ := newPeerRestClients(globalEndpoints)

	// Subscribe to the local console log buffer.
	err = globalConsoleSys.Subscribe(logCh, ctx.Done(), node, limitLines, logKind, nil)
	if err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrSlowDown), r.URL)
		return
	}

	// Also pull console logs from each matching peer.
	for _, peer := range peers {
		if peer == nil {
			continue
		}
		if node == "" || strings.EqualFold(peer.host.Name, node) {
			peer.ConsoleLog(logCh, ctx.Done())
		}
	}

	enc := json.NewEncoder(w)

	// Periodic keep-alive so idle connections are not dropped by proxies.
	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()

	for {
		select {
		case log, ok := <-logCh:
			if !ok {
				return
			}
			// Re-check node/kind filters before sending to the client.
			if log.SendLog(node, logKind) {
				if err := enc.Encode(log); err != nil {
					return
				}
				if len(logCh) == 0 {
					// Flush if nothing is queued
					w.(http.Flusher).Flush()
				}
			}
		case <-keepAliveTicker.C:
			// Skip the keep-alive if real entries are pending.
			if len(logCh) > 0 {
				continue
			}
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-ctx.Done():
			return
		}
	}
}
|
2019-09-04 16:19:44 -04:00
|
|
|
|
2020-07-08 21:50:43 -04:00
|
|
|
// KMSCreateKeyHandler - POST /minio/admin/v3/kms/key/create?key-id=<master-key-id>
|
|
|
|
func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
ctx := newContext(r, w, "KMSCreateKey")
|
2021-01-26 16:21:51 -05:00
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2020-07-08 21:50:43 -04:00
|
|
|
|
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSCreateKeyAdminAction)
|
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if GlobalKMS == nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-07-18 21:54:27 -04:00
|
|
|
if err := GlobalKMS.CreateKey(ctx, r.Form.Get("key-id")); err != nil {
|
2020-07-08 21:50:43 -04:00
|
|
|
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
writeSuccessResponseHeadersOnly(w)
|
|
|
|
}
|
|
|
|
|
2021-06-11 18:04:26 -04:00
|
|
|
// KMSKeyStatusHandler - GET /minio/admin/v3/kms/status
|
|
|
|
func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
ctx := newContext(r, w, "KMSStatus")
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
|
|
|
|
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSKeyStatusAdminAction)
|
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if GlobalKMS == nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-07-18 21:54:27 -04:00
|
|
|
stat, err := GlobalKMS.Stat(ctx)
|
2021-06-11 18:04:26 -04:00
|
|
|
if err != nil {
|
|
|
|
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
status := madmin.KMSStatus{
|
|
|
|
Name: stat.Name,
|
|
|
|
DefaultKeyID: stat.DefaultKey,
|
|
|
|
Endpoints: make(map[string]madmin.ItemState, len(stat.Endpoints)),
|
|
|
|
}
|
|
|
|
for _, endpoint := range stat.Endpoints {
|
|
|
|
status.Endpoints[endpoint] = madmin.ItemOnline // TODO(aead): Implement an online check for mTLS
|
|
|
|
}
|
|
|
|
|
|
|
|
resp, err := json.Marshal(status)
|
|
|
|
if err != nil {
|
|
|
|
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
writeSuccessResponseJSON(w, resp)
|
|
|
|
}
|
|
|
|
|
2020-04-07 22:30:59 -04:00
|
|
|
// KMSKeyStatusHandler - GET /minio/admin/v3/kms/key/status?key-id=<master-key-id>
|
2019-09-04 16:19:44 -04:00
|
|
|
func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Request) {
|
2020-05-11 13:34:08 -04:00
|
|
|
ctx := newContext(r, w, "KMSKeyStatus")
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2019-09-04 16:19:44 -04:00
|
|
|
|
2020-01-26 21:47:52 -05:00
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSKeyStatusAdminAction)
|
2019-09-04 16:19:44 -04:00
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if GlobalKMS == nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
|
|
|
|
return
|
|
|
|
}
|
2021-05-18 10:22:31 -04:00
|
|
|
|
2022-07-18 21:54:27 -04:00
|
|
|
stat, err := GlobalKMS.Stat(ctx)
|
2021-04-15 11:47:33 -04:00
|
|
|
if err != nil {
|
|
|
|
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
|
|
|
|
return
|
|
|
|
}
|
2019-09-04 16:19:44 -04:00
|
|
|
|
2021-08-08 01:43:01 -04:00
|
|
|
keyID := r.Form.Get("key-id")
|
2019-09-04 16:19:44 -04:00
|
|
|
if keyID == "" {
|
2021-04-15 11:47:33 -04:00
|
|
|
keyID = stat.DefaultKey
|
2019-09-04 16:19:44 -04:00
|
|
|
}
|
2022-01-02 12:15:06 -05:00
|
|
|
response := madmin.KMSKeyStatus{
|
2019-09-04 16:19:44 -04:00
|
|
|
KeyID: keyID,
|
|
|
|
}
|
|
|
|
|
2021-04-15 11:47:33 -04:00
|
|
|
kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
|
2019-09-04 16:19:44 -04:00
|
|
|
// 1. Generate a new key using the KMS.
|
2022-07-18 21:54:27 -04:00
|
|
|
key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsContext)
|
2019-09-04 16:19:44 -04:00
|
|
|
if err != nil {
|
|
|
|
response.EncryptionErr = err.Error()
|
|
|
|
resp, err := json.Marshal(response)
|
|
|
|
if err != nil {
|
|
|
|
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
writeSuccessResponseJSON(w, resp)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-02-05 12:17:35 -05:00
|
|
|
// 2. Verify that we can indeed decrypt the (encrypted) key
|
2021-05-18 10:22:31 -04:00
|
|
|
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
|
2019-09-04 16:19:44 -04:00
|
|
|
if err != nil {
|
|
|
|
response.DecryptionErr = err.Error()
|
|
|
|
resp, err := json.Marshal(response)
|
|
|
|
if err != nil {
|
|
|
|
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
writeSuccessResponseJSON(w, resp)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-02-05 12:17:35 -05:00
|
|
|
// 3. Compare generated key with decrypted key
|
2021-04-15 11:47:33 -04:00
|
|
|
if subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1 {
|
2019-09-04 16:19:44 -04:00
|
|
|
response.DecryptionErr = "The generated and the decrypted data key do not match"
|
|
|
|
resp, err := json.Marshal(response)
|
|
|
|
if err != nil {
|
|
|
|
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
writeSuccessResponseJSON(w, resp)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
resp, err := json.Marshal(response)
|
|
|
|
if err != nil {
|
|
|
|
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
writeSuccessResponseJSON(w, resp)
|
|
|
|
}
|
2019-10-03 10:48:38 -04:00
|
|
|
|
2023-02-11 08:01:56 -05:00
|
|
|
func getPoolsInfo(ctx context.Context, allDisks []madmin.Disk) (map[int]map[int]madmin.ErasureSetInfo, error) {
|
|
|
|
objectAPI := newObjectLayerFn()
|
|
|
|
if objectAPI == nil {
|
|
|
|
return nil, errServerNotInitialized
|
|
|
|
}
|
|
|
|
|
|
|
|
z, _ := objectAPI.(*erasureServerPools)
|
|
|
|
|
|
|
|
poolsInfo := make(map[int]map[int]madmin.ErasureSetInfo)
|
|
|
|
for _, d := range allDisks {
|
|
|
|
poolInfo, ok := poolsInfo[d.PoolIndex]
|
|
|
|
if !ok {
|
|
|
|
poolInfo = make(map[int]madmin.ErasureSetInfo)
|
|
|
|
}
|
|
|
|
erasureSet, ok := poolInfo[d.SetIndex]
|
|
|
|
if !ok {
|
|
|
|
erasureSet.ID = d.SetIndex
|
|
|
|
cache := dataUsageCache{}
|
|
|
|
if err := cache.load(ctx, z.serverPools[d.PoolIndex].sets[d.SetIndex], dataUsageCacheName); err == nil {
|
|
|
|
dataUsageInfo := cache.dui(dataUsageRoot, nil)
|
|
|
|
erasureSet.ObjectsCount = dataUsageInfo.ObjectsTotalCount
|
|
|
|
erasureSet.VersionsCount = dataUsageInfo.VersionsTotalCount
|
|
|
|
erasureSet.Usage = dataUsageInfo.ObjectsTotalSize
|
|
|
|
}
|
|
|
|
}
|
|
|
|
erasureSet.RawCapacity += d.TotalSpace
|
|
|
|
erasureSet.RawUsage += d.UsedSpace
|
|
|
|
if d.Healing {
|
|
|
|
erasureSet.HealDisks = 1
|
|
|
|
}
|
|
|
|
poolInfo[d.SetIndex] = erasureSet
|
|
|
|
poolsInfo[d.PoolIndex] = poolInfo
|
|
|
|
}
|
|
|
|
return poolsInfo, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// getServerInfo assembles the madmin.InfoMessage returned by the admin
// ServerInfo API: service states (KMS, LDAP, logger, notifications),
// per-server properties, cluster-wide usage, and — when poolsInfoEnabled
// is set — per-pool erasure-set statistics.
func getServerInfo(ctx context.Context, poolsInfoEnabled bool, r *http.Request) madmin.InfoMessage {
	kmsStat := fetchKMSStatus()

	// Probe LDAP health by opening (and immediately closing) a connection.
	ldap := madmin.LDAP{}
	if globalLDAPConfig.Enabled() {
		ldapConn, err := globalLDAPConfig.LDAP.Connect()
		//nolint:gocritic
		if err != nil {
			ldap.Status = string(madmin.ItemOffline)
		} else if ldapConn == nil {
			ldap.Status = "Not Configured"
		} else {
			// Close ldap connection to avoid leaks.
			ldapConn.Close()
			ldap.Status = string(madmin.ItemOnline)
		}
	}

	log, audit := fetchLoggerInfo()

	// Get the notification target info
	notifyTarget := fetchLambdaInfo()

	// Collect per-server properties from all peers plus this node.
	local := getLocalServerProperty(globalEndpoints, r)
	servers := globalNotificationSys.ServerInfo()
	servers = append(servers, local)

	assignPoolNumbers(servers)

	var poolsInfo map[int]map[int]madmin.ErasureSetInfo
	var backend interface{}

	// Mode stays "initializing" until the object layer is available.
	mode := madmin.ItemInitializing

	buckets := madmin.Buckets{}
	objects := madmin.Objects{}
	versions := madmin.Versions{}
	usage := madmin.Usage{}

	objectAPI := newObjectLayerFn()
	if objectAPI != nil {
		mode = madmin.ItemOnline

		// Load data usage
		dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
		if err == nil {
			buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
			objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
			versions = madmin.Versions{Count: dataUsageInfo.VersionsTotalCount}
			usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
		} else {
			// Report the load failure in each usage field instead of failing.
			buckets = madmin.Buckets{Error: err.Error()}
			objects = madmin.Objects{Error: err.Error()}
			usage = madmin.Usage{Error: err.Error()}
		}

		// Fetching the backend information
		backendInfo := objectAPI.BackendInfo()
		// Calculate the number of online/offline disks of all nodes
		var allDisks []madmin.Disk
		for _, s := range servers {
			allDisks = append(allDisks, s.Disks...)
		}
		onlineDisks, offlineDisks := getOnlineOfflineDisksStats(allDisks)

		backend = madmin.ErasureBackend{
			Type:             madmin.ErasureType,
			OnlineDisks:      onlineDisks.Sum(),
			OfflineDisks:     offlineDisks.Sum(),
			StandardSCParity: backendInfo.StandardSCParity,
			RRSCParity:       backendInfo.RRSCParity,
		}

		// Pool details are optional since they require a cache scan.
		if poolsInfoEnabled {
			poolsInfo, _ = getPoolsInfo(ctx, allDisks)
		}
	}

	domain := globalDomainNames
	services := madmin.Services{
		KMS:           kmsStat,
		LDAP:          ldap,
		Logger:        log,
		Audit:         audit,
		Notifications: notifyTarget,
	}

	return madmin.InfoMessage{
		Mode:         string(mode),
		Domain:       domain,
		Region:       globalSite.Region,
		SQSARN:       globalEventNotifier.GetARNList(false),
		DeploymentID: globalDeploymentID,
		Buckets:      buckets,
		Objects:      objects,
		Versions:     versions,
		Usage:        usage,
		Services:     services,
		Backend:      backend,
		Servers:      servers,
		Pools:        poolsInfo,
	}
}
|
|
|
|
|
2022-05-06 15:41:07 -04:00
|
|
|
func getKubernetesInfo(dctx context.Context) madmin.KubernetesInfo {
|
|
|
|
ctx, cancel := context.WithCancel(dctx)
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
ki := madmin.KubernetesInfo{}
|
|
|
|
|
|
|
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, kubernetesVersionEndpoint, nil)
|
|
|
|
if err != nil {
|
|
|
|
ki.Error = err.Error()
|
|
|
|
return ki
|
|
|
|
}
|
|
|
|
|
|
|
|
client := &http.Client{
|
2022-10-24 20:44:15 -04:00
|
|
|
Transport: NewHTTPTransport(),
|
2022-05-06 15:41:07 -04:00
|
|
|
Timeout: 10 * time.Second,
|
|
|
|
}
|
|
|
|
|
|
|
|
resp, err := client.Do(req)
|
|
|
|
if err != nil {
|
|
|
|
ki.Error = err.Error()
|
|
|
|
return ki
|
|
|
|
}
|
2023-02-06 13:41:41 -05:00
|
|
|
defer resp.Body.Close()
|
2022-05-06 15:41:07 -04:00
|
|
|
decoder := json.NewDecoder(resp.Body)
|
|
|
|
if err := decoder.Decode(&ki); err != nil {
|
|
|
|
ki.Error = err.Error()
|
|
|
|
}
|
|
|
|
return ki
|
|
|
|
}
|
|
|
|
|
2022-11-15 16:53:05 -05:00
|
|
|
func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *url.Values, healthInfoCh chan madmin.HealthInfo, healthInfo madmin.HealthInfo) {
|
2021-07-14 03:23:22 -04:00
|
|
|
hostAnonymizer := createHostAnonymizer()
|
|
|
|
// anonAddr - Anonymizes hosts in given input string.
|
|
|
|
anonAddr := func(addr string) string {
|
|
|
|
newAddr, found := hostAnonymizer[addr]
|
|
|
|
if found {
|
|
|
|
return newAddr
|
|
|
|
}
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
// If we reach here, it means that the given addr doesn't contain any of the hosts.
|
|
|
|
// Return it as is. Can happen for drive paths in non-distributed mode
|
|
|
|
return addr
|
|
|
|
}
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
// anonymizedAddr - Updated the addr of the node info with anonymized one
|
|
|
|
anonymizeAddr := func(info madmin.NodeInfo) {
|
|
|
|
info.SetAddr(anonAddr(info.GetAddr()))
|
|
|
|
}
|
|
|
|
|
2022-06-03 08:58:45 -04:00
|
|
|
partialWrite := func(oinfo madmin.HealthInfo) {
|
|
|
|
select {
|
|
|
|
case healthInfoCh <- oinfo:
|
|
|
|
case <-healthCtx.Done():
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-06 15:41:07 -04:00
|
|
|
getAndWritePlatformInfo := func() {
|
|
|
|
if IsKubernetes() {
|
2022-06-03 08:58:45 -04:00
|
|
|
healthInfo.Sys.KubernetesInfo = getKubernetesInfo(healthCtx)
|
2022-05-06 15:41:07 -04:00
|
|
|
partialWrite(healthInfo)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
getAndWriteCPUs := func() {
|
2021-06-01 11:55:49 -04:00
|
|
|
if query.Get("syscpu") == "true" {
|
2022-06-03 08:58:45 -04:00
|
|
|
localCPUInfo := madmin.GetCPUs(healthCtx, globalLocalNodeName)
|
2021-07-14 03:23:22 -04:00
|
|
|
anonymizeAddr(&localCPUInfo)
|
|
|
|
healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, localCPUInfo)
|
|
|
|
|
2022-06-03 08:58:45 -04:00
|
|
|
peerCPUInfo := globalNotificationSys.GetCPUs(healthCtx)
|
2021-07-14 03:23:22 -04:00
|
|
|
for _, cpuInfo := range peerCPUInfo {
|
|
|
|
anonymizeAddr(&cpuInfo)
|
|
|
|
healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, cpuInfo)
|
|
|
|
}
|
|
|
|
|
2020-11-20 15:52:53 -05:00
|
|
|
partialWrite(healthInfo)
|
2020-04-16 13:56:18 -04:00
|
|
|
}
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
getAndWritePartitions := func() {
|
2021-06-01 11:55:49 -04:00
|
|
|
if query.Get("sysdrivehw") == "true" {
|
2022-06-03 08:58:45 -04:00
|
|
|
localPartitions := madmin.GetPartitions(healthCtx, globalLocalNodeName)
|
2021-07-14 03:23:22 -04:00
|
|
|
anonymizeAddr(&localPartitions)
|
|
|
|
healthInfo.Sys.Partitions = append(healthInfo.Sys.Partitions, localPartitions)
|
|
|
|
|
2022-06-03 08:58:45 -04:00
|
|
|
peerPartitions := globalNotificationSys.GetPartitions(healthCtx)
|
2021-07-14 03:23:22 -04:00
|
|
|
for _, p := range peerPartitions {
|
|
|
|
anonymizeAddr(&p)
|
|
|
|
healthInfo.Sys.Partitions = append(healthInfo.Sys.Partitions, p)
|
|
|
|
}
|
2020-11-20 15:52:53 -05:00
|
|
|
partialWrite(healthInfo)
|
2020-04-16 13:56:18 -04:00
|
|
|
}
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
getAndWriteOSInfo := func() {
|
2021-06-01 11:55:49 -04:00
|
|
|
if query.Get("sysosinfo") == "true" {
|
2022-06-03 08:58:45 -04:00
|
|
|
localOSInfo := madmin.GetOSInfo(healthCtx, globalLocalNodeName)
|
2021-07-14 03:23:22 -04:00
|
|
|
anonymizeAddr(&localOSInfo)
|
|
|
|
healthInfo.Sys.OSInfo = append(healthInfo.Sys.OSInfo, localOSInfo)
|
|
|
|
|
2022-06-03 08:58:45 -04:00
|
|
|
peerOSInfos := globalNotificationSys.GetOSInfo(healthCtx)
|
2021-07-14 03:23:22 -04:00
|
|
|
for _, o := range peerOSInfos {
|
|
|
|
anonymizeAddr(&o)
|
|
|
|
healthInfo.Sys.OSInfo = append(healthInfo.Sys.OSInfo, o)
|
|
|
|
}
|
2020-11-20 15:52:53 -05:00
|
|
|
partialWrite(healthInfo)
|
2020-04-16 13:56:18 -04:00
|
|
|
}
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
getAndWriteMemInfo := func() {
|
2021-06-01 11:55:49 -04:00
|
|
|
if query.Get("sysmem") == "true" {
|
2022-06-03 08:58:45 -04:00
|
|
|
localMemInfo := madmin.GetMemInfo(healthCtx, globalLocalNodeName)
|
2021-07-14 03:23:22 -04:00
|
|
|
anonymizeAddr(&localMemInfo)
|
|
|
|
healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, localMemInfo)
|
|
|
|
|
2022-06-03 08:58:45 -04:00
|
|
|
peerMemInfos := globalNotificationSys.GetMemInfo(healthCtx)
|
2021-07-14 03:23:22 -04:00
|
|
|
for _, m := range peerMemInfos {
|
|
|
|
anonymizeAddr(&m)
|
|
|
|
healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, m)
|
|
|
|
}
|
2020-11-20 15:52:53 -05:00
|
|
|
partialWrite(healthInfo)
|
2020-04-16 13:56:18 -04:00
|
|
|
}
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
|
|
|
|
2021-07-30 02:05:34 -04:00
|
|
|
getAndWriteSysErrors := func() {
|
|
|
|
if query.Get(string(madmin.HealthDataTypeSysErrors)) == "true" {
|
2022-06-03 08:58:45 -04:00
|
|
|
localSysErrors := madmin.GetSysErrors(healthCtx, globalLocalNodeName)
|
2021-07-30 02:05:34 -04:00
|
|
|
anonymizeAddr(&localSysErrors)
|
|
|
|
healthInfo.Sys.SysErrs = append(healthInfo.Sys.SysErrs, localSysErrors)
|
|
|
|
partialWrite(healthInfo)
|
|
|
|
|
2022-06-03 08:58:45 -04:00
|
|
|
peerSysErrs := globalNotificationSys.GetSysErrors(healthCtx)
|
2021-07-30 02:05:34 -04:00
|
|
|
for _, se := range peerSysErrs {
|
|
|
|
anonymizeAddr(&se)
|
|
|
|
healthInfo.Sys.SysErrs = append(healthInfo.Sys.SysErrs, se)
|
|
|
|
}
|
|
|
|
partialWrite(healthInfo)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-24 20:09:37 -04:00
|
|
|
getAndWriteSysConfig := func() {
|
|
|
|
if query.Get(string(madmin.HealthDataTypeSysConfig)) == "true" {
|
2022-06-03 08:58:45 -04:00
|
|
|
localSysConfig := madmin.GetSysConfig(healthCtx, globalLocalNodeName)
|
2021-08-24 20:09:37 -04:00
|
|
|
anonymizeAddr(&localSysConfig)
|
|
|
|
healthInfo.Sys.SysConfig = append(healthInfo.Sys.SysConfig, localSysConfig)
|
|
|
|
partialWrite(healthInfo)
|
|
|
|
|
2022-06-03 08:58:45 -04:00
|
|
|
peerSysConfig := globalNotificationSys.GetSysConfig(healthCtx)
|
2021-08-24 20:09:37 -04:00
|
|
|
for _, sc := range peerSysConfig {
|
|
|
|
anonymizeAddr(&sc)
|
|
|
|
healthInfo.Sys.SysConfig = append(healthInfo.Sys.SysConfig, sc)
|
|
|
|
}
|
|
|
|
partialWrite(healthInfo)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-12 21:58:40 -04:00
|
|
|
getAndWriteSysServices := func() {
|
|
|
|
if query.Get(string(madmin.HealthDataTypeSysServices)) == "true" {
|
2022-06-03 08:58:45 -04:00
|
|
|
localSysServices := madmin.GetSysServices(healthCtx, globalLocalNodeName)
|
2021-08-12 21:58:40 -04:00
|
|
|
anonymizeAddr(&localSysServices)
|
|
|
|
healthInfo.Sys.SysServices = append(healthInfo.Sys.SysServices, localSysServices)
|
|
|
|
partialWrite(healthInfo)
|
|
|
|
|
2022-06-03 08:58:45 -04:00
|
|
|
peerSysServices := globalNotificationSys.GetSysServices(healthCtx)
|
2021-08-24 20:09:37 -04:00
|
|
|
for _, ss := range peerSysServices {
|
|
|
|
anonymizeAddr(&ss)
|
|
|
|
healthInfo.Sys.SysServices = append(healthInfo.Sys.SysServices, ss)
|
2021-08-12 21:58:40 -04:00
|
|
|
}
|
|
|
|
partialWrite(healthInfo)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
anonymizeCmdLine := func(cmdLine string) string {
|
|
|
|
if !globalIsDistErasure {
|
|
|
|
// FS mode - single server - hard code to `server1`
|
2021-11-16 12:28:29 -05:00
|
|
|
anonCmdLine := strings.ReplaceAll(cmdLine, globalLocalNodeName, "server1")
|
2022-10-21 03:34:32 -04:00
|
|
|
if len(globalMinioConsoleHost) > 0 {
|
|
|
|
anonCmdLine = strings.ReplaceAll(anonCmdLine, globalMinioConsoleHost, "server1")
|
|
|
|
}
|
|
|
|
return anonCmdLine
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Server start command regex groups:
|
|
|
|
// 1 - minio server
|
|
|
|
// 2 - flags e.g. `--address :9000 --certs-dir /etc/minio/certs`
|
|
|
|
// 3 - pool args e.g. `https://node{01...16}.domain/data/disk{001...204} https://node{17...32}.domain/data/disk{001...204}`
|
|
|
|
re := regexp.MustCompile(`^(.*minio\s+server\s+)(--[^\s]+\s+[^\s]+\s+)*(.*)`)
|
|
|
|
|
|
|
|
// stays unchanged in the anonymized version
|
|
|
|
cmdLineWithoutPools := re.ReplaceAllString(cmdLine, `$1$2`)
|
|
|
|
|
|
|
|
// to be anonymized
|
|
|
|
poolsArgs := re.ReplaceAllString(cmdLine, `$3`)
|
|
|
|
var anonPools []string
|
|
|
|
|
|
|
|
if !(strings.Contains(poolsArgs, "{") && strings.Contains(poolsArgs, "}")) {
|
|
|
|
// No ellipses pattern. Anonymize host name from every pool arg
|
|
|
|
pools := strings.Fields(poolsArgs)
|
|
|
|
anonPools = make([]string, len(pools))
|
2023-02-08 09:58:50 -05:00
|
|
|
for index, arg := range pools {
|
|
|
|
anonPools[index] = anonAddr(arg)
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
|
|
|
return cmdLineWithoutPools + strings.Join(anonPools, " ")
|
|
|
|
}
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
// Ellipses pattern in pool args. Regex groups:
|
|
|
|
// 1 - server prefix
|
|
|
|
// 2 - number sequence for servers
|
|
|
|
// 3 - server suffix
|
|
|
|
// 4 - drive prefix (starting with /)
|
|
|
|
// 5 - number sequence for drives
|
|
|
|
// 6 - drive suffix
|
|
|
|
re = regexp.MustCompile(`([^\s^{]*)({\d+...\d+})?([^\s^{^/]*)(/[^\s^{]*)({\d+...\d+})?([^\s]*)`)
|
|
|
|
poolsMatches := re.FindAllStringSubmatch(poolsArgs, -1)
|
|
|
|
|
|
|
|
anonPools = make([]string, len(poolsMatches))
|
|
|
|
idxMap := map[int]string{
|
|
|
|
1: "spfx",
|
|
|
|
3: "ssfx",
|
|
|
|
}
|
|
|
|
for pi, poolsMatch := range poolsMatches {
|
|
|
|
// Replace the server prefix/suffix with anonymized ones
|
|
|
|
for idx, lbl := range idxMap {
|
|
|
|
if len(poolsMatch[idx]) > 0 {
|
|
|
|
poolsMatch[idx] = fmt.Sprintf("%s%d", lbl, crc32.ChecksumIEEE([]byte(poolsMatch[idx])))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove the original pools args present at index 0
|
|
|
|
anonPools[pi] = strings.Join(poolsMatch[1:], "")
|
|
|
|
}
|
|
|
|
return cmdLineWithoutPools + strings.Join(anonPools, " ")
|
|
|
|
}
|
|
|
|
|
|
|
|
anonymizeProcInfo := func(p *madmin.ProcInfo) {
|
|
|
|
p.CmdLine = anonymizeCmdLine(p.CmdLine)
|
|
|
|
anonymizeAddr(p)
|
|
|
|
}
|
|
|
|
|
|
|
|
getAndWriteProcInfo := func() {
|
2021-06-01 11:55:49 -04:00
|
|
|
if query.Get("sysprocess") == "true" {
|
2022-06-03 08:58:45 -04:00
|
|
|
localProcInfo := madmin.GetProcInfo(healthCtx, globalLocalNodeName)
|
2021-07-14 03:23:22 -04:00
|
|
|
anonymizeProcInfo(&localProcInfo)
|
|
|
|
healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, localProcInfo)
|
2022-06-03 08:58:45 -04:00
|
|
|
peerProcInfos := globalNotificationSys.GetProcInfo(healthCtx)
|
2021-07-14 03:23:22 -04:00
|
|
|
for _, p := range peerProcInfos {
|
|
|
|
anonymizeProcInfo(&p)
|
|
|
|
healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, p)
|
|
|
|
}
|
2020-11-20 15:52:53 -05:00
|
|
|
partialWrite(healthInfo)
|
2020-03-27 00:07:39 -04:00
|
|
|
}
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
2020-04-16 13:56:18 -04:00
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
getAndWriteMinioConfig := func() {
|
2021-06-01 11:55:49 -04:00
|
|
|
if query.Get("minioconfig") == "true" {
|
2022-12-19 14:10:14 -05:00
|
|
|
config, err := readServerConfig(healthCtx, objectAPI, nil)
|
2021-06-01 11:55:49 -04:00
|
|
|
if err != nil {
|
|
|
|
healthInfo.Minio.Config = madmin.MinioConfig{
|
|
|
|
Error: err.Error(),
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
healthInfo.Minio.Config = madmin.MinioConfig{
|
2021-06-03 11:15:44 -04:00
|
|
|
Config: config.RedactSensitiveInfo(),
|
2021-06-01 11:55:49 -04:00
|
|
|
}
|
|
|
|
}
|
2020-11-20 15:52:53 -05:00
|
|
|
partialWrite(healthInfo)
|
2020-03-27 00:07:39 -04:00
|
|
|
}
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
anonymizeNetwork := func(network map[string]string) map[string]string {
|
|
|
|
anonNetwork := map[string]string{}
|
|
|
|
for endpoint, status := range network {
|
|
|
|
anonEndpoint := anonAddr(endpoint)
|
|
|
|
anonNetwork[anonEndpoint] = status
|
|
|
|
}
|
|
|
|
return anonNetwork
|
|
|
|
}
|
|
|
|
|
|
|
|
anonymizeDrives := func(drives []madmin.Disk) []madmin.Disk {
|
|
|
|
anonDrives := []madmin.Disk{}
|
|
|
|
for _, drive := range drives {
|
|
|
|
drive.Endpoint = anonAddr(drive.Endpoint)
|
|
|
|
anonDrives = append(anonDrives, drive)
|
|
|
|
}
|
|
|
|
return anonDrives
|
|
|
|
}
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
defer close(healthInfoCh)
|
|
|
|
|
2022-04-03 16:15:02 -04:00
|
|
|
partialWrite(healthInfo) // Write first message with only version and deployment id populated
|
2022-05-06 15:41:07 -04:00
|
|
|
getAndWritePlatformInfo()
|
2021-07-14 03:23:22 -04:00
|
|
|
getAndWriteCPUs()
|
|
|
|
getAndWritePartitions()
|
|
|
|
getAndWriteOSInfo()
|
|
|
|
getAndWriteMemInfo()
|
|
|
|
getAndWriteProcInfo()
|
|
|
|
getAndWriteMinioConfig()
|
2021-07-30 02:05:34 -04:00
|
|
|
getAndWriteSysErrors()
|
2021-08-12 21:58:40 -04:00
|
|
|
getAndWriteSysServices()
|
2021-08-24 20:09:37 -04:00
|
|
|
getAndWriteSysConfig()
|
2020-09-15 21:02:54 -04:00
|
|
|
|
2021-06-01 11:55:49 -04:00
|
|
|
if query.Get("minioinfo") == "true" {
|
2023-02-11 08:01:56 -05:00
|
|
|
infoMessage := getServerInfo(healthCtx, false, nil)
|
2022-10-11 14:31:26 -04:00
|
|
|
servers := make([]madmin.ServerInfo, 0, len(infoMessage.Servers))
|
2021-06-01 11:55:49 -04:00
|
|
|
for _, server := range infoMessage.Servers {
|
2021-07-14 03:23:22 -04:00
|
|
|
anonEndpoint := anonAddr(server.Endpoint)
|
2021-06-01 11:55:49 -04:00
|
|
|
servers = append(servers, madmin.ServerInfo{
|
|
|
|
State: server.State,
|
2021-07-14 03:23:22 -04:00
|
|
|
Endpoint: anonEndpoint,
|
2021-06-01 11:55:49 -04:00
|
|
|
Uptime: server.Uptime,
|
|
|
|
Version: server.Version,
|
|
|
|
CommitID: server.CommitID,
|
2021-07-14 03:23:22 -04:00
|
|
|
Network: anonymizeNetwork(server.Network),
|
|
|
|
Drives: anonymizeDrives(server.Disks),
|
2021-06-01 11:55:49 -04:00
|
|
|
PoolNumber: server.PoolNumber,
|
|
|
|
MemStats: madmin.MemStats{
|
|
|
|
Alloc: server.MemStats.Alloc,
|
|
|
|
TotalAlloc: server.MemStats.TotalAlloc,
|
|
|
|
Mallocs: server.MemStats.Mallocs,
|
|
|
|
Frees: server.MemStats.Frees,
|
|
|
|
HeapAlloc: server.MemStats.HeapAlloc,
|
|
|
|
},
|
2022-10-11 14:31:26 -04:00
|
|
|
GoMaxProcs: server.GoMaxProcs,
|
|
|
|
NumCPU: server.NumCPU,
|
|
|
|
RuntimeVersion: server.RuntimeVersion,
|
|
|
|
GCStats: server.GCStats,
|
|
|
|
MinioEnvVars: server.MinioEnvVars,
|
2021-06-01 11:55:49 -04:00
|
|
|
})
|
|
|
|
}
|
2021-10-20 13:12:01 -04:00
|
|
|
|
2021-11-18 12:02:33 -05:00
|
|
|
tls := getTLSInfo()
|
2021-12-17 17:46:54 -05:00
|
|
|
isK8s := IsKubernetes()
|
|
|
|
isDocker := IsDocker()
|
2021-06-01 11:55:49 -04:00
|
|
|
healthInfo.Minio.Info = madmin.MinioInfo{
|
|
|
|
Mode: infoMessage.Mode,
|
|
|
|
Domain: infoMessage.Domain,
|
|
|
|
Region: infoMessage.Region,
|
|
|
|
SQSARN: infoMessage.SQSARN,
|
|
|
|
DeploymentID: infoMessage.DeploymentID,
|
|
|
|
Buckets: infoMessage.Buckets,
|
|
|
|
Objects: infoMessage.Objects,
|
|
|
|
Usage: infoMessage.Usage,
|
|
|
|
Services: infoMessage.Services,
|
|
|
|
Backend: infoMessage.Backend,
|
|
|
|
Servers: servers,
|
2021-11-18 12:02:33 -05:00
|
|
|
TLS: &tls,
|
2021-12-17 17:46:54 -05:00
|
|
|
IsKubernetes: &isK8s,
|
|
|
|
IsDocker: &isDocker,
|
2021-06-01 11:55:49 -04:00
|
|
|
}
|
|
|
|
partialWrite(healthInfo)
|
|
|
|
}
|
2020-04-16 13:56:18 -04:00
|
|
|
}()
|
2022-11-15 16:53:05 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// HealthInfoHandler - GET /minio/admin/v3/healthinfo
// ----------
// Get server health info
//
// Streams a sequence of JSON-encoded madmin.HealthInfo objects to the
// client as diagnostics are collected; periodic single-space writes keep
// the connection alive between messages.
func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HealthInfo")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	// Admin authorization; validateAdminReq writes the error response itself.
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
	if objectAPI == nil {
		return
	}

	query := r.Form
	healthInfoCh := make(chan madmin.HealthInfo)
	enc := json.NewEncoder(w)

	// Seed message: only version and deployment ID are known up-front;
	// the collector goroutine fills in the rest incrementally.
	healthInfo := madmin.HealthInfo{
		TimeStamp: time.Now().UTC(),
		Version:   madmin.HealthInfoVersion,
		Minio: madmin.MinioHealthInfo{
			Info: madmin.MinioInfo{
				DeploymentID: globalDeploymentID,
			},
		},
	}

	// errResp embeds an API error into the health payload itself (rather
	// than an HTTP error), since the response may already be streaming.
	errResp := func(err error) {
		errorResponse := getAPIErrorResponse(ctx, toAdminAPIErr(ctx, err), r.URL.String(),
			w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
		encodedErrorResponse := encodeResponse(errorResponse)
		healthInfo.Error = string(encodedErrorResponse)
		logger.LogIf(ctx, enc.Encode(healthInfo))
	}

	deadline := 10 * time.Second // Default deadline is 10secs for health diagnostics.
	if dstr := query.Get("deadline"); dstr != "" {
		var err error
		deadline, err = time.ParseDuration(dstr)
		if err != nil {
			errResp(err)
			return
		}
	}

	// Cluster-wide lock so only one health check runs at a time.
	nsLock := objectAPI.NewNSLock(minioMetaBucket, "health-check-in-progress")
	lkctx, err := nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline))
	if err != nil { // returns a locked lock
		errResp(err)
		return
	}

	defer nsLock.Unlock(lkctx)
	healthCtx, healthCancel := context.WithTimeout(lkctx.Context(), deadline)
	defer healthCancel()

	// Collector closes healthInfoCh when done; that closure ends the loop below.
	go fetchHealthInfo(healthCtx, objectAPI, &query, healthInfoCh, healthInfo)

	setCommonHeaders(w)
	setEventStreamHeaders(w)
	w.WriteHeader(http.StatusOK)

	// Keep-alive ticker: emit a harmless byte every 5s when no data is ready.
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case oinfo, ok := <-healthInfoCh:
			if !ok {
				// Collector finished; stream complete.
				return
			}
			if err := enc.Encode(oinfo); err != nil {
				return
			}
			if len(healthInfoCh) == 0 {
				// Flush if nothing is queued
				w.(http.Flusher).Flush()
			}
		case <-ticker.C:
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-healthCtx.Done():
			// Deadline elapsed or client context canceled.
			return
		}
	}
}
|
|
|
|
|
2021-10-20 13:12:01 -04:00
|
|
|
func getTLSInfo() madmin.TLSInfo {
|
|
|
|
tlsInfo := madmin.TLSInfo{
|
|
|
|
TLSEnabled: globalIsTLS,
|
|
|
|
Certs: []madmin.TLSCert{},
|
|
|
|
}
|
|
|
|
|
|
|
|
if globalIsTLS {
|
|
|
|
for _, c := range globalPublicCerts {
|
|
|
|
tlsInfo.Certs = append(tlsInfo.Certs, madmin.TLSCert{
|
|
|
|
PubKeyAlgo: c.PublicKeyAlgorithm.String(),
|
|
|
|
SignatureAlgo: c.SignatureAlgorithm.String(),
|
|
|
|
NotBefore: c.NotBefore,
|
|
|
|
NotAfter: c.NotAfter,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return tlsInfo
|
|
|
|
}
|
|
|
|
|
2020-04-07 22:30:59 -04:00
|
|
|
// ServerInfoHandler - GET /minio/admin/v3/info
|
2019-12-11 17:27:03 -05:00
|
|
|
// ----------
|
|
|
|
// Get server information
|
|
|
|
func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
ctx := newContext(r, w, "ServerInfo")
|
2020-05-11 13:34:08 -04:00
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2020-05-11 13:34:08 -04:00
|
|
|
|
2020-12-21 12:35:19 -05:00
|
|
|
// Validate request signature.
|
|
|
|
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ServerInfoAdminAction, "")
|
|
|
|
if adminAPIErr != ErrNone {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
|
2019-12-11 17:27:03 -05:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Marshal API response
|
2023-02-11 08:01:56 -05:00
|
|
|
jsonBytes, err := json.Marshal(getServerInfo(ctx, true, r))
|
2019-12-11 17:27:03 -05:00
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-10-02 19:19:44 -04:00
|
|
|
// Reply with storage information (across nodes in a
|
2019-12-11 17:27:03 -05:00
|
|
|
// distributed setup) as json.
|
|
|
|
writeSuccessResponseJSON(w, jsonBytes)
|
|
|
|
}
|
|
|
|
|
2021-03-01 11:09:43 -05:00
|
|
|
func assignPoolNumbers(servers []madmin.ServerProperties) {
|
|
|
|
for i := range servers {
|
|
|
|
for idx, ge := range globalEndpoints {
|
|
|
|
for _, endpoint := range ge.Endpoints {
|
|
|
|
if servers[i].Endpoint == endpoint.Host {
|
|
|
|
servers[i].PoolNumber = idx + 1
|
|
|
|
} else if host, err := xnet.ParseHost(servers[i].Endpoint); err == nil {
|
|
|
|
if host.Name == endpoint.Hostname() {
|
|
|
|
servers[i].PoolNumber = idx + 1
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-02 19:19:44 -04:00
|
|
|
func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
|
2020-02-01 07:20:04 -05:00
|
|
|
lambdaMap := make(map[string][]madmin.TargetIDStatus)
|
2019-12-11 17:27:03 -05:00
|
|
|
|
2020-10-02 19:19:44 -04:00
|
|
|
for _, tgt := range globalConfigTargetList.Targets() {
|
|
|
|
targetIDStatus := make(map[string]madmin.Status)
|
|
|
|
active, _ := tgt.IsActive()
|
|
|
|
targetID := tgt.ID()
|
|
|
|
if active {
|
2021-03-02 20:28:04 -05:00
|
|
|
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOnline)}
|
2020-10-02 19:19:44 -04:00
|
|
|
} else {
|
2021-03-02 20:28:04 -05:00
|
|
|
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOffline)}
|
2020-10-02 19:19:44 -04:00
|
|
|
}
|
|
|
|
list := lambdaMap[targetID.Name]
|
|
|
|
list = append(list, targetIDStatus)
|
|
|
|
lambdaMap[targetID.Name] = list
|
|
|
|
}
|
|
|
|
|
2019-12-11 17:27:03 -05:00
|
|
|
notify := make([]map[string][]madmin.TargetIDStatus, len(lambdaMap))
|
|
|
|
counter := 0
|
|
|
|
for key, value := range lambdaMap {
|
|
|
|
v := make(map[string][]madmin.TargetIDStatus)
|
|
|
|
v[key] = value
|
|
|
|
notify[counter] = v
|
|
|
|
counter++
|
|
|
|
}
|
|
|
|
return notify
|
|
|
|
}
|
|
|
|
|
2021-01-29 20:55:37 -05:00
|
|
|
// fetchKMSStatus fetches KMS-related status information.
|
|
|
|
func fetchKMSStatus() madmin.KMS {
|
|
|
|
kmsStat := madmin.KMS{}
|
2019-12-11 17:27:03 -05:00
|
|
|
if GlobalKMS == nil {
|
2021-01-29 20:55:37 -05:00
|
|
|
kmsStat.Status = "disabled"
|
|
|
|
return kmsStat
|
2019-12-11 17:27:03 -05:00
|
|
|
}
|
|
|
|
|
2022-07-18 21:54:27 -04:00
|
|
|
stat, err := GlobalKMS.Stat(context.Background())
|
2021-04-15 11:47:33 -04:00
|
|
|
if err != nil {
|
2021-03-02 20:28:04 -05:00
|
|
|
kmsStat.Status = string(madmin.ItemOffline)
|
2021-04-15 11:47:33 -04:00
|
|
|
return kmsStat
|
|
|
|
}
|
|
|
|
if len(stat.Endpoints) == 0 {
|
|
|
|
kmsStat.Status = stat.Name
|
2021-04-22 11:45:30 -04:00
|
|
|
return kmsStat
|
|
|
|
}
|
|
|
|
kmsStat.Status = string(madmin.ItemOnline)
|
2019-12-11 17:27:03 -05:00
|
|
|
|
2021-04-22 11:45:30 -04:00
|
|
|
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
|
|
|
|
// 1. Generate a new key using the KMS.
|
2022-07-18 21:54:27 -04:00
|
|
|
key, err := GlobalKMS.GenerateKey(context.Background(), "", kmsContext)
|
2021-04-22 11:45:30 -04:00
|
|
|
if err != nil {
|
|
|
|
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
|
|
|
|
} else {
|
|
|
|
kmsStat.Encrypt = "success"
|
|
|
|
}
|
2021-04-15 11:47:33 -04:00
|
|
|
|
2021-04-22 11:45:30 -04:00
|
|
|
// 2. Verify that we can indeed decrypt the (encrypted) key
|
|
|
|
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
|
|
|
|
switch {
|
|
|
|
case err != nil:
|
|
|
|
kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
|
|
|
|
case subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1:
|
|
|
|
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
|
|
|
|
default:
|
|
|
|
kmsStat.Decrypt = "success"
|
2019-12-11 17:27:03 -05:00
|
|
|
}
|
2021-01-29 20:55:37 -05:00
|
|
|
return kmsStat
|
2019-12-11 17:27:03 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// fetchLoggerDetails return log info
|
2020-10-02 19:19:44 -04:00
|
|
|
func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
|
|
|
|
var loggerInfo []madmin.Logger
|
|
|
|
var auditloggerInfo []madmin.Audit
|
2022-11-28 11:03:26 -05:00
|
|
|
for _, tgt := range logger.SystemTargets() {
|
|
|
|
if tgt.Endpoint() != "" {
|
|
|
|
loggerInfo = append(loggerInfo, madmin.Logger{tgt.String(): logger.TargetStatus(tgt)})
|
2019-12-11 17:27:03 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-28 11:03:26 -05:00
|
|
|
for _, tgt := range logger.AuditTargets() {
|
|
|
|
if tgt.Endpoint() != "" {
|
|
|
|
auditloggerInfo = append(auditloggerInfo, madmin.Audit{tgt.String(): logger.TargetStatus(tgt)})
|
2019-12-11 17:27:03 -05:00
|
|
|
}
|
|
|
|
}
|
2020-10-02 19:19:44 -04:00
|
|
|
|
|
|
|
return loggerInfo, auditloggerInfo
|
2019-12-11 17:27:03 -05:00
|
|
|
}
|
|
|
|
|
2022-07-25 12:11:35 -04:00
|
|
|
func embedFileInZip(zipWriter *zip.Writer, name string, data []byte) error {
|
|
|
|
// Send profiling data to zip as file
|
|
|
|
header, zerr := zip.FileInfoHeader(dummyFileInfo{
|
|
|
|
name: name,
|
|
|
|
size: int64(len(data)),
|
|
|
|
mode: 0o600,
|
|
|
|
modTime: UTCNow(),
|
|
|
|
isDir: false,
|
|
|
|
sys: nil,
|
|
|
|
})
|
|
|
|
if zerr != nil {
|
|
|
|
return zerr
|
|
|
|
}
|
|
|
|
header.Method = zip.Deflate
|
|
|
|
zwriter, zerr := zipWriter.CreateHeader(header)
|
|
|
|
if zerr != nil {
|
|
|
|
return zerr
|
|
|
|
}
|
|
|
|
_, err := io.Copy(zwriter, bytes.NewReader(data))
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-11-02 16:36:38 -04:00
|
|
|
// getClusterMetaInfo gets information of the current cluster and
// returns it.
// This is not a critical function, and it is allowed
// to fail with a ten seconds timeout, returning nil.
func getClusterMetaInfo(ctx context.Context) []byte {
	objectAPI := newObjectLayerFn()
	if objectAPI == nil {
		// Object layer not initialized yet; nothing to report.
		return nil
	}

	// Cluster info is best-effort: cap it at ten seconds so it can never
	// hold up the caller (profiling/inspect data is the critical part).
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	resultCh := make(chan madmin.ClusterRegistrationInfo)

	go func() {
		defer close(resultCh)

		ci := madmin.ClusterRegistrationInfo{}
		ci.Info.NoOfServerPools = len(globalEndpoints)
		ci.Info.NoOfServers = len(globalEndpoints.Hostnames())
		ci.Info.MinioVersion = Version

		si := objectAPI.StorageInfo(ctx)

		ci.Info.NoOfDrives = len(si.Disks)
		for _, disk := range si.Disks {
			ci.Info.TotalDriveSpace += disk.TotalSpace
			ci.Info.UsedDriveSpace += disk.UsedSpace
		}

		// Best-effort: error deliberately ignored, zero values are fine here.
		dataUsageInfo, _ := loadDataUsageFromBackend(ctx, objectAPI)

		ci.UsedCapacity = dataUsageInfo.ObjectsTotalSize
		ci.Info.NoOfBuckets = dataUsageInfo.BucketsCount
		ci.Info.NoOfObjects = dataUsageInfo.ObjectsTotalCount

		ci.DeploymentID = globalDeploymentID
		ci.ClusterName = fmt.Sprintf("%d-servers-%d-disks-%s", ci.Info.NoOfServers, ci.Info.NoOfDrives, ci.Info.MinioVersion)

		// Hand off the result unless the deadline fired first; the
		// ctx.Done branch prevents a goroutine leak on timeout.
		select {
		case resultCh <- ci:
		case <-ctx.Done():
			return
		}
	}()

	select {
	case <-ctx.Done():
		// Timed out gathering info; callers treat nil as "no cluster info".
		return nil
	case ci := <-resultCh:
		out, err := json.MarshalIndent(ci, "", " ")
		if err != nil {
			logger.LogIf(ctx, err)
			return nil
		}
		return out
	}
}
|
|
|
|
|
2022-11-02 16:36:38 -04:00
|
|
|
func bytesToPublicKey(pub []byte) (*rsa.PublicKey, error) {
|
|
|
|
block, _ := pem.Decode(pub)
|
|
|
|
if block != nil {
|
|
|
|
pub = block.Bytes
|
|
|
|
}
|
|
|
|
key, err := x509.ParsePKCS1PublicKey(pub)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return key, nil
|
|
|
|
}
|
|
|
|
|
2021-07-09 14:29:16 -04:00
|
|
|
// getRawDataer provides an interface for getting raw FS files.
type getRawDataer interface {
	// GetRawData reads the raw file(s) matching volume/file and invokes fn
	// for each one found, passing the content reader plus the host, disk,
	// filename and stat info of that copy.
	GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, info StatInfo) error) error
}
|
|
|
|
|
|
|
|
// InspectDataHandler - GET /minio/admin/v3/inspect-data
// ----------
// Download file from all nodes in a zip format
//
// The zip is encrypted: with an RSA public key supplied by the caller
// (estream format) when "public-key" is given, otherwise with a random
// AES-256-GCM key that is written to the response in the clear (legacy).
func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "InspectData")

	// Validate request signature.
	_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.InspectDataAction, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return
	}
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	// Raw-file access is only available on object layers implementing
	// getRawDataer (erasure-backed layers).
	objLayer := newObjectLayerFn()
	o, ok := objLayer.(getRawDataer)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	if err := parseForm(r); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// "volume" (bucket) and "file" query parameters are both mandatory.
	volume := r.Form.Get("volume")
	if len(volume) == 0 {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketName), r.URL)
		return
	}
	file := r.Form.Get("file")
	if len(file) == 0 {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}
	// Normalize OS-specific separators to forward slashes.
	file = strings.ReplaceAll(file, string(os.PathSeparator), "/")

	// Reject attempts to traverse parent or absolute paths.
	if strings.Contains(file, "..") || strings.Contains(volume, "..") {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	var publicKey *rsa.PublicKey

	// Optional caller-provided RSA public key (base64, PEM or raw PKCS#1 DER).
	publicKeyB64 := r.Form.Get("public-key")
	if publicKeyB64 != "" {
		publicKeyBytes, err := base64.StdEncoding.DecodeString(publicKeyB64)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		publicKey, err = bytesToPublicKey(publicKeyBytes)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}

	// Write a version for making *incompatible* changes.
	// The AdminClient will reject any version it does not know.
	var inspectZipW *zip.Writer
	if publicKey != nil {
		// Modern path: estream container holding an encrypted cluster.info
		// (readable by SUBNET) and an encrypted inspect.zip (readable by
		// the caller's private key).
		w.WriteHeader(200)
		stream := estream.NewWriter(w)
		defer stream.Close()

		clusterKey, err := bytesToPublicKey(getSubnetAdminPublicKey())
		if err != nil {
			logger.LogIf(ctx, stream.AddError(err.Error()))
			return
		}
		err = stream.AddKeyEncrypted(clusterKey)
		if err != nil {
			logger.LogIf(ctx, stream.AddError(err.Error()))
			return
		}
		if b := getClusterMetaInfo(ctx); len(b) > 0 {
			w, err := stream.AddEncryptedStream("cluster.info", nil)
			if err != nil {
				logger.LogIf(ctx, err)
				return
			}
			w.Write(b)
			w.Close()
		}

		// Add new key for inspect data.
		if err := stream.AddKeyEncrypted(publicKey); err != nil {
			logger.LogIf(ctx, stream.AddError(err.Error()))
			return
		}
		encStream, err := stream.AddEncryptedStream("inspect.zip", nil)
		if err != nil {
			logger.LogIf(ctx, stream.AddError(err.Error()))
			return
		}
		defer encStream.Close()

		inspectZipW = zip.NewWriter(encStream)
		defer inspectZipW.Close()
	} else {
		// Legacy: Remove if we stop supporting inspection without public key.
		var key [32]byte
		// MUST use crypto/rand
		n, err := crand.Read(key[:])
		if err != nil || n != len(key) {
			logger.LogIf(ctx, err)
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		// Write a version for making *incompatible* changes.
		// The AdminClient will reject any version it does not know.
		if publicKey == nil {
			// Version byte followed by the symmetric key in the clear.
			w.Write([]byte{1})
			w.Write(key[:])
		}

		stream, err := sio.AES_256_GCM.Stream(key[:])
		if err != nil {
			logger.LogIf(ctx, err)
			return
		}
		// Zero nonce, we only use each key once, and 32 bytes is plenty.
		nonce := make([]byte, stream.NonceSize())
		encw := stream.EncryptWriter(w, nonce, nil)
		defer encw.Close()

		// Initialize a zip writer which will provide a zipped content
		// of profiling data of all nodes
		inspectZipW = zip.NewWriter(encw)
		defer inspectZipW.Close()

		if b := getClusterMetaInfo(ctx); len(b) > 0 {
			logger.LogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b))
		}
	}

	// rawDataFn appends one raw file (from one host/disk) to the zip.
	// It always returns nil so a bad entry does not abort the whole walk.
	rawDataFn := func(r io.Reader, host, disk, filename string, si StatInfo) error {
		// Prefix host+disk
		filename = path.Join(host, disk, filename)
		if si.Dir {
			filename += "/"
			si.Size = 0
		}
		if si.Mode == 0 {
			// Not, set it to default.
			si.Mode = 0o600
		}
		if si.ModTime.IsZero() {
			// Set time to now.
			si.ModTime = time.Now()
		}
		header, zerr := zip.FileInfoHeader(dummyFileInfo{
			name:    filename,
			size:    si.Size,
			mode:    os.FileMode(si.Mode),
			modTime: si.ModTime,
			isDir:   si.Dir,
			sys:     nil,
		})
		if zerr != nil {
			logger.LogIf(ctx, zerr)
			return nil
		}
		header.Method = zip.Deflate
		zwriter, zerr := inspectZipW.CreateHeader(header)
		if zerr != nil {
			logger.LogIf(ctx, zerr)
			return nil
		}
		if _, err := io.Copy(zwriter, r); err != nil {
			logger.LogIf(ctx, err)
		}
		return nil
	}
	err := o.GetRawData(ctx, volume, file, rawDataFn)
	// NOTE(review): when err is nil this still calls LogIf with nil —
	// presumably LogIf ignores nil errors; confirm.
	if !errors.Is(err, errFileNotFound) {
		logger.LogIf(ctx, err)
	}

	// save the format.json as part of inspect by default
	if volume != minioMetaBucket && file != formatConfigFile {
		err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
	}
	// NOTE(review): if the condition above is false, err here is the stale
	// result of the first GetRawData call, already logged once — verify
	// the double check/log is intended.
	if !errors.Is(err, errFileNotFound) {
		logger.LogIf(ctx, err)
	}

	// save args passed to inspect command
	var sb bytes.Buffer
	fmt.Fprintf(&sb, "Inspect path: %s%s%s\n", volume, slashSeparator, file)
	sb.WriteString("Server command line args:")
	for _, pool := range globalEndpoints {
		sb.WriteString(" ")
		sb.WriteString(pool.CmdLine)
	}
	sb.WriteString("\n")
	logger.LogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes()))
}
|
2021-07-14 03:23:22 -04:00
|
|
|
|
2022-12-12 13:28:15 -05:00
|
|
|
func getSubnetAdminPublicKey() []byte {
|
|
|
|
if globalIsCICD {
|
|
|
|
return subnetAdminPublicKeyDev
|
|
|
|
}
|
|
|
|
return subnetAdminPublicKey
|
|
|
|
}
|
|
|
|
|
2021-07-14 03:23:22 -04:00
|
|
|
func createHostAnonymizerForFSMode() map[string]string {
|
|
|
|
hostAnonymizer := map[string]string{
|
|
|
|
globalLocalNodeName: "server1",
|
|
|
|
}
|
|
|
|
|
|
|
|
apiEndpoints := getAPIEndpoints()
|
|
|
|
for _, ep := range apiEndpoints {
|
2021-08-03 00:50:20 -04:00
|
|
|
if len(ep) == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if url, err := xnet.ParseHTTPURL(ep); err == nil {
|
|
|
|
// In FS mode the drive names don't include the host.
|
|
|
|
// So mapping just the host should be sufficient.
|
|
|
|
hostAnonymizer[url.Host] = "server1"
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return hostAnonymizer
|
|
|
|
}
|
|
|
|
|
|
|
|
// anonymizeHost - Add entries related to given endpoint in the host anonymizer map
|
|
|
|
// The health report data can contain the hostname in various forms e.g. host, host:port,
|
|
|
|
// host:port/drivepath, full url (http://host:port/drivepath)
|
|
|
|
// The anonymizer map will have mappings for all these varients for efficiently replacing
|
|
|
|
// any of these strings to the anonymized versions at the time of health report generation.
|
|
|
|
func anonymizeHost(hostAnonymizer map[string]string, endpoint Endpoint, poolNum int, srvrNum int) {
|
|
|
|
if len(endpoint.Host) == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
currentURL := endpoint.String()
|
|
|
|
|
|
|
|
// mapIfNotPresent - Maps the given key to the value only if the key is not present in the map
|
|
|
|
mapIfNotPresent := func(m map[string]string, key string, val string) {
|
|
|
|
_, found := m[key]
|
|
|
|
if !found {
|
|
|
|
m[key] = val
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
_, found := hostAnonymizer[currentURL]
|
|
|
|
if !found {
|
|
|
|
// In distributed setup, anonymized addr = 'poolNum.serverNum'
|
|
|
|
newHost := fmt.Sprintf("pool%d.server%d", poolNum, srvrNum)
|
2022-04-12 16:17:44 -04:00
|
|
|
schemePfx := endpoint.Scheme + "://"
|
2021-07-14 03:23:22 -04:00
|
|
|
|
|
|
|
// Hostname
|
|
|
|
mapIfNotPresent(hostAnonymizer, endpoint.Hostname(), newHost)
|
|
|
|
|
|
|
|
newHostPort := newHost
|
|
|
|
if len(endpoint.Port()) > 0 {
|
|
|
|
// Host + port
|
|
|
|
newHostPort = newHost + ":" + endpoint.Port()
|
|
|
|
mapIfNotPresent(hostAnonymizer, endpoint.Host, newHostPort)
|
2022-04-12 16:17:44 -04:00
|
|
|
mapIfNotPresent(hostAnonymizer, schemePfx+endpoint.Host, newHostPort)
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
newHostPortPath := newHostPort
|
|
|
|
if len(endpoint.Path) > 0 {
|
|
|
|
// Host + port + path
|
|
|
|
currentHostPortPath := endpoint.Host + endpoint.Path
|
|
|
|
newHostPortPath = newHostPort + endpoint.Path
|
|
|
|
mapIfNotPresent(hostAnonymizer, currentHostPortPath, newHostPortPath)
|
2022-04-12 16:17:44 -04:00
|
|
|
mapIfNotPresent(hostAnonymizer, schemePfx+currentHostPortPath, newHostPortPath)
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Full url
|
2022-04-12 16:17:44 -04:00
|
|
|
hostAnonymizer[currentURL] = schemePfx + newHostPortPath
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// createHostAnonymizer - Creats a map of various strings to corresponding anonymized names
|
|
|
|
func createHostAnonymizer() map[string]string {
|
|
|
|
if !globalIsDistErasure {
|
|
|
|
return createHostAnonymizerForFSMode()
|
|
|
|
}
|
|
|
|
|
|
|
|
hostAnonymizer := map[string]string{}
|
2023-01-19 21:05:44 -05:00
|
|
|
hosts := set.NewStringSet()
|
|
|
|
srvrIdx := 0
|
2021-07-14 03:23:22 -04:00
|
|
|
|
|
|
|
for poolIdx, pool := range globalEndpoints {
|
2023-01-19 21:05:44 -05:00
|
|
|
for _, endpoint := range pool.Endpoints {
|
|
|
|
if !hosts.Contains(endpoint.Host) {
|
|
|
|
hosts.Add(endpoint.Host)
|
|
|
|
srvrIdx++
|
|
|
|
}
|
|
|
|
anonymizeHost(hostAnonymizer, endpoint, poolIdx+1, srvrIdx)
|
2021-07-14 03:23:22 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return hostAnonymizer
|
|
|
|
}
|