/*
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"crypto/subtle"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/gorilla/mux"

	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/config/notify"
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/logger/message/log"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/handlers"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/madmin"
	xnet "github.com/minio/minio/pkg/net"
	trace "github.com/minio/minio/pkg/trace"
)

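// maxEConfigJSONSize bounds the size of config JSON payloads accepted by
// the admin APIs; the value 262272 reads as 256 KiB plus a 128-byte margin
// (presumably headroom for encryption metadata -- an inference from the
// constant, not documented in this file).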
const (
	maxEConfigJSONSize = 262272
)

// Only valid query params for mgmt admin APIs.
const (
	mgmtBucket      = "bucket"
	mgmtPrefix      = "prefix"
	mgmtClientToken = "clientToken"
	mgmtForceStart  = "forceStart"
	mgmtForceStop   = "forceStop"
)

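// updateServer applies the downloaded release via doUpdate and, on success,
// reports the currently running version alongside the release tag of the
// binary just written to disk.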
func updateServer(u *url.URL, sha256Sum []byte, lrTime time.Time, mode string) (us madmin.ServerUpdateStatus, err error) {
	if err = doUpdate(u, lrTime, sha256Sum, mode); err != nil {
		return us, err
	}

	us.CurrentVersion = Version
	us.UpdatedVersion = lrTime.Format(minioReleaseTagTimeLayout)
	return us, nil
}

// ServerUpdateHandler - POST /minio/admin/v3/update?updateURL={updateURL}
// ----------
// Updates all MinIO servers and restarts them gracefully.
func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ServerUpdate")

	defer logger.AuditLog(w, r, "ServerUpdate", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerUpdateAdminAction)
	if objectAPI == nil {
		return
	}

	if globalInplaceUpdateDisabled {
		// In-place update is disabled (MINIO_UPDATE=off), typically
		// in containerized deployments.
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	}

	vars := mux.Vars(r)
	updateURL := vars["updateURL"]
	mode := getMinioMode()
	if updateURL == "" {
		updateURL = minioReleaseInfoURL
		if runtime.GOOS == globalWindowsOSName {
			updateURL = minioReleaseWindowsInfoURL
		}
	}

	u, err := url.Parse(updateURL)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	content, err := downloadReleaseURL(u, updateTimeout, mode)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	sha256Sum, lrTime, err := parseReleaseData(content)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	u.Path = path.Dir(u.Path) + SlashSeparator + "minio.RELEASE." + lrTime.Format(minioReleaseTagTimeLayout)

	crTime, err := GetCurrentReleaseTime()
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if lrTime.Sub(crTime) < 0 {
		updateStatus := madmin.ServerUpdateStatus{
			CurrentVersion: Version,
			UpdatedVersion: Version,
		}

		// Marshal API response
		jsonBytes, err := json.Marshal(updateStatus)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		writeSuccessResponseJSON(w, jsonBytes)
		return
	}

	for _, nerr := range globalNotificationSys.ServerUpdate(ctx, u, sha256Sum, lrTime) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
			err = fmt.Errorf("Server update failed, please do not restart the servers yet: failed with %w", nerr.Err)
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}

	updateStatus, err := updateServer(u, sha256Sum, lrTime, mode)
	if err != nil {
		err = fmt.Errorf("Server update failed, please do not restart the servers yet: failed with %w", err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Marshal API response
	jsonBytes, err := json.Marshal(updateStatus)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, jsonBytes)

	// Notify all other MinIO peers signal service.
	for _, nerr := range globalNotificationSys.SignalService(serviceRestart) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	globalServiceSignalCh <- serviceRestart
}

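// An illustrative (not normative) success body for ServerUpdateHandler,
// based on the two madmin.ServerUpdateStatus fields set above; the exact
// JSON keys come from madmin struct tags and are assumed here:
//
//	{"currentVersion":"<running version>","updatedVersion":"<downloaded release tag>"}
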
// ServiceHandler - POST /minio/admin/v3/service?action={action}
// ----------
// Restarts/stops the MinIO server gracefully. In a distributed setup,
// the signal is relayed to all the servers in the deployment.
func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "Service")

	defer logger.AuditLog(w, r, "Service", mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	action := vars["action"]

	var serviceSig serviceSignal
	switch madmin.ServiceAction(action) {
	case madmin.ServiceActionRestart:
		serviceSig = serviceRestart
	case madmin.ServiceActionStop:
		serviceSig = serviceStop
	default:
		logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.Application)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
		return
	}

	var objectAPI ObjectLayer
	if serviceSig == serviceRestart {
		objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceRestartAdminAction)
	} else {
		objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceStopAdminAction)
	}
	if objectAPI == nil {
		return
	}

	// Notify all other MinIO peers signal service.
	for _, nerr := range globalNotificationSys.SignalService(serviceSig) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	// Reply to the client before restarting, stopping MinIO server.
	writeSuccessResponseHeadersOnly(w)

	globalServiceSignalCh <- serviceSig
}

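// An illustrative ServiceHandler request; the action values mirror the
// madmin.ServiceAction constants matched above ("restart" and "stop" are
// assumed to be their string values):
//
//	POST /minio/admin/v3/service?action=restart
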
// ServerProperties holds some server information such as version, region,
// uptime, etc.
type ServerProperties struct {
	Uptime       int64    `json:"uptime"`
	Version      string   `json:"version"`
	CommitID     string   `json:"commitID"`
	DeploymentID string   `json:"deploymentID"`
	Region       string   `json:"region"`
	SQSARN       []string `json:"sqsARN"`
}

// ServerConnStats holds transferred bytes from/to the server
type ServerConnStats struct {
	TotalInputBytes  uint64 `json:"transferred"`
	TotalOutputBytes uint64 `json:"received"`
	Throughput       uint64 `json:"throughput,omitempty"`
	S3InputBytes     uint64 `json:"transferredS3"`
	S3OutputBytes    uint64 `json:"receivedS3"`
}

// ServerHTTPAPIStats holds the total number of HTTP operations from/to the
// server, including the average duration spent per call.
type ServerHTTPAPIStats struct {
	APIStats map[string]int `json:"apiStats"`
}

// ServerHTTPStats holds all types of HTTP operations performed to/from the
// server, including their average execution time.
type ServerHTTPStats struct {
	CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
	TotalS3Requests   ServerHTTPAPIStats `json:"totalS3Requests"`
	TotalS3Errors     ServerHTTPAPIStats `json:"totalS3Errors"`
}

// ServerInfoData holds storage, connections and other
// information of a given server.
type ServerInfoData struct {
	ConnStats  ServerConnStats  `json:"network"`
	HTTPStats  ServerHTTPStats  `json:"http"`
	Properties ServerProperties `json:"server"`
}

// ServerInfo holds server information result of one node
type ServerInfo struct {
	Error string          `json:"error"`
	Addr  string          `json:"addr"`
	Data  *ServerInfoData `json:"data"`
}

// StorageInfoHandler - GET /minio/admin/v3/storageinfo
// ----------
// Get server information
func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StorageInfo")

	defer logger.AuditLog(w, r, "StorageInfo", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.StorageInfoAdminAction)
	if objectAPI == nil {
		return
	}

	// Ignore any errors here.
	storageInfo, _ := objectAPI.StorageInfo(ctx, false)

	// Collect any disks under healing.
	healing, _ := getAggregatedBackgroundHealState(ctx)
	healDisks := make(map[string]struct{}, len(healing.HealDisks))
	for _, disk := range healing.HealDisks {
		healDisks[disk] = struct{}{}
	}

	// Mark all disks that belong to a healing endpoint.
	for i, disk := range storageInfo.Disks {
		if _, ok := healDisks[disk.Endpoint]; ok {
			storageInfo.Disks[i].Healing = true
		}
	}

	// Marshal API response
	jsonBytes, err := json.Marshal(storageInfo)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Reply with storage information (across nodes in a
	// distributed setup) as json.
	writeSuccessResponseJSON(w, jsonBytes)
}

// DataUsageInfoHandler - GET /minio/admin/v3/datausage
// ----------
// Get server/cluster data usage info
func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DataUsageInfo")

	defer logger.AuditLog(w, r, "DataUsageInfo", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DataUsageInfoAdminAction)
	if objectAPI == nil {
		return
	}

	dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	dataUsageInfoJSON, err := json.Marshal(dataUsageInfo)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, dataUsageInfoJSON)
}

func lriToLockEntry(l lockRequesterInfo, resource, server string, rquorum, wquorum int) *madmin.LockEntry {
	entry := &madmin.LockEntry{
		Timestamp:  l.Timestamp,
		Resource:   resource,
		ServerList: []string{server},
		Source:     l.Source,
		Owner:      l.Owner,
		ID:         l.UID,
	}
	if l.Writer {
		entry.Type = "WRITE"
		entry.Quorum = wquorum
	} else {
		entry.Type = "READ"
		entry.Quorum = rquorum
	}
	return entry
}

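// topLockEntries aggregates the per-peer lock listings into unique entries;
// an entry is reported only when it is held on at least its quorum of
// servers (wquorum for write locks, rquorum for read locks), unless stale
// listing is requested, in which case sub-quorum entries are kept as well.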
func topLockEntries(peerLocks []*PeerLocks, count int, rquorum, wquorum int, stale bool) madmin.LockEntries {
	entryMap := make(map[string]*madmin.LockEntry)
	for _, peerLock := range peerLocks {
		if peerLock == nil {
			continue
		}
		for _, locks := range peerLock.Locks {
			for k, v := range locks {
				for _, lockReqInfo := range v {
					if val, ok := entryMap[lockReqInfo.UID]; ok {
						val.ServerList = append(val.ServerList, peerLock.Addr)
					} else {
						entryMap[lockReqInfo.UID] = lriToLockEntry(lockReqInfo, k, peerLock.Addr, rquorum, wquorum)
					}
				}
			}
		}
	}
	var lockEntries madmin.LockEntries
	for _, v := range entryMap {
		if len(lockEntries) == count {
			break
		}
		if stale {
			lockEntries = append(lockEntries, *v)
			continue
		}
		if len(v.ServerList) >= v.Quorum {
			lockEntries = append(lockEntries, *v)
		}
	}
	sort.Sort(lockEntries)
	return lockEntries
}

// PeerLocks holds the lock listing result of one node
type PeerLocks struct {
	Addr  string
	Locks GetLocksResp
}

// TopLocksHandler Get list of locks in use
func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "TopLocks")

	defer logger.AuditLog(w, r, "TopLocks", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.TopLocksAdminAction)
	if objectAPI == nil {
		return
	}

	count := 10 // by default list only top 10 entries
	if countStr := r.URL.Query().Get("count"); countStr != "" {
		var err error
		count, err = strconv.Atoi(countStr)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}
	stale := r.URL.Query().Get("stale") == "true" // list also stale locks

	peerLocks := globalNotificationSys.GetLocks(ctx, r)

	rquorum := getReadQuorum(objectAPI.SetDriveCount())
	wquorum := getWriteQuorum(objectAPI.SetDriveCount())

	topLocks := topLockEntries(peerLocks, count, rquorum, wquorum, stale)

	// Marshal API response
	jsonBytes, err := json.Marshal(topLocks)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Reply with the lock information (across nodes in a
	// distributed setup) as json.
	writeSuccessResponseJSON(w, jsonBytes)
}

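// An illustrative TopLocksHandler request; count and stale are the query
// parameters parsed above, while the route path itself is registered
// elsewhere and only assumed here:
//
//	GET /minio/admin/v3/top/locks?count=25&stale=true
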
// StartProfilingResult contains the status of the starting
// profiling action in a given server
type StartProfilingResult struct {
	NodeName string `json:"nodeName"`
	Success  bool   `json:"success"`
	Error    string `json:"error"`
}

// StartProfilingHandler - POST /minio/admin/v3/profiling/start?profilerType={profilerType}
// ----------
// Enable server profiling
func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StartProfiling")

	defer logger.AuditLog(w, r, "StartProfiling", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ProfilingAdminAction)
	if objectAPI == nil {
		return
	}

	vars := mux.Vars(r)
	profiles := strings.Split(vars["profilerType"], ",")
	thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	globalProfilerMu.Lock()
	defer globalProfilerMu.Unlock()

	if globalProfiler == nil {
		globalProfiler = make(map[string]minioProfiler, 10)
	}

	// Stop profiler of all types if already running
	for k, v := range globalProfiler {
		for _, p := range profiles {
			if p == k {
				v.Stop()
				delete(globalProfiler, k)
			}
		}
	}

	// Start profiling on remote servers.
	var hostErrs []NotificationPeerErr
	for _, profiler := range profiles {
		hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(profiler)...)

		// Start profiling locally as well.
		prof, err := startProfiler(profiler)
		if err != nil {
			hostErrs = append(hostErrs, NotificationPeerErr{
				Host: *thisAddr,
				Err:  err,
			})
		} else {
			globalProfiler[profiler] = prof
			hostErrs = append(hostErrs, NotificationPeerErr{
				Host: *thisAddr,
			})
		}
	}

	var startProfilingResult []StartProfilingResult

	for _, nerr := range hostErrs {
		result := StartProfilingResult{NodeName: nerr.Host.String()}
		if nerr.Err != nil {
			result.Error = nerr.Err.Error()
		} else {
			result.Success = true
		}
		startProfilingResult = append(startProfilingResult, result)
	}

	// Create JSON result and send it to the client
	startProfilingResultInBytes, err := json.Marshal(startProfilingResult)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, startProfilingResultInBytes)
}

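// An illustrative StartProfilingHandler request; the handler splits
// profilerType on commas, so several profilers can be started at once
// ("cpu" and "mem" are assumed examples of supported profiler keys):
//
//	POST /minio/admin/v3/profiling/start?profilerType=cpu,mem
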
// dummyFileInfo is an in-memory os.FileInfo representation of a profile
// data file; it helps to generate the zip stream.
type dummyFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
	isDir   bool
	sys     interface{}
}

func (f dummyFileInfo) Name() string       { return f.name }
func (f dummyFileInfo) Size() int64        { return f.size }
func (f dummyFileInfo) Mode() os.FileMode  { return f.mode }
func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
func (f dummyFileInfo) IsDir() bool        { return f.isDir }
func (f dummyFileInfo) Sys() interface{}   { return f.sys }

// DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
// ----------
// Download profiling information of all nodes in a zip format
func (a adminAPIHandlers) DownloadProfilingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DownloadProfiling")

	defer logger.AuditLog(w, r, "DownloadProfiling", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ProfilingAdminAction)
	if objectAPI == nil {
		return
	}

	if !globalNotificationSys.DownloadProfilingData(ctx, w) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminProfilerNotEnabled), r.URL)
		return
	}
}

type healInitParams struct {
	bucket, objPrefix     string
	hs                    madmin.HealOpts
	clientToken           string
	forceStart, forceStop bool
}

// extractHealInitParams - Validates params for heal init API.
func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reader) (hip healInitParams, err APIErrorCode) {
	hip.bucket = vars[mgmtBucket]
	hip.objPrefix = vars[mgmtPrefix]

	if hip.bucket == "" {
		if hip.objPrefix != "" {
			// Bucket is required if object-prefix is given
			err = ErrHealMissingBucket
			return
		}
	} else if isReservedOrInvalidBucket(hip.bucket, false) {
		err = ErrInvalidBucketName
		return
	}

	// empty prefix is valid.
	if !IsValidObjectPrefix(hip.objPrefix) {
		err = ErrInvalidObjectName
		return
	}

	if len(qParms[mgmtClientToken]) > 0 {
		hip.clientToken = qParms[mgmtClientToken][0]
	}
	if _, ok := qParms[mgmtForceStart]; ok {
		hip.forceStart = true
	}
	if _, ok := qParms[mgmtForceStop]; ok {
		hip.forceStop = true
	}

	// Invalid request conditions:
	//
	// Cannot have both forceStart and forceStop in the same request;
	// and if a clientToken is provided, the request can only be a
	// continuation of an existing heal sequence, so it cannot also
	// be a start or a stop.
	if (hip.forceStart && hip.forceStop) ||
		(hip.clientToken != "" && (hip.forceStart || hip.forceStop)) {
		err = ErrInvalidRequest
		return
	}

	// ignore body if clientToken is provided
	if hip.clientToken == "" {
		jerr := json.NewDecoder(r).Decode(&hip.hs)
		if jerr != nil {
			logger.LogIf(GlobalContext, jerr, logger.Application)
			err = ErrRequestBodyParse
			return
		}
	}

	err = ErrNone
	return
}

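// An illustrative heal request as parsed by extractHealInitParams; the
// bucket/prefix path segments and the query parameters map to the mgmt*
// constants above, and the JSON body decodes into madmin.HealOpts (the
// field names shown are assumed):
//
//	POST /minio/admin/v3/heal/mybucket/myprefix?forceStart=true
//	{"recursive": true, "dryRun": false}
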
// HealHandler - POST /minio/admin/v3/heal/
// -----------
// Start heal processing and return heal status items.
//
// On a successful heal sequence start, a unique client token is
// returned. Subsequent requests to this endpoint providing the client
// token will receive heal status records from the running heal
// sequence.
//
// If no client token is provided, and a heal sequence is in progress
// an error is returned with information about the running heal
// sequence. However, if the force-start flag is provided, the server
// aborts the running heal sequence and starts a new one.
func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "Heal")

	defer logger.AuditLog(w, r, "Heal", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
	if objectAPI == nil {
		return
	}

	// Check if this setup has an erasure coded backend.
	if !globalIsErasure {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
		return
	}

	hip, errCode := extractHealInitParams(mux.Vars(r), r.URL.Query(), r.Body)
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
		return
	}

	// Analyze the heal token and route the request accordingly
	token, success := proxyRequestByToken(ctx, w, r, hip.clientToken)
	if success {
		return
	}
	hip.clientToken = token
	// If the proxied request was not successful, try this server locally:
	// if the token is not found the call will fail anyway, and if the
	// token is empty this server generates a new one.

	type healResp struct {
		respBytes []byte
		apiErr    APIError
		errBody   string
	}

	// Define a closure to start sending whitespace to client
	// after 10s unless a response item comes in
	keepConnLive := func(w http.ResponseWriter, r *http.Request, respCh chan healResp) {
		ticker := time.NewTicker(time.Second * 10)
		defer ticker.Stop()
		started := false
	forLoop:
		for {
			select {
			case <-r.Context().Done():
				return
			case <-ticker.C:
				if !started {
					// Start writing response to client
					started = true
					setCommonHeaders(w)
					setEventStreamHeaders(w)
					// Set 200 OK status
					w.WriteHeader(200)
				}
				// Send whitespace and keep connection open
				w.Write([]byte(" "))
				w.(http.Flusher).Flush()
			case hr := <-respCh:
				switch hr.apiErr {
				case noError:
					if started {
						w.Write(hr.respBytes)
						w.(http.Flusher).Flush()
					} else {
						writeSuccessResponseJSON(w, hr.respBytes)
					}
				default:
					var errorRespJSON []byte
					if hr.errBody == "" {
						errorRespJSON = encodeResponseJSON(getAPIErrorResponse(ctx, hr.apiErr,
							r.URL.Path, w.Header().Get(xhttp.AmzRequestID),
							globalDeploymentID))
					} else {
						errorRespJSON = encodeResponseJSON(APIErrorResponse{
							Code:      hr.apiErr.Code,
							Message:   hr.errBody,
							Resource:  r.URL.Path,
							RequestID: w.Header().Get(xhttp.AmzRequestID),
							HostID:    globalDeploymentID,
						})
					}
					if !started {
						setCommonHeaders(w)
						w.Header().Set(xhttp.ContentType, string(mimeJSON))
						w.WriteHeader(hr.apiErr.HTTPStatusCode)
					}
					w.Write(errorRespJSON)
					w.(http.Flusher).Flush()
				}
				break forLoop
			}
		}
	}

	healPath := pathJoin(hip.bucket, hip.objPrefix)
	if hip.clientToken == "" && !hip.forceStart && !hip.forceStop {
		nh, exists := globalAllHealState.getHealSequence(healPath)
		if exists && !nh.hasEnded() && len(nh.currentStatus.Items) > 0 {
			clientToken := nh.clientToken
			if globalIsDistErasure {
				clientToken = fmt.Sprintf("%s@%d", nh.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
			}
			b, err := json.Marshal(madmin.HealStartSuccess{
				ClientToken:   clientToken,
				ClientAddress: nh.clientAddress,
				StartTime:     nh.startTime,
			})
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			// Client token not specified but a heal sequence exists on a path,
			// Send the token back to client.
			writeSuccessResponseJSON(w, b)
			return
		}
	}

	if hip.clientToken != "" && !hip.forceStart && !hip.forceStop {
		// Since clientToken is given, fetch heal status from running
		// heal sequence.
		respBytes, errCode := globalAllHealState.PopHealStatusJSON(
			healPath, hip.clientToken)
		if errCode != ErrNone {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
		} else {
			writeSuccessResponseJSON(w, respBytes)
		}
		return
	}

	respCh := make(chan healResp)
	switch {
	case hip.forceStop:
		go func() {
			respBytes, apiErr := globalAllHealState.stopHealSequence(healPath)
			hr := healResp{respBytes: respBytes, apiErr: apiErr}
			respCh <- hr
		}()
	case hip.clientToken == "":
		nh := newHealSequence(GlobalContext, hip.bucket, hip.objPrefix, handlers.GetSourceIP(r), hip.hs, hip.forceStart)
		go func() {
			respBytes, apiErr, errMsg := globalAllHealState.LaunchNewHealSequence(nh)
			hr := healResp{respBytes, apiErr, errMsg}
			respCh <- hr
		}()
	}

	// Due to the force-starting functionality, the Launch
	// call above can take a long time - to keep the
	// connection alive, we start sending whitespace
	keepConnLive(w, r, respCh)
}

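// getAggregatedBackgroundHealState merges the local background-heal status
// with the status reported by each peer: scanned-item counters are summed,
// healing-disk lists are concatenated, and the most recent heal activity
// (and its next-round estimate) wins.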
func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState, error) {
	var bgHealStates []madmin.BgHealState

	localHealState, ok := getLocalBackgroundHealStatus()
	if !ok {
		return madmin.BgHealState{}, errServerNotInitialized
	}

	// Get local heal status first
	bgHealStates = append(bgHealStates, localHealState)

	if globalIsDistErasure {
		// Get heal status from other peers
		peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
		var errCount int
		for _, nerr := range nerrs {
			if nerr.Err != nil {
				logger.LogIf(ctx, nerr.Err)
				errCount++
			}
		}
		if errCount == len(nerrs) {
			return madmin.BgHealState{}, fmt.Errorf("all remote servers failed to report heal status, cluster is unhealthy")
		}
		bgHealStates = append(bgHealStates, peersHealStates...)
	}

	// Aggregate healing result
	var aggregatedHealStateResult = madmin.BgHealState{
		ScannedItemsCount: bgHealStates[0].ScannedItemsCount,
		LastHealActivity:  bgHealStates[0].LastHealActivity,
		NextHealRound:     bgHealStates[0].NextHealRound,
		HealDisks:         bgHealStates[0].HealDisks,
	}

	bgHealStates = bgHealStates[1:]

	for _, state := range bgHealStates {
		aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
		aggregatedHealStateResult.HealDisks = append(aggregatedHealStateResult.HealDisks, state.HealDisks...)
		if !state.LastHealActivity.IsZero() && aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
			aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
			// The node with the latest heal activity is the one
			// orchestrating self-healing operations, which also means
			// it is the node that decides when the next self-healing
			// operation will be done.
			aggregatedHealStateResult.NextHealRound = state.NextHealRound
		}
	}

	return aggregatedHealStateResult, nil
}

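// BackgroundHealStatusHandler - GET /minio/admin/v3/background-heal/status
// (route path assumed)
// ----------
// Returns the aggregated background heal status of the cluster.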
func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HealBackgroundStatus")

	defer logger.AuditLog(w, r, "HealBackgroundStatus", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
	if objectAPI == nil {
		return
	}

	// Check if this setup has an erasure coded backend.
	if !globalIsErasure {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
		return
	}

	aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context())
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err := json.NewEncoder(w).Encode(aggregateHealStateResult); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
	var cred auth.Credentials
	var adminAPIErr APIErrorCode
	// Get current object layer instance.
	objectAPI := newObjectLayerWithoutSafeModeFn()
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return nil, cred
	}

	// Validate request signature.
	cred, adminAPIErr = checkAdminRequestAuthType(ctx, r, action, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return nil, cred
	}

	return objectAPI, cred
}

// AdminError - is a generic error for all admin APIs.
type AdminError struct {
	Code       string
	Message    string
	StatusCode int
}

func (ae AdminError) Error() string {
	return ae.Message
}

// Admin API errors
const (
	AdminUpdateUnexpectedFailure = "XMinioAdminUpdateUnexpectedFailure"
	AdminUpdateURLNotReachable   = "XMinioAdminUpdateURLNotReachable"
	AdminUpdateApplyFailure      = "XMinioAdminUpdateApplyFailure"
)

// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
// specific error.
func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
	switch err {
	case errErasureWriteQuorum:
		return ErrAdminConfigNoQuorum
	default:
		return toAPIErrorCode(ctx, err)
	}
}

func toAdminAPIErr(ctx context.Context, err error) APIError {
	if err == nil {
		return noError
	}

	var apiErr APIError
	switch e := err.(type) {
	case iampolicy.Error:
		apiErr = APIError{
			Code:           "XMinioMalformedIAMPolicy",
			Description:    e.Error(),
			HTTPStatusCode: http.StatusBadRequest,
		}
	case config.Error:
		apiErr = APIError{
			Code:           "XMinioConfigError",
			Description:    e.Error(),
			HTTPStatusCode: http.StatusBadRequest,
		}
	case AdminError:
		apiErr = APIError{
			Code:           e.Code,
			Description:    e.Message,
			HTTPStatusCode: e.StatusCode,
		}
	default:
		switch {
		case errors.Is(err, errConfigNotFound):
			apiErr = APIError{
				Code:           "XMinioConfigError",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusNotFound,
			}
		case errors.Is(err, errIAMActionNotAllowed):
			apiErr = APIError{
				Code:           "XMinioIAMActionNotAllowed",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusForbidden,
			}
		case errors.Is(err, errIAMNotInitialized):
			apiErr = APIError{
				Code:           "XMinioIAMNotInitialized",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusServiceUnavailable,
			}
		case errors.Is(err, crypto.ErrKESKeyExists):
			apiErr = APIError{
				Code:           "XMinioKMSKeyExists",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusConflict,
			}
		default:
			apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
		}
	}
	return apiErr
}

// mustTrace returns true if the trace.Info entry should be traced,
// false otherwise:
// - the entry must be of type trace.Info;
// - with errOnly set, only entries with an error status code (>= 400,
//   i.e. not 2xx/3xx) are traced;
// - with trcAll set, all entries are traced; otherwise only S3 API
//   requests (outside the MinIO reserved bucket path) are traced.
func mustTrace(entry interface{}, trcAll, errOnly bool) bool {
	trcInfo, ok := entry.(trace.Info)
	if !ok {
		return false
	}
	trace := trcAll || !HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator)
	if errOnly {
		return trace && trcInfo.RespInfo.StatusCode >= http.StatusBadRequest
	}
	return trace
}

// TraceHandler - POST /minio/admin/v3/trace
// ----------
// The handler sends http trace to the connected HTTP client.
func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HTTPTrace")
	trcAll := r.URL.Query().Get("all") == "true"
	trcErr := r.URL.Query().Get("err") == "true"

	// Validate request signature.
	_, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.TraceAdminAction, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return
	}

	setEventStreamHeaders(w)

	// The trace publisher and peer-trace clients use nonblocking sends and
	// hence do not wait for slow receivers. Use a buffered channel to take
	// care of burst sends or a slow w.Write().
	traceCh := make(chan interface{}, 4000)

	peers := newPeerRestClients(globalEndpoints)

	globalHTTPTrace.Subscribe(traceCh, ctx.Done(), func(entry interface{}) bool {
		return mustTrace(entry, trcAll, trcErr)
	})

	for _, peer := range peers {
		if peer == nil {
			continue
		}
		peer.Trace(traceCh, ctx.Done(), trcAll, trcErr)
	}

	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()

	enc := json.NewEncoder(w)
	for {
		select {
		case entry := <-traceCh:
			if err := enc.Encode(entry); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-keepAliveTicker.C:
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-ctx.Done():
			return
		}
	}
}

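// An illustrative TraceHandler request; per mustTrace above, all=true also
// includes calls under the MinIO reserved bucket path, and err=true limits
// the stream to responses with status >= 400:
//
//	POST /minio/admin/v3/trace?all=true&err=false
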
// The handler sends console logs to the connected HTTP client.
func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ConsoleLog")

	defer logger.AuditLog(w, r, "ConsoleLog", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConsoleLogAdminAction)
	if objectAPI == nil {
		return
	}
	node := r.URL.Query().Get("node")
	// limit buffered console entries if client requested it.
	limitStr := r.URL.Query().Get("limit")
	limitLines, err := strconv.Atoi(limitStr)
	if err != nil {
		limitLines = 10
	}

	logKind := r.URL.Query().Get("logType")
	if logKind == "" {
		logKind = string(logger.All)
	}
	logKind = strings.ToUpper(logKind)

	// Avoid reusing tcp connection if read timeout is hit
	// This is needed to make r.Context().Done() work as
	// expected in case of read timeout
	w.Header().Set("Connection", "close")

	setEventStreamHeaders(w)

	logCh := make(chan interface{}, 4000)

	peers := newPeerRestClients(globalEndpoints)

	globalConsoleSys.Subscribe(logCh, ctx.Done(), node, limitLines, logKind, nil)

	for _, peer := range peers {
		if peer == nil {
			continue
		}
		if node == "" || strings.EqualFold(peer.host.Name, node) {
			peer.ConsoleLog(logCh, ctx.Done())
		}
	}

	enc := json.NewEncoder(w)

	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()

	for {
		select {
		case entry := <-logCh:
			log, ok := entry.(log.Info)
			if ok && log.SendLog(node, logKind) {
				if err := enc.Encode(log); err != nil {
					return
				}
				w.(http.Flusher).Flush()
			}
		case <-keepAliveTicker.C:
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-ctx.Done():
			return
		}
	}
}

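// An illustrative ConsoleLogHandler request; node selects a single peer,
// limit bounds buffered entries (defaulting to 10), and logType is
// upper-cased and matched against logger kinds. The route path is
// registered elsewhere and only assumed here:
//
//	GET /minio/admin/v3/log?node=server1&limit=50&logType=ALL
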
// KMSCreateKeyHandler - POST /minio/admin/v3/kms/key/create?key-id=<master-key-id>
func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "KMSCreateKey")
	defer logger.AuditLog(w, r, "KMSCreateKey", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSCreateKeyAdminAction)
	if objectAPI == nil {
		return
	}

	if GlobalKMS == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
		return
	}

	if err := GlobalKMS.CreateKey(r.URL.Query().Get("key-id")); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	writeSuccessResponseHeadersOnly(w)
}

// KMSKeyStatusHandler - GET /minio/admin/v3/kms/key/status?key-id=<master-key-id>
func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "KMSKeyStatus")

	defer logger.AuditLog(w, r, "KMSKeyStatus", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSKeyStatusAdminAction)
	if objectAPI == nil {
		return
	}

	if GlobalKMS == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
		return
	}

	keyID := r.URL.Query().Get("key-id")
	if keyID == "" {
		keyID = GlobalKMS.DefaultKeyID()
	}
	var response = madmin.KMSKeyStatus{
		KeyID: keyID,
	}

	kmsContext := crypto.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
	// 1. Generate a new key using the KMS.
	key, sealedKey, err := GlobalKMS.GenerateKey(keyID, kmsContext)
	if err != nil {
		response.EncryptionErr = err.Error()
		resp, err := json.Marshal(response)
		if err != nil {
			writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
			return
		}
		writeSuccessResponseJSON(w, resp)
		return
	}

	// 2. Verify that we can indeed decrypt the (encrypted) key
	decryptedKey, err := GlobalKMS.UnsealKey(keyID, sealedKey, kmsContext)
	if err != nil {
		response.DecryptionErr = err.Error()
		resp, err := json.Marshal(response)
		if err != nil {
			writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
			return
		}
		writeSuccessResponseJSON(w, resp)
		return
	}

	// 3. Compare generated key with decrypted key
	if subtle.ConstantTimeCompare(key[:], decryptedKey[:]) != 1 {
		response.DecryptionErr = "The generated and the decrypted data key do not match"
		resp, err := json.Marshal(response)
		if err != nil {
			writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
			return
		}
		writeSuccessResponseJSON(w, resp)
		return
	}

	resp, err := json.Marshal(response)
	if err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
		return
	}
	writeSuccessResponseJSON(w, resp)
}

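// KMSKeyStatusHandler exercises the master key end to end (generate,
// unseal, constant-time compare); an illustrative success response carries
// only the key ID, with both error fields unset (the JSON field name is
// assumed from the madmin.KMSKeyStatus struct tags):
//
//	{"key-id":"my-minio-key"}
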
// OBDInfoHandler - GET /minio/admin/v3/obdinfo
// ----------
// Get server on-board diagnostics
func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "OBDInfo")

	defer logger.AuditLog(w, r, "OBDInfo", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.OBDInfoAdminAction)
	if objectAPI == nil {
		return
	}

	query := r.URL.Query()
	obdInfo := madmin.OBDInfo{}
	obdInfoCh := make(chan madmin.OBDInfo)

	enc := json.NewEncoder(w)
	partialWrite := func(oinfo madmin.OBDInfo) {
		obdInfoCh <- oinfo
	}

	setCommonHeaders(w)

	setEventStreamHeaders(w)

	w.WriteHeader(http.StatusOK)

	errResp := func(err error) {
		errorResponse := getAPIErrorResponse(ctx, toAdminAPIErr(ctx, err), r.URL.String(),
			w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
		encodedErrorResponse := encodeResponse(errorResponse)
		obdInfo.Error = string(encodedErrorResponse)
		logger.LogIf(ctx, enc.Encode(obdInfo))
	}

	deadline := 3600 * time.Second
	if dstr := r.URL.Query().Get("deadline"); dstr != "" {
		var err error
		deadline, err = time.ParseDuration(dstr)
		if err != nil {
			errResp(err)
			return
		}
	}

	deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
	defer cancel()

	nsLock := objectAPI.NewNSLock(ctx, minioMetaBucket, "obd-in-progress")
	if err := nsLock.GetLock(newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
		errResp(err)
		return
	}
	defer nsLock.Unlock()

	go func() {
		defer close(obdInfoCh)

		if log := query.Get("log"); log == "true" {
			obdInfo.Logging.ServersLog = append(obdInfo.Logging.ServersLog, getLocalLogOBD(deadlinedCtx, r))
			obdInfo.Logging.ServersLog = append(obdInfo.Logging.ServersLog, globalNotificationSys.LogOBDInfo(deadlinedCtx)...)
			partialWrite(obdInfo)
		}

		if cpu := query.Get("syscpu"); cpu == "true" {
			cpuInfo := getLocalCPUOBDInfo(deadlinedCtx, r)

			obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, cpuInfo)
			obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, globalNotificationSys.CPUOBDInfo(deadlinedCtx)...)
			partialWrite(obdInfo)
		}

		if diskHw := query.Get("sysdiskhw"); diskHw == "true" {
			diskHwInfo := getLocalDiskHwOBD(deadlinedCtx, r)

			obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, diskHwInfo)
			obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, globalNotificationSys.DiskHwOBDInfo(deadlinedCtx)...)
			partialWrite(obdInfo)
		}

		if osInfo := query.Get("sysosinfo"); osInfo == "true" {
			osInfo := getLocalOsInfoOBD(deadlinedCtx, r)
|
|
|
|
2020-04-16 13:56:18 -04:00
|
|
|
obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, osInfo)
|
|
|
|
obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, globalNotificationSys.OsOBDInfo(deadlinedCtx)...)
|
|
|
|
partialWrite(obdInfo)
|
|
|
|
}
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2020-09-15 21:02:54 -04:00
|
|
|
if mem := query.Get("sysmem"); mem == "true" {
|
2020-05-18 12:59:45 -04:00
|
|
|
memInfo := getLocalMemOBD(deadlinedCtx, r)
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2020-04-16 13:56:18 -04:00
|
|
|
obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, memInfo)
|
|
|
|
obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, globalNotificationSys.MemOBDInfo(deadlinedCtx)...)
|
|
|
|
partialWrite(obdInfo)
|
|
|
|
}
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2020-09-15 21:02:54 -04:00
|
|
|
if proc := query.Get("sysprocess"); proc == "true" {
|
2020-05-18 12:59:45 -04:00
|
|
|
procInfo := getLocalProcOBD(deadlinedCtx, r)
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2020-04-16 13:56:18 -04:00
|
|
|
obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, procInfo)
|
|
|
|
obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, globalNotificationSys.ProcOBDInfo(deadlinedCtx)...)
|
|
|
|
partialWrite(obdInfo)
|
2020-03-27 00:07:39 -04:00
|
|
|
}
|
2020-04-16 13:56:18 -04:00
|
|
|
|
2020-09-15 21:02:54 -04:00
|
|
|
if config := query.Get("minioconfig"); config == "true" {
|
2020-04-16 13:56:18 -04:00
|
|
|
cfg, err := readServerConfig(ctx, objectAPI)
|
|
|
|
logger.LogIf(ctx, err)
|
|
|
|
obdInfo.Minio.Config = cfg
|
|
|
|
partialWrite(obdInfo)
|
2020-03-27 00:07:39 -04:00
|
|
|
}
|
|
|
|
|
2020-09-15 21:02:54 -04:00
|
|
|
if drive := query.Get("perfdrive"); drive == "true" {
|
2020-04-16 13:56:18 -04:00
|
|
|
// Get drive obd details from local server's drive(s)
|
|
|
|
driveOBDSerial := getLocalDrivesOBD(deadlinedCtx, false, globalEndpoints, r)
|
|
|
|
driveOBDParallel := getLocalDrivesOBD(deadlinedCtx, true, globalEndpoints, r)
|
|
|
|
|
|
|
|
errStr := ""
|
|
|
|
if driveOBDSerial.Error != "" {
|
|
|
|
errStr = "serial: " + driveOBDSerial.Error
|
|
|
|
}
|
|
|
|
if driveOBDParallel.Error != "" {
|
|
|
|
errStr = errStr + " parallel: " + driveOBDParallel.Error
|
|
|
|
}
|
|
|
|
|
|
|
|
driveOBD := madmin.ServerDrivesOBDInfo{
|
|
|
|
Addr: driveOBDSerial.Addr,
|
|
|
|
Serial: driveOBDSerial.Serial,
|
|
|
|
Parallel: driveOBDParallel.Parallel,
|
|
|
|
Error: errStr,
|
|
|
|
}
|
|
|
|
obdInfo.Perf.DriveInfo = append(obdInfo.Perf.DriveInfo, driveOBD)
|
2020-05-22 20:56:45 -04:00
|
|
|
partialWrite(obdInfo)
|
2020-04-16 13:56:18 -04:00
|
|
|
|
|
|
|
// Notify all other MinIO peers to report drive obd numbers
|
2020-05-22 20:56:45 -04:00
|
|
|
driveOBDs := globalNotificationSys.DriveOBDInfoChan(deadlinedCtx)
|
|
|
|
for obd := range driveOBDs {
|
|
|
|
obdInfo.Perf.DriveInfo = append(obdInfo.Perf.DriveInfo, obd)
|
|
|
|
partialWrite(obdInfo)
|
|
|
|
}
|
2020-04-16 13:56:18 -04:00
|
|
|
partialWrite(obdInfo)
|
2020-03-27 00:07:39 -04:00
|
|
|
}
|
|
|
|
|
2020-09-15 21:02:54 -04:00
|
|
|
if net := query.Get("perfnet"); net == "true" && globalIsDistErasure {
|
2020-04-16 13:56:18 -04:00
|
|
|
obdInfo.Perf.Net = append(obdInfo.Perf.Net, globalNotificationSys.NetOBDInfo(deadlinedCtx))
|
2020-05-22 20:56:45 -04:00
|
|
|
partialWrite(obdInfo)
|
|
|
|
|
|
|
|
netOBDs := globalNotificationSys.DispatchNetOBDChan(deadlinedCtx)
|
|
|
|
for obd := range netOBDs {
|
|
|
|
obdInfo.Perf.Net = append(obdInfo.Perf.Net, obd)
|
|
|
|
partialWrite(obdInfo)
|
|
|
|
}
|
|
|
|
partialWrite(obdInfo)
|
|
|
|
|
2020-04-16 13:56:18 -04:00
|
|
|
obdInfo.Perf.NetParallel = globalNotificationSys.NetOBDParallelInfo(deadlinedCtx)
|
|
|
|
partialWrite(obdInfo)
|
|
|
|
}
|
2020-09-15 21:02:54 -04:00
|
|
|
|
2020-04-16 13:56:18 -04:00
|
|
|
}()
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2020-04-16 13:56:18 -04:00
|
|
|
ticker := time.NewTicker(30 * time.Second)
|
|
|
|
defer ticker.Stop()
|
2020-03-27 00:07:39 -04:00
|
|
|
|
2020-04-16 13:56:18 -04:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case oinfo, ok := <-obdInfoCh:
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
logger.LogIf(ctx, enc.Encode(oinfo))
|
|
|
|
w.(http.Flusher).Flush()
|
|
|
|
case <-ticker.C:
|
|
|
|
if _, err := w.Write([]byte(" ")); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.(http.Flusher).Flush()
|
|
|
|
case <-deadlinedCtx.Done():
|
|
|
|
w.(http.Flusher).Flush()
|
|
|
|
return
|
|
|
|
}
|
2020-03-27 00:07:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
}
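
// OBDInfoHandler streams progressively richer snapshots over one response: a
// producer goroutine publishes partial results on a channel, while the
// request goroutine encodes each one and writes a whitespace byte every 30
// seconds so idle proxies do not sever the connection (a single space between
// JSON documents is ignored by decoders). A minimal, self-contained sketch of
// this producer/keep-alive pattern; the names here are illustrative, not
// MinIO APIs:
//
//	func streamJSON(ctx context.Context, w http.ResponseWriter, updates <-chan interface{}) {
//		enc := json.NewEncoder(w)
//		flusher := w.(http.Flusher) // assumes the ResponseWriter supports flushing
//		ticker := time.NewTicker(30 * time.Second)
//		defer ticker.Stop()
//		for {
//			select {
//			case u, ok := <-updates:
//				if !ok {
//					return // producer closed the channel; we are done
//				}
//				_ = enc.Encode(u) // one JSON document per update
//				flusher.Flush()
//			case <-ticker.C:
//				if _, err := w.Write([]byte(" ")); err != nil {
//					return // client went away
//				}
//				flusher.Flush()
//			case <-ctx.Done():
//				flusher.Flush()
//				return // deadline exceeded
//			}
//		}
//	}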

// ServerInfoHandler - GET /minio/admin/v3/info
// ----------
// Get server information
func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ServerInfo")

	defer logger.AuditLog(w, r, "ServerInfo", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction)
	if objectAPI == nil {
		return
	}

	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	buckets := madmin.Buckets{}
	objects := madmin.Objects{}
	usage := madmin.Usage{}

	dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
	if err == nil {
		buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
		objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
		usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
	}

	vault := fetchVaultStatus(cfg)

	ldap := madmin.LDAP{}
	if globalLDAPConfig.Enabled {
		ldapConn, err := globalLDAPConfig.Connect()
		if err != nil {
			ldap.Status = "offline"
		} else if ldapConn == nil {
			ldap.Status = "Not Configured"
		} else {
			// Close ldap connection to avoid leaks.
			ldapConn.Close()
			ldap.Status = "online"
		}
	}

	log, audit := fetchLoggerInfo(cfg)

	// Get the notification target info
	notifyTarget := fetchLambdaInfo(cfg)

	// Fetch the storage information; ignore any errors.
	storageInfo, _ := objectAPI.StorageInfo(ctx, false)

	var backend interface{}
	if storageInfo.Backend.Type == BackendType(madmin.Erasure) {
		backend = madmin.ErasureBackend{
			Type:             madmin.ErasureType,
			OnlineDisks:      storageInfo.Backend.OnlineDisks.Sum(),
			OfflineDisks:     storageInfo.Backend.OfflineDisks.Sum(),
			StandardSCData:   storageInfo.Backend.StandardSCData,
			StandardSCParity: storageInfo.Backend.StandardSCParity,
			RRSCData:         storageInfo.Backend.RRSCData,
			RRSCParity:       storageInfo.Backend.RRSCParity,
		}
	} else {
		backend = madmin.FSBackend{
			Type: madmin.FsType,
		}
	}

	mode := "safemode"
	if newObjectLayerFn() != nil {
		mode = "online"
	}

	server := getLocalServerProperty(globalEndpoints, r)
	servers := globalNotificationSys.ServerInfo()
	servers = append(servers, server)

	domain := globalDomainNames
	services := madmin.Services{
		Vault:         vault,
		LDAP:          ldap,
		Logger:        log,
		Audit:         audit,
		Notifications: notifyTarget,
	}

	// Collect any disks currently being healed.
	healing, _ := getAggregatedBackgroundHealState(ctx)
	healDisks := make(map[string]struct{}, len(healing.HealDisks))
	for _, disk := range healing.HealDisks {
		healDisks[disk] = struct{}{}
	}

	// Find all disks which belong to each respective endpoint.
	for i := range servers {
		for _, disk := range storageInfo.Disks {
			if strings.Contains(disk.Endpoint, servers[i].Endpoint) {
				if _, ok := healDisks[disk.Endpoint]; ok {
					disk.Healing = true
				}
				servers[i].Disks = append(servers[i].Disks, disk)
			}
		}
	}

	// Add all the disks local to this server.
	for _, disk := range storageInfo.Disks {
		if disk.DrivePath == "" && disk.Endpoint == "" {
			continue
		}
		if disk.Endpoint == disk.DrivePath {
			if _, ok := healDisks[disk.Endpoint]; ok {
				disk.Healing = true
			}
			servers[len(servers)-1].Disks = append(servers[len(servers)-1].Disks, disk)
		}
	}

	infoMsg := madmin.InfoMessage{
		Mode:         mode,
		Domain:       domain,
		Region:       globalServerRegion,
		SQSARN:       globalNotificationSys.GetARNList(false),
		DeploymentID: globalDeploymentID,
		Buckets:      buckets,
		Objects:      objects,
		Usage:        usage,
		Services:     services,
		Backend:      backend,
		Servers:      servers,
	}

	// Marshal API response
	jsonBytes, err := json.Marshal(infoMsg)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Reply with the server information (across nodes in a
	// distributed setup) as JSON.
	writeSuccessResponseJSON(w, jsonBytes)
}
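
// The healing bookkeeping above is a plain set-membership pattern: build a
// set of endpoints currently under heal, then flag matching disks while
// attaching them to their servers. A reduced, self-contained sketch with
// simplified stand-in types (disk/server below are illustrative, not the
// madmin types):
//
//	type disk struct {
//		Endpoint string
//		Healing  bool
//	}
//
//	type server struct {
//		Endpoint string
//		Disks    []disk
//	}
//
//	func attachDisks(servers []server, disks []disk, healing []string) {
//		healSet := make(map[string]struct{}, len(healing))
//		for _, e := range healing {
//			healSet[e] = struct{}{}
//		}
//		for i := range servers {
//			for _, d := range disks {
//				if strings.Contains(d.Endpoint, servers[i].Endpoint) {
//					if _, ok := healSet[d.Endpoint]; ok {
//						d.Healing = true // d is a loop copy; safe to mutate
//					}
//					servers[i].Disks = append(servers[i].Disks, d)
//				}
//			}
//		}
//	}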

func fetchLambdaInfo(cfg config.Config) []map[string][]madmin.TargetIDStatus {
	// Fetch the configured targets
	tr := NewGatewayHTTPTransport()
	defer tr.CloseIdleConnections()
	targetList, err := notify.FetchRegisteredTargets(GlobalContext, cfg, tr, true, false)
	if err != nil && err != notify.ErrTargetsOffline {
		logger.LogIf(GlobalContext, err)
		return nil
	}

	lambdaMap := make(map[string][]madmin.TargetIDStatus)

	for targetID, target := range targetList.TargetMap() {
		targetIDStatus := make(map[string]madmin.Status)
		active, _ := target.IsActive()
		if active {
			targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
		} else {
			targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
		}
		list := lambdaMap[targetID.Name]
		list = append(list, targetIDStatus)
		lambdaMap[targetID.Name] = list
		// Close any leaking connections
		_ = target.Close()
	}

	notify := make([]map[string][]madmin.TargetIDStatus, len(lambdaMap))
	counter := 0
	for key, value := range lambdaMap {
		v := make(map[string][]madmin.TargetIDStatus)
		v[key] = value
		notify[counter] = v
		counter++
	}
	return notify
}
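
// The tail of fetchLambdaInfo flattens a map into a slice of single-key maps,
// so the JSON response is an array of {"targetName": [...]} objects rather
// than one large object. A minimal sketch of that transformation with generic
// element types (names are illustrative):
//
//	func flatten(m map[string][]string) []map[string][]string {
//		out := make([]map[string][]string, 0, len(m))
//		for k, v := range m {
//			out = append(out, map[string][]string{k: v})
//		}
//		return out
//	}
//
// Note that Go randomizes map iteration order, so the order of entries in the
// resulting array is not stable across calls.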

// fetchVaultStatus fetches Vault Info
func fetchVaultStatus(cfg config.Config) madmin.Vault {
	vault := madmin.Vault{}
	if GlobalKMS == nil {
		vault.Status = "disabled"
		return vault
	}
	keyID := GlobalKMS.DefaultKeyID()
	kmsInfo := GlobalKMS.Info()

	if len(kmsInfo.Endpoints) == 0 {
		vault.Status = "KMS configured using master key"
		return vault
	}

	if err := checkConnection(kmsInfo.Endpoints[0], 15*time.Second); err != nil {
		vault.Status = "offline"
	} else {
		vault.Status = "online"

		kmsContext := crypto.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
		// 1. Generate a new key using the KMS.
		key, sealedKey, err := GlobalKMS.GenerateKey(keyID, kmsContext)
		if err != nil {
			vault.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
		} else {
			vault.Encrypt = "Ok"
		}

		// 2. Verify that we can indeed decrypt the (encrypted) key.
		decryptedKey, err := GlobalKMS.UnsealKey(keyID, sealedKey, kmsContext)
		switch {
		case err != nil:
			vault.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
		case subtle.ConstantTimeCompare(key[:], decryptedKey[:]) != 1:
			vault.Decrypt = "Decryption failed: decrypted key does not match generated key"
		default:
			vault.Decrypt = "Ok"
		}
	}
	return vault
}

// fetchLoggerInfo returns the status of configured logger and audit targets.
func fetchLoggerInfo(cfg config.Config) ([]madmin.Logger, []madmin.Audit) {
	loggerCfg, _ := logger.LookupConfig(cfg)

	var logger []madmin.Logger
	var auditlogger []madmin.Audit
	for log, l := range loggerCfg.HTTP {
		if l.Enabled {
			err := checkConnection(l.Endpoint, 15*time.Second)
			if err == nil {
				mapLog := make(map[string]madmin.Status)
				mapLog[log] = madmin.Status{Status: "Online"}
				logger = append(logger, mapLog)
			} else {
				mapLog := make(map[string]madmin.Status)
				mapLog[log] = madmin.Status{Status: "Offline"}
				logger = append(logger, mapLog)
			}
		}
	}

	for audit, l := range loggerCfg.Audit {
		if l.Enabled {
			err := checkConnection(l.Endpoint, 15*time.Second)
			if err == nil {
				mapAudit := make(map[string]madmin.Status)
				mapAudit[audit] = madmin.Status{Status: "Online"}
				auditlogger = append(auditlogger, mapAudit)
			} else {
				mapAudit := make(map[string]madmin.Status)
				mapAudit[audit] = madmin.Status{Status: "Offline"}
				auditlogger = append(auditlogger, mapAudit)
			}
		}
	}
	return logger, auditlogger
}

// checkConnection - ping an endpoint, return an error in case of no connection.
func checkConnection(endpointStr string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(GlobalContext, timeout)
	defer cancel()

	client := &http.Client{Transport: &http.Transport{
		Proxy:                 http.ProxyFromEnvironment,
		DialContext:           xhttp.NewCustomDialContext(timeout),
		ResponseHeaderTimeout: 5 * time.Second,
		TLSHandshakeTimeout:   5 * time.Second,
		ExpectContinueTimeout: 5 * time.Second,
		TLSClientConfig:       &tls.Config{RootCAs: globalRootCAs},
		// Go net/http automatically unzips the response body if the
		// Content-Type is gzip; disable this feature, as we are always
		// interested in the raw stream.
		DisableCompression: true,
	}}
	defer client.CloseIdleConnections()

	req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpointStr, nil)
	if err != nil {
		return err
	}

	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer xhttp.DrainBody(resp.Body)
	return nil
}
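
// A checkConnection-style probe can be exercised without the MinIO-specific
// transport by issuing a plain HEAD request under a context deadline. A
// minimal, self-contained sketch using only the standard library (the
// endpoint URL in the usage note is a placeholder):
//
//	func ping(endpoint string, timeout time.Duration) error {
//		ctx, cancel := context.WithTimeout(context.Background(), timeout)
//		defer cancel()
//		req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpoint, nil)
//		if err != nil {
//			return err
//		}
//		resp, err := http.DefaultClient.Do(req)
//		if err != nil {
//			return err
//		}
//		defer resp.Body.Close()
//		// Drain the body so the underlying connection can be reused.
//		_, _ = io.Copy(ioutil.Discard, resp.Body)
//		return nil
//	}
//
// For example, ping("https://kms.example.net:7373", 15*time.Second) mirrors
// the 15-second probes used by fetchVaultStatus and fetchLoggerInfo above.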