The Prometheus metrics refactoring (#8003)

The measures are consolidated to the following metrics

- `disk_storage_used`: Disk space used on the disk.
- `disk_storage_available`: Available disk space left on the disk.
- `disk_storage_total`: Total disk space on the disk.
- `disks_offline`: Total number of offline disks in current MinIO instance.
- `disks_total`: Total number of disks in current MinIO instance.
- `s3_requests_total`: Total number of s3 requests in current MinIO instance.
- `s3_errors_total`: Total number of errors in s3 requests in current MinIO instance.
- `s3_requests_current`: Total number of active s3 requests in current MinIO instance.
- `internode_rx_bytes_total`: Total number of internode bytes received by current MinIO server instance.
- `internode_tx_bytes_total`: Total number of bytes sent to the other nodes by current MinIO server instance.
- `s3_rx_bytes_total`: Total number of s3 bytes received by current MinIO server instance.
- `s3_tx_bytes_total`: Total number of s3 bytes sent by current MinIO server instance.
- `minio_version_info`: Current MinIO version with commit-id.
- `s3_ttfb_seconds_bucket`: Histogram that holds the latency information of the requests.

And this PR also modifies the current StorageInfo queries

- Decouples StorageInfo from ServerInfo.
- StorageInfo is enhanced to give endpoint information.

NOTE: ADMIN API VERSION IS BUMPED UP IN THIS PR

Fixes #7873
This commit is contained in:
Praveen raj Mani 2019-10-23 09:31:14 +05:30 committed by Harshavardhana
parent f01d53b20f
commit 8836d57e3c
49 changed files with 938 additions and 658 deletions

View File

@ -26,6 +26,7 @@ export class StorageInfo extends React.Component {
} }
render() { render() {
const { used } = this.props.storageInfo const { used } = this.props.storageInfo
var totalUsed = used.reduce((v1, v2) => v1 + v2, 0)
return ( return (
<div className="feh-used"> <div className="feh-used">
<div className="fehu-chart"> <div className="fehu-chart">
@ -34,7 +35,7 @@ export class StorageInfo extends React.Component {
<ul> <ul>
<li> <li>
<span>Used: </span> <span>Used: </span>
{humanize.filesize(used)} {humanize.filesize(totalUsed)}
</li> </li>
</ul> </ul>
</div> </div>
@ -54,4 +55,7 @@ const mapDispatchToProps = dispatch => {
} }
} }
export default connect(mapStateToProps, mapDispatchToProps)(StorageInfo) export default connect(
mapStateToProps,
mapDispatchToProps
)(StorageInfo)

View File

@ -21,10 +21,7 @@ import { StorageInfo } from "../StorageInfo"
describe("StorageInfo", () => { describe("StorageInfo", () => {
it("should render without crashing", () => { it("should render without crashing", () => {
shallow( shallow(
<StorageInfo <StorageInfo storageInfo={{ used: [60] }} fetchStorageInfo={jest.fn()} />
storageInfo={{ used: 60 }}
fetchStorageInfo={jest.fn()}
/>
) )
}) })
@ -32,7 +29,7 @@ describe("StorageInfo", () => {
const fetchStorageInfo = jest.fn() const fetchStorageInfo = jest.fn()
shallow( shallow(
<StorageInfo <StorageInfo
storageInfo={{ used: 60 }} storageInfo={{ used: [60] }}
fetchStorageInfo={fetchStorageInfo} fetchStorageInfo={fetchStorageInfo}
/> />
) )

View File

@ -20,7 +20,7 @@ import * as actionsCommon from "../actions"
jest.mock("../../web", () => ({ jest.mock("../../web", () => ({
StorageInfo: jest.fn(() => { StorageInfo: jest.fn(() => {
return Promise.resolve({ storageInfo: { Used: 60 } }) return Promise.resolve({ storageInfo: { Used: [60] } })
}), }),
ServerInfo: jest.fn(() => { ServerInfo: jest.fn(() => {
return Promise.resolve({ return Promise.resolve({
@ -40,7 +40,7 @@ describe("Common actions", () => {
it("creates common/SET_STORAGE_INFO after fetching the storage details ", () => { it("creates common/SET_STORAGE_INFO after fetching the storage details ", () => {
const store = mockStore() const store = mockStore()
const expectedActions = [ const expectedActions = [
{ type: "common/SET_STORAGE_INFO", storageInfo: { used: 60 } } { type: "common/SET_STORAGE_INFO", storageInfo: { used: [60] } }
] ]
return store.dispatch(actionsCommon.fetchStorageInfo()).then(() => { return store.dispatch(actionsCommon.fetchStorageInfo()).then(() => {
const actions = store.getActions() const actions = store.getActions()

View File

@ -22,8 +22,9 @@ describe("common reducer", () => {
expect(reducer(undefined, {})).toEqual({ expect(reducer(undefined, {})).toEqual({
sidebarOpen: false, sidebarOpen: false,
storageInfo: { storageInfo: {
total: 0, total: [0],
free: 0 free: [0],
used: [0]
}, },
serverInfo: {} serverInfo: {}
}) })
@ -61,11 +62,11 @@ describe("common reducer", () => {
{}, {},
{ {
type: actionsCommon.SET_STORAGE_INFO, type: actionsCommon.SET_STORAGE_INFO,
storageInfo: { total: 100, free: 40 } storageInfo: { total: [100], free: [40] }
} }
) )
).toEqual({ ).toEqual({
storageInfo: { total: 100, free: 40 } storageInfo: { total: [100], free: [40] }
}) })
}) })

View File

@ -19,7 +19,7 @@ import * as actionsCommon from "./actions"
export default ( export default (
state = { state = {
sidebarOpen: false, sidebarOpen: false,
storageInfo: { total: 0, free: 0 }, storageInfo: { total: [0], free: [0], used: [0] },
serverInfo: {} serverInfo: {}
}, },
action action

File diff suppressed because one or more lines are too long

View File

@ -229,37 +229,30 @@ type ServerConnStats struct {
TotalInputBytes uint64 `json:"transferred"` TotalInputBytes uint64 `json:"transferred"`
TotalOutputBytes uint64 `json:"received"` TotalOutputBytes uint64 `json:"received"`
Throughput uint64 `json:"throughput,omitempty"` Throughput uint64 `json:"throughput,omitempty"`
S3InputBytes uint64 `json:"transferredS3"`
S3OutputBytes uint64 `json:"receivedS3"`
} }
// ServerHTTPMethodStats holds total number of HTTP operations from/to the server, // ServerHTTPAPIStats holds total number of HTTP operations from/to the server,
// including the average duration the call was spent. // including the average duration the call was spent.
type ServerHTTPMethodStats struct { type ServerHTTPAPIStats struct {
Count uint64 `json:"count"` APIStats map[string]int `json:"apiStats"`
AvgDuration string `json:"avgDuration"`
} }
// ServerHTTPStats holds all type of http operations performed to/from the server // ServerHTTPStats holds all type of http operations performed to/from the server
// including their average execution time. // including their average execution time.
type ServerHTTPStats struct { type ServerHTTPStats struct {
TotalHEADStats ServerHTTPMethodStats `json:"totalHEADs"` CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
SuccessHEADStats ServerHTTPMethodStats `json:"successHEADs"` TotalS3Requests ServerHTTPAPIStats `json:"totalS3Requests"`
TotalGETStats ServerHTTPMethodStats `json:"totalGETs"` TotalS3Errors ServerHTTPAPIStats `json:"totalS3Errors"`
SuccessGETStats ServerHTTPMethodStats `json:"successGETs"`
TotalPUTStats ServerHTTPMethodStats `json:"totalPUTs"`
SuccessPUTStats ServerHTTPMethodStats `json:"successPUTs"`
TotalPOSTStats ServerHTTPMethodStats `json:"totalPOSTs"`
SuccessPOSTStats ServerHTTPMethodStats `json:"successPOSTs"`
TotalDELETEStats ServerHTTPMethodStats `json:"totalDELETEs"`
SuccessDELETEStats ServerHTTPMethodStats `json:"successDELETEs"`
} }
// ServerInfoData holds storage, connections and other // ServerInfoData holds storage, connections and other
// information of a given server. // information of a given server.
type ServerInfoData struct { type ServerInfoData struct {
StorageInfo StorageInfo `json:"storage"` ConnStats ServerConnStats `json:"network"`
ConnStats ServerConnStats `json:"network"` HTTPStats ServerHTTPStats `json:"http"`
HTTPStats ServerHTTPStats `json:"http"` Properties ServerProperties `json:"server"`
Properties ServerProperties `json:"server"`
} }
// ServerInfo holds server information result of one node // ServerInfo holds server information result of one node
@ -274,7 +267,6 @@ type ServerInfo struct {
// Get server information // Get server information
func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) { func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ServerInfo") ctx := newContext(r, w, "ServerInfo")
objectAPI := validateAdminReq(ctx, w, r) objectAPI := validateAdminReq(ctx, w, r)
if objectAPI == nil { if objectAPI == nil {
return return
@ -286,9 +278,8 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
serverInfo = append(serverInfo, ServerInfo{ serverInfo = append(serverInfo, ServerInfo{
Addr: getHostName(r), Addr: getHostName(r),
Data: &ServerInfoData{ Data: &ServerInfoData{
StorageInfo: objectAPI.StorageInfo(ctx), ConnStats: globalConnStats.toServerConnStats(),
ConnStats: globalConnStats.toServerConnStats(), HTTPStats: globalHTTPStats.toServerHTTPStats(),
HTTPStats: globalHTTPStats.toServerHTTPStats(),
Properties: ServerProperties{ Properties: ServerProperties{
Uptime: UTCNow().Sub(globalBootTime), Uptime: UTCNow().Sub(globalBootTime),
Version: Version, Version: Version,
@ -312,6 +303,31 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
writeSuccessResponseJSON(w, jsonBytes) writeSuccessResponseJSON(w, jsonBytes)
} }
// StorageInfoHandler - GET /minio/admin/v1/storageinfo
// ----------
// Get storage information (disk usage, disk counts) for this deployment.
func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "StorageInfo")
objectAPI := validateAdminReq(ctx, w, r)
if objectAPI == nil {
// validateAdminReq has already written the error response.
return
}
storageInfo := objectAPI.StorageInfo(ctx)
// Marshal API response
jsonBytes, err := json.Marshal(storageInfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Reply with storage information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
}
// ServerCPULoadInfo holds informantion about cpu utilization // ServerCPULoadInfo holds informantion about cpu utilization
// of one minio node. It also reports any errors if encountered // of one minio node. It also reports any errors if encountered
// while trying to reach this server. // while trying to reach this server.
@ -814,7 +830,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
// find number of disks in the setup // find number of disks in the setup
info := objectAPI.StorageInfo(ctx) info := objectAPI.StorageInfo(ctx)
numDisks := info.Backend.OfflineDisks + info.Backend.OnlineDisks numDisks := info.Backend.OfflineDisks.Sum() + info.Backend.OnlineDisks.Sum()
healPath := pathJoin(hip.bucket, hip.objPrefix) healPath := pathJoin(hip.bucket, hip.objPrefix)
if hip.clientToken == "" && !hip.forceStart && !hip.forceStop { if hip.clientToken == "" && !hip.forceStart && !hip.forceStop {

View File

@ -396,7 +396,7 @@ func testServiceSignalReceiver(cmd cmdType, t *testing.T) {
func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, error) { func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, error) {
queryVal := url.Values{} queryVal := url.Values{}
queryVal.Set("action", string(cmd.toServiceAction())) queryVal.Set("action", string(cmd.toServiceAction()))
resource := "/minio/admin/v1/service?" + queryVal.Encode() resource := adminAPIPathPrefix + "/service?" + queryVal.Encode()
req, err := newTestRequest(http.MethodPost, resource, 0, nil) req, err := newTestRequest(http.MethodPost, resource, 0, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@ -465,7 +465,7 @@ func buildAdminRequest(queryVal url.Values, method, path string,
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) { contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
req, err := newTestRequest(method, req, err := newTestRequest(method,
"/minio/admin/v1"+path+"?"+queryVal.Encode(), adminAPIPathPrefix+path+"?"+queryVal.Encode(),
contentLength, bodySeeker) contentLength, bodySeeker)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -20,10 +20,11 @@ import (
"net/http" "net/http"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/minio/minio/pkg/madmin"
) )
const ( const (
adminAPIPathPrefix = "/minio/admin" adminAPIPathPrefix = minioReservedBucketPath + "/admin/" + madmin.AdminAPIVersion
) )
// adminAPIHandlers provides HTTP handlers for MinIO admin API. // adminAPIHandlers provides HTTP handlers for MinIO admin API.
@ -37,113 +38,113 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// Admin router // Admin router
adminRouter := router.PathPrefix(adminAPIPathPrefix).Subrouter() adminRouter := router.PathPrefix(adminAPIPathPrefix).Subrouter()
// Version handler
adminV1Router := adminRouter.PathPrefix("/v1").Subrouter()
/// Service operations /// Service operations
// Restart and stop MinIO service. // Restart and stop MinIO service.
adminV1Router.Methods(http.MethodPost).Path("/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}") adminRouter.Methods(http.MethodPost).Path("/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}")
// Update MinIO servers. // Update MinIO servers.
adminV1Router.Methods(http.MethodPost).Path("/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}") adminRouter.Methods(http.MethodPost).Path("/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
// Info operations // Info operations
adminV1Router.Methods(http.MethodGet).Path("/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler)) adminRouter.Methods(http.MethodGet).Path("/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
// Harware Info operations // Harware Info operations
adminV1Router.Methods(http.MethodGet).Path("/hardware").HandlerFunc(httpTraceAll(adminAPI.ServerHardwareInfoHandler)).Queries("hwType", "{hwType:.*}") adminRouter.Methods(http.MethodGet).Path("/hardware").HandlerFunc(httpTraceAll(adminAPI.ServerHardwareInfoHandler)).Queries("hwType", "{hwType:.*}")
// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path("/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
if globalIsDistXL || globalIsXL { if globalIsDistXL || globalIsXL {
/// Heal operations /// Heal operations
// Heal processing endpoint. // Heal processing endpoint.
adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler)) adminRouter.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler)) adminRouter.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler)) adminRouter.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler)) adminRouter.Methods(http.MethodPost).Path("/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
/// Health operations /// Health operations
} }
// Performance command - return performance details based on input type // Performance command - return performance details based on input type
adminV1Router.Methods(http.MethodGet).Path("/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}") adminRouter.Methods(http.MethodGet).Path("/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")
// Profiling operations // Profiling operations
adminV1Router.Methods(http.MethodPost).Path("/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)). adminRouter.Methods(http.MethodPost).Path("/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
Queries("profilerType", "{profilerType:.*}") Queries("profilerType", "{profilerType:.*}")
adminV1Router.Methods(http.MethodGet).Path("/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler)) adminRouter.Methods(http.MethodGet).Path("/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
/// Config operations /// Config operations
if enableConfigOps { if enableConfigOps {
// Get config // Get config
adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler)) adminRouter.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config // Set config
adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler)) adminRouter.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
} }
if enableIAMOps { if enableIAMOps {
// -- IAM APIs -- // -- IAM APIs --
// Add policy IAM // Add policy IAM
adminV1Router.Methods(http.MethodPut).Path("/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", adminRouter.Methods(http.MethodPut).Path("/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
"{name:.*}") "{name:.*}")
// Add user IAM // Add user IAM
adminV1Router.Methods(http.MethodPut).Path("/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}") adminRouter.Methods(http.MethodPut).Path("/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
adminV1Router.Methods(http.MethodPut).Path("/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)). adminRouter.Methods(http.MethodPut).Path("/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}") Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
// Info policy IAM // Info policy IAM
adminV1Router.Methods(http.MethodGet).Path("/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}") adminRouter.Methods(http.MethodGet).Path("/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
// Remove policy IAM // Remove policy IAM
adminV1Router.Methods(http.MethodDelete).Path("/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}") adminRouter.Methods(http.MethodDelete).Path("/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
// Set user or group policy // Set user or group policy
adminV1Router.Methods(http.MethodPut).Path("/set-user-or-group-policy"). adminRouter.Methods(http.MethodPut).Path("/set-user-or-group-policy").
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)). HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}") Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
// Remove user IAM // Remove user IAM
adminV1Router.Methods(http.MethodDelete).Path("/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}") adminRouter.Methods(http.MethodDelete).Path("/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
// List users // List users
adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers)) adminRouter.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
// User info // User info
adminV1Router.Methods(http.MethodGet).Path("/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}") adminRouter.Methods(http.MethodGet).Path("/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
// Add/Remove members from group // Add/Remove members from group
adminV1Router.Methods(http.MethodPut).Path("/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers)) adminRouter.Methods(http.MethodPut).Path("/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
// Get Group // Get Group
adminV1Router.Methods(http.MethodGet).Path("/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}") adminRouter.Methods(http.MethodGet).Path("/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
// List Groups // List Groups
adminV1Router.Methods(http.MethodGet).Path("/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups)) adminRouter.Methods(http.MethodGet).Path("/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
// Set Group Status // Set Group Status
adminV1Router.Methods(http.MethodPut).Path("/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}") adminRouter.Methods(http.MethodPut).Path("/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
// List policies // List policies
adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies)) adminRouter.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
} }
// -- Top APIs -- // -- Top APIs --
// Top locks // Top locks
adminV1Router.Methods(http.MethodGet).Path("/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler)) adminRouter.Methods(http.MethodGet).Path("/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
// HTTP Trace // HTTP Trace
adminV1Router.Methods(http.MethodGet).Path("/trace").HandlerFunc(adminAPI.TraceHandler) adminRouter.Methods(http.MethodGet).Path("/trace").HandlerFunc(adminAPI.TraceHandler)
// Console Logs // Console Logs
adminV1Router.Methods(http.MethodGet).Path("/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler)) adminRouter.Methods(http.MethodGet).Path("/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
// -- KMS APIs -- // -- KMS APIs --
// //
adminV1Router.Methods(http.MethodGet).Path("/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler)) adminRouter.Methods(http.MethodGet).Path("/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
// If none of the routes match, return error. // If none of the routes match, return error.
adminV1Router.NotFoundHandler = http.HandlerFunc(httpTraceHdrs(notFoundHandler)) adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceHdrs(notFoundHandler))
adminV1Router.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(versionMismatchHandler)) adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(versionMismatchHandler))
} }

View File

@ -59,108 +59,108 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
for _, bucket := range routers { for _, bucket := range routers {
// Object operations // Object operations
// HeadObject // HeadObject
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.HeadObjectHandler)) bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler)))
// CopyObjectPart // CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart // PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts // ListObjectParts
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.ListObjectPartsHandler)).Queries("uploadId", "{uploadId:.*}") bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler))).Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload // CompleteMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.CompleteMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}") bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload // NewMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.NewMultipartUploadHandler)).Queries("uploads", "") bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler))).Queries("uploads", "")
// AbortMultipartUpload // AbortMultipartUpload
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}") bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call. // GetObjectACL - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "") bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler))).Queries("acl", "")
// GetObjectTagging - this is a dummy call. // GetObjectTagging - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectTaggingHandler)).Queries("tagging", "") bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler))).Queries("tagging", "")
// SelectObjectContent // SelectObjectContent
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2") bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler))).Queries("select", "").Queries("select-type", "2")
// GetObject // GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler)) bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler)))
// CopyObject // CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectHandler)) bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler)))
// PutObject // PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectHandler)) bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler)))
// DeleteObject // DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.DeleteObjectHandler)) bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler)))
/// Bucket operations /// Bucket operations
// GetBucketLocation // GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLocationHandler)).Queries("location", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getobjectlocation", httpTraceAll(api.GetBucketLocationHandler))).Queries("location", "")
// GetBucketPolicy // GetBucketPolicy
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketPolicyHandler)).Queries("policy", "") bucket.Methods("GET").HandlerFunc(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler))).Queries("policy", "")
// GetBucketLifecycle // GetBucketLifecycle
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "") bucket.Methods("GET").HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
// Dummy Bucket Calls // Dummy Bucket Calls
// GetBucketACL -- this is a dummy call. // GetBucketACL -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketACLHandler)).Queries("acl", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler))).Queries("acl", "")
// GetBucketCors - this is a dummy call. // GetBucketCors - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketCorsHandler)).Queries("cors", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler))).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call. // GetBucketWebsiteHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketWebsiteHandler)).Queries("website", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler))).Queries("website", "")
// GetBucketVersioningHandler - this is a dummy call. // GetBucketVersioningHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketVersioningHandler)).Queries("versioning", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketversion", httpTraceAll(api.GetBucketVersioningHandler))).Queries("versioning", "")
// GetBucketAccelerateHandler - this is a dummy call. // GetBucketAccelerateHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketAccelerateHandler)).Queries("accelerate", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler))).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call. // GetBucketRequestPaymentHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketRequestPaymentHandler)).Queries("requestPayment", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler))).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call. // GetBucketLoggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLoggingHandler)).Queries("logging", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler))).Queries("logging", "")
// GetBucketLifecycleHandler - this is a dummy call. // GetBucketLifecycleHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
// GetBucketReplicationHandler - this is a dummy call. // GetBucketReplicationHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketReplicationHandler)).Queries("replication", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler))).Queries("replication", "")
// GetBucketTaggingHandler - this is a dummy call. // GetBucketTaggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketTaggingHandler)).Queries("tagging", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler))).Queries("tagging", "")
//DeleteBucketWebsiteHandler //DeleteBucketWebsiteHandler
bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketWebsiteHandler)).Queries("website", "") bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler))).Queries("website", "")
// DeleteBucketTaggingHandler // DeleteBucketTaggingHandler
bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketTaggingHandler)).Queries("tagging", "") bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler))).Queries("tagging", "")
// GetBucketNotification // GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketNotificationHandler)).Queries("notification", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler))).Queries("notification", "")
// ListenBucketNotification // ListenBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListenBucketNotificationHandler)).Queries("events", "{events:.*}") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
// ListMultipartUploads // ListMultipartUploads
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListMultipartUploadsHandler)).Queries("uploads", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler))).Queries("uploads", "")
// ListObjectsV2 // ListObjectsV2
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListObjectsV2Handler)).Queries("list-type", "2") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler))).Queries("list-type", "2")
// ListBucketVersions // ListBucketVersions
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListBucketObjectVersionsHandler)).Queries("versions", "") bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler))).Queries("versions", "")
// ListObjectsV1 (Legacy) // ListObjectsV1 (Legacy)
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListObjectsV1Handler)) bucket.Methods("GET").HandlerFunc(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler)))
// PutBucketLifecycle // PutBucketLifecycle
bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketLifecycleHandler)).Queries("lifecycle", "") bucket.Methods("PUT").HandlerFunc(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler))).Queries("lifecycle", "")
// PutBucketPolicy // PutBucketPolicy
bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketPolicyHandler)).Queries("policy", "") bucket.Methods("PUT").HandlerFunc(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler))).Queries("policy", "")
// PutBucketNotification // PutBucketNotification
bucket.Methods(http.MethodPut).HandlerFunc(httpTraceAll(api.PutBucketNotificationHandler)).Queries("notification", "") bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler))).Queries("notification", "")
// PutBucket // PutBucket
bucket.Methods(http.MethodPut).HandlerFunc(httpTraceAll(api.PutBucketHandler)) bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler)))
// HeadBucket // HeadBucket
bucket.Methods(http.MethodHead).HandlerFunc(httpTraceAll(api.HeadBucketHandler)) bucket.Methods(http.MethodHead).HandlerFunc(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler)))
// PostPolicy // PostPolicy
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(httpTraceHdrs(api.PostPolicyBucketHandler)) bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler)))
// DeleteMultipleObjects // DeleteMultipleObjects
bucket.Methods(http.MethodPost).HandlerFunc(httpTraceAll(api.DeleteMultipleObjectsHandler)).Queries("delete", "") bucket.Methods(http.MethodPost).HandlerFunc(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler))).Queries("delete", "")
// DeleteBucketPolicy // DeleteBucketPolicy
bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketPolicyHandler)).Queries("policy", "") bucket.Methods("DELETE").HandlerFunc(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler))).Queries("policy", "")
// DeleteBucketLifecycle // DeleteBucketLifecycle
bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketLifecycleHandler)).Queries("lifecycle", "") bucket.Methods("DELETE").HandlerFunc(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler))).Queries("lifecycle", "")
// DeleteBucket // DeleteBucket
bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketHandler)) bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler)))
} }
/// Root operation /// Root operation
// ListBuckets // ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(httpTraceAll(api.ListBucketsHandler)) apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler)))
// If none of the routes match. // If none of the routes match.
apiRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler)) apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(notFoundHandler)))
} }

View File

@ -92,7 +92,7 @@ func startDailyHeal() {
// Find number of disks in the setup // Find number of disks in the setup
info := objAPI.StorageInfo(ctx) info := objAPI.StorageInfo(ctx)
numDisks := info.Backend.OnlineDisks + info.Backend.OfflineDisks numDisks := info.Backend.OnlineDisks.Sum() + info.Backend.OfflineDisks.Sum()
nh := newBgHealSequence(numDisks) nh := newBgHealSequence(numDisks)
globalSweepHealState.LaunchNewHealSequence(nh) globalSweepHealState.LaunchNewHealSequence(nh)

View File

@ -252,10 +252,12 @@ func (fs *FSObjects) StorageInfo(ctx context.Context) StorageInfo {
if !fs.diskMount { if !fs.diskMount {
used = atomic.LoadUint64(&fs.totalUsed) used = atomic.LoadUint64(&fs.totalUsed)
} }
localPeer := GetLocalPeer(globalEndpoints)
storageInfo := StorageInfo{ storageInfo := StorageInfo{
Used: used, Used: []uint64{used},
Total: di.Total, Total: []uint64{di.Total},
Available: di.Free, Available: []uint64{di.Free},
MountPaths: []string{localPeer + fs.fsPath},
} }
storageInfo.Backend.Type = BackendFS storageInfo.Backend.Type = BackendFS
return storageInfo return storageInfo

View File

@ -196,8 +196,6 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
} }
globalHTTPServer = xhttp.NewServer([]string{globalCLIContext.Addr}, criticalErrorHandler{registerHandlers(router, globalHandlers...)}, getCert) globalHTTPServer = xhttp.NewServer([]string{globalCLIContext.Addr}, criticalErrorHandler{registerHandlers(router, globalHandlers...)}, getCert)
globalHTTPServer.UpdateBytesReadFunc = globalConnStats.incInputBytes
globalHTTPServer.UpdateBytesWrittenFunc = globalConnStats.incOutputBytes
go func() { go func() {
globalHTTPServerErrorCh <- globalHTTPServer.Start() globalHTTPServerErrorCh <- globalHTTPServer.Start()
}() }()

View File

@ -226,7 +226,7 @@ func (n *hdfsObjects) StorageInfo(ctx context.Context) minio.StorageInfo {
return minio.StorageInfo{} return minio.StorageInfo{}
} }
sinfo := minio.StorageInfo{} sinfo := minio.StorageInfo{}
sinfo.Used = fsInfo.Used sinfo.Used = []uint64{fsInfo.Used}
sinfo.Backend.Type = minio.Unknown sinfo.Backend.Type = minio.Unknown
return sinfo return sinfo
} }

View File

@ -17,9 +17,7 @@
package cmd package cmd
import ( import (
"bufio"
"context" "context"
"net"
"net/http" "net/http"
"strings" "strings"
"time" "time"
@ -515,34 +513,6 @@ func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.handler.ServeHTTP(w, r) h.handler.ServeHTTP(w, r)
} }
// httpResponseRecorder wraps http.ResponseWriter
// to record some useful http response data.
type httpResponseRecorder struct {
http.ResponseWriter
respStatusCode int
}
// Wraps ResponseWriter's Write()
func (rww *httpResponseRecorder) Write(b []byte) (int, error) {
return rww.ResponseWriter.Write(b)
}
// Wraps ResponseWriter's Flush()
func (rww *httpResponseRecorder) Flush() {
rww.ResponseWriter.(http.Flusher).Flush()
}
// Wraps ResponseWriter's WriteHeader() and record
// the response status code
func (rww *httpResponseRecorder) WriteHeader(httpCode int) {
rww.respStatusCode = httpCode
rww.ResponseWriter.WriteHeader(httpCode)
}
func (rww *httpResponseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return rww.ResponseWriter.(http.Hijacker).Hijack()
}
// httpStatsHandler definition: gather HTTP statistics // httpStatsHandler definition: gather HTTP statistics
type httpStatsHandler struct { type httpStatsHandler struct {
handler http.Handler handler http.Handler
@ -554,26 +524,13 @@ func setHTTPStatsHandler(h http.Handler) http.Handler {
} }
func (h httpStatsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h httpStatsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Wraps w to record http response information isS3Request := !strings.HasPrefix(r.URL.Path, minioReservedBucketPath)
ww := &httpResponseRecorder{ResponseWriter: w} // record s3 connection stats.
recordRequest := &recordTrafficRequest{ReadCloser: r.Body, isS3Request: isS3Request}
// Time start before the call is about to start. r.Body = recordRequest
tBefore := UTCNow() recordResponse := &recordTrafficResponse{w, isS3Request}
// Execute the request // Execute the request
h.handler.ServeHTTP(ww, r) h.handler.ServeHTTP(recordResponse, r)
// Time after call has completed.
tAfter := UTCNow()
// Time duration in secs since the call started.
//
// We don't need to do nanosecond precision in this
// simply for the fact that it is not human readable.
durationSecs := tAfter.Sub(tBefore).Seconds()
// Update http statistics
globalHTTPStats.updateStats(r, ww, durationSecs)
} }
// requestValidityHandler validates all the incoming paths for // requestValidityHandler validates all the incoming paths for

View File

@ -348,6 +348,39 @@ func httpTraceHdrs(f http.HandlerFunc) http.HandlerFunc {
} }
} }
func collectAPIStats(api string, f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
isS3Request := !strings.HasPrefix(r.URL.Path, minioReservedBucketPath)
apiStatsWriter := &recordAPIStats{w, UTCNow(), false, 0, isS3Request}
// Time start before the call is about to start.
tBefore := UTCNow()
if isS3Request {
globalHTTPStats.currentS3Requests.Inc(api)
}
// Execute the request
f.ServeHTTP(apiStatsWriter, r)
if isS3Request {
globalHTTPStats.currentS3Requests.Dec(api)
}
// Firstbyte read.
tAfter := apiStatsWriter.TTFB
// Time duration in secs since the call started.
//
// We don't need to do nanosecond precision in this
// simply for the fact that it is not human readable.
durationSecs := tAfter.Sub(tBefore).Seconds()
// Update http statistics
globalHTTPStats.updateStats(api, r, apiStatsWriter, durationSecs)
}
}
// Returns "/bucketName/objectName" for path-style or virtual-host-style requests. // Returns "/bucketName/objectName" for path-style or virtual-host-style requests.
func getResource(path string, host string, domains []string) (string, error) { func getResource(path string, host string, domains []string) (string, error) {
if len(domains) == 0 { if len(domains) == 0 {

View File

@ -19,6 +19,8 @@ package cmd
import ( import (
"fmt" "fmt"
"net/http" "net/http"
"strings"
"sync"
"time" "time"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
@ -31,6 +33,8 @@ import (
type ConnStats struct { type ConnStats struct {
totalInputBytes atomic.Uint64 totalInputBytes atomic.Uint64
totalOutputBytes atomic.Uint64 totalOutputBytes atomic.Uint64
s3InputBytes atomic.Uint64
s3OutputBytes atomic.Uint64
} }
// Increase total input bytes // Increase total input bytes
@ -53,11 +57,33 @@ func (s *ConnStats) getTotalOutputBytes() uint64 {
return s.totalOutputBytes.Load() return s.totalOutputBytes.Load()
} }
// Return connection stats (total input/output bytes) // Increase outbound input bytes
func (s *ConnStats) incS3InputBytes(n int) {
s.s3InputBytes.Add(uint64(n))
}
// Increase outbound output bytes
func (s *ConnStats) incS3OutputBytes(n int) {
s.s3OutputBytes.Add(uint64(n))
}
// Return outbound input bytes
func (s *ConnStats) getS3InputBytes() uint64 {
return s.s3InputBytes.Load()
}
// Return outbound output bytes
func (s *ConnStats) getS3OutputBytes() uint64 {
return s.s3OutputBytes.Load()
}
// Return connection stats (total input/output bytes and total s3 input/output bytes)
func (s *ConnStats) toServerConnStats() ServerConnStats { func (s *ConnStats) toServerConnStats() ServerConnStats {
return ServerConnStats{ return ServerConnStats{
TotalInputBytes: s.getTotalInputBytes(), TotalInputBytes: s.getTotalInputBytes(),
TotalOutputBytes: s.getTotalOutputBytes(), TotalOutputBytes: s.getTotalOutputBytes(),
S3InputBytes: s.getS3InputBytes(),
S3OutputBytes: s.getS3OutputBytes(),
} }
} }
@ -66,35 +92,55 @@ func newConnStats() *ConnStats {
return &ConnStats{} return &ConnStats{}
} }
// HTTPMethodStats holds statistics information about // HTTPAPIStats holds statistics information about
// a given HTTP method made by all clients // a given API in the requests.
type HTTPMethodStats struct { type HTTPAPIStats struct {
Counter atomic.Uint64 APIStats map[string]int
Duration atomic.Float64 sync.RWMutex
}
// Inc increments the api stats counter.
func (stats *HTTPAPIStats) Inc(api string) {
stats.Lock()
defer stats.Unlock()
if stats == nil {
return
}
if stats.APIStats == nil {
stats.APIStats = make(map[string]int)
}
if _, ok := stats.APIStats[api]; ok {
stats.APIStats[api]++
return
}
stats.APIStats[api] = 1
}
// Dec increments the api stats counter.
func (stats *HTTPAPIStats) Dec(api string) {
stats.Lock()
defer stats.Unlock()
if stats == nil {
return
}
if val, ok := stats.APIStats[api]; ok && val > 0 {
stats.APIStats[api]--
}
}
// Load returns the recorded stats.
func (stats *HTTPAPIStats) Load() map[string]int {
stats.Lock()
defer stats.Unlock()
return stats.APIStats
} }
// HTTPStats holds statistics information about // HTTPStats holds statistics information about
// HTTP requests made by all clients // HTTP requests made by all clients
type HTTPStats struct { type HTTPStats struct {
// HEAD request stats. currentS3Requests HTTPAPIStats
totalHEADs HTTPMethodStats totalS3Requests HTTPAPIStats
successHEADs HTTPMethodStats totalS3Errors HTTPAPIStats
// GET request stats.
totalGETs HTTPMethodStats
successGETs HTTPMethodStats
// PUT request stats.
totalPUTs HTTPMethodStats
successPUTs HTTPMethodStats
// POST request stats.
totalPOSTs HTTPMethodStats
successPOSTs HTTPMethodStats
// DELETE request stats.
totalDELETEs HTTPMethodStats
successDELETEs HTTPMethodStats
} }
func durationStr(totalDuration, totalCount float64) string { func durationStr(totalDuration, totalCount float64) string {
@ -102,95 +148,39 @@ func durationStr(totalDuration, totalCount float64) string {
} }
// Converts http stats into struct to be sent back to the client. // Converts http stats into struct to be sent back to the client.
func (st HTTPStats) toServerHTTPStats() ServerHTTPStats { func (st *HTTPStats) toServerHTTPStats() ServerHTTPStats {
serverStats := ServerHTTPStats{} serverStats := ServerHTTPStats{}
serverStats.TotalHEADStats = ServerHTTPMethodStats{
Count: st.totalHEADs.Counter.Load(), serverStats.CurrentS3Requests = ServerHTTPAPIStats{
AvgDuration: durationStr(st.totalHEADs.Duration.Load(), float64(st.totalHEADs.Counter.Load())), APIStats: st.currentS3Requests.Load(),
} }
serverStats.SuccessHEADStats = ServerHTTPMethodStats{
Count: st.successHEADs.Counter.Load(), serverStats.TotalS3Requests = ServerHTTPAPIStats{
AvgDuration: durationStr(st.successHEADs.Duration.Load(), float64(st.successHEADs.Counter.Load())), APIStats: st.totalS3Requests.Load(),
} }
serverStats.TotalGETStats = ServerHTTPMethodStats{
Count: st.totalGETs.Counter.Load(), serverStats.TotalS3Errors = ServerHTTPAPIStats{
AvgDuration: durationStr(st.totalGETs.Duration.Load(), float64(st.totalGETs.Counter.Load())), APIStats: st.totalS3Errors.Load(),
}
serverStats.SuccessGETStats = ServerHTTPMethodStats{
Count: st.successGETs.Counter.Load(),
AvgDuration: durationStr(st.successGETs.Duration.Load(), float64(st.successGETs.Counter.Load())),
}
serverStats.TotalPUTStats = ServerHTTPMethodStats{
Count: st.totalPUTs.Counter.Load(),
AvgDuration: durationStr(st.totalPUTs.Duration.Load(), float64(st.totalPUTs.Counter.Load())),
}
serverStats.SuccessPUTStats = ServerHTTPMethodStats{
Count: st.successPUTs.Counter.Load(),
AvgDuration: durationStr(st.successPUTs.Duration.Load(), float64(st.successPUTs.Counter.Load())),
}
serverStats.TotalPOSTStats = ServerHTTPMethodStats{
Count: st.totalPOSTs.Counter.Load(),
AvgDuration: durationStr(st.totalPOSTs.Duration.Load(), float64(st.totalPOSTs.Counter.Load())),
}
serverStats.SuccessPOSTStats = ServerHTTPMethodStats{
Count: st.successPOSTs.Counter.Load(),
AvgDuration: durationStr(st.successPOSTs.Duration.Load(), float64(st.successPOSTs.Counter.Load())),
}
serverStats.TotalDELETEStats = ServerHTTPMethodStats{
Count: st.totalDELETEs.Counter.Load(),
AvgDuration: durationStr(st.totalDELETEs.Duration.Load(), float64(st.totalDELETEs.Counter.Load())),
}
serverStats.SuccessDELETEStats = ServerHTTPMethodStats{
Count: st.successDELETEs.Counter.Load(),
AvgDuration: durationStr(st.successDELETEs.Duration.Load(), float64(st.successDELETEs.Counter.Load())),
} }
return serverStats return serverStats
} }
// Update statistics from http request and response data // Update statistics from http request and response data
func (st *HTTPStats) updateStats(r *http.Request, w *httpResponseRecorder, durationSecs float64) { func (st *HTTPStats) updateStats(api string, r *http.Request, w *recordAPIStats, durationSecs float64) {
// A successful request has a 2xx response code // A successful request has a 2xx response code
successReq := (w.respStatusCode >= 200 && w.respStatusCode < 300) successReq := (w.respStatusCode >= 200 && w.respStatusCode < 300)
// Update stats according to method verb
switch r.Method { if w.isS3Request && !strings.HasSuffix(r.URL.Path, prometheusMetricsPath) {
case "HEAD": st.totalS3Requests.Inc(api)
st.totalHEADs.Counter.Inc() if !successReq && w.respStatusCode != 0 {
st.totalHEADs.Duration.Add(durationSecs) st.totalS3Errors.Inc(api)
if successReq {
st.successHEADs.Counter.Inc()
st.successHEADs.Duration.Add(durationSecs)
}
case "GET":
st.totalGETs.Counter.Inc()
st.totalGETs.Duration.Add(durationSecs)
if successReq {
st.successGETs.Counter.Inc()
st.successGETs.Duration.Add(durationSecs)
}
case "PUT":
st.totalPUTs.Counter.Inc()
st.totalPUTs.Duration.Add(durationSecs)
if successReq {
st.successPUTs.Counter.Inc()
st.totalPUTs.Duration.Add(durationSecs)
}
case "POST":
st.totalPOSTs.Counter.Inc()
st.totalPOSTs.Duration.Add(durationSecs)
if successReq {
st.successPOSTs.Counter.Inc()
st.totalPOSTs.Duration.Add(durationSecs)
}
case "DELETE":
st.totalDELETEs.Counter.Inc()
st.totalDELETEs.Duration.Add(durationSecs)
if successReq {
st.successDELETEs.Counter.Inc()
st.successDELETEs.Duration.Add(durationSecs)
} }
} }
// Increment the prometheus http request response histogram with appropriate label
httpRequestsDuration.With(prometheus.Labels{"request_type": r.Method}).Observe(durationSecs) if w.isS3Request && r.Method == "GET" {
// Increment the prometheus http request response histogram with appropriate label
httpRequestsDuration.With(prometheus.Labels{"api": api}).Observe(durationSecs)
}
} }
// Prepare new HTTPStats structure // Prepare new HTTPStats structure

View File

@ -0,0 +1,107 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io"
"net/http"
"time"
)
// records the incoming bytes from the underlying request.Body.
type recordTrafficRequest struct {
io.ReadCloser
isS3Request bool
}
// Records the bytes read.
func (r *recordTrafficRequest) Read(p []byte) (n int, err error) {
n, err = r.ReadCloser.Read(p)
globalConnStats.incInputBytes(n)
if r.isS3Request {
globalConnStats.incS3InputBytes(n)
}
return n, err
}
// Records the outgoing bytes through the responseWriter.
type recordTrafficResponse struct {
// wrapper for underlying http.ResponseWriter.
writer http.ResponseWriter
isS3Request bool
}
// Calls the underlying WriteHeader.
func (r *recordTrafficResponse) WriteHeader(i int) {
r.writer.WriteHeader(i)
}
// Calls the underlying Header.
func (r *recordTrafficResponse) Header() http.Header {
return r.writer.Header()
}
// Records the output bytes
func (r *recordTrafficResponse) Write(p []byte) (n int, err error) {
n, err = r.writer.Write(p)
globalConnStats.incOutputBytes(n)
// Check if it is s3 request
if r.isS3Request {
globalConnStats.incS3OutputBytes(n)
}
return n, err
}
// Calls the underlying Flush.
func (r *recordTrafficResponse) Flush() {
r.writer.(http.Flusher).Flush()
}
// Records the outgoing bytes through the responseWriter.
type recordAPIStats struct {
// wrapper for underlying http.ResponseWriter.
writer http.ResponseWriter
TTFB time.Time // TimeToFirstByte.
firstByteRead bool
respStatusCode int
isS3Request bool
}
// Calls the underlying WriteHeader.
func (r *recordAPIStats) WriteHeader(i int) {
r.respStatusCode = i
r.writer.WriteHeader(i)
}
// Calls the underlying Header.
func (r *recordAPIStats) Header() http.Header {
return r.writer.Header()
}
// Records the TTFB on the first byte write.
func (r *recordAPIStats) Write(p []byte) (n int, err error) {
if !r.firstByteRead {
r.TTFB = UTCNow()
r.firstByteRead = true
}
return r.writer.Write(p)
}
// Calls the underlying Flush.
func (r *recordAPIStats) Flush() {
r.writer.(http.Flusher).Flush()
}

View File

@ -1,57 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2017-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package http
import (
"net"
)
// AccountingConn - is a generic stream-oriented network connection supporting buffered reader and read/write timeout.
type AccountingConn struct {
net.Conn
updateBytesReadFunc func(int) // function to be called to update bytes read.
updateBytesWrittenFunc func(int) // function to be called to update bytes written.
}
// Read - reads data from the connection using wrapped buffered reader.
func (c *AccountingConn) Read(b []byte) (n int, err error) {
n, err = c.Conn.Read(b)
if err == nil && c.updateBytesReadFunc != nil {
c.updateBytesReadFunc(n)
}
return n, err
}
// Write - writes data to the connection.
func (c *AccountingConn) Write(b []byte) (n int, err error) {
n, err = c.Conn.Write(b)
if err == nil && c.updateBytesWrittenFunc != nil {
c.updateBytesWrittenFunc(n)
}
return n, err
}
// newAccountingConn - creates a new connection object wrapping net.Conn with deadlines.
func newAccountingConn(c net.Conn, updateBytesReadFunc, updateBytesWrittenFunc func(int)) *AccountingConn {
return &AccountingConn{
Conn: c,
updateBytesReadFunc: updateBytesReadFunc,
updateBytesWrittenFunc: updateBytesWrittenFunc,
}
}

View File

@ -33,13 +33,11 @@ type acceptResult struct {
// httpListener - HTTP listener capable of handling multiple server addresses. // httpListener - HTTP listener capable of handling multiple server addresses.
type httpListener struct { type httpListener struct {
mutex sync.Mutex // to guard Close() method. mutex sync.Mutex // to guard Close() method.
tcpListeners []*net.TCPListener // underlaying TCP listeners. tcpListeners []*net.TCPListener // underlaying TCP listeners.
acceptCh chan acceptResult // channel where all TCP listeners write accepted connection. acceptCh chan acceptResult // channel where all TCP listeners write accepted connection.
doneCh chan struct{} // done channel for TCP listener goroutines. doneCh chan struct{} // done channel for TCP listener goroutines.
tcpKeepAliveTimeout time.Duration tcpKeepAliveTimeout time.Duration
updateBytesReadFunc func(int) // function to be called to update bytes read in Deadlineconn.
updateBytesWrittenFunc func(int) // function to be called to update bytes written in Deadlineconn.
} }
// isRoutineNetErr returns true if error is due to a network timeout, // isRoutineNetErr returns true if error is due to a network timeout,
@ -89,9 +87,7 @@ func (listener *httpListener) start() {
tcpConn.SetKeepAlive(true) tcpConn.SetKeepAlive(true)
tcpConn.SetKeepAlivePeriod(listener.tcpKeepAliveTimeout) tcpConn.SetKeepAlivePeriod(listener.tcpKeepAliveTimeout)
acctConn := newAccountingConn(tcpConn, listener.updateBytesReadFunc, listener.updateBytesWrittenFunc) send(acceptResult{tcpConn, nil}, doneCh)
send(acceptResult{acctConn, nil}, doneCh)
} }
// Closure to handle TCPListener until done channel is closed. // Closure to handle TCPListener until done channel is closed.
@ -172,9 +168,7 @@ func (listener *httpListener) Addrs() (addrs []net.Addr) {
// * listen to multiple addresses // * listen to multiple addresses
// * controls incoming connections only doing HTTP protocol // * controls incoming connections only doing HTTP protocol
func newHTTPListener(serverAddrs []string, func newHTTPListener(serverAddrs []string,
tcpKeepAliveTimeout time.Duration, tcpKeepAliveTimeout time.Duration) (listener *httpListener, err error) {
updateBytesReadFunc func(int),
updateBytesWrittenFunc func(int)) (listener *httpListener, err error) {
var tcpListeners []*net.TCPListener var tcpListeners []*net.TCPListener
@ -207,10 +201,8 @@ func newHTTPListener(serverAddrs []string,
} }
listener = &httpListener{ listener = &httpListener{
tcpListeners: tcpListeners, tcpListeners: tcpListeners,
tcpKeepAliveTimeout: tcpKeepAliveTimeout, tcpKeepAliveTimeout: tcpKeepAliveTimeout,
updateBytesReadFunc: updateBytesReadFunc,
updateBytesWrittenFunc: updateBytesWrittenFunc,
} }
listener.start() listener.start()

View File

@ -132,30 +132,26 @@ func getNonLoopBackIP(t *testing.T) string {
func TestNewHTTPListener(t *testing.T) { func TestNewHTTPListener(t *testing.T) {
testCases := []struct { testCases := []struct {
serverAddrs []string serverAddrs []string
tcpKeepAliveTimeout time.Duration tcpKeepAliveTimeout time.Duration
readTimeout time.Duration readTimeout time.Duration
writeTimeout time.Duration writeTimeout time.Duration
updateBytesReadFunc func(int) expectedErr bool
updateBytesWrittenFunc func(int)
expectedErr bool
}{ }{
{[]string{"93.184.216.34:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true}, {[]string{"93.184.216.34:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), true},
{[]string{"example.org:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true}, {[]string{"example.org:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), true},
{[]string{"unknown-host"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true}, {[]string{"unknown-host"}, time.Duration(0), time.Duration(0), time.Duration(0), true},
{[]string{"unknown-host:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true}, {[]string{"unknown-host:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), true},
{[]string{"localhost:65432", "93.184.216.34:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true}, {[]string{"localhost:65432", "93.184.216.34:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), true},
{[]string{"localhost:65432", "unknown-host:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true}, {[]string{"localhost:65432", "unknown-host:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), true},
{[]string{"localhost:0"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, false}, {[]string{"localhost:0"}, time.Duration(0), time.Duration(0), time.Duration(0), false},
{[]string{"localhost:0"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, false}, {[]string{"localhost:0"}, time.Duration(0), time.Duration(0), time.Duration(0), false},
} }
for _, testCase := range testCases { for _, testCase := range testCases {
listener, err := newHTTPListener( listener, err := newHTTPListener(
testCase.serverAddrs, testCase.serverAddrs,
testCase.tcpKeepAliveTimeout, testCase.tcpKeepAliveTimeout,
testCase.updateBytesReadFunc,
testCase.updateBytesWrittenFunc,
) )
if !testCase.expectedErr { if !testCase.expectedErr {
@ -190,7 +186,6 @@ func TestHTTPListenerStartClose(t *testing.T) {
listener, err := newHTTPListener( listener, err := newHTTPListener(
testCase.serverAddrs, testCase.serverAddrs,
time.Duration(0), time.Duration(0),
nil, nil,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -231,7 +226,6 @@ func TestHTTPListenerAddr(t *testing.T) {
listener, err := newHTTPListener( listener, err := newHTTPListener(
testCase.serverAddrs, testCase.serverAddrs,
time.Duration(0), time.Duration(0),
nil, nil,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -269,7 +263,6 @@ func TestHTTPListenerAddrs(t *testing.T) {
listener, err := newHTTPListener( listener, err := newHTTPListener(
testCase.serverAddrs, testCase.serverAddrs,
time.Duration(0), time.Duration(0),
nil, nil,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)

View File

@ -46,15 +46,13 @@ const (
// Server - extended http.Server supports multiple addresses to serve and enhanced connection handling. // Server - extended http.Server supports multiple addresses to serve and enhanced connection handling.
type Server struct { type Server struct {
http.Server http.Server
Addrs []string // addresses on which the server listens for new connection. Addrs []string // addresses on which the server listens for new connection.
ShutdownTimeout time.Duration // timeout used for graceful server shutdown. ShutdownTimeout time.Duration // timeout used for graceful server shutdown.
TCPKeepAliveTimeout time.Duration // timeout used for underneath TCP connection. TCPKeepAliveTimeout time.Duration // timeout used for underneath TCP connection.
UpdateBytesReadFunc func(int) // function to be called to update bytes read in bufConn. listenerMutex sync.Mutex // to guard 'listener' field.
UpdateBytesWrittenFunc func(int) // function to be called to update bytes written in bufConn. listener *httpListener // HTTP listener for all 'Addrs' field.
listenerMutex sync.Mutex // to guard 'listener' field. inShutdown uint32 // indicates whether the server is in shutdown or not
listener *httpListener // HTTP listener for all 'Addrs' field. requestCount int32 // counter holds no. of request in progress.
inShutdown uint32 // indicates whether the server is in shutdown or not
requestCount int32 // counter holds no. of request in progress.
} }
// GetRequestCount - returns number of request in progress. // GetRequestCount - returns number of request in progress.
@ -79,8 +77,6 @@ func (srv *Server) Start() (err error) {
listener, err = newHTTPListener( listener, err = newHTTPListener(
addrs, addrs,
tcpKeepAliveTimeout, tcpKeepAliveTimeout,
srv.UpdateBytesReadFunc,
srv.UpdateBytesWrittenFunc,
) )
if err != nil { if err != nil {
return err return err

View File

@ -19,6 +19,7 @@ package cmd
import ( import (
"context" "context"
"net/http" "net/http"
"strings"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
@ -28,11 +29,11 @@ import (
var ( var (
httpRequestsDuration = prometheus.NewHistogramVec( httpRequestsDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{ prometheus.HistogramOpts{
Name: "minio_http_requests_duration_seconds", Name: "s3_ttfb_seconds",
Help: "Time taken by requests served by current MinIO server instance", Help: "Time taken by requests served by current MinIO server instance",
Buckets: []float64{.001, .003, .005, .1, .5, 1}, Buckets: []float64{.05, .1, .25, .5, 1, 2.5, 5, 10},
}, },
[]string{"request_type"}, []string{"api"},
) )
minioVersionInfo = prometheus.NewGaugeVec( minioVersionInfo = prometheus.NewGaugeVec(
prometheus.GaugeOpts{ prometheus.GaugeOpts{
@ -79,51 +80,7 @@ func (c *minioCollector) Describe(ch chan<- *prometheus.Desc) {
func (c *minioCollector) Collect(ch chan<- prometheus.Metric) { func (c *minioCollector) Collect(ch chan<- prometheus.Metric) {
// Expose MinIO's version information // Expose MinIO's version information
minioVersionInfo.WithLabelValues(Version, CommitID).Add(1) minioVersionInfo.WithLabelValues(Version, CommitID).Set(float64(1.0))
// Always expose network stats
// Network Sent/Received Bytes
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("minio", "network", "sent_bytes_total"),
"Total number of bytes sent by current MinIO server instance",
nil, nil),
prometheus.CounterValue,
float64(globalConnStats.getTotalOutputBytes()),
)
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("minio", "network", "received_bytes_total"),
"Total number of bytes received by current MinIO server instance",
nil, nil),
prometheus.CounterValue,
float64(globalConnStats.getTotalInputBytes()),
)
// Expose cache stats only if available
cacheObjLayer := newCacheObjectsFn()
if cacheObjLayer != nil {
cs := cacheObjLayer.StorageInfo(context.Background())
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("minio", "disk", "cache_storage_bytes"),
"Total cache capacity on current MinIO server instance",
nil, nil),
prometheus.GaugeValue,
float64(cs.Total),
)
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("minio", "disk", "cache_storage_free_bytes"),
"Total cache available on current MinIO server instance",
nil, nil),
prometheus.GaugeValue,
float64(cs.Free),
)
}
// Expose disk stats only if applicable
// Fetch disk space info // Fetch disk space info
objLayer := newObjectLayerFn() objLayer := newObjectLayerFn()
@ -132,70 +89,158 @@ func (c *minioCollector) Collect(ch chan<- prometheus.Metric) {
return return
} }
s := objLayer.StorageInfo(context.Background()) storageAPIs := []StorageAPI{}
for _, endpoint := range globalEndpoints {
// Gateways don't provide disk info if endpoint.IsLocal {
if s.Backend.Type == Unknown { // Construct storageAPIs.
return sAPI, _ := newStorageAPI(endpoint)
storageAPIs = append(storageAPIs, sAPI)
}
} }
var totalDisks, offlineDisks int disksInfo, onlineDisks, offlineDisks := getDisksInfo(storageAPIs)
// Setting totalDisks to 1 and offlineDisks to 0 in FS mode totalDisks := offlineDisks.Merge(onlineDisks)
if s.Backend.Type == BackendFS {
totalDisks = 1 for _, offDisks := range offlineDisks {
offlineDisks = 0 // MinIO Offline Disks per node
} else { ch <- prometheus.MustNewConstMetric(
offlineDisks = s.Backend.OfflineDisks prometheus.NewDesc(
totalDisks = s.Backend.OfflineDisks + s.Backend.OnlineDisks prometheus.BuildFQName("minio", "disks", "offline"),
"Total number of offline disks in current MinIO server instance",
nil, nil),
prometheus.GaugeValue,
float64(offDisks),
)
} }
// Total disk usage by current MinIO server instance for _, totDisks := range totalDisks {
// MinIO Total Disks per node
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("minio", "disks", "total"),
"Total number of disks for current MinIO server instance",
nil, nil),
prometheus.GaugeValue,
float64(totDisks),
)
}
localPeer := GetLocalPeer(globalEndpoints)
for _, di := range disksInfo {
// Trim the host
absPath := strings.TrimPrefix(di.RelativePath, localPeer)
// Total disk usage by the disk
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("disk", "storage", "used"),
"Total disk storage used on the disk",
[]string{"disk"}, nil),
prometheus.GaugeValue,
float64(di.Total-di.Free),
absPath,
)
// Total available space in the disk
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("disk", "storage", "available"),
"Total available space left on the disk",
[]string{"disk"}, nil),
prometheus.GaugeValue,
float64(di.Free),
absPath,
)
// Total storage space of the disk
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("disk", "storage", "total"),
"Total space on the disk",
[]string{"disk"}, nil),
prometheus.GaugeValue,
float64(di.Total),
absPath,
)
}
connStats := globalConnStats.toServerConnStats()
httpStats := globalHTTPStats.toServerHTTPStats()
// Network Sent/Received Bytes (internode)
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
prometheus.BuildFQName("minio", "disk", "storage_used_bytes"), prometheus.BuildFQName("internode", "tx", "bytes_total"),
"Total disk storage used by current MinIO server instance", "Total number of bytes sent to the other peer nodes by current MinIO server instance",
nil, nil), nil, nil),
prometheus.GaugeValue, prometheus.CounterValue,
float64(s.Used), float64(connStats.TotalOutputBytes),
) )
// Total disk available space seen by MinIO server instance
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
prometheus.BuildFQName("minio", "disk", "storage_available_bytes"), prometheus.BuildFQName("internode", "rx", "bytes_total"),
"Total disk available space seen by MinIO server instance", "Total number of internode bytes received by current MinIO server instance",
nil, nil), nil, nil),
prometheus.GaugeValue, prometheus.CounterValue,
float64(s.Available), float64(connStats.TotalInputBytes),
) )
// Total disk space seen by MinIO server instance // Network Sent/Received Bytes (Outbound)
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
prometheus.BuildFQName("minio", "disk", "storage_total_bytes"), prometheus.BuildFQName("s3", "tx", "bytes_total"),
"Total disk space seen by MinIO server instance", "Total number of s3 bytes sent by current MinIO server instance",
nil, nil), nil, nil),
prometheus.GaugeValue, prometheus.CounterValue,
float64(s.Total), float64(connStats.S3OutputBytes),
) )
// MinIO Total Disk/Offline Disk
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
prometheus.BuildFQName("minio", "total", "disks"), prometheus.BuildFQName("s3", "rx", "bytes_total"),
"Total number of disks for current MinIO server instance", "Total number of s3 bytes received by current MinIO server instance",
nil, nil), nil, nil),
prometheus.GaugeValue, prometheus.CounterValue,
float64(totalDisks), float64(connStats.S3InputBytes),
)
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("minio", "offline", "disks"),
"Total number of offline disks for current MinIO server instance",
nil, nil),
prometheus.GaugeValue,
float64(offlineDisks),
) )
for api, value := range httpStats.CurrentS3Requests.APIStats {
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("s3", "requests", "current"),
"Total number of running s3 requests in current MinIO server instance",
[]string{"api"}, nil),
prometheus.CounterValue,
float64(value),
api,
)
}
for api, value := range httpStats.TotalS3Requests.APIStats {
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("s3", "requests", "total"),
"Total number of s3 requests in current MinIO server instance",
[]string{"api"}, nil),
prometheus.CounterValue,
float64(value),
api,
)
}
for api, value := range httpStats.TotalS3Errors.APIStats {
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("s3", "errors", "total"),
"Total number of s3 errors in current MinIO server instance",
[]string{"api"}, nil),
prometheus.CounterValue,
float64(value),
api,
)
}
} }
func metricsHandler() http.Handler { func metricsHandler() http.Handler {

View File

@ -39,11 +39,13 @@ const (
// StorageInfo - represents total capacity of underlying storage. // StorageInfo - represents total capacity of underlying storage.
type StorageInfo struct { type StorageInfo struct {
Used uint64 // Used total used per tenant. Used []uint64 // Used total used per disk.
Total uint64 // Total disk space. Total []uint64 // Total disk space per disk.
Available uint64 // Total disk space available. Available []uint64 // Total disk space available per disk.
MountPaths []string // Disk mountpoints
// Backend type. // Backend type.
Backend struct { Backend struct {
@ -51,12 +53,12 @@ type StorageInfo struct {
Type BackendType Type BackendType
// Following fields are only meaningful if BackendType is Erasure. // Following fields are only meaningful if BackendType is Erasure.
OnlineDisks int // Online disks during server startup. OnlineDisks madmin.BackendDisks // Online disks during server startup.
OfflineDisks int // Offline disks during server startup. OfflineDisks madmin.BackendDisks // Offline disks during server startup.
StandardSCData int // Data disks for currently configured Standard storage class. StandardSCData int // Data disks for currently configured Standard storage class.
StandardSCParity int // Parity disks for currently configured Standard storage class. StandardSCParity int // Parity disks for currently configured Standard storage class.
RRSCData int // Data disks for currently configured Reduced Redundancy storage class. RRSCData int // Data disks for currently configured Reduced Redundancy storage class.
RRSCParity int // Parity disks for currently configured Reduced Redundancy storage class. RRSCParity int // Parity disks for currently configured Reduced Redundancy storage class.
// List of all disk status, this is only meaningful if BackendType is Erasure. // List of all disk status, this is only meaningful if BackendType is Erasure.
Sets [][]madmin.DriveInfo Sets [][]madmin.DriveInfo

View File

@ -50,11 +50,11 @@ func getServerInfo() (*ServerInfoData, error) {
if objLayer == nil { if objLayer == nil {
return nil, errServerNotInitialized return nil, errServerNotInitialized
} }
// Server info data. // Server info data.
return &ServerInfoData{ return &ServerInfoData{
StorageInfo: objLayer.StorageInfo(context.Background()), ConnStats: globalConnStats.toServerConnStats(),
ConnStats: globalConnStats.toServerConnStats(), HTTPStats: globalHTTPStats.toServerHTTPStats(),
HTTPStats: globalHTTPStats.toServerHTTPStats(),
Properties: ServerProperties{ Properties: ServerProperties{
Uptime: UTCNow().Sub(globalBootTime), Uptime: UTCNow().Sub(globalBootTime),
Version: Version, Version: Version,

View File

@ -309,10 +309,11 @@ func (s *posix) IsOnline() bool {
// DiskInfo is an extended type which returns current // DiskInfo is an extended type which returns current
// disk usage per path. // disk usage per path.
type DiskInfo struct { type DiskInfo struct {
Total uint64 Total uint64
Free uint64 Free uint64
Used uint64 Used uint64
RootDisk bool RootDisk bool
RelativePath string
} }
// DiskInfo provides current information about disk space usage, // DiskInfo provides current information about disk space usage,
@ -346,12 +347,14 @@ func (s *posix) DiskInfo() (info DiskInfo, err error) {
if err != nil { if err != nil {
return info, err return info, err
} }
localPeer := GetLocalPeer(globalEndpoints)
return DiskInfo{ return DiskInfo{
Total: di.Total, Total: di.Total,
Free: di.Free, Free: di.Free,
Used: used, Used: used,
RootDisk: rootDisk, RootDisk: rootDisk,
RelativePath: localPeer + s.diskPath,
}, nil }, nil
} }

View File

@ -305,8 +305,6 @@ func serverMain(ctx *cli.Context) {
} }
globalHTTPServer = xhttp.NewServer([]string{globalMinioAddr}, criticalErrorHandler{handler}, getCert) globalHTTPServer = xhttp.NewServer([]string{globalMinioAddr}, criticalErrorHandler{handler}, getCert)
globalHTTPServer.UpdateBytesReadFunc = globalConnStats.incInputBytes
globalHTTPServer.UpdateBytesWrittenFunc = globalConnStats.incOutputBytes
go func() { go func() {
globalHTTPServerErrorCh <- globalHTTPServer.Start() globalHTTPServerErrorCh <- globalHTTPServer.Start()
}() }()

View File

@ -188,7 +188,7 @@ func printObjectAPIMsg() {
func getStorageInfoMsg(storageInfo StorageInfo) string { func getStorageInfoMsg(storageInfo StorageInfo) string {
var msg string var msg string
if storageInfo.Backend.Type == BackendErasure { if storageInfo.Backend.Type == BackendErasure {
diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks) diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks.Sum(), storageInfo.Backend.OfflineDisks.Sum())
msg += color.Blue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo) msg += color.Blue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
} }
return msg return msg

View File

@ -27,14 +27,15 @@ import (
"time" "time"
"github.com/minio/minio/pkg/color" "github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/madmin"
) )
// Tests if we generate storage info. // Tests if we generate storage info.
func TestStorageInfoMsg(t *testing.T) { func TestStorageInfoMsg(t *testing.T) {
infoStorage := StorageInfo{} infoStorage := StorageInfo{}
infoStorage.Backend.Type = BackendErasure infoStorage.Backend.Type = BackendErasure
infoStorage.Backend.OnlineDisks = 7 infoStorage.Backend.OnlineDisks = madmin.BackendDisks{"127.0.0.1:9000": 4, "127.0.0.1:9001": 3}
infoStorage.Backend.OfflineDisks = 1 infoStorage.Backend.OfflineDisks = madmin.BackendDisks{"127.0.0.1:9000": 0, "127.0.0.1:9001": 1}
if msg := getStorageInfoMsg(infoStorage); !strings.Contains(msg, "7 Online, 1 Offline") { if msg := getStorageInfoMsg(infoStorage); !strings.Contains(msg, "7 Online, 1 Offline") {
t.Fatal("Unexpected storage info message, found:", msg) t.Fatal("Unexpected storage info message, found:", msg)

View File

@ -322,11 +322,12 @@ func (s *xlSets) StorageInfo(ctx context.Context) StorageInfo {
g.Wait() g.Wait()
for _, lstorageInfo := range storageInfos { for _, lstorageInfo := range storageInfos {
storageInfo.Used += lstorageInfo.Used storageInfo.Used = append(storageInfo.Used, lstorageInfo.Used...)
storageInfo.Total += lstorageInfo.Total storageInfo.Total = append(storageInfo.Total, lstorageInfo.Total...)
storageInfo.Available += lstorageInfo.Available storageInfo.Available = append(storageInfo.Available, lstorageInfo.Available...)
storageInfo.Backend.OnlineDisks += lstorageInfo.Backend.OnlineDisks storageInfo.MountPaths = append(storageInfo.MountPaths, lstorageInfo.MountPaths...)
storageInfo.Backend.OfflineDisks += lstorageInfo.Backend.OfflineDisks storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks.Merge(lstorageInfo.Backend.OnlineDisks)
storageInfo.Backend.OfflineDisks = storageInfo.Backend.OfflineDisks.Merge(lstorageInfo.Backend.OfflineDisks)
} }
scfg := globalServerConfig.GetStorageClass() scfg := globalServerConfig.GetStorageClass()

View File

@ -19,9 +19,12 @@ package cmd
import ( import (
"context" "context"
"sort" "sort"
"strings"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bpool" "github.com/minio/minio/pkg/bpool"
"github.com/minio/minio/pkg/madmin"
xnet "github.com/minio/minio/pkg/net"
"github.com/minio/minio/pkg/sync/errgroup" "github.com/minio/minio/pkg/sync/errgroup"
) )
@ -69,7 +72,7 @@ func (d byDiskTotal) Less(i, j int) bool {
} }
// getDisksInfo - fetch disks info across all other storage API. // getDisksInfo - fetch disks info across all other storage API.
func getDisksInfo(disks []StorageAPI) (disksInfo []DiskInfo, onlineDisks int, offlineDisks int) { func getDisksInfo(disks []StorageAPI) (disksInfo []DiskInfo, onlineDisks, offlineDisks madmin.BackendDisks) {
disksInfo = make([]DiskInfo, len(disks)) disksInfo = make([]DiskInfo, len(disks))
g := errgroup.WithNErrs(len(disks)) g := errgroup.WithNErrs(len(disks))
@ -94,13 +97,33 @@ func getDisksInfo(disks []StorageAPI) (disksInfo []DiskInfo, onlineDisks int, of
}, index) }, index)
} }
// Wait for the routines. getPeerAddress := func(diskPath string) (string, error) {
for _, err := range g.Wait() { hostPort := strings.Split(diskPath, SlashSeparator)[0]
thisAddr, err := xnet.ParseHost(hostPort)
if err != nil { if err != nil {
offlineDisks++ return "", err
}
return thisAddr.String(), nil
}
onlineDisks = make(madmin.BackendDisks)
offlineDisks = make(madmin.BackendDisks)
// Wait for the routines.
for i, err := range g.Wait() {
peerAddr, pErr := getPeerAddress(disksInfo[i].RelativePath)
if pErr != nil {
continue continue
} }
onlineDisks++ if _, ok := offlineDisks[peerAddr]; !ok {
offlineDisks[peerAddr] = 0
}
if _, ok := onlineDisks[peerAddr]; !ok {
onlineDisks[peerAddr] = 0
}
if err != nil {
offlineDisks[peerAddr]++
}
onlineDisks[peerAddr]++
} }
// Success. // Success.
@ -134,17 +157,23 @@ func getStorageInfo(disks []StorageAPI) StorageInfo {
} }
// Combine all disks to get total usage // Combine all disks to get total usage
var used, total, available uint64 usedList := make([]uint64, len(validDisksInfo))
for _, di := range validDisksInfo { totalList := make([]uint64, len(validDisksInfo))
used = used + di.Used availableList := make([]uint64, len(validDisksInfo))
total = total + di.Total mountPaths := make([]string, len(validDisksInfo))
available = available + di.Free
for i, di := range validDisksInfo {
usedList[i] = di.Used
totalList[i] = di.Total
availableList[i] = di.Free
mountPaths[i] = di.RelativePath
} }
storageInfo := StorageInfo{ storageInfo := StorageInfo{
Used: used, Used: usedList,
Total: total, Total: totalList,
Available: available, Available: availableList,
MountPaths: mountPaths,
} }
storageInfo.Backend.Type = BackendErasure storageInfo.Backend.Type = BackendErasure

View File

@ -104,32 +104,75 @@ Here `prometheus.yml` is the name of configuration file. You can now see MinIO m
MinIO server exposes the following metrics on `/minio/prometheus/metrics` endpoint. All of these can be accessed via Prometheus dashboard. The full list of exposed metrics along with their definition is available in the demo server at https://play.min.io:9000/minio/prometheus/metrics MinIO server exposes the following metrics on `/minio/prometheus/metrics` endpoint. All of these can be accessed via Prometheus dashboard. The full list of exposed metrics along with their definition is available in the demo server at https://play.min.io:9000/minio/prometheus/metrics
These are the new set of metrics which will be in effect after `RELEASE.2019-10-16*`. Some of the key changes in this update are listed below.
- Metrics are bound the respective nodes and is not cluster-wide. Each and every node in a cluster will expose its own metrics.
- Additional metrics to cover the s3 and internode traffic statistics were added.
- Metrics that records the http statistics and latencies are labeled to their respective APIs (putobject,getobject etc).
- Disk usage metrics are distributed and labeled to the respective disk paths.
For more details, please check the `Migration guide for the new set of metrics`
The list of metrics and its definition are as follows. (NOTE: instance here is one MinIO node)
> NOTES:
> 1. Instance here is one MinIO node.
> 2. `s3 requests` exclude internode requests.
- standard go runtime metrics prefixed by `go_` - standard go runtime metrics prefixed by `go_`
- process level metrics prefixed with `process_` - process level metrics prefixed with `process_`
- prometheus scrap metrics prefixed with `promhttp_` - prometheus scrap metrics prefixed with `promhttp_`
- `minio_disk_storage_used_bytes` : Total byte count of disk storage used by current MinIO server instance - `disk_storage_used` : Disk space used by the disk.
- `minio_http_requests_duration_seconds_bucket` : Cumulative counters for all the request types (HEAD/GET/PUT/POST/DELETE) in different time brackets - `disk_storage_available`: Available disk space left on the disk.
- `minio_http_requests_duration_seconds_count` : Count of current number of observations i.e. total HTTP requests (HEAD/GET/PUT/POST/DELETE) - `disk_storage_total`: Total disk space on the disk.
- `minio_http_requests_duration_seconds_sum` : Current aggregate time spent servicing all HTTP requests (HEAD/GET/PUT/POST/DELETE) in seconds - `disks_offline`: Total number of offline disks in current MinIO instance.
- `minio_network_received_bytes_total` : Total number of bytes received by current MinIO server instance - `disks_total`: Total number of disks in current MinIO instance.
- `minio_network_sent_bytes_total` : Total number of bytes sent by current MinIO server instance - `s3_requests_total`: Total number of s3 requests in current MinIO instance.
- `minio_offline_disks` : Total number of offline disks for current MinIO server instance - `s3_errors_total`: Total number of errors in s3 requests in current MinIO instance.
- `minio_total_disks` : Total number of disks for current MinIO server instance - `s3_requests_current`: Total number of active s3 requests in current MinIO instance.
- `minio_disk_storage_available_bytes` : Current storage space available to MinIO server in bytes - `internode_rx_bytes_total`: Total number of internode bytes received by current MinIO server instance.
- `minio_disk_storage_total_bytes` : Total storage space available to MinIO server in bytes - `internode_tx_bytes_total`: Total number of bytes sent to the other nodes by current MinIO server instance.
- `process_start_time_seconds` : Start time of MinIO server since unix epoc hin seconds - `s3_rx_bytes_total`: Total number of s3 bytes received by current MinIO server instance.
- `s3_tx_bytes_total`: Total number of s3 bytes sent by current MinIO server instance.
- `minio_version_info`: Current MinIO version with commit-id.
- `s3_ttfb_seconds`: Histogram that holds the latency information of the requests.
If you're running MinIO gateway, disk/storage information is not exposed. Only following metrics are available
- `minio_http_requests_duration_seconds_bucket` : Cumulative counters for all the request types (HEAD/GET/PUT/POST/DELETE) in different time brackets ## Migration guide for the new set of metrics
- `minio_http_requests_duration_seconds_count` : Count of current number of observations i.e. total HTTP requests (HEAD/GET/PUT/POST/DELETE)
- `minio_http_requests_duration_seconds_sum` : Current aggregate time spent servicing all HTTP requests (HEAD/GET/PUT/POST/DELETE) in seconds
- `minio_network_received_bytes_total` : Total number of bytes received by current MinIO server instance
- `minio_network_sent_bytes_total` : Total number of bytes sent by current MinIO server instance
- `process_start_time_seconds` : Start time of MinIO server since unix epoch in seconds
For MinIO instances with [`caching`](https://github.com/minio/minio/tree/master/docs/disk-caching) enabled, these additional metrics are available. This migration guide applies for older releases or any releases before `RELEASE.2019-10-23*`
- `minio_disk_cache_storage_bytes` : Total byte count of cache capacity available for current MinIO server instance ### MinIO disk level metrics - `disk_*`
- `minio_disk_cache_storage_free_bytes` : Total byte count of free cache available for current MinIO server instance
The migrations include
- `minio_total_disks` to `disks_total`
- `minio_offline_disks` to `disks_offline`
### MinIO disk level metrics - `disk_storage_*`
These metrics have one label.
- `disk`: Holds the disk path
The migrations include
- `minio_disk_storage_used_bytes` to `disk_storage_used`
- `minio_disk_storage_available_bytes` to `disk_storage_available`
- `minio_disk_storage_total_bytes` to `disk_storage_total`
### MinIO network level metrics
These metrics are detailed to cover the s3 and internode network statistics.
The migrations include
- `minio_network_sent_bytes_total` to `s3_tx_bytes_total` and `internode_tx_bytes_total`
- `minio_network_received_bytes_total` to `s3_rx_bytes_total` and `internode_rx_bytes_total`
Some of the additional metrics added were
- `s3_requests_total`
- `s3_errors_total`
- `s3_ttfb_seconds`

1
go.sum
View File

@ -208,6 +208,7 @@ github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 h1:eqyIo2HjKhKe/mJzTG8n4VqvLXIOEG+SLdDqX7xGtkY=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=

View File

@ -42,15 +42,16 @@ func main() {
} }
``` ```
| Service operations | Info operations | Healing operations | Config operations | Top operations | IAM operations | Misc | KMS | | Service operations | Info operations | Healing operations | Config operations | Top operations | IAM operations | Misc | KMS |
|:------------------------------------|:------------------------------------------------------------|:-------------------|:--------------------------|:------------------------|:--------------------------------------|:--------------------------------------------------|:--------------------------------| |:------------------------------------|:--------------------------------------------------|:-------------------|:--------------------------|:------------------------|:--------------------------------------|:--------------------------------------------------|:--------------------------------|
| [`ServiceRestart`](#ServiceRestart) | [`ServerInfo`](#ServerInfo) | [`Heal`](#Heal) | [`GetConfig`](#GetConfig) | [`TopLocks`](#TopLocks) | [`AddUser`](#AddUser) | | [`GetKeyStatus`](#GetKeyStatus) | | [`ServiceRestart`](#ServiceRestart) | [`ServerInfo`](#ServerInfo) | [`Heal`](#Heal) | [`GetConfig`](#GetConfig) | [`TopLocks`](#TopLocks) | [`AddUser`](#AddUser) | | [`GetKeyStatus`](#GetKeyStatus) |
| [`ServiceStop`](#ServiceStop) | [`ServerCPULoadInfo`](#ServerCPULoadInfo) | | [`SetConfig`](#SetConfig) | | [`SetUserPolicy`](#SetUserPolicy) | [`StartProfiling`](#StartProfiling) | | | [`ServiceStop`](#ServiceStop) | [`ServerCPULoadInfo`](#ServerCPULoadInfo) | | [`SetConfig`](#SetConfig) | | [`SetUserPolicy`](#SetUserPolicy) | [`StartProfiling`](#StartProfiling) | |
| | [`ServerMemUsageInfo`](#ServerMemUsageInfo) | | | | [`ListUsers`](#ListUsers) | [`DownloadProfilingData`](#DownloadProfilingData) | | | | [`ServerMemUsageInfo`](#ServerMemUsageInfo) | | | | [`ListUsers`](#ListUsers) | [`DownloadProfilingData`](#DownloadProfilingData) | |
| [`ServiceTrace`](#ServiceTrace) | [`ServerDrivesPerfInfo`](#ServerDrivesPerfInfo) | | | | [`AddCannedPolicy`](#AddCannedPolicy) | [`ServerUpdate`](#ServerUpdate) | | | [`ServiceTrace`](#ServiceTrace) | [`ServerDrivesPerfInfo`](#ServerDrivesPerfInfo) | | | | [`AddCannedPolicy`](#AddCannedPolicy) | [`ServerUpdate`](#ServerUpdate) | |
| | [`NetPerfInfo`](#NetPerfInfo) | | | | | | | | | [`NetPerfInfo`](#NetPerfInfo) | | | | | | |
| | [`ServerCPUHardwareInfo`](#ServerCPUHardwareInfo) | | | | | | | | | [`ServerCPUHardwareInfo`](#ServerCPUHardwareInfo) | | | | | | |
| | [`ServerNetworkHardwareInfo`](#ServerNetworkHardwareInfo) | | | | | | | | | [`ServerNetworkHardwareInfo`](#ServerNetworkHardwareInfo) | | | | | | |
| | [`StorageInfo`](#StorageInfo) | | | | | | |
## 1. Constructor ## 1. Constructor
<a name="MinIO"></a> <a name="MinIO"></a>
@ -150,16 +151,12 @@ __Example__
### ServerInfo() ([]ServerInfo, error) ### ServerInfo() ([]ServerInfo, error)
Fetches information for all cluster nodes, such as server properties, storage information, network statistics, etc. Fetches information for all cluster nodes, such as server properties, storage information, network statistics, etc.
| Param | Type | Description | | Param | Type | Description |
|---------------------------------|--------------------|--------------------------------------------------------------------| |----------------------------------|--------------------|--------------------------------------------------------------------|
| `si.Addr` | _string_ | Address of the server the following information is retrieved from. | | `si.Addr` | _string_ | Address of the server the following information is retrieved from. |
| `si.ConnStats` | _ServerConnStats_ | Connection statistics from the given server. | | `si.ConnStats` | _ServerConnStats_ | Connection statistics from the given server. |
| `si.HTTPStats` | _ServerHTTPStats_ | HTTP connection statistics from the given server. | | `si.HTTPStats` | _ServerHTTPStats_ | HTTP connection statistics from the given server. |
| `si.Properties` | _ServerProperties_ | Server properties such as region, notification targets. | | `si.Properties` | _ServerProperties_ | Server properties such as region, notification targets. |
| `si.Data.StorageInfo.Used` | _int64_ | Used disk space. |
| `si.Data.StorageInfo.Total` | _int64_ | Total disk space. |
| `si.Data.StorageInfo.Available` | _int64_ | Available disk space. |
| `si.Data.StorageInfo.Backend` | _struct{}_ | Represents backend type embedded structure. |
| Param | Type | Description | | Param | Type | Description |
|-----------------------------|-----------------|----------------------------------------------------| |-----------------------------|-----------------|----------------------------------------------------|
@ -187,23 +184,11 @@ Fetches information for all cluster nodes, such as server properties, storage in
| `ServerHTTPStats.TotalDELETEStats` | _ServerHTTPMethodStats_ | Total statistics regarding DELETE operations | | `ServerHTTPStats.TotalDELETEStats` | _ServerHTTPMethodStats_ | Total statistics regarding DELETE operations |
| `ServerHTTPStats.SuccessDELETEStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful DELETE operations | | `ServerHTTPStats.SuccessDELETEStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful DELETE operations |
| Param | Type | Description | | Param | Type | Description |
|-------------------------------------|----------|-------------------------------------------------| |-------------------------------------|----------|-------------------------------------------------|
| `ServerHTTPMethodStats.Count` | _uint64_ | Total number of operations. | | `ServerHTTPMethodStats.Count` | _uint64_ | Total number of operations. |
| `ServerHTTPMethodStats.AvgDuration` | _string_ | Average duration of Count number of operations. | | `ServerHTTPMethodStats.AvgDuration` | _string_ | Average duration of Count number of operations. |
| Param | Type | Description |
|----------------------------|-----------------|-----------------------------------------------------------------------------------|
| `Backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or Erasure. |
| `Backend.OnlineDisks` | _int_ | Total number of disks online (only applies to Erasure backend), is empty for FS. |
| `Backend.OfflineDisks` | _int_ | Total number of disks offline (only applies to Erasure backend), is empty for FS. |
| `Backend.StandardSCData` | _int_ | Data disks set for standard storage class, is empty for FS. |
| `Backend.StandardSCParity` | _int_ | Parity disks set for standard storage class, is empty for FS. |
| `Backend.RRSCData` | _int_ | Data disks set for reduced redundancy storage class, is empty for FS. |
| `Backend.RRSCParity` | _int_ | Parity disks set for reduced redundancy storage class, is empty for FS. |
| `Backend.Sets` | _[][]DriveInfo_ | Represents topology of drives in erasure coded sets. |
| Param | Type | Description | | Param | Type | Description |
|----------------------|----------|-------------------------------------------------------| |----------------------|----------|-------------------------------------------------------|
| `DriveInfo.UUID` | _string_ | Unique ID for each disk provisioned by server format. | | `DriveInfo.UUID` | _string_ | Unique ID for each disk provisioned by server format. |
@ -288,6 +273,7 @@ Fetches network performance of all cluster nodes using given sized payload. Retu
| `Error` | _string_ | Errors (if any) encountered while reaching this node | | `Error` | _string_ | Errors (if any) encountered while reaching this node |
| `ReadThroughput` | _uint64_ | Network read throughput of the server in bytes per second | | `ReadThroughput` | _uint64_ | Network read throughput of the server in bytes per second |
<a name="ServerCPUHardwareInfo"></a> <a name="ServerCPUHardwareInfo"></a>
### ServerCPUHardwareInfo() ([]ServerCPUHardwareInfo, error) ### ServerCPUHardwareInfo() ([]ServerCPUHardwareInfo, error)
@ -334,6 +320,42 @@ Fetches hardware information of CPU.
| `NetworkInfo.HardwareAddr` | _[]byte_ | IEEE MAC-48, EUI-48 and EUI-64 form | | `NetworkInfo.HardwareAddr` | _[]byte_ | IEEE MAC-48, EUI-48 and EUI-64 form |
| `NetworkInfo.Flags` | _uint32_ | e.g., FlagUp, FlagLoopback, FlagMulticast | | `NetworkInfo.Flags` | _uint32_ | e.g., FlagUp, FlagLoopback, FlagMulticast |
<a name="StorageInfo"></a>
### StorageInfo() (StorageInfo, error)
Fetches Storage information for all cluster nodes.
| Param | Type | Description |
|-------------------------|------------|---------------------------------------------|
| `storageInfo.Used` | _[]int64_ | Used disk spaces. |
| `storageInfo.Total` | _[]int64_ | Total disk spaces. |
| `storageInfo.Available` | _[]int64_ | Available disk spaces. |
| `storageInfo.Backend` | _struct{}_ | Represents backend type embedded structure. |
| Param | Type | Description |
|----------------------------|-----------------|--------------------------------------------------------------------------------------------------------------------------|
| `Backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or Erasure. |
| `Backend.OnlineDisks` | _BackendDisks_ | Total number of disks online per node (only applies to Erasure backend) represented in map[string]int, is empty for FS. |
| `Backend.OfflineDisks` | _BackendDisks_ | Total number of disks offline per node (only applies to Erasure backend) represented in map[string]int, is empty for FS. |
| `Backend.StandardSCData` | _int_ | Data disks set for standard storage class, is empty for FS. |
| `Backend.StandardSCParity` | _int_ | Parity disks set for standard storage class, is empty for FS. |
| `Backend.RRSCData` | _int_ | Data disks set for reduced redundancy storage class, is empty for FS. |
| `Backend.RRSCParity` | _int_ | Parity disks set for reduced redundancy storage class, is empty for FS. |
| `Backend.Sets` | _[][]DriveInfo_ | Represents topology of drives in erasure coded sets. |
__Example__
```go
storageInfo, err := madmClnt.StorageInfo()
if err != nil {
log.Fatalln(err)
}
log.Println(storageInfo)
```
## 5. Heal operations ## 5. Heal operations
<a name="Heal"></a> <a name="Heal"></a>

View File

@ -54,7 +54,7 @@ func (adm AdminClient) GetLogs(node string, lineCnt int, logKind string, doneCh
urlValues.Set("logType", logKind) urlValues.Set("logType", logKind)
for { for {
reqData := requestData{ reqData := requestData{
relPath: "/v1/log", relPath: adminAPIPrefix + "/log",
queryValues: urlValues, queryValues: urlValues,
} }
// Execute GET to call log handler // Execute GET to call log handler

View File

@ -29,9 +29,9 @@ import (
// GetConfig - returns the config.json of a minio setup, incoming data is encrypted. // GetConfig - returns the config.json of a minio setup, incoming data is encrypted.
func (adm *AdminClient) GetConfig() ([]byte, error) { func (adm *AdminClient) GetConfig() ([]byte, error) {
// Execute GET on /minio/admin/v1/config to get config of a setup. // Execute GET on /minio/admin/v2/config to get config of a setup.
resp, err := adm.executeMethod("GET", resp, err := adm.executeMethod("GET",
requestData{relPath: "/v1/config"}) requestData{relPath: adminAPIPrefix + "/config"})
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return nil, err return nil, err
@ -85,11 +85,11 @@ func (adm *AdminClient) SetConfig(config io.Reader) (err error) {
} }
reqData := requestData{ reqData := requestData{
relPath: "/v1/config", relPath: adminAPIPrefix + "/config",
content: econfigBytes, content: econfigBytes,
} }
// Execute PUT on /minio/admin/v1/config to set config. // Execute PUT on /minio/admin/v2/config to set config.
resp, err := adm.executeMethod("PUT", reqData) resp, err := adm.executeMethod("PUT", reqData)
defer closeResponse(resp) defer closeResponse(resp)

View File

@ -0,0 +1,44 @@
// +build ignore
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package main
import (
"log"
"github.com/minio/minio/pkg/madmin"
)
// main connects to a MinIO deployment using placeholder credentials and
// prints the cluster-wide storage information returned by the admin API.
func main() {
	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
	// dummy values, please replace them with original values.

	// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
	// New returns a MinIO Admin client object.
	client, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	info, err := client.StorageInfo()
	if err != nil {
		log.Fatalln(err)
	}

	log.Println(info)
}

View File

@ -41,11 +41,11 @@ func (adm *AdminClient) UpdateGroupMembers(g GroupAddRemove) error {
} }
reqData := requestData{ reqData := requestData{
relPath: "/v1/update-group-members", relPath: adminAPIPrefix + "/update-group-members",
content: data, content: data,
} }
// Execute PUT on /minio/admin/v1/update-group-members // Execute PUT on /minio/admin/v2/update-group-members
resp, err := adm.executeMethod("PUT", reqData) resp, err := adm.executeMethod("PUT", reqData)
defer closeResponse(resp) defer closeResponse(resp)
@ -74,7 +74,7 @@ func (adm *AdminClient) GetGroupDescription(group string) (*GroupDesc, error) {
v := url.Values{} v := url.Values{}
v.Set("group", group) v.Set("group", group)
reqData := requestData{ reqData := requestData{
relPath: "/v1/group", relPath: adminAPIPrefix + "/group",
queryValues: v, queryValues: v,
} }
@ -104,7 +104,7 @@ func (adm *AdminClient) GetGroupDescription(group string) (*GroupDesc, error) {
// ListGroups - lists all groups names present on the server. // ListGroups - lists all groups names present on the server.
func (adm *AdminClient) ListGroups() ([]string, error) { func (adm *AdminClient) ListGroups() ([]string, error) {
reqData := requestData{ reqData := requestData{
relPath: "/v1/groups", relPath: adminAPIPrefix + "/groups",
} }
resp, err := adm.executeMethod("GET", reqData) resp, err := adm.executeMethod("GET", reqData)
@ -146,7 +146,7 @@ func (adm *AdminClient) SetGroupStatus(group string, status GroupStatus) error {
v.Set("status", string(status)) v.Set("status", string(status))
reqData := requestData{ reqData := requestData{
relPath: "/v1/set-group-status", relPath: adminAPIPrefix + "/set-group-status",
queryValues: v, queryValues: v,
} }

View File

@ -51,7 +51,7 @@ func (adm *AdminClient) ServerCPUHardwareInfo() ([]ServerCPUHardwareInfo, error)
v := url.Values{} v := url.Values{}
v.Set(HARDWARE, string(CPU)) v.Set(HARDWARE, string(CPU))
resp, err := adm.executeMethod("GET", requestData{ resp, err := adm.executeMethod("GET", requestData{
relPath: "/v1/hardware", relPath: adminAPIPrefix + "/hardware",
queryValues: v, queryValues: v,
}) })

View File

@ -208,7 +208,7 @@ func (adm *AdminClient) Heal(bucket, prefix string, healOpts HealOpts,
return healStart, healTaskStatus, err return healStart, healTaskStatus, err
} }
path := fmt.Sprintf("/v1/heal/%s", bucket) path := fmt.Sprintf(adminAPIPrefix+"/heal/%s", bucket)
if bucket != "" && prefix != "" { if bucket != "" && prefix != "" {
path += "/" + prefix path += "/" + prefix
} }
@ -280,7 +280,7 @@ type BgHealState struct {
// current server or cluster. // current server or cluster.
func (adm *AdminClient) BackgroundHealStatus() (BgHealState, error) { func (adm *AdminClient) BackgroundHealStatus() (BgHealState, error) {
// Execute POST request to background heal status api // Execute POST request to background heal status api
resp, err := adm.executeMethod("POST", requestData{relPath: "/v1/background-heal/status"}) resp, err := adm.executeMethod("POST", requestData{relPath: adminAPIPrefix + "/background-heal/status"})
if err != nil { if err != nil {
return BgHealState{}, err return BgHealState{}, err
} }

View File

@ -59,11 +59,13 @@ type DriveInfo HealDriveInfo
// StorageInfo - represents total capacity of underlying storage. // StorageInfo - represents total capacity of underlying storage.
type StorageInfo struct { type StorageInfo struct {
Used uint64 // Total used spaced per tenant. Used []uint64 // Used total used per disk.
Available uint64 // Total available space. Total []uint64 // Total disk space per disk.
Total uint64 // Total disk space. Available []uint64 // Total disk space available per disk.
MountPaths []string // Disk mountpoints
// Backend type. // Backend type.
Backend struct { Backend struct {
@ -71,18 +73,41 @@ type StorageInfo struct {
Type BackendType Type BackendType
// Following fields are only meaningful if BackendType is Erasure. // Following fields are only meaningful if BackendType is Erasure.
OnlineDisks int // Online disks during server startup. OnlineDisks BackendDisks // Online disks during server startup.
OfflineDisks int // Offline disks during server startup. OfflineDisks BackendDisks // Offline disks during server startup.
StandardSCData int // Data disks for currently configured Standard storage class. StandardSCData int // Data disks for currently configured Standard storage class.
StandardSCParity int // Parity disks for currently configured Standard storage class. StandardSCParity int // Parity disks for currently configured Standard storage class.
RRSCData int // Data disks for currently configured Reduced Redundancy storage class. RRSCData int // Data disks for currently configured Reduced Redundancy storage class.
RRSCParity int // Parity disks for currently configured Reduced Redundancy storage class. RRSCParity int // Parity disks for currently configured Reduced Redundancy storage class.
// List of all disk status, this is only meaningful if BackendType is Erasure. // List of all disk status, this is only meaningful if BackendType is Erasure.
Sets [][]DriveInfo Sets [][]DriveInfo
} }
} }
// BackendDisks - represents the map of endpoint-disks.
type BackendDisks map[string]int

// Sum - Return the sum of the disks in the endpoint-disk map.
// Returns zero for an empty map.
func (d1 BackendDisks) Sum() (total int) {
	for _, diskCount := range d1 {
		total += diskCount
	}
	return total
}
// Merge - Reduces two endpoint-disk maps.
//
// NOTE: Merge mutates and returns its argument d2: every entry of the
// receiver d1 is folded into d2 in place (counts are summed for
// endpoints present in both maps). Callers must not assume d2 is left
// unchanged after the call. The receiver d1 is never modified.
func (d1 BackendDisks) Merge(d2 BackendDisks) BackendDisks {
	for i1, v1 := range d1 {
		// Endpoint present in both maps: accumulate the disk counts.
		if v2, ok := d2[i1]; ok {
			d2[i1] = v2 + v1
			continue
		}
		// Endpoint only in d1: copy its count into d2.
		d2[i1] = v1
	}
	return d2
}
// ServerProperties holds some of the server's information such as uptime, // ServerProperties holds some of the server's information such as uptime,
// version, region, .. // version, region, ..
type ServerProperties struct { type ServerProperties struct {
@ -141,7 +166,7 @@ type ServerInfo struct {
// ServerInfo - Connect to a minio server and call Server Info Management API // ServerInfo - Connect to a minio server and call Server Info Management API
// to fetch server's information represented by ServerInfo structure // to fetch server's information represented by ServerInfo structure
func (adm *AdminClient) ServerInfo() ([]ServerInfo, error) { func (adm *AdminClient) ServerInfo() ([]ServerInfo, error) {
resp, err := adm.executeMethod("GET", requestData{relPath: "/v1/info"}) resp, err := adm.executeMethod("GET", requestData{relPath: adminAPIPrefix + "/info"})
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return nil, err return nil, err
@ -168,6 +193,36 @@ func (adm *AdminClient) ServerInfo() ([]ServerInfo, error) {
return serversInfo, nil return serversInfo, nil
} }
// StorageInfo - Connect to a minio server and call Storage Info Management API
// to fetch server's information represented by StorageInfo structure.
//
// Returns a zero-value StorageInfo together with a non-nil error when the
// request fails, the server replies with a non-200 status, or the response
// body cannot be decoded.
func (adm *AdminClient) StorageInfo() (StorageInfo, error) {
	// Execute GET on /minio/admin/v2/storageinfo to fetch storage info.
	resp, err := adm.executeMethod("GET", requestData{relPath: adminAPIPrefix + "/storageinfo"})
	defer closeResponse(resp)
	if err != nil {
		return StorageInfo{}, err
	}

	// Check response http status code
	if resp.StatusCode != http.StatusOK {
		return StorageInfo{}, httpRespToErrorResponse(resp)
	}

	// Decode the server's json response directly from the stream; this
	// avoids buffering the whole body with ioutil.ReadAll before
	// unmarshalling.
	var storageInfo StorageInfo
	if err = json.NewDecoder(resp.Body).Decode(&storageInfo); err != nil {
		return StorageInfo{}, err
	}

	return storageInfo, nil
}
// ServerDrivesPerfInfo holds information about address and write speed of // ServerDrivesPerfInfo holds information about address and write speed of
// all drives in a single server node // all drives in a single server node
type ServerDrivesPerfInfo struct { type ServerDrivesPerfInfo struct {
@ -185,7 +240,7 @@ func (adm *AdminClient) ServerDrivesPerfInfo(size int64) ([]ServerDrivesPerfInfo
v.Set("size", strconv.FormatInt(size, 10)) v.Set("size", strconv.FormatInt(size, 10))
resp, err := adm.executeMethod("GET", requestData{ resp, err := adm.executeMethod("GET", requestData{
relPath: "/v1/performance", relPath: adminAPIPrefix + "/performance",
queryValues: v, queryValues: v,
}) })
@ -229,7 +284,7 @@ func (adm *AdminClient) ServerCPULoadInfo() ([]ServerCPULoadInfo, error) {
v := url.Values{} v := url.Values{}
v.Set("perfType", string("cpu")) v.Set("perfType", string("cpu"))
resp, err := adm.executeMethod("GET", requestData{ resp, err := adm.executeMethod("GET", requestData{
relPath: "/v1/performance", relPath: adminAPIPrefix + "/performance",
queryValues: v, queryValues: v,
}) })
@ -273,7 +328,7 @@ func (adm *AdminClient) ServerMemUsageInfo() ([]ServerMemUsageInfo, error) {
v := url.Values{} v := url.Values{}
v.Set("perfType", string("mem")) v.Set("perfType", string("mem"))
resp, err := adm.executeMethod("GET", requestData{ resp, err := adm.executeMethod("GET", requestData{
relPath: "/v1/performance", relPath: adminAPIPrefix + "/performance",
queryValues: v, queryValues: v,
}) })
@ -318,7 +373,7 @@ func (adm *AdminClient) NetPerfInfo(size int) (map[string][]NetPerfInfo, error)
v.Set("size", strconv.Itoa(size)) v.Set("size", strconv.Itoa(size))
} }
resp, err := adm.executeMethod("GET", requestData{ resp, err := adm.executeMethod("GET", requestData{
relPath: "/v1/performance", relPath: adminAPIPrefix + "/performance",
queryValues: v, queryValues: v,
}) })

View File

@ -24,13 +24,13 @@ import (
// GetKeyStatus requests status information about the key referenced by keyID // GetKeyStatus requests status information about the key referenced by keyID
// from the KMS connected to a MinIO by performing a Admin-API request. // from the KMS connected to a MinIO by performing a Admin-API request.
// It basically hits the `/minio/admin/v1/kms/key/status` API endpoint. // It basically hits the `/minio/admin/v2/kms/key/status` API endpoint.
func (adm *AdminClient) GetKeyStatus(keyID string) (*KMSKeyStatus, error) { func (adm *AdminClient) GetKeyStatus(keyID string) (*KMSKeyStatus, error) {
// GET /minio/admin/v1/kms/key/status?key-id=<keyID> // GET /minio/admin/v2/kms/key/status?key-id=<keyID>
qv := url.Values{} qv := url.Values{}
qv.Set("key-id", keyID) qv.Set("key-id", keyID)
reqData := requestData{ reqData := requestData{
relPath: "/v1/kms/key/status", relPath: adminAPIPrefix + "/kms/key/status",
queryValues: qv, queryValues: qv,
} }

View File

@ -30,11 +30,11 @@ func (adm *AdminClient) InfoCannedPolicy(policyName string) ([]byte, error) {
queryValues.Set("name", policyName) queryValues.Set("name", policyName)
reqData := requestData{ reqData := requestData{
relPath: "/v1/info-canned-policy", relPath: adminAPIPrefix + "/info-canned-policy",
queryValues: queryValues, queryValues: queryValues,
} }
// Execute GET on /minio/admin/v1/info-canned-policy // Execute GET on /minio/admin/v2/info-canned-policy
resp, err := adm.executeMethod("GET", reqData) resp, err := adm.executeMethod("GET", reqData)
defer closeResponse(resp) defer closeResponse(resp)
@ -52,10 +52,10 @@ func (adm *AdminClient) InfoCannedPolicy(policyName string) ([]byte, error) {
// ListCannedPolicies - list all configured canned policies. // ListCannedPolicies - list all configured canned policies.
func (adm *AdminClient) ListCannedPolicies() (map[string][]byte, error) { func (adm *AdminClient) ListCannedPolicies() (map[string][]byte, error) {
reqData := requestData{ reqData := requestData{
relPath: "/v1/list-canned-policies", relPath: adminAPIPrefix + "/list-canned-policies",
} }
// Execute GET on /minio/admin/v1/list-canned-policies // Execute GET on /minio/admin/v2/list-canned-policies
resp, err := adm.executeMethod("GET", reqData) resp, err := adm.executeMethod("GET", reqData)
defer closeResponse(resp) defer closeResponse(resp)
@ -86,11 +86,11 @@ func (adm *AdminClient) RemoveCannedPolicy(policyName string) error {
queryValues.Set("name", policyName) queryValues.Set("name", policyName)
reqData := requestData{ reqData := requestData{
relPath: "/v1/remove-canned-policy", relPath: adminAPIPrefix + "/remove-canned-policy",
queryValues: queryValues, queryValues: queryValues,
} }
// Execute DELETE on /minio/admin/v1/remove-canned-policy to remove policy. // Execute DELETE on /minio/admin/v2/remove-canned-policy to remove policy.
resp, err := adm.executeMethod("DELETE", reqData) resp, err := adm.executeMethod("DELETE", reqData)
defer closeResponse(resp) defer closeResponse(resp)
@ -111,12 +111,12 @@ func (adm *AdminClient) AddCannedPolicy(policyName, policy string) error {
queryValues.Set("name", policyName) queryValues.Set("name", policyName)
reqData := requestData{ reqData := requestData{
relPath: "/v1/add-canned-policy", relPath: adminAPIPrefix + "/add-canned-policy",
queryValues: queryValues, queryValues: queryValues,
content: []byte(policy), content: []byte(policy),
} }
// Execute PUT on /minio/admin/v1/add-canned-policy to set policy. // Execute PUT on /minio/admin/v2/add-canned-policy to set policy.
resp, err := adm.executeMethod("PUT", reqData) resp, err := adm.executeMethod("PUT", reqData)
defer closeResponse(resp) defer closeResponse(resp)
@ -143,11 +143,11 @@ func (adm *AdminClient) SetPolicy(policyName, entityName string, isGroup bool) e
queryValues.Set("isGroup", groupStr) queryValues.Set("isGroup", groupStr)
reqData := requestData{ reqData := requestData{
relPath: "/v1/set-user-or-group-policy", relPath: adminAPIPrefix + "/set-user-or-group-policy",
queryValues: queryValues, queryValues: queryValues,
} }
// Execute PUT on /minio/admin/v1/set-user-or-group-policy to set policy. // Execute PUT on /minio/admin/v2/set-user-or-group-policy to set policy.
resp, err := adm.executeMethod("PUT", reqData) resp, err := adm.executeMethod("PUT", reqData)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {

View File

@ -54,7 +54,7 @@ func (adm *AdminClient) StartProfiling(profiler ProfilerType) ([]StartProfilingR
v := url.Values{} v := url.Values{}
v.Set("profilerType", string(profiler)) v.Set("profilerType", string(profiler))
resp, err := adm.executeMethod("POST", requestData{ resp, err := adm.executeMethod("POST", requestData{
relPath: "/v1/profiling/start", relPath: adminAPIPrefix + "/profiling/start",
queryValues: v, queryValues: v,
}) })
defer closeResponse(resp) defer closeResponse(resp)
@ -83,7 +83,7 @@ func (adm *AdminClient) StartProfiling(profiler ProfilerType) ([]StartProfilingR
// DownloadProfilingData makes an admin call to download profiling data of a standalone // DownloadProfilingData makes an admin call to download profiling data of a standalone
// server or of the whole cluster in case of a distributed setup. // server or of the whole cluster in case of a distributed setup.
func (adm *AdminClient) DownloadProfilingData() (io.ReadCloser, error) { func (adm *AdminClient) DownloadProfilingData() (io.ReadCloser, error) {
path := fmt.Sprintf("/v1/profiling/download") path := fmt.Sprintf(adminAPIPrefix + "/profiling/download")
resp, err := adm.executeMethod("GET", requestData{ resp, err := adm.executeMethod("GET", requestData{
relPath: path, relPath: path,
}) })

View File

@ -53,7 +53,7 @@ func (adm *AdminClient) serviceCallAction(action ServiceAction) error {
// Request API to Restart server // Request API to Restart server
resp, err := adm.executeMethod("POST", requestData{ resp, err := adm.executeMethod("POST", requestData{
relPath: "/v1/service", relPath: adminAPIPrefix + "/service",
queryValues: queryValues, queryValues: queryValues,
}) })
defer closeResponse(resp) defer closeResponse(resp)
@ -85,7 +85,7 @@ func (adm AdminClient) ServiceTrace(allTrace, errTrace bool, doneCh <-chan struc
urlValues.Set("all", strconv.FormatBool(allTrace)) urlValues.Set("all", strconv.FormatBool(allTrace))
urlValues.Set("err", strconv.FormatBool(errTrace)) urlValues.Set("err", strconv.FormatBool(errTrace))
reqData := requestData{ reqData := requestData{
relPath: "/v1/trace", relPath: adminAPIPrefix + "/trace",
queryValues: urlValues, queryValues: urlValues,
} }
// Execute GET to call trace handler // Execute GET to call trace handler

View File

@ -54,10 +54,10 @@ func (l LockEntries) Swap(i, j int) {
// TopLocks - returns the oldest locks in a minio setup. // TopLocks - returns the oldest locks in a minio setup.
func (adm *AdminClient) TopLocks() (LockEntries, error) { func (adm *AdminClient) TopLocks() (LockEntries, error) {
// Execute GET on /minio/admin/v1/top/locks // Execute GET on /minio/admin/v2/top/locks
// to get the oldest locks in a minio setup. // to get the oldest locks in a minio setup.
resp, err := adm.executeMethod("GET", resp, err := adm.executeMethod("GET",
requestData{relPath: "/v1/top/locks"}) requestData{relPath: adminAPIPrefix + "/top/locks"})
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -38,7 +38,7 @@ func (adm *AdminClient) ServerUpdate(updateURL string) (us ServerUpdateStatus, e
// Request API to Restart server // Request API to Restart server
resp, err := adm.executeMethod("POST", requestData{ resp, err := adm.executeMethod("POST", requestData{
relPath: "/v1/update", relPath: adminAPIPrefix + "/update",
queryValues: queryValues, queryValues: queryValues,
}) })
defer closeResponse(resp) defer closeResponse(resp)

View File

@ -49,11 +49,11 @@ func (adm *AdminClient) RemoveUser(accessKey string) error {
queryValues.Set("accessKey", accessKey) queryValues.Set("accessKey", accessKey)
reqData := requestData{ reqData := requestData{
relPath: "/v1/remove-user", relPath: adminAPIPrefix + "/remove-user",
queryValues: queryValues, queryValues: queryValues,
} }
// Execute DELETE on /minio/admin/v1/remove-user to remove a user. // Execute DELETE on /minio/admin/v2/remove-user to remove a user.
resp, err := adm.executeMethod("DELETE", reqData) resp, err := adm.executeMethod("DELETE", reqData)
defer closeResponse(resp) defer closeResponse(resp)
@ -71,10 +71,10 @@ func (adm *AdminClient) RemoveUser(accessKey string) error {
// ListUsers - list all users. // ListUsers - list all users.
func (adm *AdminClient) ListUsers() (map[string]UserInfo, error) { func (adm *AdminClient) ListUsers() (map[string]UserInfo, error) {
reqData := requestData{ reqData := requestData{
relPath: "/v1/list-users", relPath: adminAPIPrefix + "/list-users",
} }
// Execute GET on /minio/admin/v1/list-users // Execute GET on /minio/admin/v2/list-users
resp, err := adm.executeMethod("GET", reqData) resp, err := adm.executeMethod("GET", reqData)
defer closeResponse(resp) defer closeResponse(resp)
@ -105,11 +105,11 @@ func (adm *AdminClient) GetUserInfo(name string) (u UserInfo, err error) {
queryValues.Set("accessKey", name) queryValues.Set("accessKey", name)
reqData := requestData{ reqData := requestData{
relPath: "/v1/user-info", relPath: adminAPIPrefix + "/user-info",
queryValues: queryValues, queryValues: queryValues,
} }
// Execute GET on /minio/admin/v1/user-info // Execute GET on /minio/admin/v2/user-info
resp, err := adm.executeMethod("GET", reqData) resp, err := adm.executeMethod("GET", reqData)
defer closeResponse(resp) defer closeResponse(resp)
@ -160,12 +160,12 @@ func (adm *AdminClient) SetUser(accessKey, secretKey string, status AccountStatu
queryValues.Set("accessKey", accessKey) queryValues.Set("accessKey", accessKey)
reqData := requestData{ reqData := requestData{
relPath: "/v1/add-user", relPath: adminAPIPrefix + "/add-user",
queryValues: queryValues, queryValues: queryValues,
content: econfigBytes, content: econfigBytes,
} }
// Execute PUT on /minio/admin/v1/add-user to set a user. // Execute PUT on /minio/admin/v2/add-user to set a user.
resp, err := adm.executeMethod("PUT", reqData) resp, err := adm.executeMethod("PUT", reqData)
defer closeResponse(resp) defer closeResponse(resp)
@ -192,11 +192,11 @@ func (adm *AdminClient) SetUserStatus(accessKey string, status AccountStatus) er
queryValues.Set("status", string(status)) queryValues.Set("status", string(status))
reqData := requestData{ reqData := requestData{
relPath: "/v1/set-user-status", relPath: adminAPIPrefix + "/set-user-status",
queryValues: queryValues, queryValues: queryValues,
} }
// Execute PUT on /minio/admin/v1/set-user-status to set status. // Execute PUT on /minio/admin/v2/set-user-status to set status.
resp, err := adm.executeMethod("PUT", reqData) resp, err := adm.executeMethod("PUT", reqData)
defer closeResponse(resp) defer closeResponse(resp)

View File

@ -30,6 +30,12 @@ import (
"github.com/minio/minio-go/v6/pkg/s3utils" "github.com/minio/minio-go/v6/pkg/s3utils"
) )
const (
// AdminAPIVersion - admin api version used in the request.
AdminAPIVersion = "v2"
adminAPIPrefix = "/" + AdminAPIVersion
)
// sum256 calculate sha256 sum for an input byte array. // sum256 calculate sha256 sum for an input byte array.
func sum256(data []byte) []byte { func sum256(data []byte) []byte {
hash := sha256.New() hash := sha256.New()