Add cluster info to inspect/profiling archive (#15360)

Add cluster info to the inspect and profiling archives.

In addition to the data already generated for both inspect and profiling,
a cluster.info file is now added to the archive. It contains some basic
information about the cluster. cluster.info is generated as the last step,
and the step is allowed to fail if it takes longer than ten seconds.
Author: Anis Elleuch, 2022-07-25 17:11:35 +01:00, committed by GitHub
parent e465c3587b
commit f23f442d33
2 changed files with 106 additions and 50 deletions
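
Since cluster.info is just another entry in the archive, a consumer can read it back with nothing but the standard library. The sketch below is illustrative only: it assumes a profiling archive has already been downloaded to a local profile.zip, the readClusterInfo helper is hypothetical, and the struct mirrors only a few of the JSON field names that appear in the diff below.

package main

import (
	"archive/zip"
	"encoding/json"
	"fmt"
	"log"
)

// clusterInfo mirrors a subset of the fields written to cluster.info
// (field names taken from the diff; this trimmed-down struct is illustrative).
type clusterInfo struct {
	DeploymentID string `json:"deployement_id"`
	ClusterName  string `json:"cluster_name"`
	UsedCapacity uint64 `json:"used_capacity"`
}

// readClusterInfo is a hypothetical helper that opens a downloaded archive
// and decodes the embedded cluster.info entry, if present.
func readClusterInfo(archivePath string) (*clusterInfo, error) {
	zr, err := zip.OpenReader(archivePath)
	if err != nil {
		return nil, err
	}
	defer zr.Close()

	for _, f := range zr.File {
		if f.Name != "cluster.info" {
			continue
		}
		rc, err := f.Open()
		if err != nil {
			return nil, err
		}
		defer rc.Close()

		var ci clusterInfo
		if err := json.NewDecoder(rc).Decode(&ci); err != nil {
			return nil, err
		}
		return &ci, nil
	}
	return nil, fmt.Errorf("cluster.info not found in %s", archivePath)
}

func main() {
	// "profile.zip" is a placeholder path for a previously downloaded archive.
	ci, err := readClusterInfo("profile.zip")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("deployment %s: %s\n", ci.DeploymentID, ci.ClusterName)
}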

cmd/admin-handlers.go

@@ -2636,6 +2636,100 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
	return nil
}

type clusterInfo struct {
	DeploymentID string `json:"deployement_id"`
	ClusterName  string `json:"cluster_name"`
	UsedCapacity uint64 `json:"used_capacity"`
	Info         struct {
		MinioVersion    string `json:"minio_version"`
		PoolsCount      int    `json:"pools_count"`
		ServersCount    int    `json:"servers_count"`
		DrivesCount     int    `json:"drives_count"`
		BucketsCount    uint64 `json:"buckets_count"`
		ObjectsCount    uint64 `json:"objects_count"`
		TotalDriveSpace uint64 `json:"total_drive_space"`
		UsedDriveSpace  uint64 `json:"used_drive_space"`
	} `json:"info"`
}

func embedFileInZip(zipWriter *zip.Writer, name string, data []byte) error {
	// Send profiling data to zip as file
	header, zerr := zip.FileInfoHeader(dummyFileInfo{
		name:    name,
		size:    int64(len(data)),
		mode:    0o600,
		modTime: UTCNow(),
		isDir:   false,
		sys:     nil,
	})
	if zerr != nil {
		return zerr
	}
	header.Method = zip.Deflate
	zwriter, zerr := zipWriter.CreateHeader(header)
	if zerr != nil {
		return zerr
	}
	_, err := io.Copy(zwriter, bytes.NewReader(data))
	return err
}

// appendClusterMetaInfoToZip gathers information about the current cluster and
// embeds it in the passed zip writer. This is not a critical step and it is
// allowed to fail after a ten second timeout.
func appendClusterMetaInfoToZip(ctx context.Context, zipWriter *zip.Writer) {
	objectAPI := newObjectLayerFn()
	if objectAPI == nil {
		return
	}
	// Add a ten second timeout because getting profiling data
	// is critical for debugging, unlike getting cluster info.
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	resultCh := make(chan clusterInfo)
	go func() {
		ci := clusterInfo{}
		ci.Info.PoolsCount = len(globalEndpoints)
		ci.Info.ServersCount = globalEndpoints.NEndpoints()
		ci.Info.MinioVersion = Version
		si, _ := objectAPI.StorageInfo(ctx)
		ci.Info.DrivesCount = len(si.Disks)
		for _, disk := range si.Disks {
			ci.Info.TotalDriveSpace += disk.TotalSpace
			ci.Info.UsedDriveSpace += disk.UsedSpace
		}
		dataUsageInfo, _ := loadDataUsageFromBackend(ctx, objectAPI)
		ci.UsedCapacity = dataUsageInfo.ObjectsTotalSize
		ci.Info.BucketsCount = dataUsageInfo.BucketsCount
		ci.Info.ObjectsCount = dataUsageInfo.ObjectsTotalCount
		ci.DeploymentID = globalDeploymentID
		ci.ClusterName = fmt.Sprintf("%d-servers-%d-disks-%s", ci.Info.ServersCount, ci.Info.DrivesCount, ci.Info.MinioVersion)
		resultCh <- ci
	}()
	select {
	case <-ctx.Done():
		return
	case ci := <-resultCh:
		out, err := json.MarshalIndent(ci, "", " ")
		if err != nil {
			logger.LogIf(ctx, err)
			return
		}
		err = embedFileInZip(zipWriter, "cluster.info", out)
		if err != nil {
			logger.LogIf(ctx, err)
		}
	}
}

// getRawDataer provides an interface for getting raw FS files.
type getRawDataer interface {
	GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, info StatInfo) error) error

@@ -2655,7 +2749,8 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
	}
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

-	o, ok := newObjectLayerFn().(getRawDataer)
+	objLayer := newObjectLayerFn()
+	o, ok := objLayer.(getRawDataer)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return

@@ -2778,6 +2873,8 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
	}); err != nil {
		logger.LogIf(ctx, err)
	}
+
+	appendClusterMetaInfoToZip(ctx, zipWriter)
}

func createHostAnonymizerForFSMode() map[string]string {
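
As a quick sanity check on the extracted helper, a test-style sketch like the one below (hypothetical, assumed to sit next to embedFileInZip in the same package so that dummyFileInfo and UTCNow are available, with archive/zip, bytes, io and testing imported) shows that an embedded entry can be read back intact:

func TestEmbedFileInZip(t *testing.T) {
	var buf bytes.Buffer
	zipWriter := zip.NewWriter(&buf)

	// Embed a small payload under the same name the handlers use.
	payload := []byte(`{"cluster_name":"test"}`)
	if err := embedFileInZip(zipWriter, "cluster.info", payload); err != nil {
		t.Fatal(err)
	}
	if err := zipWriter.Close(); err != nil {
		t.Fatal(err)
	}

	// Read the archive back and verify the entry round-trips.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	if len(zr.File) != 1 || zr.File[0].Name != "cluster.info" {
		t.Fatalf("unexpected archive contents: %v", zr.File)
	}
	rc, err := zr.File[0].Open()
	if err != nil {
		t.Fatal(err)
	}
	defer rc.Close()
	got, err := io.ReadAll(rc)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(got, payload) {
		t.Fatalf("payload mismatch: %q", got)
	}
}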

cmd/notification.go

@@ -297,9 +297,7 @@ func (sys *NotificationSys) StartProfiling(profiler string) []NotificationPeerEr
}

// DownloadProfilingData - download profiling data from all remote peers.
-func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io.Writer) bool {
-	profilingDataFound := false
+func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io.Writer) (profilingDataFound bool) {
	// Initialize a zip writer which will provide a zipped content
	// of profiling data of all nodes
	zipWriter := zip.NewWriter(writer)

@@ -320,34 +318,11 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
		profilingDataFound = true

		for typ, data := range data {
			// Send profiling data to zip as file
-			header, zerr := zip.FileInfoHeader(dummyFileInfo{
-				name:    fmt.Sprintf("profile-%s-%s", client.host.String(), typ),
-				size:    int64(len(data)),
-				mode:    0o600,
-				modTime: UTCNow(),
-				isDir:   false,
-				sys:     nil,
-			})
-			if zerr != nil {
-				reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String())
-				ctx := logger.SetReqInfo(ctx, reqInfo)
-				logger.LogIf(ctx, zerr)
-				continue
-			}
-			header.Method = zip.Deflate
-			zwriter, zerr := zipWriter.CreateHeader(header)
-			if zerr != nil {
-				reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String())
-				ctx := logger.SetReqInfo(ctx, reqInfo)
-				logger.LogIf(ctx, zerr)
-				continue
-			}
-			if _, err = io.Copy(zwriter, bytes.NewReader(data)); err != nil {
+			err := embedFileInZip(zipWriter, fmt.Sprintf("profile-%s-%s", client.host.String(), typ), data)
+			if err != nil {
				reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String())
				ctx := logger.SetReqInfo(ctx, reqInfo)
				logger.LogIf(ctx, err)
				continue
			}
		}
	}

@@ -371,30 +346,14 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
	// Send profiling data to zip as file
	for typ, data := range data {
-		header, zerr := zip.FileInfoHeader(dummyFileInfo{
-			name:    fmt.Sprintf("profile-%s-%s", thisAddr, typ),
-			size:    int64(len(data)),
-			mode:    0o600,
-			modTime: UTCNow(),
-			isDir:   false,
-			sys:     nil,
-		})
-		if zerr != nil {
-			return profilingDataFound
-		}
-		header.Method = zip.Deflate
-		zwriter, zerr := zipWriter.CreateHeader(header)
-		if zerr != nil {
-			return profilingDataFound
-		}
-		if _, err = io.Copy(zwriter, bytes.NewReader(data)); err != nil {
-			return profilingDataFound
+		err := embedFileInZip(zipWriter, fmt.Sprintf("profile-%s-%s", thisAddr, typ), data)
+		if err != nil {
+			logger.LogIf(ctx, err)
		}
	}

-	return profilingDataFound
+	appendClusterMetaInfoToZip(ctx, zipWriter)
+	return
}
// ServerUpdate - updates remote peers.
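
The ten second budget in appendClusterMetaInfoToZip follows a common Go pattern: run the slow collection in a goroutine and select on the context so the caller never waits past the deadline. Below is a standalone sketch of that pattern; collectInfo, collectWithBudget and the info type are hypothetical stand-ins, and the result channel is buffered so the worker can still finish and exit even if the deadline fires first.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// info is a stand-in for whatever is being collected (e.g. cluster metadata).
type info struct {
	Servers int
}

// collectInfo is a hypothetical slow collection step.
func collectInfo(ctx context.Context) info {
	time.Sleep(2 * time.Second) // simulate slow backend calls
	return info{Servers: 4}
}

// collectWithBudget runs collectInfo but gives up after the timeout,
// mirroring the best-effort behaviour of appendClusterMetaInfoToZip.
func collectWithBudget(ctx context.Context, timeout time.Duration) (info, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	// Buffered so the worker can send its result and exit even if the
	// caller has already given up on the deadline.
	resultCh := make(chan info, 1)
	go func() {
		resultCh <- collectInfo(ctx)
	}()

	select {
	case <-ctx.Done():
		return info{}, errors.New("collection skipped: deadline exceeded")
	case ci := <-resultCh:
		return ci, nil
	}
}

func main() {
	ci, err := collectWithBudget(context.Background(), 10*time.Second)
	fmt.Println(ci, err)
}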