Reuse madmin.ClusterRegistrationInfo (#15654)

The `clusterInfo` struct in admin-handlers is the same as
`madmin.ClusterRegistrationInfo`, except for small differences in field
names.

Removing it and using `madmin.ClusterRegistrationInfo` in its place helps
in the following ways:

- The JSON payload generated by mc during cluster registration will be
  consistent (same keys) with the cluster.info that minio generates as
  part of the profile and inspect zips
- health-analyzer can parse cluster.info using the same struct and
  won't have to define its own (see the sketch after this list)
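
For reference, the shared struct looks roughly like the sketch below. The field names come straight from the diff in this commit; the JSON tags and the named nested `ClusterInfo` type are assumptions, inferred from the snake_case tags of the removed local struct rather than copied from madmin-go:

```go
// Approximate shape of madmin.ClusterRegistrationInfo. Field names are
// taken from the diff in this commit; the JSON tags and the nested type
// name are assumptions based on the removed local struct's conventions.
type ClusterRegistrationInfo struct {
	DeploymentID string      `json:"deployment_id"`
	ClusterName  string      `json:"cluster_name"`
	UsedCapacity uint64      `json:"used_capacity"`
	Info         ClusterInfo `json:"info"`
}

type ClusterInfo struct {
	MinioVersion    string `json:"minio_version"`
	NoOfServerPools int    `json:"no_of_server_pools"`
	NoOfServers     int    `json:"no_of_servers"`
	NoOfDrives      int    `json:"no_of_drives"`
	NoOfBuckets     uint64 `json:"no_of_buckets"`
	NoOfObjects     uint64 `json:"no_of_objects"`
	TotalDriveSpace uint64 `json:"total_drive_space"`
	UsedDriveSpace  uint64 `json:"used_drive_space"`
}
```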
Shireesh Anjal 2022-09-05 22:32:25 +05:30 committed by GitHub
parent 157272dc5b
commit c240da6568
1 changed file with 8 additions and 24 deletions

cmd/admin-handlers.go

@@ -2683,22 +2683,6 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
 	return nil
 }
 
-type clusterInfo struct {
-	DeploymentID string `json:"deployment_id"`
-	ClusterName  string `json:"cluster_name"`
-	UsedCapacity uint64 `json:"used_capacity"`
-	Info         struct {
-		MinioVersion    string `json:"minio_version"`
-		PoolsCount      int    `json:"pools_count"`
-		ServersCount    int    `json:"servers_count"`
-		DrivesCount     int    `json:"drives_count"`
-		BucketsCount    uint64 `json:"buckets_count"`
-		ObjectsCount    uint64 `json:"objects_count"`
-		TotalDriveSpace uint64 `json:"total_drive_space"`
-		UsedDriveSpace  uint64 `json:"used_drive_space"`
-	} `json:"info"`
-}
-
 func embedFileInZip(zipWriter *zip.Writer, name string, data []byte) error {
 	// Send profiling data to zip as file
 	header, zerr := zip.FileInfoHeader(dummyFileInfo{
@@ -2735,16 +2719,16 @@ func appendClusterMetaInfoToZip(ctx context.Context, zipWriter *zip.Writer) {
 	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
 	defer cancel()
 
-	resultCh := make(chan clusterInfo)
+	resultCh := make(chan madmin.ClusterRegistrationInfo)
 	go func() {
-		ci := clusterInfo{}
-		ci.Info.PoolsCount = len(globalEndpoints)
-		ci.Info.ServersCount = len(globalEndpoints.Hostnames())
+		ci := madmin.ClusterRegistrationInfo{}
+		ci.Info.NoOfServerPools = len(globalEndpoints)
+		ci.Info.NoOfServers = len(globalEndpoints.Hostnames())
 		ci.Info.MinioVersion = Version
 
 		si, _ := objectAPI.StorageInfo(ctx)
-		ci.Info.DrivesCount = len(si.Disks)
+		ci.Info.NoOfDrives = len(si.Disks)
 		for _, disk := range si.Disks {
 			ci.Info.TotalDriveSpace += disk.TotalSpace
 			ci.Info.UsedDriveSpace += disk.UsedSpace
@@ -2753,11 +2737,11 @@ func appendClusterMetaInfoToZip(ctx context.Context, zipWriter *zip.Writer) {
 		dataUsageInfo, _ := loadDataUsageFromBackend(ctx, objectAPI)
 		ci.UsedCapacity = dataUsageInfo.ObjectsTotalSize
-		ci.Info.BucketsCount = dataUsageInfo.BucketsCount
-		ci.Info.ObjectsCount = dataUsageInfo.ObjectsTotalCount
+		ci.Info.NoOfBuckets = dataUsageInfo.BucketsCount
+		ci.Info.NoOfObjects = dataUsageInfo.ObjectsTotalCount
 		ci.DeploymentID = globalDeploymentID
-		ci.ClusterName = fmt.Sprintf("%d-servers-%d-disks-%s", ci.Info.ServersCount, ci.Info.DrivesCount, ci.Info.MinioVersion)
+		ci.ClusterName = fmt.Sprintf("%d-servers-%d-disks-%s", ci.Info.NoOfServers, ci.Info.NoOfDrives, ci.Info.MinioVersion)
 
 		resultCh <- ci
 	}()
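
With the shared type in place, a consumer such as health-analyzer can decode the cluster.info entry extracted from a profile or inspect zip directly into madmin.ClusterRegistrationInfo instead of defining a private copy. A minimal sketch, assuming the entry has already been extracted to a local file (the file name and surrounding plumbing are illustrative, not taken from health-analyzer):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/minio/madmin-go"
)

func main() {
	// cluster.info as extracted from the profile/inspect zip.
	data, err := os.ReadFile("cluster.info")
	if err != nil {
		log.Fatal(err)
	}

	// Decode into the same struct that minio serialized server-side,
	// so the JSON keys are guaranteed to line up.
	var ci madmin.ClusterRegistrationInfo
	if err := json.Unmarshal(data, &ci); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("deployment %s: %d servers, %d drives, %d buckets\n",
		ci.DeploymentID, ci.Info.NoOfServers, ci.Info.NoOfDrives, ci.Info.NoOfBuckets)
}
```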