Mirror of https://github.com/minio/minio.git
fix: re-use endpoint strings to avoid allocation during audit (#19116)
parent a3ac62596c
commit f965434022
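At a high level, the change below adds a per-set GetEndpointStrings closure that hands out copies of endpoint strings computed once, and has the audit path consume it instead of rebuilding a []string from endpoint.String() on every audited operation. A minimal standalone sketch of that precompute-once, copy-per-call pattern; the endpointSet type and EndpointStrings method are illustrative stand-ins, not MinIO's actual API:

package main

import "fmt"

// endpointSet is an illustrative stand-in for one erasure set.
type endpointSet struct {
	// endpointStrings is filled once, when the set is constructed.
	endpointStrings []string
}

// EndpointStrings hands out a copy of the precomputed slice; the hot
// audit path pays only for the copy, never for re-formatting endpoints.
func (s *endpointSet) EndpointStrings() []string {
	out := make([]string, len(s.endpointStrings))
	copy(out, s.endpointStrings)
	return out
}

func main() {
	set := &endpointSet{endpointStrings: []string{
		"http://node1:9000/data1",
		"http://node2:9000/data1",
	}}
	fmt.Println(set.EndpointStrings())
}

The only per-call cost left in such a scheme is the slice copy; the string formatting happens once up front, which is what the diff below arranges for the audit logger.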
@@ -325,15 +325,18 @@ func (s *erasureSets) GetLockers(setIndex int) func() ([]dsync.NetLocker, string
 	}
 }
 
+func (s *erasureSets) GetEndpointStrings(setIndex int) func() []string {
+	return func() []string {
+		eps := make([]string, s.setDriveCount)
+		copy(eps, s.endpointStrings[setIndex*s.setDriveCount:setIndex*s.setDriveCount+s.setDriveCount])
+		return eps
+	}
+}
+
 func (s *erasureSets) GetEndpoints(setIndex int) func() []Endpoint {
 	return func() []Endpoint {
-		s.erasureDisksMu.RLock()
-		defer s.erasureDisksMu.RUnlock()
-
 		eps := make([]Endpoint, s.setDriveCount)
-		for i := 0; i < s.setDriveCount; i++ {
-			eps[i] = s.endpoints.Endpoints[setIndex*s.setDriveCount+i]
-		}
+		copy(eps, s.endpoints.Endpoints[setIndex*s.setDriveCount:setIndex*s.setDriveCount+s.setDriveCount])
 		return eps
 	}
 }
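Both closures above treat a flat, pool-wide slice as a sequence of per-set windows of setDriveCount entries and return a copy of the relevant window, so callers cannot mutate the shared backing array. A small standalone sketch of that windowing arithmetic; chunkFor and the drive names are made up for illustration:

package main

import "fmt"

// chunkFor copies set setIndex's window out of a flat, pool-wide slice,
// mirroring the setIndex*setDriveCount : setIndex*setDriveCount+setDriveCount
// slicing used by GetEndpointStrings and GetEndpoints above.
func chunkFor(flat []string, setIndex, setDriveCount int) []string {
	out := make([]string, setDriveCount)
	copy(out, flat[setIndex*setDriveCount:setIndex*setDriveCount+setDriveCount])
	return out
}

func main() {
	// Two sets of three drives each, laid out back to back.
	flat := []string{"s0d0", "s0d1", "s0d2", "s1d0", "s1d1", "s1d2"}
	fmt.Println(chunkFor(flat, 0, 3)) // [s0d0 s0d1 s0d2]
	fmt.Println(chunkFor(flat, 1, 3)) // [s1d0 s1d1 s1d2]
}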
@@ -463,7 +466,6 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
 						return
 					}
 					disk.SetDiskLoc(s.poolIndex, m, n)
-					s.endpointStrings[m*setDriveCount+n] = disk.String()
 					s.erasureDisks[m][n] = disk
 				}(disk, i, j)
 			}
@@ -478,6 +480,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
 				getDisks:           s.GetDisks(i),
 				getLockers:         s.GetLockers(i),
 				getEndpoints:       s.GetEndpoints(i),
+				getEndpointStrings: s.GetEndpointStrings(i),
 				nsMutex:            mutex,
 			}
 		}(i)
@@ -571,18 +574,11 @@ func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjec
 		return
 	}
 
-	object = decodeDirObject(object)
-	endpoints := set.getEndpoints()
-	disksEndpoints := make([]string, 0, len(endpoints))
-	for _, endpoint := range endpoints {
-		disksEndpoints = append(disksEndpoints, endpoint.String())
-	}
-
 	op := auditObjectOp{
-		Name:  object,
+		Name:  decodeDirObject(object),
 		Pool:  set.poolIndex + 1,
 		Set:   set.setIndex + 1,
-		Disks: disksEndpoints,
+		Disks: set.getEndpointStrings(),
 	}
 
 	logger.GetReqInfo(ctx).AppendTags("objectLocation", op)
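This hunk is where the allocation named in the commit title was paid: the removed loop called endpoint.String() for every drive on every audited operation, while the new code takes a copy of strings formatted once per set. A hedged sketch of how that difference could be measured with a Go benchmark; stringer, auditOld and auditNew are illustrative stand-ins, not MinIO code:

// Save as a *_test.go file in any package (adjust the package name to match)
// and run: go test -bench=.
package audit

import (
	"strconv"
	"testing"
)

// stringer mimics an endpoint whose String() formats text on every call.
type stringer struct {
	host  string
	drive int
}

func (s stringer) String() string {
	return "http://" + s.host + "/drive" + strconv.Itoa(s.drive)
}

// auditOld rebuilds the string slice per call, like the removed loop.
func auditOld(eps []stringer) []string {
	out := make([]string, 0, len(eps))
	for _, e := range eps {
		out = append(out, e.String())
	}
	return out
}

// auditNew copies a slice that was formatted once, like getEndpointStrings.
func auditNew(precomputed []string) []string {
	out := make([]string, len(precomputed))
	copy(out, precomputed)
	return out
}

func BenchmarkAudit(b *testing.B) {
	eps := make([]stringer, 16)
	for i := range eps {
		eps[i] = stringer{host: "node" + strconv.Itoa(i), drive: i}
	}
	precomputed := auditOld(eps) // pay the formatting cost once, up front

	b.Run("per-call-String", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = auditOld(eps)
		}
	})
	b.Run("precomputed-copy", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = auditNew(precomputed)
		}
	})
}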
@@ -58,10 +58,14 @@ type erasureObjects struct {
 	// getLockers returns list of remote and local lockers.
 	getLockers func() ([]dsync.NetLocker, string)
 
-	// getEndpoints returns list of endpoint strings belonging this set.
+	// getEndpoints returns list of endpoint belonging this set.
 	// some may be local and some remote.
 	getEndpoints func() []Endpoint
 
+	// getEndpoints returns list of endpoint strings belonging this set.
+	// some may be local and some remote.
+	getEndpointStrings func() []string
+
 	// Locker mutex map.
 	nsMutex *nsLockMap
 }