Mirror of https://github.com/minio/minio.git (synced 2025-01-11 23:13:23 -05:00)
fix: re-use endpoint strings to avoid allocation during audit (#19116)
Commit: f965434022
Parent: a3ac62596c
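In short: the erasureSets pool keeps a flattened slice of pre-formatted endpoint strings (s.endpointStrings, populated up front when the pool is created). This commit exposes that cache through a new GetEndpointStrings accessor, stops rewriting entries as disks come online in newErasureSets, and switches auditObjectErasureSet to hand out copies of the cache instead of calling endpoint.String() for every audited operation. A minimal standalone sketch of the caching pattern, with stand-in names rather than the actual minio types:

package main

import (
	"fmt"
	"net/url"
)

// endpoint stands in for minio's Endpoint type; String() formats
// (and allocates) a fresh string on every call.
type endpoint struct{ u url.URL }

func (e endpoint) String() string { return e.u.String() }

type sets struct {
	endpoints       []endpoint
	endpointStrings []string // formatted once at construction, reused afterwards
}

func newSets(eps []endpoint) *sets {
	s := &sets{endpoints: eps}
	s.endpointStrings = make([]string, len(eps))
	for i, ep := range eps {
		s.endpointStrings[i] = ep.String() // pay the formatting cost once
	}
	return s
}

// getEndpointStrings returns a defensive copy of the cache: one slice
// allocation, no per-endpoint formatting.
func (s *sets) getEndpointStrings() []string {
	out := make([]string, len(s.endpointStrings))
	copy(out, s.endpointStrings)
	return out
}

func main() {
	u, _ := url.Parse("http://node1:9000/data1")
	s := newSets([]endpoint{{u: *u}})
	fmt.Println(s.getEndpointStrings()) // [http://node1:9000/data1]
}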
cmd/erasure-sets.go

@@ -325,15 +325,18 @@ func (s *erasureSets) GetLockers(setIndex int) func() ([]dsync.NetLocker, string
 	}
 }
 
+func (s *erasureSets) GetEndpointStrings(setIndex int) func() []string {
+	return func() []string {
+		eps := make([]string, s.setDriveCount)
+		copy(eps, s.endpointStrings[setIndex*s.setDriveCount:setIndex*s.setDriveCount+s.setDriveCount])
+		return eps
+	}
+}
+
 func (s *erasureSets) GetEndpoints(setIndex int) func() []Endpoint {
 	return func() []Endpoint {
-		s.erasureDisksMu.RLock()
-		defer s.erasureDisksMu.RUnlock()
-
 		eps := make([]Endpoint, s.setDriveCount)
-		for i := 0; i < s.setDriveCount; i++ {
-			eps[i] = s.endpoints.Endpoints[setIndex*s.setDriveCount+i]
-		}
+		copy(eps, s.endpoints.Endpoints[setIndex*s.setDriveCount:setIndex*s.setDriveCount+s.setDriveCount])
 		return eps
 	}
 }
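The hunk above also simplifies GetEndpoints: the read lock is dropped (presumably because the endpoints slice does not change after startup, so copying it needs no synchronization) and the index loop becomes a single copy. To see why serving cached strings matters on the audit path, a rough micro-benchmark sketch — stand-in types in a hypothetical _test.go file, not code from the repository:

package erasurebench

import (
	"net/url"
	"testing"
)

// endpoint stands in for minio's Endpoint; String() formats
// (and allocates) a fresh string on every call.
type endpoint struct{ u url.URL }

func (e endpoint) String() string { return e.u.String() }

var (
	eps    = makeEndpoints(16) // one set's worth of drives
	cached = format(eps)       // what s.endpointStrings caches
)

func makeEndpoints(n int) []endpoint {
	out := make([]endpoint, n)
	for i := range out {
		u, _ := url.Parse("http://node1:9000/data")
		out[i] = endpoint{u: *u}
	}
	return out
}

func format(eps []endpoint) []string {
	out := make([]string, len(eps))
	for i, ep := range eps {
		out[i] = ep.String()
	}
	return out
}

// Old audit path: len(eps) String() calls per audited operation.
func BenchmarkFormatPerCall(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = format(eps)
	}
}

// New audit path: a single slice copy per audited operation.
func BenchmarkCopyCached(b *testing.B) {
	for i := 0; i < b.N; i++ {
		out := make([]string, len(cached))
		copy(out, cached)
		_ = out
	}
}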
@@ -463,7 +466,6 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
 				return
 			}
 			disk.SetDiskLoc(s.poolIndex, m, n)
-			s.endpointStrings[m*setDriveCount+n] = disk.String()
 			s.erasureDisks[m][n] = disk
 		}(disk, i, j)
 	}
@@ -478,6 +480,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
 			getDisks:           s.GetDisks(i),
 			getLockers:         s.GetLockers(i),
 			getEndpoints:       s.GetEndpoints(i),
+			getEndpointStrings: s.GetEndpointStrings(i),
 			nsMutex:            mutex,
 		}
 	}(i)
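The new accessor is wired in the same way as getDisks, getLockers, and getEndpoints: erasureSets exposes a factory returning a closure bound to one set's window of the flattened slice, and each erasureObjects stores that closure as a field (the field itself appears in the cmd/erasure.go hunk below). A toy version of this closure-injection wiring, with hypothetical names:

package main

import "fmt"

type sets struct {
	driveCount      int
	endpointStrings []string // all sets' drives, flattened
}

// getStrings returns a closure bound to the window for one set.
func (s *sets) getStrings(setIndex int) func() []string {
	return func() []string {
		out := make([]string, s.driveCount)
		copy(out, s.endpointStrings[setIndex*s.driveCount:(setIndex+1)*s.driveCount])
		return out
	}
}

// object holds the closure instead of a back-pointer to sets.
type object struct {
	getEndpointStrings func() []string
}

func main() {
	s := &sets{
		driveCount:      2,
		endpointStrings: []string{"d0", "d1", "d2", "d3"},
	}
	objs := make([]*object, 2)
	for i := range objs {
		objs[i] = &object{getEndpointStrings: s.getStrings(i)}
	}
	fmt.Println(objs[1].getEndpointStrings()) // [d2 d3]
}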
@@ -571,18 +574,11 @@ func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjec
 		return
 	}
 
-	object = decodeDirObject(object)
-	endpoints := set.getEndpoints()
-	disksEndpoints := make([]string, 0, len(endpoints))
-	for _, endpoint := range endpoints {
-		disksEndpoints = append(disksEndpoints, endpoint.String())
-	}
-
 	op := auditObjectOp{
-		Name:  object,
+		Name:  decodeDirObject(object),
 		Pool:  set.poolIndex + 1,
 		Set:   set.setIndex + 1,
-		Disks: disksEndpoints,
+		Disks: set.getEndpointStrings(),
 	}
 
 	logger.GetReqInfo(ctx).AppendTags("objectLocation", op)
cmd/erasure.go

@@ -58,10 +58,14 @@ type erasureObjects struct {
 	// getLockers returns list of remote and local lockers.
 	getLockers func() ([]dsync.NetLocker, string)
 
-	// getEndpoints returns list of endpoint strings belonging this set.
+	// getEndpoints returns list of endpoint belonging this set.
 	// some may be local and some remote.
 	getEndpoints func() []Endpoint
 
+	// getEndpoints returns list of endpoint strings belonging this set.
+	// some may be local and some remote.
+	getEndpointStrings func() []string
+
 	// Locker mutex map.
 	nsMutex *nsLockMap
 }