Mirror of https://github.com/minio/minio.git, synced 2025-11-07 21:02:58 -05:00
fix: do not pass around sync.Map (#15250)
It is not safe to pass around a sync.Map through pointers, as it may be
concurrently updated by different callers.

This PR simplifies the code by avoiding sync.Map altogether; we do not need
a sync.Map to keep the object->erasureMap association.

This PR fixes a crash seen when this value is used concurrently and audit
logs are configured:
```
fatal error: concurrent map iteration and map write
goroutine 247651580 [running]:
runtime.throw({0x277a6c1?, 0xc002381400?})
runtime/panic.go:992 +0x71 fp=0xc004d29b20 sp=0xc004d29af0 pc=0x438671
runtime.mapiternext(0xc0d6e87f18?)
runtime/map.go:871 +0x4eb fp=0xc004d29b90 sp=0xc004d29b20 pc=0x41002b
```
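For context, a minimal sketch of the class of bug behind this fatal error (illustration only, not the MinIO code path): one goroutine iterates a plain Go map while another keeps writing to it, and the runtime aborts the process.

```go
package main

// Illustration only: the Go runtime treats concurrent iteration and
// writing on an ordinary map as a fatal, unrecoverable error. Running
// this usually aborts within a few iterations with
// "fatal error: concurrent map iteration and map write".
func main() {
	m := map[string]int{"a": 0}

	// Writer goroutine keeps mutating the map.
	go func() {
		for i := 0; ; i++ {
			m[string(rune('a'+i%26))] = i
		}
	}()

	// Main goroutine keeps iterating the same map without synchronization.
	for {
		for range m {
		}
	}
}
```

This is why the change below drops the shared map-backed tag value and appends a plain auditObjectOp per object instead.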
```diff
@@ -20,7 +20,6 @@ package cmd
 import (
 	"context"
 	"encoding/binary"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"hash/crc32"
@@ -549,29 +548,13 @@ func (s *erasureSets) cleanupStaleUploads(ctx context.Context) {
 	}
 }
 
-const objectErasureMapKey = "objectErasureMap"
-
 type auditObjectOp struct {
 	Name  string   `json:"name"`
 	Pool  int      `json:"poolId"`
 	Set   int      `json:"setId"`
 	Disks []string `json:"disks"`
 }
 
-type auditObjectErasureMap struct {
-	sync.Map
-}
-
-// Define how to marshal auditObjectErasureMap so it can be
-// printed in the audit webhook notification request.
-func (a *auditObjectErasureMap) MarshalJSON() ([]byte, error) {
-	mapCopy := make(map[string]auditObjectOp)
-	a.Range(func(k, v interface{}) bool {
-		mapCopy[k.(string)] = v.(auditObjectOp)
-		return true
-	})
-	return json.Marshal(mapCopy)
-}
-
 // Add erasure set information to the current context
 func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjects) {
 	if len(logger.AuditTargets()) == 0 {
@@ -579,33 +562,20 @@ func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjects) {
 	}
 
 	object = decodeDirObject(object)
 
-	var disksEndpoints []string
-	for _, endpoint := range set.getEndpoints() {
+	endpoints := set.getEndpoints()
+	disksEndpoints := make([]string, 0, len(endpoints))
+	for _, endpoint := range endpoints {
 		disksEndpoints = append(disksEndpoints, endpoint.String())
 	}
 
 	op := auditObjectOp{
 		Name:  object,
 		Pool:  set.poolIndex + 1,
 		Set:   set.setIndex + 1,
 		Disks: disksEndpoints,
 	}
 
-	var objectErasureSetTag *auditObjectErasureMap
-	reqInfo := logger.GetReqInfo(ctx)
-	for _, kv := range reqInfo.GetTags() {
-		if kv.Key == objectErasureMapKey {
-			objectErasureSetTag = kv.Val.(*auditObjectErasureMap)
-			break
-		}
-	}
-
-	if objectErasureSetTag == nil {
-		objectErasureSetTag = &auditObjectErasureMap{}
-	}
-
-	objectErasureSetTag.Store(object, op)
-	reqInfo.SetTags(objectErasureMapKey, objectErasureSetTag)
+	logger.GetReqInfo(ctx).AppendTags("objectLocation", op)
 }
 
 // NewNSLock - initialize a new namespace RWLocker instance.
```