Mirror of https://github.com/minio/minio.git
Change replication to use read lock instead of write lock (#12581)

Fixes #12573. This PR also adds audit logging for replication activity.

parent ca79869078
commit a69c2a2fb3
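In outline: GetObjectNInfo is now called with readLock instead of writeLock, the reader is closed explicitly before the source object's metadata is updated (so the lock is not held across that update), with a guarded deferred close as a fallback, and every queue/replicate/delete step emits an internal audit entry. A condensed sketch of the new flow, assembled from the hunks below (error handling, popts construction, and the actual replication transfer elided):

    // Condensed sketch of the flow this commit gives replicateObject;
    // all names come from the hunks below.
    var closeOnDefer bool
    gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
        VersionID: objInfo.VersionID,
    })
    if err != nil {
        return
    }
    defer func() {
        if closeOnDefer {
            gr.Close() // fallback close for early-return paths
        }
    }()
    closeOnDefer = true

    // ... replicate the object to the remote target ...

    gr.Close() // release the read lock before touching source metadata
    closeOnDefer = false

    // Update the source object's replication status through the object
    // layer (popts is built in the hunks below).
    if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
        logger.LogIf(ctx, err)
    }

Replication itself only reads the source object, so a read lock suffices; the status update afterwards goes through the higher-level PutObjectMetadata call instead of the previous pool-internal updateObjectMeta path, and audit entries (auditLogInternal, defined in cmd/utils.go at the end of this diff) record each queue and replicate event.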
cmd/bucket-replication.go

@@ -605,6 +605,12 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationAction {
 // replicateObject replicates the specified version of the object to destination bucket
 // The source object is then updated to reflect the replication status.
 func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer) {
+	auditLogInternal(context.Background(), ri.Bucket, ri.Name, AuditLogOptions{
+		Trigger:   ReplicationIncomingActivity,
+		APIName:   "s3:ReplicateObject",
+		VersionID: ri.VersionID,
+		Status:    ri.ReplicationStatus.String(),
+	})
 	objInfo := ri.ObjectInfo
 	bucket := objInfo.Bucket
 	object := objInfo.Name
@@ -631,7 +637,8 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer) {
 		})
 		return
 	}
-	gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, writeLock, ObjectOptions{
+	var closeOnDefer bool
+	gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
 		VersionID: objInfo.VersionID,
 	})
 	if err != nil {
@@ -644,8 +651,12 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer) {
 		logger.LogIf(ctx, fmt.Errorf("Unable to update replicate for %s/%s(%s): %w", bucket, object, objInfo.VersionID, err))
 		return
 	}
-	defer gr.Close() // hold write lock for entire transaction
+	defer func() {
+		if closeOnDefer {
+			gr.Close()
+		}
+	}()
+	closeOnDefer = true

 	objInfo = gr.ObjInfo
 	size, err := objInfo.GetActualSize()
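The guarded defer introduced here lets replicateObject release the read lock early (the explicit gr.Close() in later hunks) while still guaranteeing the reader is closed on every early-return path. A self-contained sketch of the idiom; process and fail are hypothetical names, not from the commit:

    package main

    import "io"

    // process sketches the guarded-close idiom: close the resource as soon
    // as it is no longer needed, and let the deferred close fire only if
    // the explicit close was never reached (e.g. on an early return).
    func process(r io.ReadCloser, fail bool) error {
        closeOnDefer := true
        defer func() {
            if closeOnDefer {
                r.Close() // safety net for early-return paths
            }
        }()

        if fail {
            return io.ErrUnexpectedEOF // deferred close fires here
        }

        r.Close() // release the resource (and the lock it holds) early
        closeOnDefer = false

        // ... continue without holding the resource ...
        return nil
    }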
@@ -686,28 +697,29 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer) {
 	if objInfo.ReplicationStatus == replication.Pending || objInfo.ReplicationStatus == replication.Failed {
 		// if metadata is not updated for some reason after replication, such as 503 encountered while updating metadata - make sure
 		// to set ReplicationStatus as Completed.Note that replication Stats would have been updated despite metadata update failure.
-		z, ok := objectAPI.(*erasureServerPools)
-		if !ok {
-			return
+		gr.Close()
+		closeOnDefer = false
+		popts := ObjectOptions{
+			MTime:       objInfo.ModTime,
+			VersionID:   objInfo.VersionID,
+			UserDefined: make(map[string]string, len(objInfo.UserDefined)),
 		}
-		// This lower level implementation is necessary to avoid write locks from CopyObject.
-		poolIdx, err := z.getPoolIdx(ctx, bucket, object, objInfo.Size)
-		if err != nil {
+		for k, v := range objInfo.UserDefined {
+			popts.UserDefined[k] = v
+		}
+		popts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Completed.String()
+		if objInfo.UserTags != "" {
+			popts.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
+		}
+		if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
 			logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
 		} else {
-			fi := FileInfo{}
-			fi.VersionID = objInfo.VersionID
-			fi.Metadata = make(map[string]string, len(objInfo.UserDefined))
-			for k, v := range objInfo.UserDefined {
-				fi.Metadata[k] = v
-			}
-			fi.Metadata[xhttp.AmzBucketReplicationStatus] = replication.Completed.String()
-			if objInfo.UserTags != "" {
-				fi.Metadata[xhttp.AmzObjectTagging] = objInfo.UserTags
-			}
-			if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, fi); err != nil {
-				logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
-			}
+			auditLogInternal(context.Background(), ri.Bucket, ri.Name, AuditLogOptions{
+				Trigger:   ReplicationIncomingActivity,
+				APIName:   "s3:ReplicateObject",
+				VersionID: ri.VersionID,
+				Status:    ri.ReplicationStatus.String(),
+			})
 		}
 	}
 	return
@@ -761,6 +773,8 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer) {
 			logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
 		}
 	}
+	gr.Close()
+	closeOnDefer = false

 	prevReplStatus := objInfo.ReplicationStatus
 	objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
@@ -778,28 +792,32 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer) {
 		eventName = event.ObjectReplicationFailed
 	}

-	z, ok := objectAPI.(*erasureServerPools)
-	if !ok {
-		return
-	}
 	// Leave metadata in `PENDING` state if inline replication fails to save iops
 	if ri.OpType == replication.HealReplicationType ||
 		replicationStatus == replication.Completed {
-		// This lower level implementation is necessary to avoid write locks from CopyObject.
-		poolIdx, err := z.getPoolIdx(ctx, bucket, object, objInfo.Size)
-		if err != nil {
+		popts := ObjectOptions{
+			MTime:       objInfo.ModTime,
+			VersionID:   objInfo.VersionID,
+			UserDefined: make(map[string]string, len(objInfo.UserDefined)),
+		}
+		for k, v := range objInfo.UserDefined {
+			popts.UserDefined[k] = v
+		}
+		popts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Completed.String()
+		if objInfo.UserTags != "" {
+			popts.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
+		}
+		if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
 			logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
 		} else {
-			fi := FileInfo{}
-			fi.VersionID = objInfo.VersionID
-			fi.Metadata = make(map[string]string, len(objInfo.UserDefined))
-			for k, v := range objInfo.UserDefined {
-				fi.Metadata[k] = v
-			}
-			if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, fi); err != nil {
-				logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
-			}
+			auditLogInternal(context.Background(), objInfo.Bucket, objInfo.Name, AuditLogOptions{
+				Trigger:   ReplicationIncomingActivity,
+				APIName:   "s3:ReplicateObject",
+				VersionID: objInfo.VersionID,
+				Status:    replicationStatus.String(),
+			})
 		}
 	}

 	opType := replication.MetadataReplicationType
 	if rtype == replicateAll {
 		opType = replication.ObjectReplicationType
@@ -852,6 +870,23 @@ type DeletedObjectReplicationInfo struct {
 	ResetID string
 }

+const (
+	// ReplicationQueuedActivity - replication being queued activity trail
+	ReplicationQueuedActivity = "replication:queue"
+	// ReplicationExistingActivity - activity trail for existing objects replication
+	ReplicationExistingActivity = "replication:existing"
+	// ReplicationMRFActivity - activity trail for replication from Most Recent Failures (MRF) queue
+	ReplicationMRFActivity = "replication:mrf"
+	// ReplicationIncomingActivity - activity trail indicating replication started [could be from incoming/existing/heal activity]
+	ReplicationIncomingActivity = "replication:incoming"
+	// ReplicationHealActivity - activity trail for healing of failed/pending replications
+	ReplicationHealActivity = "replication:heal"
+	// ReplicationDeleteActivity - activity trail for delete replication
+	ReplicationDeleteActivity = "replication:delete"
+	// ReplicationExistingDeleteActivity - activity trail for delete replication triggered for existing delete markers
+	ReplicationExistingDeleteActivity = "replication:delete:existing"
+)
+
 var (
 	globalReplicationPool  *ReplicationPool
 	globalReplicationStats *ReplicationStats
@@ -1002,6 +1037,12 @@ func (p *ReplicationPool) queueReplicaFailedTask(ri ReplicateObjectInfo) {
 			close(p.existingReplicaCh)
 		})
 	case p.mrfReplicaCh <- ri:
+		auditLogInternal(context.Background(), ri.Bucket, ri.Name, AuditLogOptions{
+			Trigger:   ReplicationMRFActivity,
+			APIName:   "s3:ReplicateObject",
+			VersionID: ri.VersionID,
+			Status:    ri.ReplicationStatus.String(),
+		})
 	default:
 	}
 }
@@ -1011,9 +1052,14 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
 		return
 	}
 	var ch chan ReplicateObjectInfo
+	trigger := ReplicationQueuedActivity
 	switch ri.OpType {
 	case replication.ExistingObjectReplicationType:
 		ch = p.existingReplicaCh
+		trigger = ReplicationExistingActivity
+	case replication.HealReplicationType:
+		ch = p.replicaCh
+		trigger = ReplicationHealActivity
 	default:
 		ch = p.replicaCh
 	}
@@ -1025,6 +1071,12 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
 			close(p.existingReplicaCh)
 		})
 	case ch <- ri:
+		auditLogInternal(context.Background(), ri.Bucket, ri.Name, AuditLogOptions{
+			Trigger:   trigger,
+			APIName:   "s3:ReplicateObject",
+			VersionID: ri.VersionID,
+			Status:    string(ri.ReplicationStatus),
+		})
 	default:
 	}
 }
@@ -1033,10 +1085,15 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInfo) {
 	if p == nil {
 		return
 	}
+	trigger := ReplicationDeleteActivity
 	var ch chan DeletedObjectReplicationInfo
 	switch doi.OpType {
 	case replication.ExistingObjectReplicationType:
 		ch = p.existingReplicaDeleteCh
+		trigger = ReplicationExistingDeleteActivity
+	case replication.HealReplicationType:
+		ch = p.replicaDeleteCh
+		trigger = ReplicationHealActivity
 	default:
 		ch = p.replicaDeleteCh
 	}
@@ -1048,6 +1105,16 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInfo) {
 			close(p.existingReplicaDeleteCh)
 		})
 	case ch <- doi:
+		replStatus := doi.DeleteMarkerReplicationStatus
+		if doi.VersionPurgeStatus != "" {
+			replStatus = string(doi.VersionPurgeStatus)
+		}
+		auditLogInternal(context.Background(), doi.Bucket, doi.ObjectName, AuditLogOptions{
+			Trigger:   trigger,
+			APIName:   "s3:ReplicateDelete",
+			VersionID: doi.VersionID,
+			Status:    replStatus,
+		})
 	default:
 	}
 }
cmd/data-scanner.go

@@ -40,7 +40,6 @@ import (
 	"github.com/minio/minio/internal/event"
 	"github.com/minio/minio/internal/hash"
 	"github.com/minio/minio/internal/logger"
-	"github.com/minio/minio/internal/logger/message/audit"
 	"github.com/minio/pkg/console"
 )
@@ -1342,12 +1341,12 @@ func (d *dynamicSleeper) Update(factor float64, maxWait time.Duration) error {
 	return nil
 }

+// ILMExpiryActivity - activity trail for ILM expiry
+const ILMExpiryActivity = "ilm:expiry"
+
 func auditLogLifecycle(ctx context.Context, bucket, object string) {
-	entry := audit.NewEntry(globalDeploymentID)
-	entry.Trigger = "internal-scanner"
-	entry.API.Name = "DeleteObject"
-	entry.API.Bucket = bucket
-	entry.API.Object = object
-	ctx = logger.SetAuditEntry(ctx, &entry)
-	logger.AuditLog(ctx, nil, nil, nil)
+	auditLogInternal(ctx, bucket, object, AuditLogOptions{
+		Trigger: ILMExpiryActivity,
+		APIName: "s3:ExpireObject",
+	})
 }
cmd/utils.go (25 changed lines)
@@ -48,6 +48,7 @@ import (
 	"github.com/minio/minio/internal/handlers"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
+	"github.com/minio/minio/internal/logger/message/audit"
 	"github.com/minio/minio/internal/rest"
 	"github.com/minio/pkg/certs"
 )
@@ -940,3 +941,27 @@ func totalNodeCount() uint64 {
 	}
 	return totalNodesCount
 }
+
+// AuditLogOptions takes options for audit logging subsystem activity
+type AuditLogOptions struct {
+	Trigger   string
+	APIName   string
+	Status    string
+	VersionID string
+}
+
+// sends audit logs for internal subsystem activity
+func auditLogInternal(ctx context.Context, bucket, object string, opts AuditLogOptions) {
+	entry := audit.NewEntry(globalDeploymentID)
+	entry.Trigger = opts.Trigger
+	entry.API.Name = opts.APIName
+	entry.API.Bucket = bucket
+	entry.API.Object = object
+	if opts.VersionID != "" {
+		entry.ReqQuery = make(map[string]string)
+		entry.ReqQuery[xhttp.VersionID] = opts.VersionID
+	}
+	entry.API.Status = opts.Status
+	ctx = logger.SetAuditEntry(ctx, &entry)
+	logger.AuditLog(ctx, nil, nil, nil)
+}
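For reference, a hypothetical call site of the new helper, mirroring the queue-task usage above; the bucket, object, version, and status values are illustrative only. When VersionID is non-empty, it is surfaced in the audit entry's request query under xhttp.VersionID:

    // Illustrative values only; the helper and option names come from this commit.
    auditLogInternal(context.Background(), "srcbucket", "photos/cat.png", AuditLogOptions{
        Trigger:   ReplicationQueuedActivity, // "replication:queue"
        APIName:   "s3:ReplicateObject",
        VersionID: "0f36a268-9e47-4f71-8b9b-a0f0d7d3b2e1", // logged as versionId
        Status:    "PENDING",
    })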