mirror of https://github.com/minio/minio.git
make notification as separate package (#5294)
* Remove old notification files * Add net package * Add event package * Modify minio to take new notification system
This commit is contained in:
parent
abffa00b76
commit
0e4431725c
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -176,6 +176,11 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
|
|||
// Init global heal state
|
||||
initAllHealState(globalIsXL)
|
||||
|
||||
globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Setup admin mgmt REST API handlers.
|
||||
adminRouter := router.NewRouter()
|
||||
registerAdminRouter(adminRouter)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2014, 2015, 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2014, 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -139,11 +139,6 @@ func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
|
|||
}
|
||||
storage := objLayer.StorageInfo()
|
||||
|
||||
var arns []string
|
||||
for queueArn := range globalEventNotifier.GetAllExternalTargets() {
|
||||
arns = append(arns, queueArn)
|
||||
}
|
||||
|
||||
return ServerInfoData{
|
||||
StorageInfo: storage,
|
||||
ConnStats: globalConnStats.toServerConnStats(),
|
||||
|
@ -152,7 +147,7 @@ func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
|
|||
Uptime: UTCNow().Sub(globalBootTime),
|
||||
Version: Version,
|
||||
CommitID: CommitID,
|
||||
SQSARN: arns,
|
||||
SQSARN: globalNotificationSys.GetARNList(),
|
||||
Region: globalServerConfig.GetRegion(),
|
||||
},
|
||||
}, nil
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -120,18 +120,13 @@ func (s *adminCmd) ServerInfoData(args *AuthRPCArgs, reply *ServerInfoDataReply)
|
|||
}
|
||||
storageInfo := objLayer.StorageInfo()
|
||||
|
||||
var arns []string
|
||||
for queueArn := range globalEventNotifier.GetAllExternalTargets() {
|
||||
arns = append(arns, queueArn)
|
||||
}
|
||||
|
||||
reply.ServerInfoData = ServerInfoData{
|
||||
Properties: ServerProperties{
|
||||
Uptime: UTCNow().Sub(globalBootTime),
|
||||
Version: Version,
|
||||
CommitID: CommitID,
|
||||
Region: globalServerConfig.GetRegion(),
|
||||
SQSARN: arns,
|
||||
SQSARN: globalNotificationSys.GetARNList(),
|
||||
},
|
||||
StorageInfo: storageInfo,
|
||||
ConnStats: globalConnStats.toServerConnStats(),
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -23,6 +23,7 @@ import (
|
|||
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
)
|
||||
|
||||
|
@ -950,6 +951,28 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
|
|||
apiErr = ErrPartsSizeUnequal
|
||||
case BucketPolicyNotFound:
|
||||
apiErr = ErrNoSuchBucketPolicy
|
||||
case *event.ErrInvalidEventName:
|
||||
apiErr = ErrEventNotification
|
||||
case *event.ErrInvalidARN:
|
||||
apiErr = ErrARNNotification
|
||||
case *event.ErrARNNotFound:
|
||||
apiErr = ErrARNNotification
|
||||
case *event.ErrUnknownRegion:
|
||||
apiErr = ErrRegionNotification
|
||||
case *event.ErrInvalidFilterName:
|
||||
apiErr = ErrFilterNameInvalid
|
||||
case *event.ErrFilterNamePrefix:
|
||||
apiErr = ErrFilterNamePrefix
|
||||
case *event.ErrFilterNameSuffix:
|
||||
apiErr = ErrFilterNameSuffix
|
||||
case *event.ErrInvalidFilterValue:
|
||||
apiErr = ErrFilterValueInvalid
|
||||
case *event.ErrDuplicateEventName:
|
||||
apiErr = ErrOverlappingConfigs
|
||||
case *event.ErrDuplicateQueueConfiguration:
|
||||
apiErr = ErrOverlappingFilterNotification
|
||||
case *event.ErrUnsupportedConfiguration:
|
||||
apiErr = ErrUnsupportedNotification
|
||||
default:
|
||||
apiErr = ErrInternalError
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -79,14 +79,6 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m
|
|||
return
|
||||
}
|
||||
|
||||
// Parse listen bucket notification resources.
|
||||
func getListenBucketNotificationResources(values url.Values) (prefix []string, suffix []string, events []string) {
|
||||
prefix = values["prefix"]
|
||||
suffix = values["suffix"]
|
||||
events = values["events"]
|
||||
return prefix, suffix, events
|
||||
}
|
||||
|
||||
// Validates filter values
|
||||
func validateFilterValues(values []string) (err APIErrorCode) {
|
||||
for _, value := range values {
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -27,7 +27,6 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
timeFormatAMZ = "2006-01-02T15:04:05Z" // Reply date format
|
||||
timeFormatAMZLong = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
|
||||
maxObjectList = 1000 // Limit number of objects in a listObjectsResponse.
|
||||
maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse.
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -71,11 +71,7 @@ func (br *browserPeerAPIHandlers) SetAuthPeer(args SetAuthPeerArgs, reply *AuthR
|
|||
|
||||
// Sends SetAuthPeer RPCs to all peers in the Minio cluster
|
||||
func updateCredsOnPeers(creds auth.Credentials) map[string]error {
|
||||
// Get list of peer addresses (from globalS3Peers)
|
||||
peers := []string{}
|
||||
for _, p := range globalS3Peers {
|
||||
peers = append(peers, p.addr)
|
||||
}
|
||||
peers := GetRemotePeers(globalEndpoints)
|
||||
|
||||
// Array of errors for each peer
|
||||
errs := make([]error, len(peers))
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -33,6 +33,7 @@ import (
|
|||
"github.com/minio/minio-go/pkg/policy"
|
||||
"github.com/minio/minio-go/pkg/set"
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
)
|
||||
|
||||
|
@ -356,10 +357,10 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
|||
|
||||
// Notify deleted event for objects.
|
||||
for _, dobj := range deletedObjects {
|
||||
eventNotify(eventData{
|
||||
Type: ObjectRemovedDelete,
|
||||
Bucket: bucket,
|
||||
ObjInfo: ObjectInfo{
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectRemovedDelete,
|
||||
BucketName: bucket,
|
||||
Object: ObjectInfo{
|
||||
Name: dobj.ObjectName,
|
||||
},
|
||||
ReqParams: extractReqParams(r),
|
||||
|
@ -606,10 +607,10 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
|||
}
|
||||
|
||||
// Notify object created event.
|
||||
defer eventNotify(eventData{
|
||||
Type: ObjectCreatedPost,
|
||||
Bucket: objInfo.Bucket,
|
||||
ObjInfo: objInfo,
|
||||
defer sendEvent(eventArgs{
|
||||
EventName: event.ObjectCreatedPost,
|
||||
BucketName: objInfo.Bucket,
|
||||
Object: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
|
@ -692,6 +693,16 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
|
|||
return
|
||||
}
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) {
|
||||
errorIf(err, "unable to update policy change in remote peer %v", addr)
|
||||
}
|
||||
|
||||
globalNotificationSys.RemoveNotification(bucket)
|
||||
for addr, err := range globalNotificationSys.DeleteBucket(bucket) {
|
||||
errorIf(err, "unable to delete bucket in remote peer %v", addr)
|
||||
}
|
||||
|
||||
// Write success response.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -617,11 +617,6 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
|||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
var err error
|
||||
// register event notifier.
|
||||
err = initEventNotifier(obj)
|
||||
if err != nil {
|
||||
t.Fatal("Notifier initialization failed.")
|
||||
}
|
||||
|
||||
contentBytes := []byte("hello")
|
||||
sha256sum := ""
|
||||
|
|
|
@ -1,126 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
// BucketMetaState - Interface to update bucket metadata in-memory
|
||||
// state.
|
||||
type BucketMetaState interface {
|
||||
// Updates bucket notification
|
||||
UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error
|
||||
|
||||
// Updates bucket listener
|
||||
UpdateBucketListener(args *SetBucketListenerPeerArgs) error
|
||||
|
||||
// Updates bucket policy
|
||||
UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error
|
||||
|
||||
// Sends event
|
||||
SendEvent(args *EventArgs) error
|
||||
}
|
||||
|
||||
// BucketUpdater - Interface implementer calls one of BucketMetaState's methods.
|
||||
type BucketUpdater interface {
|
||||
BucketUpdate(client BucketMetaState) error
|
||||
}
|
||||
|
||||
// Type that implements BucketMetaState for local node.
|
||||
type localBucketMetaState struct {
|
||||
ObjectAPI func() ObjectLayer
|
||||
}
|
||||
|
||||
// localBucketMetaState.UpdateBucketNotification - updates in-memory global bucket
|
||||
// notification info.
|
||||
func (lc *localBucketMetaState) UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error {
|
||||
// check if object layer is available.
|
||||
objAPI := lc.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
globalEventNotifier.SetBucketNotificationConfig(args.Bucket, args.NCfg)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// localBucketMetaState.UpdateBucketListener - updates in-memory global bucket
|
||||
// listeners info.
|
||||
func (lc *localBucketMetaState) UpdateBucketListener(args *SetBucketListenerPeerArgs) error {
|
||||
// check if object layer is available.
|
||||
objAPI := lc.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
// Update in-memory notification config.
|
||||
return globalEventNotifier.SetBucketListenerConfig(args.Bucket, args.LCfg)
|
||||
}
|
||||
|
||||
// localBucketMetaState.UpdateBucketPolicy - updates in-memory global bucket
|
||||
// policy info.
|
||||
func (lc *localBucketMetaState) UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error {
|
||||
// check if object layer is available.
|
||||
objAPI := lc.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
return objAPI.RefreshBucketPolicy(args.Bucket)
|
||||
}
|
||||
|
||||
// localBucketMetaState.SendEvent - sends event to local event notifier via
|
||||
// `globalEventNotifier`
|
||||
func (lc *localBucketMetaState) SendEvent(args *EventArgs) error {
|
||||
// check if object layer is available.
|
||||
objAPI := lc.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
return globalEventNotifier.SendListenerEvent(args.Arn, args.Event)
|
||||
}
|
||||
|
||||
// Type that implements BucketMetaState for remote node.
|
||||
type remoteBucketMetaState struct {
|
||||
*AuthRPCClient
|
||||
}
|
||||
|
||||
// remoteBucketMetaState.UpdateBucketNotification - sends bucket notification
|
||||
// change to remote peer via RPC call.
|
||||
func (rc *remoteBucketMetaState) UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error {
|
||||
reply := AuthRPCReply{}
|
||||
return rc.Call("S3.SetBucketNotificationPeer", args, &reply)
|
||||
}
|
||||
|
||||
// remoteBucketMetaState.UpdateBucketListener - sends bucket listener change to
|
||||
// remote peer via RPC call.
|
||||
func (rc *remoteBucketMetaState) UpdateBucketListener(args *SetBucketListenerPeerArgs) error {
|
||||
reply := AuthRPCReply{}
|
||||
return rc.Call("S3.SetBucketListenerPeer", args, &reply)
|
||||
}
|
||||
|
||||
// remoteBucketMetaState.UpdateBucketPolicy - sends bucket policy change to remote
|
||||
// peer via RPC call.
|
||||
func (rc *remoteBucketMetaState) UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error {
|
||||
reply := AuthRPCReply{}
|
||||
return rc.Call("S3.SetBucketPolicyPeer", args, &reply)
|
||||
}
|
||||
|
||||
// remoteBucketMetaState.SendEvent - sends event for bucket listener to remote
|
||||
// peer via RPC call.
|
||||
func (rc *remoteBucketMetaState) SendEvent(args *EventArgs) error {
|
||||
reply := AuthRPCReply{}
|
||||
return rc.Call("S3.Event", args, &reply)
|
||||
}
|
|
@ -1,213 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Represents the criteria for the filter rule.
|
||||
type filterRule struct {
|
||||
Name string `xml:"Name"`
|
||||
Value string `xml:"Value"`
|
||||
}
|
||||
|
||||
// Collection of filter rules per service config.
|
||||
type keyFilter struct {
|
||||
FilterRules []filterRule `xml:"FilterRule,omitempty"`
|
||||
}
|
||||
|
||||
type filterStruct struct {
|
||||
Key keyFilter `xml:"S3Key,omitempty" json:"S3Key,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceConfig - Common elements of service notification.
|
||||
type ServiceConfig struct {
|
||||
Events []string `xml:"Event" json:"Event"`
|
||||
Filter filterStruct `xml:"Filter" json:"Filter"`
|
||||
ID string `xml:"Id" json:"Id"`
|
||||
}
|
||||
|
||||
// Queue SQS configuration.
|
||||
type queueConfig struct {
|
||||
ServiceConfig
|
||||
QueueARN string `xml:"Queue"`
|
||||
}
|
||||
|
||||
// Topic SNS configuration, this is a compliance field not used by minio yet.
|
||||
type topicConfig struct {
|
||||
ServiceConfig
|
||||
TopicARN string `xml:"Topic" json:"Topic"`
|
||||
}
|
||||
|
||||
// Lambda function configuration, this is a compliance field not used by minio yet.
|
||||
type lambdaConfig struct {
|
||||
ServiceConfig
|
||||
LambdaARN string `xml:"CloudFunction"`
|
||||
}
|
||||
|
||||
// Notification configuration structure represents the XML format of
|
||||
// notification configuration of buckets.
|
||||
type notificationConfig struct {
|
||||
XMLName xml.Name `xml:"NotificationConfiguration"`
|
||||
QueueConfigs []queueConfig `xml:"QueueConfiguration"`
|
||||
LambdaConfigs []lambdaConfig `xml:"CloudFunctionConfiguration"`
|
||||
TopicConfigs []topicConfig `xml:"TopicConfiguration"`
|
||||
}
|
||||
|
||||
// listenerConfig structure represents run-time notification
|
||||
// configuration for live listeners
|
||||
type listenerConfig struct {
|
||||
TopicConfig topicConfig `json:"TopicConfiguration"`
|
||||
TargetServer string `json:"TargetServer"`
|
||||
}
|
||||
|
||||
// Internal error used to signal notifications not set.
|
||||
var errNoSuchNotifications = errors.New("The specified bucket does not have bucket notifications")
|
||||
|
||||
// EventName is AWS S3 event type:
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
|
||||
type EventName int
|
||||
|
||||
const (
|
||||
// ObjectCreatedPut is s3:ObjectCreated:Put
|
||||
ObjectCreatedPut EventName = iota
|
||||
// ObjectCreatedPost is s3:ObjectCreated:Post
|
||||
ObjectCreatedPost
|
||||
// ObjectCreatedCopy is s3:ObjectCreated:Copy
|
||||
ObjectCreatedCopy
|
||||
// ObjectCreatedCompleteMultipartUpload is s3:ObjectCreated:CompleteMultipartUpload
|
||||
ObjectCreatedCompleteMultipartUpload
|
||||
// ObjectRemovedDelete is s3:ObjectRemoved:Delete
|
||||
ObjectRemovedDelete
|
||||
// ObjectAccessedGet is s3:ObjectAccessed:Get
|
||||
ObjectAccessedGet
|
||||
// ObjectAccessedHead is s3:ObjectAccessed:Head
|
||||
ObjectAccessedHead
|
||||
)
|
||||
|
||||
// Stringer interface for event name.
|
||||
func (eventName EventName) String() string {
|
||||
switch eventName {
|
||||
case ObjectCreatedPut:
|
||||
return "s3:ObjectCreated:Put"
|
||||
case ObjectCreatedPost:
|
||||
return "s3:ObjectCreated:Post"
|
||||
case ObjectCreatedCopy:
|
||||
return "s3:ObjectCreated:Copy"
|
||||
case ObjectCreatedCompleteMultipartUpload:
|
||||
return "s3:ObjectCreated:CompleteMultipartUpload"
|
||||
case ObjectRemovedDelete:
|
||||
return "s3:ObjectRemoved:Delete"
|
||||
case ObjectAccessedGet:
|
||||
return "s3:ObjectAccessed:Get"
|
||||
case ObjectAccessedHead:
|
||||
return "s3:ObjectAccessed:Head"
|
||||
default:
|
||||
return "s3:Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// Indentity represents the accessKey who caused the event.
|
||||
type identity struct {
|
||||
PrincipalID string `json:"principalId"`
|
||||
}
|
||||
|
||||
// Notification event bucket metadata.
|
||||
type bucketMeta struct {
|
||||
Name string `json:"name"`
|
||||
OwnerIdentity identity `json:"ownerIdentity"`
|
||||
ARN string `json:"arn"`
|
||||
}
|
||||
|
||||
// Notification event object metadata.
|
||||
type objectMeta struct {
|
||||
Key string `json:"key"`
|
||||
Size int64 `json:"size,omitempty"`
|
||||
ETag string `json:"eTag,omitempty"`
|
||||
ContentType string `json:"contentType,omitempty"`
|
||||
UserMetadata map[string]string `json:"userMetadata,omitempty"`
|
||||
VersionID string `json:"versionId,omitempty"`
|
||||
Sequencer string `json:"sequencer"`
|
||||
}
|
||||
|
||||
const (
|
||||
// Event schema version number defaulting to the value in S3 spec.
|
||||
// ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
|
||||
eventSchemaVersion = "1.0"
|
||||
|
||||
// Default ID found in bucket notification configuration.
|
||||
// ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
|
||||
eventConfigID = "Config"
|
||||
)
|
||||
|
||||
const (
|
||||
// Response element origin endpoint key.
|
||||
responseOriginEndpointKey = "x-minio-origin-endpoint"
|
||||
)
|
||||
|
||||
// Notification event server specific metadata.
|
||||
type eventMeta struct {
|
||||
SchemaVersion string `json:"s3SchemaVersion"`
|
||||
ConfigurationID string `json:"configurationId"`
|
||||
Bucket bucketMeta `json:"bucket"`
|
||||
Object objectMeta `json:"object"`
|
||||
}
|
||||
|
||||
const (
|
||||
// Event source static value defaulting to the value in S3 spec.
|
||||
// ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
|
||||
eventSource = "aws:s3"
|
||||
|
||||
// Event version number defaulting to the value in S3 spec.
|
||||
// ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
|
||||
eventVersion = "2.0"
|
||||
)
|
||||
|
||||
// sourceInfo represents information on the client that triggered the
|
||||
// event notification.
|
||||
type sourceInfo struct {
|
||||
Host string `json:"host"`
|
||||
Port string `json:"port"`
|
||||
UserAgent string `json:"userAgent"`
|
||||
}
|
||||
|
||||
// NotificationEvent represents an Amazon an S3 bucket notification event.
|
||||
type NotificationEvent struct {
|
||||
EventVersion string `json:"eventVersion"`
|
||||
EventSource string `json:"eventSource"`
|
||||
AwsRegion string `json:"awsRegion"`
|
||||
EventTime string `json:"eventTime"`
|
||||
EventName string `json:"eventName"`
|
||||
UserIdentity identity `json:"userIdentity"`
|
||||
RequestParameters map[string]string `json:"requestParameters"`
|
||||
ResponseElements map[string]string `json:"responseElements"`
|
||||
S3 eventMeta `json:"s3"`
|
||||
Source sourceInfo `json:"source"`
|
||||
}
|
||||
|
||||
// Represents the minio sqs type and account id's.
|
||||
type arnSQS struct {
|
||||
Type string
|
||||
AccountID string
|
||||
}
|
||||
|
||||
// Stringer for constructing AWS ARN compatible string.
|
||||
func (m arnSQS) String() string {
|
||||
return minioSqs + globalServerConfig.GetRegion() + ":" + m.AccountID + ":" + m.Type
|
||||
}
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -17,18 +17,16 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
xerrors "github.com/minio/minio/pkg/errors"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -37,11 +35,11 @@ const (
|
|||
bucketListenerConfig = "listener.json"
|
||||
)
|
||||
|
||||
// GetBucketNotificationHandler - This implementation of the GET
|
||||
// operation uses the notification subresource to return the
|
||||
// notification configuration of a bucket. If notifications are
|
||||
// not enabled on the bucket, the operation returns an empty
|
||||
// NotificationConfiguration element.
|
||||
var errNoSuchNotifications = errors.New("The specified bucket does not have bucket notifications")
|
||||
|
||||
// GetBucketNotificationHandler - This HTTP handler returns event notification configuration
|
||||
// as per http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
|
||||
// It returns empty configuration if its not set.
|
||||
func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
|
@ -60,46 +58,40 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
|
|||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
bucketName := vars["bucket"]
|
||||
|
||||
_, err := objAPI.GetBucketInfo(bucket)
|
||||
_, err := objAPI.GetBucketInfo(bucketName)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to find bucket info.")
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Attempt to successfully load notification config.
|
||||
nConfig, err := loadNotificationConfig(bucket, objAPI)
|
||||
if err != nil && errors.Cause(err) != errNoSuchNotifications {
|
||||
nConfig, err := readNotificationConfig(objAPI, bucketName)
|
||||
if err != nil {
|
||||
// Ignore errNoSuchNotifications to comply with AWS S3.
|
||||
if xerrors.Cause(err) != errNoSuchNotifications {
|
||||
errorIf(err, "Unable to read notification configuration.")
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
// For no notifications we write a dummy XML.
|
||||
if errors.Cause(err) == errNoSuchNotifications {
|
||||
// Complies with the s3 behavior in this regard.
|
||||
nConfig = ¬ificationConfig{}
|
||||
|
||||
nConfig = &event.Config{}
|
||||
}
|
||||
|
||||
notificationBytes, err := xml.Marshal(nConfig)
|
||||
if err != nil {
|
||||
// For any marshalling failure.
|
||||
errorIf(err, "Unable to marshal notification configuration into XML.", err)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Success.
|
||||
writeSuccessResponseXML(w, notificationBytes)
|
||||
}
|
||||
|
||||
// PutBucketNotificationHandler - Minio notification feature enables
|
||||
// you to receive notifications when certain events happen in your bucket.
|
||||
// Using this API, you can replace an existing notification configuration.
|
||||
// The configuration is an XML file that defines the event types that you
|
||||
// want Minio to publish and the destination where you want Minio to publish
|
||||
// an event notification when it detects an event of the specified type.
|
||||
// By default, your bucket has no event notifications configured. That is,
|
||||
// the notification configuration will be an empty NotificationConfiguration.
|
||||
// PutBucketNotificationHandler - This HTTP handler stores given notification configuration as per
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
|
||||
func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
|
@ -118,185 +110,56 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
|
|||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
bucketName := vars["bucket"]
|
||||
|
||||
_, err := objectAPI.GetBucketInfo(bucket)
|
||||
_, err := objectAPI.GetBucketInfo(bucketName)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// If Content-Length is unknown or zero, deny the request.
|
||||
// PutBucketNotification always needs a Content-Length.
|
||||
if r.ContentLength == -1 || r.ContentLength == 0 {
|
||||
if r.ContentLength <= 0 {
|
||||
writeErrorResponse(w, ErrMissingContentLength, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Reads the incoming notification configuration.
|
||||
var buffer bytes.Buffer
|
||||
if r.ContentLength >= 0 {
|
||||
_, err = io.CopyN(&buffer, r.Body, r.ContentLength)
|
||||
} else {
|
||||
_, err = io.Copy(&buffer, r.Body)
|
||||
}
|
||||
var config *event.Config
|
||||
config, err = event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerConfig.GetRegion(), globalNotificationSys.targetList)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to read incoming body.")
|
||||
apiErr := ErrMalformedXML
|
||||
if event.IsEventError(err) {
|
||||
apiErr = toAPIErrorCode(err)
|
||||
}
|
||||
|
||||
writeErrorResponse(w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Acquire a write lock on bucket before modifying its configuration.
|
||||
bucketLock := globalNSMutex.NewNSLock(bucketName, "")
|
||||
if err = bucketLock.GetLock(globalOperationTimeout); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
defer bucketLock.Unlock()
|
||||
|
||||
if err = saveNotificationConfig(objectAPI, bucketName, config); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var notificationCfg notificationConfig
|
||||
// Unmarshal notification bytes.
|
||||
notificationConfigBytes := buffer.Bytes()
|
||||
if err = xml.Unmarshal(notificationConfigBytes, ¬ificationCfg); err != nil {
|
||||
errorIf(err, "Unable to parse notification configuration XML.")
|
||||
writeErrorResponse(w, ErrMalformedXML, r.URL)
|
||||
return
|
||||
} // Successfully marshalled notification configuration.
|
||||
|
||||
// Validate unmarshalled bucket notification configuration.
|
||||
if s3Error := validateNotificationConfig(notificationCfg); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
rulesMap := config.ToRulesMap()
|
||||
globalNotificationSys.AddRulesMap(bucketName, rulesMap)
|
||||
for addr, err := range globalNotificationSys.PutBucketNotification(bucketName, rulesMap) {
|
||||
errorIf(err, "unable to put bucket notification to remote peer %v", addr)
|
||||
}
|
||||
|
||||
// Convert the incoming ARNs properly to the GetRegion().
|
||||
for i, queueConfig := range notificationCfg.QueueConfigs {
|
||||
queueConfig.QueueARN = unmarshalSqsARN(queueConfig.QueueARN).String()
|
||||
notificationCfg.QueueConfigs[i] = queueConfig
|
||||
}
|
||||
|
||||
// Put bucket notification config.
|
||||
err = PutBucketNotificationConfig(bucket, ¬ificationCfg, objectAPI)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Success.
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
// PutBucketNotificationConfig - Put a new notification config for a
|
||||
// bucket (overwrites any previous config) persistently, updates
|
||||
// global in-memory state, and notify other nodes in the cluster (if
|
||||
// any)
|
||||
func PutBucketNotificationConfig(bucket string, ncfg *notificationConfig, objAPI ObjectLayer) error {
|
||||
if ncfg == nil {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
// Acquire a write lock on bucket before modifying its
|
||||
// configuration.
|
||||
bucketLock := globalNSMutex.NewNSLock(bucket, "")
|
||||
if err := bucketLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
// Release lock after notifying peers
|
||||
defer bucketLock.Unlock()
|
||||
|
||||
// persist config to disk
|
||||
err := persistNotificationConfig(bucket, ncfg, objAPI)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to persist Bucket notification config to object layer - config=%v errMsg=%v", *ncfg, err)
|
||||
}
|
||||
|
||||
// All servers (including local) are told to update in-memory config
|
||||
S3PeersUpdateBucketNotification(bucket, ncfg)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeNotification marshals notification message before writing to client.
|
||||
func writeNotification(w http.ResponseWriter, notification map[string][]NotificationEvent) error {
|
||||
// Invalid response writer.
|
||||
if w == nil {
|
||||
return errInvalidArgument
|
||||
}
|
||||
// Invalid notification input.
|
||||
if notification == nil {
|
||||
return errInvalidArgument
|
||||
}
|
||||
// Marshal notification data into JSON and write to client.
|
||||
notificationBytes, err := json.Marshal(¬ification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add additional CRLF characters for client to
|
||||
// differentiate the individual events properly.
|
||||
_, err = w.Write(append(notificationBytes, crlf...))
|
||||
// Make sure we have flushed, this would set Transfer-Encoding: chunked.
|
||||
w.(http.Flusher).Flush()
|
||||
return err
|
||||
}
|
||||
|
||||
// crlf is the CRLF separator appended after each JSON-encoded event so
// clients can split the chunked HTTP stream into individual
// notifications, in accordance with HTTP standards.
var crlf = []byte("\r\n")
|
||||
|
||||
// listenChan provides a data channel to send event notifications on
// and a done channel to signal that events are no longer being
// received. Empty (keep-alive) events are written by the receiver side
// to keep the underlying connection alive.
type listenChan struct {
	doneCh chan struct{}            // closed by the receiver when it stops listening
	dataCh chan []NotificationEvent // unbuffered stream of event batches
}
|
||||
|
||||
// newListenChan returns a listenChan with properly initialized
|
||||
// unbuffered channels.
|
||||
func newListenChan() *listenChan {
|
||||
return &listenChan{
|
||||
doneCh: make(chan struct{}),
|
||||
dataCh: make(chan []NotificationEvent),
|
||||
}
|
||||
}
|
||||
|
||||
// sendNotificationEvent sends notification events on the data channel
|
||||
// unless doneCh is not closed
|
||||
func (l *listenChan) sendNotificationEvent(events []NotificationEvent) {
|
||||
select {
|
||||
// Returns immediately if receiver has quit.
|
||||
case <-l.doneCh:
|
||||
// Blocks until receiver is available.
|
||||
case l.dataCh <- events:
|
||||
}
|
||||
}
|
||||
|
||||
// waitForListener writes event notification OR whitespaces on
|
||||
// ResponseWriter until client closes connection
|
||||
func (l *listenChan) waitForListener(w http.ResponseWriter) {
|
||||
|
||||
// Logs errors other than EPIPE and ECONNRESET.
|
||||
// EPIPE and ECONNRESET indicate that the client stopped
|
||||
// listening to notification events.
|
||||
logClientError := func(err error, msg string) {
|
||||
if oe, ok := err.(*net.OpError); ok && (oe.Err == syscall.EPIPE || oe.Err ==
|
||||
syscall.ECONNRESET) {
|
||||
errorIf(err, msg)
|
||||
}
|
||||
}
|
||||
|
||||
emptyEvent := map[string][]NotificationEvent{"Records": nil}
|
||||
defer close(l.doneCh)
|
||||
for {
|
||||
select {
|
||||
case events := <-l.dataCh:
|
||||
if err := writeNotification(w, map[string][]NotificationEvent{"Records": events}); err != nil {
|
||||
logClientError(err, "Unable to write notification")
|
||||
return
|
||||
}
|
||||
case <-time.After(globalSNSConnAlive):
|
||||
if err := writeNotification(w, emptyEvent); err != nil {
|
||||
logClientError(err, "Unable to write empty notification")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ListenBucketNotificationHandler - list bucket notifications.
|
||||
// ListenBucketNotificationHandler - This HTTP handler sends events to the connected HTTP client.
|
||||
// Client should send prefix/suffix object name to match and events to watch as query parameters.
|
||||
func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// Validate if bucket exists.
|
||||
objAPI := api.ObjectAPI()
|
||||
|
@ -314,195 +177,84 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
|
|||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
bucketName := vars["bucket"]
|
||||
|
||||
// Parse listen bucket notification resources.
|
||||
prefixes, suffixes, events := getListenBucketNotificationResources(r.URL.Query())
|
||||
values := r.URL.Query()
|
||||
|
||||
if err := validateFilterValues(prefixes); err != ErrNone {
|
||||
writeErrorResponse(w, err, r.URL)
|
||||
var prefix string
|
||||
if len(values["prefix"]) > 1 {
|
||||
writeErrorResponse(w, ErrFilterNamePrefix, r.URL)
|
||||
}
|
||||
if len(values["prefix"]) == 1 {
|
||||
if err := event.ValidateFilterRuleValue(values["prefix"][0]); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err := validateFilterValues(suffixes); err != ErrNone {
|
||||
writeErrorResponse(w, err, r.URL)
|
||||
prefix = values["prefix"][0]
|
||||
}
|
||||
|
||||
var suffix string
|
||||
if len(values["suffix"]) > 1 {
|
||||
writeErrorResponse(w, ErrFilterNameSuffix, r.URL)
|
||||
}
|
||||
if len(values["suffix"]) == 1 {
|
||||
if err := event.ValidateFilterRuleValue(values["suffix"][0]); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Validate all the resource events.
|
||||
for _, event := range events {
|
||||
if errCode := checkEvent(event); errCode != ErrNone {
|
||||
writeErrorResponse(w, errCode, r.URL)
|
||||
return
|
||||
}
|
||||
suffix = values["suffix"][0]
|
||||
}
|
||||
|
||||
_, err := objAPI.GetBucketInfo(bucket)
|
||||
pattern := event.NewPattern(prefix, suffix)
|
||||
|
||||
eventNames := []event.Name{}
|
||||
for _, s := range values["events"] {
|
||||
eventName, err := event.ParseName(s)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
targetServer := GetLocalPeer(globalEndpoints)
|
||||
accountID := fmt.Sprintf("%d", UTCNow().UnixNano())
|
||||
accountARN := fmt.Sprintf(
|
||||
"%s:%s:%s:%s-%s",
|
||||
minioTopic,
|
||||
globalServerConfig.GetRegion(),
|
||||
accountID,
|
||||
snsTypeMinio,
|
||||
targetServer,
|
||||
)
|
||||
|
||||
var filterRules []filterRule
|
||||
|
||||
for _, prefix := range prefixes {
|
||||
filterRules = append(filterRules, filterRule{
|
||||
Name: "prefix",
|
||||
Value: prefix,
|
||||
})
|
||||
eventNames = append(eventNames, eventName)
|
||||
}
|
||||
|
||||
for _, suffix := range suffixes {
|
||||
filterRules = append(filterRules, filterRule{
|
||||
Name: "suffix",
|
||||
Value: suffix,
|
||||
})
|
||||
}
|
||||
|
||||
// Make topic configuration corresponding to this
|
||||
// ListenBucketNotification request.
|
||||
topicCfg := &topicConfig{
|
||||
TopicARN: accountARN,
|
||||
ServiceConfig: ServiceConfig{
|
||||
Events: events,
|
||||
Filter: struct {
|
||||
Key keyFilter `xml:"S3Key,omitempty" json:"S3Key,omitempty"`
|
||||
}{
|
||||
Key: keyFilter{
|
||||
FilterRules: filterRules,
|
||||
},
|
||||
},
|
||||
ID: "sns-" + accountID,
|
||||
},
|
||||
}
|
||||
|
||||
// Setup a listen channel to receive notifications like
|
||||
// s3:ObjectCreated, s3:ObjectDeleted etc.
|
||||
nListenCh := newListenChan()
|
||||
// Add channel for listener events
|
||||
if err = globalEventNotifier.AddListenerChan(accountARN, nListenCh); err != nil {
|
||||
errorIf(err, "Error adding a listener!")
|
||||
if _, err := objAPI.GetBucketInfo(bucketName); err != nil {
|
||||
errorIf(err, "Unable to get bucket info.")
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
// Remove listener channel after the writer has closed or the
|
||||
// client disconnected.
|
||||
defer globalEventNotifier.RemoveListenerChan(accountARN)
|
||||
|
||||
// Update topic config to bucket config and persist - as soon
|
||||
// as this call completes, events may start appearing in
|
||||
// nListenCh
|
||||
lc := listenerConfig{
|
||||
TopicConfig: *topicCfg,
|
||||
TargetServer: targetServer,
|
||||
}
|
||||
host := xnet.MustParseHost(r.RemoteAddr)
|
||||
target := target.NewHTTPClientTarget(*host, w)
|
||||
rulesMap := event.NewRulesMap(eventNames, pattern, target.ID())
|
||||
|
||||
err = AddBucketListenerConfig(bucket, &lc, objAPI)
|
||||
if err != nil {
|
||||
if err := globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
|
||||
errorIf(err, "Unable to add httpclient target %v to globalNotificationSys.targetList.", target)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
defer RemoveBucketListenerConfig(bucket, &lc, objAPI)
|
||||
defer globalNotificationSys.RemoveRemoteTarget(bucketName, target.ID())
|
||||
defer globalNotificationSys.RemoveRulesMap(bucketName, rulesMap)
|
||||
|
||||
// Add all common headers.
|
||||
setCommonHeaders(w)
|
||||
|
||||
// https://github.com/containous/traefik/issues/560
|
||||
// https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events
|
||||
//
|
||||
// Proxies might buffer the connection to avoid this we
|
||||
// need the proper MIME type before writing to client.
|
||||
// This MIME header tells the proxies to avoid buffering
|
||||
w.Header().Set("Content-Type", "text/event-stream")
|
||||
|
||||
// Start writing bucket notifications to ResponseWriter.
|
||||
nListenCh.waitForListener(w)
|
||||
}
|
||||
|
||||
// AddBucketListenerConfig - Updates on disk state of listeners, and
|
||||
// updates all peers with the change in listener config.
|
||||
func AddBucketListenerConfig(bucket string, lcfg *listenerConfig, objAPI ObjectLayer) error {
|
||||
if lcfg == nil {
|
||||
return errInvalidArgument
|
||||
}
|
||||
listenerCfgs := globalEventNotifier.GetBucketListenerConfig(bucket)
|
||||
|
||||
// add new lid to listeners and persist to object layer.
|
||||
listenerCfgs = append(listenerCfgs, *lcfg)
|
||||
|
||||
// Acquire a write lock on bucket before modifying its
|
||||
// configuration.
|
||||
bucketLock := globalNSMutex.NewNSLock(bucket, "")
|
||||
if err := bucketLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
// Release lock after notifying peers
|
||||
defer bucketLock.Unlock()
|
||||
|
||||
// update persistent config if dist XL
|
||||
if globalIsDistXL {
|
||||
err := persistListenerConfig(bucket, listenerCfgs, objAPI)
|
||||
if err != nil {
|
||||
errorIf(err, "Error persisting listener config when adding a listener.")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// persistence success - now update in-memory globals on all
|
||||
// peers (including local)
|
||||
S3PeersUpdateBucketListener(bucket, listenerCfgs)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveBucketListenerConfig - removes a given bucket notification config
|
||||
func RemoveBucketListenerConfig(bucket string, lcfg *listenerConfig, objAPI ObjectLayer) {
|
||||
listenerCfgs := globalEventNotifier.GetBucketListenerConfig(bucket)
|
||||
|
||||
// remove listener with matching ARN - if not found ignore and exit.
|
||||
var updatedLcfgs []listenerConfig
|
||||
found := false
|
||||
for k, configuredLcfg := range listenerCfgs {
|
||||
if configuredLcfg.TopicConfig.TopicARN == lcfg.TopicConfig.TopicARN {
|
||||
updatedLcfgs = append(listenerCfgs[:k],
|
||||
listenerCfgs[k+1:]...)
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
thisAddr := xnet.MustParseHost(GetLocalPeer(globalEndpoints))
|
||||
if err := SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
|
||||
errorIf(err, "Unable to save HTTP listener %v", target)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Acquire a write lock on bucket before modifying its
|
||||
// configuration.
|
||||
bucketLock := globalNSMutex.NewNSLock(bucket, "")
|
||||
if bucketLock.GetLock(globalOperationTimeout) != nil {
|
||||
return
|
||||
errors := globalNotificationSys.ListenBucketNotification(bucketName, eventNames, pattern, target.ID(), *thisAddr)
|
||||
for addr, err := range errors {
|
||||
errorIf(err, "unable to call listen bucket notification to remote peer %v", addr)
|
||||
}
|
||||
// Release lock after notifying peers
|
||||
defer bucketLock.Unlock()
|
||||
|
||||
// update persistent config if dist XL
|
||||
if globalIsDistXL {
|
||||
err := persistListenerConfig(bucket, updatedLcfgs, objAPI)
|
||||
if err != nil {
|
||||
errorIf(err, "Error persisting listener config when removing a listener.")
|
||||
<-target.DoneCh
|
||||
|
||||
if err := RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil {
|
||||
errorIf(err, "Unable to save HTTP listener %v", target)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// persistence success - now update in-memory globals on all
|
||||
// peers (including local)
|
||||
S3PeersUpdateBucketListener(bucket, updatedLcfgs)
|
||||
}
|
||||
|
|
|
@ -1,483 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
)
|
||||
|
||||
// Implement a dummy flush writer.
|
||||
type flushWriter struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
// Flush writer is a dummy writer compatible with http.Flusher and http.ResponseWriter.
|
||||
func (f *flushWriter) Flush() {}
|
||||
func (f *flushWriter) Write(b []byte) (n int, err error) { return f.Writer.Write(b) }
|
||||
func (f *flushWriter) Header() http.Header { return http.Header{} }
|
||||
func (f *flushWriter) WriteHeader(code int) {}
|
||||
|
||||
func newFlushWriter(writer io.Writer) http.ResponseWriter {
|
||||
return &flushWriter{writer}
|
||||
}
|
||||
|
||||
// Tests write notification code.
|
||||
func TestWriteNotification(t *testing.T) {
|
||||
// Initialize a new test config.
|
||||
root, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize test config %s", err)
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
|
||||
var buffer bytes.Buffer
|
||||
// Collection of test cases for each event writer.
|
||||
testCases := []struct {
|
||||
writer http.ResponseWriter
|
||||
event map[string][]NotificationEvent
|
||||
err error
|
||||
}{
|
||||
// Invalid input argument with writer `nil` - Test - 1
|
||||
{
|
||||
writer: nil,
|
||||
event: nil,
|
||||
err: errInvalidArgument,
|
||||
},
|
||||
// Invalid input argument with event `nil` - Test - 2
|
||||
{
|
||||
writer: newFlushWriter(ioutil.Discard),
|
||||
event: nil,
|
||||
err: errInvalidArgument,
|
||||
},
|
||||
// Unmarshal and write, validate last 5 bytes. - Test - 3
|
||||
{
|
||||
writer: newFlushWriter(&buffer),
|
||||
event: map[string][]NotificationEvent{
|
||||
"Records": {newNotificationEvent(eventData{
|
||||
Type: ObjectCreatedPut,
|
||||
Bucket: "testbucket",
|
||||
ObjInfo: ObjectInfo{
|
||||
Name: "key",
|
||||
},
|
||||
ReqParams: map[string]string{
|
||||
"ip": "10.1.10.1",
|
||||
}}),
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
},
|
||||
}
|
||||
// Validates all the testcases for writing notification.
|
||||
for _, testCase := range testCases {
|
||||
err := writeNotification(testCase.writer, testCase.event)
|
||||
if err != testCase.err {
|
||||
t.Errorf("Unable to write notification %s", err)
|
||||
}
|
||||
// Validates if the ending string has 'crlf'
|
||||
if err == nil && !bytes.HasSuffix(buffer.Bytes(), crlf) {
|
||||
buf := buffer.Bytes()[buffer.Len()-5 : 0]
|
||||
t.Errorf("Invalid suffix found from the writer last 5 bytes %s, expected `\r\n`", string(buf))
|
||||
}
|
||||
// Not printing 'buf' on purpose, validates look for string '10.1.10.1'.
|
||||
if err == nil && !bytes.Contains(buffer.Bytes(), []byte("10.1.10.1")) {
|
||||
// Enable when debugging)
|
||||
// fmt.Println(string(buffer.Bytes()))
|
||||
t.Errorf("Requested content couldn't be found, expected `10.1.10.1`")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// testResponseWriter is an http.ResponseWriter that buffers the
// response body in a bytes.Buffer and makes Write return an error
// after failCount successful calls.
type testResponseWriter struct {
	mu        sync.Mutex
	failCount int
	buf       *bytes.Buffer
	m         http.Header
}

// newTestResponseWriter returns a writer whose Write succeeds failAt
// times and fails on every call after that.
func newTestResponseWriter(failAt int) *testResponseWriter {
	trw := &testResponseWriter{}
	trw.buf = new(bytes.Buffer)
	trw.m = make(http.Header)
	trw.failCount = failAt
	return trw
}

// Flush implements http.Flusher as a no-op.
func (trw *testResponseWriter) Flush() {}

// Write buffers p until the write budget is exhausted, then errors.
func (trw *testResponseWriter) Write(p []byte) (int, error) {
	trw.mu.Lock()
	defer trw.mu.Unlock()

	if trw.failCount == 0 {
		return 0, errors.New("Custom error")
	}
	trw.failCount--

	return trw.buf.Write(p)
}

// Header returns the recorded header map.
func (trw *testResponseWriter) Header() http.Header {
	return trw.m
}

// WriteHeader discards the status code.
func (trw *testResponseWriter) WriteHeader(i int) {}
|
||||
|
||||
// TestListenChan streams four object-created events through a
// listenChan into a ResponseWriter that fails after exactly
// len(events) writes (which makes waitForListener return), then
// verifies each JSON line written carries the expected event name in
// order.
func TestListenChan(t *testing.T) {
	// Initialize a new test config.
	root, err := newTestConfig(globalMinioDefaultRegion)
	if err != nil {
		t.Fatalf("Unable to initialize test config %s", err)
	}
	defer os.RemoveAll(root)

	// Create a listen channel to manage notifications
	nListenCh := newListenChan()

	// Construct notification events to be passed on the events channel.
	var events []NotificationEvent
	evTypes := []EventName{
		ObjectCreatedPut,
		ObjectCreatedPost,
		ObjectCreatedCopy,
		ObjectCreatedCompleteMultipartUpload,
	}

	for _, evType := range evTypes {
		events = append(events, newNotificationEvent(eventData{
			Type: evType,
		}))
	}

	// Send notification events one-by-one; each send blocks until
	// waitForListener below consumes it.
	go func() {
		for _, event := range events {
			nListenCh.sendNotificationEvent([]NotificationEvent{event})
		}
	}()

	// Create a http.ResponseWriter that fails after len(events)
	// number of times
	trw := newTestResponseWriter(len(events))

	// Wait for all (4) notification events to be received
	nListenCh.waitForListener(trw)

	// Used to read JSON-formatted event stream line-by-line
	scanner := bufio.NewScanner(trw.buf)
	var records map[string][]NotificationEvent
	for i := 0; scanner.Scan(); i++ {
		err = json.Unmarshal(scanner.Bytes(), &records)
		if err != nil {
			t.Fatalf("Failed to unmarshal json %v", err)
		}

		// Each line holds one "Records" batch with a single event;
		// order must match the send order above.
		nEvent := records["Records"][0]
		if nEvent.EventName != evTypes[i].String() {
			t.Errorf("notification event name mismatch, expected %s but got %s", evTypes[i], nEvent.EventName)
		}
	}
}
|
||||
|
||||
func TestSendNotificationEvent(t *testing.T) {
|
||||
// This test verifies that sendNotificationEvent function
|
||||
// returns once listenChan.doneCh is closed
|
||||
|
||||
l := newListenChan()
|
||||
testCh := make(chan struct{})
|
||||
timeout := 5 * time.Second
|
||||
|
||||
go func() {
|
||||
// Send one empty notification event on listenChan
|
||||
events := []NotificationEvent{{}}
|
||||
l.sendNotificationEvent(events)
|
||||
testCh <- struct{}{}
|
||||
}()
|
||||
|
||||
// close l.doneCh to signal client exiting from
|
||||
// ListenBucketNotification API call
|
||||
close(l.doneCh)
|
||||
|
||||
select {
|
||||
case <-time.After(timeout):
|
||||
t.Fatalf("sendNotificationEvent didn't return after %v seconds", timeout)
|
||||
case <-testCh:
|
||||
// If we reach this case, sendNotificationEvent
|
||||
// returned on closing l.doneCh
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetBucketNotificationHandler wires testGetBucketNotificationHandler
// into the object-layer API test harness for the GetBucketNotification
// endpoint.
func TestGetBucketNotificationHandler(t *testing.T) {
	ExecObjectLayerAPITest(t, testGetBucketNotificationHandler, []string{
		"GetBucketNotification",
	})
}
|
||||
|
||||
// testGetBucketNotificationHandler issues a GET bucket-notification
// request before and after persisting a sample queue config, and
// checks the second response round-trips the persisted QueueARN and
// events through XML.
func testGetBucketNotificationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	credentials auth.Credentials, t *testing.T) {
	// declare sample configs
	filterRules := []filterRule{
		{
			Name: "prefix",
			Value: "minio",
		},
		{
			Name: "suffix",
			Value: "*.jpg",
		},
	}
	sampleSvcCfg := ServiceConfig{
		[]string{"s3:ObjectRemoved:*", "s3:ObjectCreated:*"},
		filterStruct{
			keyFilter{filterRules},
		},
		"1",
	}
	sampleNotifCfg := notificationConfig{
		QueueConfigs: []queueConfig{
			{
				ServiceConfig: sampleSvcCfg,
				QueueARN: "testqARN",
			},
		},
	}
	// First GET: no config persisted yet; still expects HTTP 200.
	rec := httptest.NewRecorder()
	req, err := newTestSignedRequestV4("GET", getGetBucketNotificationURL("", bucketName),
		0, nil, credentials.AccessKey, credentials.SecretKey)
	if err != nil {
		t.Fatalf("%s: Failed to create HTTP testRequest for ListenBucketNotification: <ERROR> %v", instanceType, err)
	}
	apiRouter.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Fatalf("Unexpected http response %d", rec.Code)
	}
	// Persist the sample config, then GET again.
	if err = persistNotificationConfig(bucketName, &sampleNotifCfg, obj); err != nil {
		t.Fatalf("Unable to save notification config %s", err)
	}
	rec = httptest.NewRecorder()
	req, err = newTestSignedRequestV4("GET", getGetBucketNotificationURL("", bucketName),
		0, nil, credentials.AccessKey, credentials.SecretKey)
	if err != nil {
		t.Fatalf("%s: Failed to create HTTP testRequest for ListenBucketNotification: <ERROR> %v", instanceType, err)
	}
	apiRouter.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Fatalf("Unexpected http response %d", rec.Code)
	}
	// Decode the XML body and compare against what was persisted.
	notificationBytes, err := ioutil.ReadAll(rec.Body)
	if err != nil {
		t.Fatalf("Unexpected error %s", err)
	}
	nConfig := notificationConfig{}
	if err = xml.Unmarshal(notificationBytes, &nConfig); err != nil {
		t.Fatalf("Unexpected XML received %s", err)
	}
	if sampleNotifCfg.QueueConfigs[0].QueueARN != nConfig.QueueConfigs[0].QueueARN {
		t.Fatalf("Uexpected notification configs expected %#v, got %#v", sampleNotifCfg, nConfig)
	}
	if !reflect.DeepEqual(sampleNotifCfg.QueueConfigs[0].Events, nConfig.QueueConfigs[0].Events) {
		t.Fatalf("Uexpected notification configs expected %#v, got %#v", sampleNotifCfg, nConfig)
	}
}
|
||||
|
||||
// TestPutBucketNotificationHandler wires testPutBucketNotificationHandler
// into the object-layer API test harness for the PutBucketNotification
// endpoint.
func TestPutBucketNotificationHandler(t *testing.T) {
	ExecObjectLayerAPITest(t, testPutBucketNotificationHandler, []string{
		"PutBucketNotification",
	})
}
|
||||
|
||||
// testPutBucketNotificationHandler PUTs notification configs carrying
// lambda and topic target ARNs and expects both requests to be
// rejected with HTTP 400.
func testPutBucketNotificationHandler(obj ObjectLayer, instanceType,
	bucketName string, apiRouter http.Handler, credentials auth.Credentials,
	t *testing.T) {

	// declare sample configs
	filterRules := []filterRule{
		{
			Name: "prefix",
			Value: "minio",
		},
		{
			Name: "suffix",
			Value: "*.jpg",
		},
	}
	sampleSvcCfg := ServiceConfig{
		[]string{"s3:ObjectRemoved:*", "s3:ObjectCreated:*"},
		filterStruct{
			keyFilter{filterRules},
		},
		"1",
	}
	sampleNotifCfg := notificationConfig{
		QueueConfigs: []queueConfig{
			{
				ServiceConfig: sampleSvcCfg,
				QueueARN: "testqARN",
			},
		},
	}

	// Case 1: a config with a lambda target ARN - expect HTTP 400.
	{
		sampleNotifCfg.LambdaConfigs = []lambdaConfig{
			{
				sampleSvcCfg, "testLARN",
			},
		}
		xmlBytes, err := xml.Marshal(sampleNotifCfg)
		if err != nil {
			t.Fatalf("%s: Unexpected err: %#v", instanceType, err)
		}
		rec := httptest.NewRecorder()
		req, err := newTestSignedRequestV4("PUT",
			getPutBucketNotificationURL("", bucketName),
			int64(len(xmlBytes)), bytes.NewReader(xmlBytes),
			credentials.AccessKey, credentials.SecretKey)
		if err != nil {
			t.Fatalf("%s: Failed to create HTTP testRequest for PutBucketNotification: <ERROR> %v",
				instanceType, err)
		}
		apiRouter.ServeHTTP(rec, req)
		if rec.Code != http.StatusBadRequest {
			t.Fatalf("Unexpected http response %d", rec.Code)
		}
	}

	// Case 2: a config with a topic target ARN - expect HTTP 400.
	{
		sampleNotifCfg.LambdaConfigs = nil
		sampleNotifCfg.TopicConfigs = []topicConfig{
			{
				sampleSvcCfg, "testTARN",
			},
		}
		xmlBytes, err := xml.Marshal(sampleNotifCfg)
		if err != nil {
			t.Fatalf("%s: Unexpected err: %#v", instanceType, err)
		}
		rec := httptest.NewRecorder()
		req, err := newTestSignedRequestV4("PUT",
			getPutBucketNotificationURL("", bucketName),
			int64(len(xmlBytes)), bytes.NewReader(xmlBytes),
			credentials.AccessKey, credentials.SecretKey)
		if err != nil {
			t.Fatalf("%s: Failed to create HTTP testRequest for PutBucketNotification: <ERROR> %v",
				instanceType, err)
		}
		apiRouter.ServeHTTP(rec, req)
		if rec.Code != http.StatusBadRequest {
			t.Fatalf("Unexpected http response %d", rec.Code)
		}
	}
}
|
||||
|
||||
// TestListenBucketNotificationNilHandler wires
// testListenBucketNotificationNilHandler into the object-layer API
// test harness.
func TestListenBucketNotificationNilHandler(t *testing.T) {
	ExecObjectLayerAPITest(t, testListenBucketNotificationNilHandler, []string{
		"ListenBucketNotification",
		"PutObject",
	})
}
|
||||
|
||||
// testListenBucketNotificationNilHandler calls the
// ListenBucketNotification endpoint against a router built with a nil
// object layer and expects HTTP 503 Service Unavailable.
func testListenBucketNotificationNilHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	credentials auth.Credentials, t *testing.T) {
	// get random bucket name.
	randBucket := getRandomBucketName()

	// Nil Object layer
	nilAPIRouter := initTestAPIEndPoints(nil, []string{
		"ListenBucketNotification",
	})
	testRec := httptest.NewRecorder()
	testReq, tErr := newTestSignedRequestV4("GET",
		getListenBucketNotificationURL("", randBucket, []string{},
			[]string{"*.jpg"}, []string{
				"s3:ObjectCreated:*",
				"s3:ObjectRemoved:*",
				"s3:ObjectAccessed:*",
			}), 0, nil, credentials.AccessKey, credentials.SecretKey)
	if tErr != nil {
		t.Fatalf("%s: Failed to create HTTP testRequest for ListenBucketNotification: <ERROR> %v", instanceType, tErr)
	}
	nilAPIRouter.ServeHTTP(testRec, testReq)
	if testRec.Code != http.StatusServiceUnavailable {
		t.Fatalf("Test 1: %s: expected HTTP code %d, but received %d: <ERROR> %v",
			instanceType, http.StatusServiceUnavailable, testRec.Code, tErr)
	}
}
|
||||
|
||||
// testRemoveNotificationConfig persists a sample notification config,
// then exercises removeNotificationConfig against an invalid bucket
// name (expects BucketNameInvalid) and the real bucket (expects nil).
func testRemoveNotificationConfig(obj ObjectLayer, instanceType,
	bucketName string, apiRouter http.Handler, credentials auth.Credentials,
	t *testing.T) {

	invalidBucket := "Invalid\\Bucket"
	// Use the bucket name supplied by the test harness.
	randBucket := bucketName

	nCfg := notificationConfig{
		QueueConfigs: []queueConfig{
			{
				ServiceConfig: ServiceConfig{
					Events: []string{"s3:ObjectRemoved:*",
						"s3:ObjectCreated:*"},
				},
				QueueARN: "testqARN",
			},
		},
	}
	if err := persistNotificationConfig(randBucket, &nCfg, obj); err != nil {
		t.Fatalf("Unexpected error: %#v", err)
	}

	testCases := []struct {
		bucketName  string
		expectedErr error
	}{
		{invalidBucket, BucketNameInvalid{Bucket: invalidBucket}},
		{randBucket, nil},
	}
	for i, test := range testCases {
		tErr := removeNotificationConfig(test.bucketName, obj)
		if tErr != test.expectedErr {
			t.Errorf("Test %d: %s expected error %v, but received %v", i+1, instanceType, test.expectedErr, tErr)
		}
	}
}
|
||||
|
||||
// TestRemoveNotificationConfig wires testRemoveNotificationConfig into
// the object-layer API test harness.
func TestRemoveNotificationConfig(t *testing.T) {
	ExecObjectLayerAPITest(t, testRemoveNotificationConfig, []string{
		"PutBucketNotification",
		"ListenBucketNotification",
	})
}
|
|
@ -1,317 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio-go/pkg/set"
|
||||
)
|
||||
|
||||
// List of valid event types.
|
||||
var suppportedEventTypes = map[string]struct{}{
|
||||
// Object created event types.
|
||||
"s3:ObjectCreated:*": {},
|
||||
"s3:ObjectCreated:Put": {},
|
||||
"s3:ObjectCreated:Post": {},
|
||||
"s3:ObjectCreated:Copy": {},
|
||||
"s3:ObjectCreated:CompleteMultipartUpload": {},
|
||||
// Object removed event types.
|
||||
"s3:ObjectRemoved:*": {},
|
||||
"s3:ObjectRemoved:Delete": {},
|
||||
"s3:ObjectAccessed:Get": {},
|
||||
"s3:ObjectAccessed:Head": {},
|
||||
"s3:ObjectAccessed:*": {},
|
||||
}
|
||||
|
||||
// checkEvent - checks if an event is supported.
|
||||
func checkEvent(event string) APIErrorCode {
|
||||
_, ok := suppportedEventTypes[event]
|
||||
if !ok {
|
||||
return ErrEventNotification
|
||||
}
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// checkEvents - checks given list of events if all of them are valid.
|
||||
// given if one of them is invalid, this function returns an error.
|
||||
func checkEvents(events []string) APIErrorCode {
|
||||
for _, event := range events {
|
||||
if s3Error := checkEvent(event); s3Error != ErrNone {
|
||||
return s3Error
|
||||
}
|
||||
}
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// isValidFilterNamePrefix reports whether filterName is the literal
// rule name "prefix".
func isValidFilterNamePrefix(filterName string) bool {
	return filterName == "prefix"
}
|
||||
|
||||
// isValidFilterNameSuffix reports whether filterName is the literal
// rule name "suffix".
func isValidFilterNameSuffix(filterName string) bool {
	return filterName == "suffix"
}
|
||||
|
||||
// Is this a valid filterName? - returns true if valid.
|
||||
func isValidFilterName(filterName string) bool {
|
||||
return isValidFilterNamePrefix(filterName) || isValidFilterNameSuffix(filterName)
|
||||
}
|
||||
|
||||
// checkFilterRules - checks given list of filter rules if all of them are valid.
|
||||
func checkFilterRules(filterRules []filterRule) APIErrorCode {
|
||||
ruleSetMap := make(map[string]string)
|
||||
// Validate all filter rules.
|
||||
for _, filterRule := range filterRules {
|
||||
// Unknown filter rule name found, returns an appropriate error.
|
||||
if !isValidFilterName(filterRule.Name) {
|
||||
return ErrFilterNameInvalid
|
||||
}
|
||||
|
||||
// Filter names should not be set twice per notification service
|
||||
// configuration, if found return an appropriate error.
|
||||
if _, ok := ruleSetMap[filterRule.Name]; ok {
|
||||
if isValidFilterNamePrefix(filterRule.Name) {
|
||||
return ErrFilterNamePrefix
|
||||
} else if isValidFilterNameSuffix(filterRule.Name) {
|
||||
return ErrFilterNameSuffix
|
||||
} else {
|
||||
return ErrFilterNameInvalid
|
||||
}
|
||||
}
|
||||
|
||||
if !IsValidObjectPrefix(filterRule.Value) {
|
||||
return ErrFilterValueInvalid
|
||||
}
|
||||
|
||||
// Set the new rule name to keep track of duplicates.
|
||||
ruleSetMap[filterRule.Name] = filterRule.Value
|
||||
}
|
||||
// Success all prefixes validated.
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// Checks validity of input ARN for a given arnType.
|
||||
func checkARN(arn, arnType string) APIErrorCode {
|
||||
if !strings.HasPrefix(arn, arnType) {
|
||||
return ErrARNNotification
|
||||
}
|
||||
strs := strings.SplitN(arn, ":", -1)
|
||||
if len(strs) != 6 {
|
||||
return ErrARNNotification
|
||||
}
|
||||
|
||||
// Server region is allowed to be empty by default,
|
||||
// in such a scenario ARN region is not validating
|
||||
// allowing all regions.
|
||||
if sregion := globalServerConfig.GetRegion(); sregion != "" {
|
||||
region := strs[3]
|
||||
if region != sregion {
|
||||
return ErrRegionNotification
|
||||
}
|
||||
}
|
||||
accountID := strs[4]
|
||||
resource := strs[5]
|
||||
if accountID == "" || resource == "" {
|
||||
return ErrARNNotification
|
||||
}
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// checkQueueARN - check if the queue arn is valid.
// Thin wrapper: delegates to checkARN with the minio SQS ARN
// prefix constant (minioSqs).
func checkQueueARN(queueARN string) APIErrorCode {
	return checkARN(queueARN, minioSqs)
}
|
||||
|
||||
// Validates account id for input queue ARN.
|
||||
func isValidQueueID(queueARN string) bool {
|
||||
// Unmarshals QueueARN into structured object.
|
||||
sqsARN := unmarshalSqsARN(queueARN)
|
||||
// Is Queue identifier valid?.
|
||||
|
||||
if isAMQPQueue(sqsARN) { // AMQP eueue.
|
||||
amqpN := globalServerConfig.Notify.GetAMQPByID(sqsARN.AccountID)
|
||||
return amqpN.Enable && amqpN.URL != ""
|
||||
} else if isMQTTQueue(sqsARN) {
|
||||
mqttN := globalServerConfig.Notify.GetMQTTByID(sqsARN.AccountID)
|
||||
return mqttN.Enable && mqttN.Broker != ""
|
||||
} else if isNATSQueue(sqsARN) {
|
||||
natsN := globalServerConfig.Notify.GetNATSByID(sqsARN.AccountID)
|
||||
return natsN.Enable && natsN.Address != ""
|
||||
} else if isElasticQueue(sqsARN) { // Elastic queue.
|
||||
elasticN := globalServerConfig.Notify.GetElasticSearchByID(sqsARN.AccountID)
|
||||
return elasticN.Enable && elasticN.URL != ""
|
||||
} else if isRedisQueue(sqsARN) { // Redis queue.
|
||||
redisN := globalServerConfig.Notify.GetRedisByID(sqsARN.AccountID)
|
||||
return redisN.Enable && redisN.Addr != ""
|
||||
} else if isPostgreSQLQueue(sqsARN) {
|
||||
pgN := globalServerConfig.Notify.GetPostgreSQLByID(sqsARN.AccountID)
|
||||
// Postgres can work with only default conn. info.
|
||||
return pgN.Enable
|
||||
} else if isMySQLQueue(sqsARN) {
|
||||
msqlN := globalServerConfig.Notify.GetMySQLByID(sqsARN.AccountID)
|
||||
// Mysql can work with only default conn. info.
|
||||
return msqlN.Enable
|
||||
} else if isKafkaQueue(sqsARN) {
|
||||
kafkaN := globalServerConfig.Notify.GetKafkaByID(sqsARN.AccountID)
|
||||
return (kafkaN.Enable && len(kafkaN.Brokers) > 0 &&
|
||||
kafkaN.Topic != "")
|
||||
} else if isWebhookQueue(sqsARN) {
|
||||
webhookN := globalServerConfig.Notify.GetWebhookByID(sqsARN.AccountID)
|
||||
return webhookN.Enable && webhookN.Endpoint != ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Check - validates queue configuration and returns error if any.
|
||||
func checkQueueConfig(qConfig queueConfig) APIErrorCode {
|
||||
// Check queue arn is valid.
|
||||
if s3Error := checkQueueARN(qConfig.QueueARN); s3Error != ErrNone {
|
||||
return s3Error
|
||||
}
|
||||
|
||||
// Validate if the account ID is correct.
|
||||
if !isValidQueueID(qConfig.QueueARN) {
|
||||
return ErrARNNotification
|
||||
}
|
||||
|
||||
// Check if valid events are set in queue config.
|
||||
if s3Error := checkEvents(qConfig.Events); s3Error != ErrNone {
|
||||
return s3Error
|
||||
}
|
||||
|
||||
// Check if valid filters are set in queue config.
|
||||
if s3Error := checkFilterRules(qConfig.Filter.Key.FilterRules); s3Error != ErrNone {
|
||||
return s3Error
|
||||
}
|
||||
|
||||
// Success.
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// Validates all incoming queue configs, checkQueueConfig validates if the
|
||||
// input fields for each queues is not malformed and has valid configuration
|
||||
// information. If validation fails bucket notifications are not enabled.
|
||||
func validateQueueConfigs(queueConfigs []queueConfig) APIErrorCode {
|
||||
for _, qConfig := range queueConfigs {
|
||||
if s3Error := checkQueueConfig(qConfig); s3Error != ErrNone {
|
||||
return s3Error
|
||||
}
|
||||
}
|
||||
// Success.
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// Check all the queue configs for any duplicates.
|
||||
func checkDuplicateQueueConfigs(configs []queueConfig) APIErrorCode {
|
||||
queueConfigARNS := set.NewStringSet()
|
||||
|
||||
// Navigate through each configs and count the entries.
|
||||
for _, config := range configs {
|
||||
queueConfigARNS.Add(config.QueueARN)
|
||||
}
|
||||
|
||||
if len(queueConfigARNS) != len(configs) {
|
||||
return ErrOverlappingConfigs
|
||||
}
|
||||
|
||||
// Success.
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// Validates all the bucket notification configuration for their validity,
|
||||
// if one of the config is malformed or has invalid data it is rejected.
|
||||
// Configuration is never applied partially.
|
||||
func validateNotificationConfig(nConfig notificationConfig) APIErrorCode {
|
||||
// Minio server does not support lambda/topic configurations
|
||||
// currently. Such configuration is rejected.
|
||||
if len(nConfig.LambdaConfigs) > 0 || len(nConfig.TopicConfigs) > 0 {
|
||||
return ErrUnsupportedNotification
|
||||
}
|
||||
|
||||
// Validate all queue configs.
|
||||
if s3Error := validateQueueConfigs(nConfig.QueueConfigs); s3Error != ErrNone {
|
||||
return s3Error
|
||||
}
|
||||
|
||||
// Check for duplicate queue configs.
|
||||
if len(nConfig.QueueConfigs) > 1 {
|
||||
if s3Error := checkDuplicateQueueConfigs(nConfig.QueueConfigs); s3Error != ErrNone {
|
||||
return s3Error
|
||||
}
|
||||
}
|
||||
|
||||
// Add validation for other configurations.
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// Unmarshals input value of AWS ARN format into minioSqs object.
|
||||
// Returned value represents minio sqs types, currently supported are
|
||||
// - amqp
|
||||
// - mqtt
|
||||
// - nats
|
||||
// - elasticsearch
|
||||
// - redis
|
||||
// - postgresql
|
||||
// - mysql
|
||||
// - kafka
|
||||
// - webhook
|
||||
func unmarshalSqsARN(queueARN string) (mSqs arnSQS) {
|
||||
strs := strings.SplitN(queueARN, ":", -1)
|
||||
if len(strs) != 6 {
|
||||
return
|
||||
}
|
||||
|
||||
// Server region is allowed to be empty by default,
|
||||
// in such a scenario ARN region is not validating
|
||||
// allowing all regions.
|
||||
if sregion := globalServerConfig.GetRegion(); sregion != "" {
|
||||
region := strs[3]
|
||||
if region != sregion {
|
||||
return
|
||||
}
|
||||
}
|
||||
sqsType := strs[5]
|
||||
switch sqsType {
|
||||
case queueTypeAMQP:
|
||||
mSqs.Type = queueTypeAMQP
|
||||
case queueTypeMQTT:
|
||||
mSqs.Type = queueTypeMQTT
|
||||
case queueTypeNATS:
|
||||
mSqs.Type = queueTypeNATS
|
||||
case queueTypeElastic:
|
||||
mSqs.Type = queueTypeElastic
|
||||
case queueTypeRedis:
|
||||
mSqs.Type = queueTypeRedis
|
||||
case queueTypePostgreSQL:
|
||||
mSqs.Type = queueTypePostgreSQL
|
||||
case queueTypeMySQL:
|
||||
mSqs.Type = queueTypeMySQL
|
||||
case queueTypeKafka:
|
||||
mSqs.Type = queueTypeKafka
|
||||
case queueTypeWebhook:
|
||||
mSqs.Type = queueTypeWebhook
|
||||
default:
|
||||
errorIf(errors.New("invalid SQS type"), "SQS type: %s", sqsType)
|
||||
} // Add more queues here.
|
||||
|
||||
mSqs.AccountID = strs[4]
|
||||
|
||||
return
|
||||
}
|
|
@ -1,420 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test validates for duplicate configs.
|
||||
func TestCheckDuplicateConfigs(t *testing.T) {
|
||||
testCases := []struct {
|
||||
qConfigs []queueConfig
|
||||
expectedErrCode APIErrorCode
|
||||
}{
|
||||
// Error for duplicate queue configs.
|
||||
{
|
||||
qConfigs: []queueConfig{
|
||||
{
|
||||
QueueARN: "arn:minio:sqs:us-east-1:1:redis",
|
||||
},
|
||||
{
|
||||
QueueARN: "arn:minio:sqs:us-east-1:1:redis",
|
||||
},
|
||||
},
|
||||
expectedErrCode: ErrOverlappingConfigs,
|
||||
},
|
||||
// Valid queue configs.
|
||||
{
|
||||
qConfigs: []queueConfig{
|
||||
{
|
||||
QueueARN: "arn:minio:sqs:us-east-1:1:redis",
|
||||
},
|
||||
},
|
||||
expectedErrCode: ErrNone,
|
||||
},
|
||||
}
|
||||
|
||||
// ... validate for duplicate queue configs.
|
||||
for i, testCase := range testCases {
|
||||
errCode := checkDuplicateQueueConfigs(testCase.qConfigs)
|
||||
if errCode != testCase.expectedErrCode {
|
||||
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.expectedErrCode, errCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests for validating filter rules.
|
||||
func TestCheckFilterRules(t *testing.T) {
|
||||
testCases := []struct {
|
||||
rules []filterRule
|
||||
expectedErrCode APIErrorCode
|
||||
}{
|
||||
// Valid prefix and suffix values.
|
||||
{
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: "test/test1",
|
||||
},
|
||||
{
|
||||
Name: "suffix",
|
||||
Value: ".jpg",
|
||||
},
|
||||
},
|
||||
expectedErrCode: ErrNone,
|
||||
},
|
||||
// Invalid filter name.
|
||||
{
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "unknown",
|
||||
Value: "test/test1",
|
||||
},
|
||||
},
|
||||
expectedErrCode: ErrFilterNameInvalid,
|
||||
},
|
||||
// Cannot have duplicate prefixes.
|
||||
{
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: "test/test1",
|
||||
},
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: "test/test1",
|
||||
},
|
||||
},
|
||||
expectedErrCode: ErrFilterNamePrefix,
|
||||
},
|
||||
// Cannot have duplicate suffixes.
|
||||
{
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "suffix",
|
||||
Value: ".jpg",
|
||||
},
|
||||
{
|
||||
Name: "suffix",
|
||||
Value: ".txt",
|
||||
},
|
||||
},
|
||||
expectedErrCode: ErrFilterNameSuffix,
|
||||
},
|
||||
// Filter value cannot be bigger than > 1024.
|
||||
{
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: strings.Repeat("a", 1025),
|
||||
},
|
||||
},
|
||||
expectedErrCode: ErrFilterValueInvalid,
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
errCode := checkFilterRules(testCase.rules)
|
||||
if errCode != testCase.expectedErrCode {
|
||||
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.expectedErrCode, errCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests filter name validation.
|
||||
func TestIsValidFilterName(t *testing.T) {
|
||||
testCases := []struct {
|
||||
filterName string
|
||||
status bool
|
||||
}{
|
||||
// Validate if 'prefix' is correct.
|
||||
{
|
||||
filterName: "prefix",
|
||||
status: true,
|
||||
},
|
||||
// Validate if 'suffix' is correct.
|
||||
{
|
||||
filterName: "suffix",
|
||||
status: true,
|
||||
},
|
||||
// Invalid filter name empty string should return false.
|
||||
{
|
||||
filterName: "",
|
||||
status: false,
|
||||
},
|
||||
// Invalid filter name random character should return false.
|
||||
{
|
||||
filterName: "unknown",
|
||||
status: false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
status := isValidFilterName(testCase.filterName)
|
||||
if testCase.status != status {
|
||||
t.Errorf("Test %d: Expected \"%t\", got \"%t\"", i+1, testCase.status, status)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests list of valid and invalid events.
|
||||
func TestValidEvents(t *testing.T) {
|
||||
testCases := []struct {
|
||||
events []string
|
||||
errCode APIErrorCode
|
||||
}{
|
||||
// Return error for unknown event element.
|
||||
{
|
||||
events: []string{
|
||||
"s3:UnknownAPI",
|
||||
},
|
||||
errCode: ErrEventNotification,
|
||||
},
|
||||
// Return success for supported event.
|
||||
{
|
||||
events: []string{
|
||||
"s3:ObjectCreated:Put",
|
||||
},
|
||||
errCode: ErrNone,
|
||||
},
|
||||
// Return success for supported events.
|
||||
{
|
||||
events: []string{
|
||||
"s3:ObjectCreated:*",
|
||||
"s3:ObjectRemoved:*",
|
||||
},
|
||||
errCode: ErrNone,
|
||||
},
|
||||
// Return error for empty event list.
|
||||
{
|
||||
events: []string{""},
|
||||
errCode: ErrEventNotification,
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
errCode := checkEvents(testCase.events)
|
||||
if testCase.errCode != errCode {
|
||||
t.Errorf("Test %d: Expected \"%d\", got \"%d\"", i+1, testCase.errCode, errCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests queue arn validation.
|
||||
func TestQueueARN(t *testing.T) {
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("unable initialize config file, %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
testCases := []struct {
|
||||
queueARN string
|
||||
errCode APIErrorCode
|
||||
}{
|
||||
|
||||
// Valid webhook queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:webhook",
|
||||
errCode: ErrNone,
|
||||
},
|
||||
// Valid redis queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:redis",
|
||||
errCode: ErrNone,
|
||||
},
|
||||
// Valid elasticsearch queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:elasticsearch",
|
||||
errCode: ErrNone,
|
||||
},
|
||||
// Valid amqp queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:amqp",
|
||||
errCode: ErrNone,
|
||||
},
|
||||
// Invalid empty queue arn.
|
||||
{
|
||||
queueARN: "",
|
||||
errCode: ErrARNNotification,
|
||||
},
|
||||
// Invalid notification service type.
|
||||
{
|
||||
queueARN: "arn:minio:sns:us-east-1:1:listen",
|
||||
errCode: ErrARNNotification,
|
||||
},
|
||||
// Invalid queue name empty in queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:",
|
||||
errCode: ErrARNNotification,
|
||||
},
|
||||
// Invalid queue id empty in queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1::redis",
|
||||
errCode: ErrARNNotification,
|
||||
},
|
||||
// Invalid queue id and queue name empty in queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1::",
|
||||
errCode: ErrARNNotification,
|
||||
},
|
||||
// Missing queue id and separator missing at the end in queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:amqp",
|
||||
errCode: ErrARNNotification,
|
||||
},
|
||||
// Missing queue id and empty string at the end in queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:",
|
||||
errCode: ErrARNNotification,
|
||||
},
|
||||
}
|
||||
|
||||
// Validate all tests for queue arn.
|
||||
for i, testCase := range testCases {
|
||||
errCode := checkQueueARN(testCase.queueARN)
|
||||
if testCase.errCode != errCode {
|
||||
t.Errorf("Test %d: Expected \"%d\", got \"%d\"", i+1, testCase.errCode, errCode)
|
||||
}
|
||||
}
|
||||
|
||||
// Test when server region is set.
|
||||
rootPath, err = newTestConfig("us-east-1")
|
||||
if err != nil {
|
||||
t.Fatalf("unable initialize config file, %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
testCases = []struct {
|
||||
queueARN string
|
||||
errCode APIErrorCode
|
||||
}{
|
||||
// Incorrect region should produce error.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-west-1:1:webhook",
|
||||
errCode: ErrRegionNotification,
|
||||
},
|
||||
// Correct region should not produce error.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:webhook",
|
||||
errCode: ErrNone,
|
||||
},
|
||||
}
|
||||
|
||||
// Validate all tests for queue arn.
|
||||
for i, testCase := range testCases {
|
||||
errCode := checkQueueARN(testCase.queueARN)
|
||||
if testCase.errCode != errCode {
|
||||
t.Errorf("Test %d: Expected \"%d\", got \"%d\"", i+1, testCase.errCode, errCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test unmarshal queue arn.
|
||||
func TestUnmarshalSQSARN(t *testing.T) {
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("unable initialize config file, %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
testCases := []struct {
|
||||
queueARN string
|
||||
Type string
|
||||
}{
|
||||
// Valid webhook queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:webhook",
|
||||
Type: "webhook",
|
||||
},
|
||||
// Valid redis queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:redis",
|
||||
Type: "redis",
|
||||
},
|
||||
// Valid elasticsearch queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:elasticsearch",
|
||||
Type: "elasticsearch",
|
||||
},
|
||||
// Valid amqp queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:amqp",
|
||||
Type: "amqp",
|
||||
},
|
||||
// Valid mqtt queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:mqtt",
|
||||
Type: "mqtt",
|
||||
},
|
||||
// Invalid empty queue arn.
|
||||
{
|
||||
queueARN: "",
|
||||
Type: "",
|
||||
},
|
||||
// Partial queue arn.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:",
|
||||
Type: "",
|
||||
},
|
||||
// Invalid queue service value.
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:*",
|
||||
Type: "",
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
mSqs := unmarshalSqsARN(testCase.queueARN)
|
||||
if testCase.Type != mSqs.Type {
|
||||
t.Errorf("Test %d: Expected \"%s\", got \"%s\"", i+1, testCase.Type, mSqs.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// Test when the server region is set.
|
||||
rootPath, err = newTestConfig("us-east-1")
|
||||
if err != nil {
|
||||
t.Fatalf("unable initialize config file, %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
testCases = []struct {
|
||||
queueARN string
|
||||
Type string
|
||||
}{
|
||||
// Incorrect region in ARN returns empty mSqs.Type
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-west-1:1:webhook",
|
||||
Type: "",
|
||||
},
|
||||
// Correct regionin ARN returns valid mSqs.Type
|
||||
{
|
||||
queueARN: "arn:minio:sqs:us-east-1:1:webhook",
|
||||
Type: "webhook",
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
mSqs := unmarshalSqsARN(testCase.queueARN)
|
||||
if testCase.Type != mSqs.Type {
|
||||
t.Errorf("Test %d: Expected \"%s\", got \"%s\"", i+1, testCase.Type, mSqs.Type)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -285,6 +285,10 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
|
|||
return
|
||||
}
|
||||
|
||||
for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) {
|
||||
errorIf(err, "unable to update policy change in remote peer %v", addr)
|
||||
}
|
||||
|
||||
// Success.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
@ -322,6 +326,10 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
|
|||
return
|
||||
}
|
||||
|
||||
for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) {
|
||||
errorIf(err, "unable to update policy change in remote peer %v", addr)
|
||||
}
|
||||
|
||||
// Success.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -31,7 +31,7 @@ import (
|
|||
const (
|
||||
// Static prefix to be used while constructing bucket ARN.
|
||||
// refer to S3 docs for more info.
|
||||
bucketARNPrefix = "arn:" + eventSource + ":::"
|
||||
bucketARNPrefix = "arn:aws:s3:::"
|
||||
|
||||
// Bucket policy config name.
|
||||
bucketPolicyConfig = "policy.json"
|
||||
|
@ -202,7 +202,5 @@ func persistAndNotifyBucketPolicyChange(bucket string, isRemove bool, bktPolicy
|
|||
}
|
||||
}
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketPolicy(bucket)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -24,6 +24,8 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
"github.com/minio/minio/pkg/quick"
|
||||
"github.com/tidwall/gjson"
|
||||
)
|
||||
|
@ -129,7 +131,7 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string {
|
|||
return "AMQP Notification configuration differs"
|
||||
case !reflect.DeepEqual(s.Notify.NATS, t.Notify.NATS):
|
||||
return "NATS Notification configuration differs"
|
||||
case !reflect.DeepEqual(s.Notify.ElasticSearch, t.Notify.ElasticSearch):
|
||||
case !reflect.DeepEqual(s.Notify.Elasticsearch, t.Notify.Elasticsearch):
|
||||
return "ElasticSearch Notification configuration differs"
|
||||
case !reflect.DeepEqual(s.Notify.Redis, t.Notify.Redis):
|
||||
return "Redis Notification configuration differs"
|
||||
|
@ -166,24 +168,24 @@ func newServerConfig() *serverConfig {
|
|||
}
|
||||
|
||||
// Make sure to initialize notification configs.
|
||||
srvCfg.Notify.AMQP = make(map[string]amqpNotify)
|
||||
srvCfg.Notify.AMQP["1"] = amqpNotify{}
|
||||
srvCfg.Notify.MQTT = make(map[string]mqttNotify)
|
||||
srvCfg.Notify.MQTT["1"] = mqttNotify{}
|
||||
srvCfg.Notify.ElasticSearch = make(map[string]elasticSearchNotify)
|
||||
srvCfg.Notify.ElasticSearch["1"] = elasticSearchNotify{}
|
||||
srvCfg.Notify.Redis = make(map[string]redisNotify)
|
||||
srvCfg.Notify.Redis["1"] = redisNotify{}
|
||||
srvCfg.Notify.NATS = make(map[string]natsNotify)
|
||||
srvCfg.Notify.NATS["1"] = natsNotify{}
|
||||
srvCfg.Notify.PostgreSQL = make(map[string]postgreSQLNotify)
|
||||
srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{}
|
||||
srvCfg.Notify.MySQL = make(map[string]mySQLNotify)
|
||||
srvCfg.Notify.MySQL["1"] = mySQLNotify{}
|
||||
srvCfg.Notify.Kafka = make(map[string]kafkaNotify)
|
||||
srvCfg.Notify.Kafka["1"] = kafkaNotify{}
|
||||
srvCfg.Notify.Webhook = make(map[string]webhookNotify)
|
||||
srvCfg.Notify.Webhook["1"] = webhookNotify{}
|
||||
srvCfg.Notify.AMQP = make(map[string]target.AMQPArgs)
|
||||
srvCfg.Notify.AMQP["1"] = target.AMQPArgs{}
|
||||
srvCfg.Notify.MQTT = make(map[string]target.MQTTArgs)
|
||||
srvCfg.Notify.MQTT["1"] = target.MQTTArgs{}
|
||||
srvCfg.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs)
|
||||
srvCfg.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{}
|
||||
srvCfg.Notify.Redis = make(map[string]target.RedisArgs)
|
||||
srvCfg.Notify.Redis["1"] = target.RedisArgs{}
|
||||
srvCfg.Notify.NATS = make(map[string]target.NATSArgs)
|
||||
srvCfg.Notify.NATS["1"] = target.NATSArgs{}
|
||||
srvCfg.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs)
|
||||
srvCfg.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{}
|
||||
srvCfg.Notify.MySQL = make(map[string]target.MySQLArgs)
|
||||
srvCfg.Notify.MySQL["1"] = target.MySQLArgs{}
|
||||
srvCfg.Notify.Kafka = make(map[string]target.KafkaArgs)
|
||||
srvCfg.Notify.Kafka["1"] = target.KafkaArgs{}
|
||||
srvCfg.Notify.Webhook = make(map[string]target.WebhookArgs)
|
||||
srvCfg.Notify.Webhook["1"] = target.WebhookArgs{}
|
||||
|
||||
return srvCfg
|
||||
}
|
||||
|
@ -310,11 +312,6 @@ func getValidConfig() (*serverConfig, error) {
|
|||
return nil, errors.New("invalid credential in config file " + configFile)
|
||||
}
|
||||
|
||||
// Validate notify field
|
||||
if err = srvCfg.Notify.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return srvCfg, nil
|
||||
}
|
||||
|
||||
|
@ -369,3 +366,119 @@ func loadConfig() error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
|
||||
// A new notification target is added like below
|
||||
// * Add a new target in pkg/event/target package.
|
||||
// * Add newly added target configuration to serverConfig.Notify.<TARGET_NAME>.
|
||||
// * Handle the configuration in this function to create/add into TargetList.
|
||||
func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
|
||||
targetList := event.NewTargetList()
|
||||
|
||||
for id, args := range config.Notify.AMQP {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewAMQPTarget(id, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.Elasticsearch {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewElasticsearchTarget(id, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.Kafka {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewKafkaTarget(id, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.MQTT {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewMQTTTarget(id, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.MySQL {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewMySQLTarget(id, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.NATS {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewNATSTarget(id, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.PostgreSQL {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewPostgreSQLTarget(id, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.Redis {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewRedisTarget(id, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.Webhook {
|
||||
if args.Enable {
|
||||
newTarget := target.NewWebhookTarget(id, args)
|
||||
if err := targetList.Add(newTarget); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return targetList, nil
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -20,10 +20,10 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
"github.com/tidwall/gjson"
|
||||
)
|
||||
|
||||
|
@ -45,55 +45,6 @@ func TestServerConfig(t *testing.T) {
|
|||
t.Errorf("Expecting region `us-west-1` found %s", globalServerConfig.GetRegion())
|
||||
}
|
||||
|
||||
// Set new amqp notification id.
|
||||
globalServerConfig.Notify.SetAMQPByID("2", amqpNotify{})
|
||||
savedNotifyCfg1 := globalServerConfig.Notify.GetAMQPByID("2")
|
||||
if !reflect.DeepEqual(savedNotifyCfg1, amqpNotify{}) {
|
||||
t.Errorf("Expecting AMQP config %#v found %#v", amqpNotify{}, savedNotifyCfg1)
|
||||
}
|
||||
|
||||
// Set new elastic search notification id.
|
||||
globalServerConfig.Notify.SetElasticSearchByID("2", elasticSearchNotify{})
|
||||
savedNotifyCfg2 := globalServerConfig.Notify.GetElasticSearchByID("2")
|
||||
if !reflect.DeepEqual(savedNotifyCfg2, elasticSearchNotify{}) {
|
||||
t.Errorf("Expecting Elasticsearch config %#v found %#v", elasticSearchNotify{}, savedNotifyCfg2)
|
||||
}
|
||||
|
||||
// Set new redis notification id.
|
||||
globalServerConfig.Notify.SetRedisByID("2", redisNotify{})
|
||||
savedNotifyCfg3 := globalServerConfig.Notify.GetRedisByID("2")
|
||||
if !reflect.DeepEqual(savedNotifyCfg3, redisNotify{}) {
|
||||
t.Errorf("Expecting Redis config %#v found %#v", redisNotify{}, savedNotifyCfg3)
|
||||
}
|
||||
|
||||
// Set new kafka notification id.
|
||||
globalServerConfig.Notify.SetKafkaByID("2", kafkaNotify{})
|
||||
savedNotifyCfg4 := globalServerConfig.Notify.GetKafkaByID("2")
|
||||
if !reflect.DeepEqual(savedNotifyCfg4, kafkaNotify{}) {
|
||||
t.Errorf("Expecting Kafka config %#v found %#v", kafkaNotify{}, savedNotifyCfg4)
|
||||
}
|
||||
|
||||
// Set new Webhook notification id.
|
||||
globalServerConfig.Notify.SetWebhookByID("2", webhookNotify{})
|
||||
savedNotifyCfg5 := globalServerConfig.Notify.GetWebhookByID("2")
|
||||
if !reflect.DeepEqual(savedNotifyCfg5, webhookNotify{}) {
|
||||
t.Errorf("Expecting Webhook config %#v found %#v", webhookNotify{}, savedNotifyCfg5)
|
||||
}
|
||||
|
||||
// Set new MySQL notification id.
|
||||
globalServerConfig.Notify.SetMySQLByID("2", mySQLNotify{})
|
||||
savedNotifyCfg6 := globalServerConfig.Notify.GetMySQLByID("2")
|
||||
if !reflect.DeepEqual(savedNotifyCfg6, mySQLNotify{}) {
|
||||
t.Errorf("Expecting Webhook config %#v found %#v", mySQLNotify{}, savedNotifyCfg6)
|
||||
}
|
||||
|
||||
// Set new MQTT notification id.
|
||||
globalServerConfig.Notify.SetMQTTByID("2", mqttNotify{})
|
||||
savedNotifyCfg7 := globalServerConfig.Notify.GetMQTTByID("2")
|
||||
if !reflect.DeepEqual(savedNotifyCfg7, mqttNotify{}) {
|
||||
t.Errorf("Expecting Webhook config %#v found %#v", mqttNotify{}, savedNotifyCfg7)
|
||||
}
|
||||
|
||||
// Match version.
|
||||
if globalServerConfig.GetVersion() != serverConfigVersion {
|
||||
t.Errorf("Expecting version %s found %s", globalServerConfig.GetVersion(), serverConfigVersion)
|
||||
|
@ -252,55 +203,55 @@ func TestValidateConfig(t *testing.T) {
|
|||
{`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false},
|
||||
|
||||
// Test 11 - Test AMQP
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, true},
|
||||
|
||||
// Test 12 - Test NATS
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, true},
|
||||
|
||||
// Test 13 - Test ElasticSearch
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, true},
|
||||
|
||||
// Test 14 - Test Redis
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, true},
|
||||
|
||||
// Test 15 - Test PostgreSQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, true},
|
||||
|
||||
// Test 16 - Test Kafka
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, true},
|
||||
|
||||
// Test 17 - Test Webhook
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, true},
|
||||
|
||||
// Test 18 - Test MySQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, true},
|
||||
|
||||
// Test 19 - Test Format for MySQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
|
||||
|
||||
// Test 20 - Test valid Format for MySQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "namespace", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
|
||||
|
||||
// Test 21 - Test Format for PostgreSQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
|
||||
|
||||
// Test 22 - Test valid Format for PostgreSQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "namespace", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
|
||||
|
||||
// Test 23 - Test Format for ElasticSearch
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, true},
|
||||
|
||||
// Test 24 - Test valid Format for ElasticSearch
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex" } }}}`, true},
|
||||
|
||||
// Test 25 - Test Format for Redis
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
|
||||
|
||||
// Test 26 - Test valid Format for Redis
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
|
||||
|
||||
// Test 27 - Test MQTT
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
|
@ -345,56 +296,56 @@ func TestConfigDiff(t *testing.T) {
|
|||
},
|
||||
// 7
|
||||
{
|
||||
&serverConfig{Notify: notifier{AMQP: map[string]amqpNotify{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{AMQP: map[string]amqpNotify{"1": {Enable: false}}}},
|
||||
&serverConfig{Notify: notifier{AMQP: map[string]target.AMQPArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{AMQP: map[string]target.AMQPArgs{"1": {Enable: false}}}},
|
||||
"AMQP Notification configuration differs",
|
||||
},
|
||||
// 8
|
||||
{
|
||||
&serverConfig{Notify: notifier{NATS: map[string]natsNotify{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{NATS: map[string]natsNotify{"1": {Enable: false}}}},
|
||||
&serverConfig{Notify: notifier{NATS: map[string]target.NATSArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{NATS: map[string]target.NATSArgs{"1": {Enable: false}}}},
|
||||
"NATS Notification configuration differs",
|
||||
},
|
||||
// 9
|
||||
{
|
||||
&serverConfig{Notify: notifier{ElasticSearch: map[string]elasticSearchNotify{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{ElasticSearch: map[string]elasticSearchNotify{"1": {Enable: false}}}},
|
||||
&serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: false}}}},
|
||||
"ElasticSearch Notification configuration differs",
|
||||
},
|
||||
// 10
|
||||
{
|
||||
&serverConfig{Notify: notifier{Redis: map[string]redisNotify{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Redis: map[string]redisNotify{"1": {Enable: false}}}},
|
||||
&serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: false}}}},
|
||||
"Redis Notification configuration differs",
|
||||
},
|
||||
// 11
|
||||
{
|
||||
&serverConfig{Notify: notifier{PostgreSQL: map[string]postgreSQLNotify{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{PostgreSQL: map[string]postgreSQLNotify{"1": {Enable: false}}}},
|
||||
&serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: false}}}},
|
||||
"PostgreSQL Notification configuration differs",
|
||||
},
|
||||
// 12
|
||||
{
|
||||
&serverConfig{Notify: notifier{Kafka: map[string]kafkaNotify{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Kafka: map[string]kafkaNotify{"1": {Enable: false}}}},
|
||||
&serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: false}}}},
|
||||
"Kafka Notification configuration differs",
|
||||
},
|
||||
// 13
|
||||
{
|
||||
&serverConfig{Notify: notifier{Webhook: map[string]webhookNotify{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Webhook: map[string]webhookNotify{"1": {Enable: false}}}},
|
||||
&serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: false}}}},
|
||||
"Webhook Notification configuration differs",
|
||||
},
|
||||
// 14
|
||||
{
|
||||
&serverConfig{Notify: notifier{MySQL: map[string]mySQLNotify{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{MySQL: map[string]mySQLNotify{"1": {Enable: false}}}},
|
||||
&serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: false}}}},
|
||||
"MySQL Notification configuration differs",
|
||||
},
|
||||
// 15
|
||||
{
|
||||
&serverConfig{Notify: notifier{MQTT: map[string]mqttNotify{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{MQTT: map[string]mqttNotify{"1": {Enable: false}}}},
|
||||
&serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: false}}}},
|
||||
"MQTT Notification configuration differs",
|
||||
},
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -20,6 +20,7 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
)
|
||||
|
||||
/////////////////// Config V1 ///////////////////
|
||||
|
@ -226,23 +227,23 @@ type configV6 struct {
|
|||
// Notifier represents collection of supported notification queues in version
|
||||
// 1 without NATS streaming.
|
||||
type notifierV1 struct {
|
||||
AMQP map[string]amqpNotify `json:"amqp"`
|
||||
AMQP map[string]target.AMQPArgs `json:"amqp"`
|
||||
NATS map[string]natsNotifyV1 `json:"nats"`
|
||||
ElasticSearch map[string]elasticSearchNotify `json:"elasticsearch"`
|
||||
Redis map[string]redisNotify `json:"redis"`
|
||||
PostgreSQL map[string]postgreSQLNotify `json:"postgresql"`
|
||||
Kafka map[string]kafkaNotify `json:"kafka"`
|
||||
ElasticSearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
|
||||
Redis map[string]target.RedisArgs `json:"redis"`
|
||||
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
|
||||
Kafka map[string]target.KafkaArgs `json:"kafka"`
|
||||
}
|
||||
|
||||
// Notifier represents collection of supported notification queues in version 2
|
||||
// with NATS streaming but without webhook.
|
||||
type notifierV2 struct {
|
||||
AMQP map[string]amqpNotify `json:"amqp"`
|
||||
NATS map[string]natsNotify `json:"nats"`
|
||||
ElasticSearch map[string]elasticSearchNotify `json:"elasticsearch"`
|
||||
Redis map[string]redisNotify `json:"redis"`
|
||||
PostgreSQL map[string]postgreSQLNotify `json:"postgresql"`
|
||||
Kafka map[string]kafkaNotify `json:"kafka"`
|
||||
AMQP map[string]target.AMQPArgs `json:"amqp"`
|
||||
NATS map[string]target.NATSArgs `json:"nats"`
|
||||
ElasticSearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
|
||||
Redis map[string]target.RedisArgs `json:"redis"`
|
||||
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
|
||||
Kafka map[string]target.KafkaArgs `json:"kafka"`
|
||||
}
|
||||
|
||||
// configV7 server configuration version '7'.
|
||||
|
@ -368,6 +369,18 @@ type serverConfigV12 struct {
|
|||
Notify notifierV2 `json:"notify"`
|
||||
}
|
||||
|
||||
type notifier struct {
|
||||
AMQP map[string]target.AMQPArgs `json:"amqp"`
|
||||
Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
|
||||
Kafka map[string]target.KafkaArgs `json:"kafka"`
|
||||
MQTT map[string]target.MQTTArgs `json:"mqtt"`
|
||||
MySQL map[string]target.MySQLArgs `json:"mysql"`
|
||||
NATS map[string]target.NATSArgs `json:"nats"`
|
||||
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
|
||||
Redis map[string]target.RedisArgs `json:"redis"`
|
||||
Webhook map[string]target.WebhookArgs `json:"webhook"`
|
||||
}
|
||||
|
||||
// serverConfigV13 server configuration version '13' which is like
|
||||
// version '12' except it adds support for webhook notification.
|
||||
type serverConfigV13 struct {
|
||||
|
|
|
@ -462,9 +462,13 @@ func GetLocalPeer(endpoints EndpointList) (localPeer string) {
|
|||
}
|
||||
}
|
||||
if peerSet.IsEmpty() {
|
||||
// If local peer is empty can happen in FS or Erasure coded mode.
|
||||
// then set the value to globalMinioAddr instead.
|
||||
return globalMinioAddr
|
||||
// Local peer can be empty in FS or Erasure coded mode.
|
||||
// If so, return globalMinioHost + globalMinioPort value.
|
||||
if globalMinioHost != "" {
|
||||
return globalMinioHost + ":" + globalMinioPort
|
||||
}
|
||||
|
||||
return "127.0.0.1:" + globalMinioPort
|
||||
}
|
||||
return peerSet.ToSlice()[0]
|
||||
}
|
||||
|
|
|
@ -334,16 +334,19 @@ func TestCreateEndpoints(t *testing.T) {
|
|||
// is considered a remote service from localhost:9000 perspective.
|
||||
func TestGetLocalPeer(t *testing.T) {
|
||||
tempGlobalMinioAddr := globalMinioAddr
|
||||
tempGlobalMinioPort := globalMinioPort
|
||||
defer func() {
|
||||
globalMinioAddr = tempGlobalMinioAddr
|
||||
globalMinioPort = tempGlobalMinioPort
|
||||
}()
|
||||
globalMinioAddr = ":9000"
|
||||
globalMinioPort = "9000"
|
||||
|
||||
testCases := []struct {
|
||||
endpointArgs []string
|
||||
expectedResult string
|
||||
}{
|
||||
{[]string{"/d1", "/d2", "d3", "d4"}, ":9000"},
|
||||
{[]string{"/d1", "/d2", "d3", "d4"}, "127.0.0.1:9000"},
|
||||
{[]string{"http://localhost:9000/d1", "http://localhost:9000/d2", "http://example.org:9000/d3", "http://example.com:9000/d4"},
|
||||
"localhost:9000"},
|
||||
{[]string{"http://localhost:9000/d1", "http://example.org:9000/d2", "http://example.com:9000/d3", "http://example.net:9000/d4"},
|
||||
|
|
|
@ -1,804 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
)
|
||||
|
||||
const (
|
||||
minioEventSource = "minio:s3"
|
||||
)
|
||||
|
||||
type externalNotifier struct {
|
||||
// Per-bucket notification config. This is updated via
|
||||
// PutBucketNotification API.
|
||||
notificationConfigs map[string]*notificationConfig
|
||||
|
||||
// An external target keeps a connection to an external
|
||||
// service to which events are to be sent. It is a mapping
|
||||
// from an ARN to a log object
|
||||
targets map[string]*logrus.Logger
|
||||
|
||||
rwMutex *sync.RWMutex
|
||||
}
|
||||
|
||||
type internalNotifier struct {
|
||||
// per-bucket listener configuration. This is updated
|
||||
// when listeners connect or disconnect.
|
||||
listenerConfigs map[string][]listenerConfig
|
||||
|
||||
// An internal target is a peer Minio server, that is
|
||||
// connected to a listening client. Here, targets is a map of
|
||||
// listener ARN to log object.
|
||||
targets map[string]*listenerLogger
|
||||
|
||||
// Connected listeners is a map of listener ARNs to channels
|
||||
// on which the ListenBucket API handler go routine is waiting
|
||||
// for events to send to a client.
|
||||
connectedListeners map[string]*listenChan
|
||||
|
||||
rwMutex *sync.RWMutex
|
||||
}
|
||||
|
||||
// Global event notification configuration. This structure has state
|
||||
// about configured external notifications, and run-time configuration
|
||||
// for listener notifications.
|
||||
type eventNotifier struct {
|
||||
|
||||
// `external` here refers to notification configuration to
|
||||
// send events to supported external systems
|
||||
external externalNotifier
|
||||
|
||||
// `internal` refers to notification configuration for live
|
||||
// listening clients. Events for a client are send from all
|
||||
// servers, internally to a particular server that is
|
||||
// connected to the client.
|
||||
internal internalNotifier
|
||||
}
|
||||
|
||||
// Represents data to be sent with notification event.
|
||||
type eventData struct {
|
||||
Type EventName
|
||||
Bucket string
|
||||
ObjInfo ObjectInfo
|
||||
ReqParams map[string]string
|
||||
Host string
|
||||
Port string
|
||||
UserAgent string
|
||||
}
|
||||
|
||||
// New notification event constructs a new notification event message from
|
||||
// input request metadata which completed successfully.
|
||||
func newNotificationEvent(event eventData) NotificationEvent {
|
||||
getResponseOriginEndpointKey := func() string {
|
||||
host := globalMinioHost
|
||||
if host == "" {
|
||||
// FIXME: Send FQDN or hostname of this machine than sending IP address.
|
||||
host = localIP4.ToSlice()[0]
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s://%s:%s", getURLScheme(globalIsSSL), host, globalMinioPort)
|
||||
}
|
||||
|
||||
// Fetch the region.
|
||||
region := globalServerConfig.GetRegion()
|
||||
|
||||
// Fetch the credentials.
|
||||
creds := globalServerConfig.GetCredential()
|
||||
|
||||
// Time when Minio finished processing the request.
|
||||
eventTime := UTCNow()
|
||||
|
||||
// Fetch a hexadecimal representation of event time in nano seconds.
|
||||
uniqueID := mustGetRequestID(eventTime)
|
||||
|
||||
/// Construct a new object created event.
|
||||
|
||||
// Following blocks fills in all the necessary details of s3
|
||||
// event message structure.
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
|
||||
nEvent := NotificationEvent{
|
||||
EventVersion: eventVersion,
|
||||
EventSource: minioEventSource,
|
||||
AwsRegion: region,
|
||||
EventTime: eventTime.Format(timeFormatAMZ),
|
||||
EventName: event.Type.String(),
|
||||
UserIdentity: identity{creds.AccessKey},
|
||||
RequestParameters: event.ReqParams,
|
||||
ResponseElements: map[string]string{
|
||||
responseRequestIDKey: uniqueID,
|
||||
// Following is a custom response element to indicate
|
||||
// event origin server endpoint.
|
||||
responseOriginEndpointKey: getResponseOriginEndpointKey(),
|
||||
},
|
||||
S3: eventMeta{
|
||||
SchemaVersion: eventSchemaVersion,
|
||||
ConfigurationID: eventConfigID,
|
||||
Bucket: bucketMeta{
|
||||
Name: event.Bucket,
|
||||
OwnerIdentity: identity{creds.AccessKey},
|
||||
ARN: bucketARNPrefix + event.Bucket,
|
||||
},
|
||||
},
|
||||
Source: sourceInfo{
|
||||
Host: event.Host,
|
||||
Port: event.Port,
|
||||
UserAgent: event.UserAgent,
|
||||
},
|
||||
}
|
||||
|
||||
// Escape the object name. For example "red flower.jpg" becomes "red+flower.jpg".
|
||||
escapedObj := url.QueryEscape(event.ObjInfo.Name)
|
||||
|
||||
// For delete object event type, we do not need to set ETag and Size.
|
||||
if event.Type == ObjectRemovedDelete {
|
||||
nEvent.S3.Object = objectMeta{
|
||||
Key: escapedObj,
|
||||
VersionID: "1",
|
||||
Sequencer: uniqueID,
|
||||
}
|
||||
return nEvent
|
||||
}
|
||||
|
||||
// For all other events we should set ETag and Size.
|
||||
nEvent.S3.Object = objectMeta{
|
||||
Key: escapedObj,
|
||||
ETag: event.ObjInfo.ETag,
|
||||
Size: event.ObjInfo.Size,
|
||||
ContentType: event.ObjInfo.ContentType,
|
||||
UserMetadata: event.ObjInfo.UserDefined,
|
||||
VersionID: "1",
|
||||
Sequencer: uniqueID,
|
||||
}
|
||||
|
||||
// Success.
|
||||
return nEvent
|
||||
}
|
||||
|
||||
// Fetch all external targets. This returns a copy of the current map of
|
||||
// external notification targets.
|
||||
func (en eventNotifier) GetAllExternalTargets() map[string]*logrus.Logger {
|
||||
en.external.rwMutex.RLock()
|
||||
defer en.external.rwMutex.RUnlock()
|
||||
targetsCopy := make(map[string]*logrus.Logger)
|
||||
for k, v := range en.external.targets {
|
||||
targetsCopy[k] = v
|
||||
}
|
||||
return targetsCopy
|
||||
}
|
||||
|
||||
// Fetch the external target.
|
||||
func (en eventNotifier) GetExternalTarget(queueARN string) *logrus.Logger {
|
||||
en.external.rwMutex.RLock()
|
||||
defer en.external.rwMutex.RUnlock()
|
||||
return en.external.targets[queueARN]
|
||||
}
|
||||
|
||||
func (en eventNotifier) GetInternalTarget(arn string) *listenerLogger {
|
||||
en.internal.rwMutex.RLock()
|
||||
defer en.internal.rwMutex.RUnlock()
|
||||
return en.internal.targets[arn]
|
||||
}
|
||||
|
||||
// Set a new sns target for an input sns ARN.
|
||||
func (en *eventNotifier) AddListenerChan(snsARN string, listenerCh *listenChan) error {
|
||||
if listenerCh == nil {
|
||||
return errInvalidArgument
|
||||
}
|
||||
en.internal.rwMutex.Lock()
|
||||
defer en.internal.rwMutex.Unlock()
|
||||
en.internal.connectedListeners[snsARN] = listenerCh
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove sns target for an input sns ARN.
|
||||
func (en *eventNotifier) RemoveListenerChan(snsARN string) {
|
||||
en.internal.rwMutex.Lock()
|
||||
defer en.internal.rwMutex.Unlock()
|
||||
if en.internal.connectedListeners != nil {
|
||||
delete(en.internal.connectedListeners, snsARN)
|
||||
}
|
||||
}
|
||||
|
||||
func (en *eventNotifier) SendListenerEvent(arn string, event []NotificationEvent) error {
|
||||
en.internal.rwMutex.Lock()
|
||||
defer en.internal.rwMutex.Unlock()
|
||||
|
||||
listenChan, ok := en.internal.connectedListeners[arn]
|
||||
if ok {
|
||||
listenChan.sendNotificationEvent(event)
|
||||
}
|
||||
// If the channel is not present we ignore the event.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fetch bucket notification config for an input bucket.
|
||||
func (en eventNotifier) GetBucketNotificationConfig(bucket string) *notificationConfig {
|
||||
en.external.rwMutex.RLock()
|
||||
defer en.external.rwMutex.RUnlock()
|
||||
return en.external.notificationConfigs[bucket]
|
||||
}
|
||||
|
||||
func (en *eventNotifier) SetBucketNotificationConfig(bucket string, ncfg *notificationConfig) {
|
||||
en.external.rwMutex.Lock()
|
||||
if ncfg == nil {
|
||||
delete(en.external.notificationConfigs, bucket)
|
||||
} else {
|
||||
en.external.notificationConfigs[bucket] = ncfg
|
||||
}
|
||||
en.external.rwMutex.Unlock()
|
||||
}
|
||||
|
||||
func (en *eventNotifier) GetBucketListenerConfig(bucket string) []listenerConfig {
|
||||
en.internal.rwMutex.RLock()
|
||||
defer en.internal.rwMutex.RUnlock()
|
||||
return en.internal.listenerConfigs[bucket]
|
||||
}
|
||||
|
||||
func (en *eventNotifier) SetBucketListenerConfig(bucket string, lcfg []listenerConfig) error {
|
||||
en.internal.rwMutex.Lock()
|
||||
defer en.internal.rwMutex.Unlock()
|
||||
if len(lcfg) == 0 {
|
||||
delete(en.internal.listenerConfigs, bucket)
|
||||
} else {
|
||||
en.internal.listenerConfigs[bucket] = lcfg
|
||||
}
|
||||
for _, elcArr := range en.internal.listenerConfigs {
|
||||
for _, elcElem := range elcArr {
|
||||
currArn := elcElem.TopicConfig.TopicARN
|
||||
logger, err := newListenerLogger(currArn, elcElem.TargetServer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
en.internal.targets[currArn] = logger
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func eventNotifyForBucketNotifications(eventType, objectName, bucketName string, nEvent []NotificationEvent) {
|
||||
nConfig := globalEventNotifier.GetBucketNotificationConfig(bucketName)
|
||||
if nConfig == nil {
|
||||
return
|
||||
}
|
||||
// Validate if the event and object match the queue configs.
|
||||
for _, qConfig := range nConfig.QueueConfigs {
|
||||
eventMatch := eventMatch(eventType, qConfig.Events)
|
||||
ruleMatch := filterRuleMatch(objectName, qConfig.Filter.Key.FilterRules)
|
||||
if eventMatch && ruleMatch {
|
||||
targetLog := globalEventNotifier.GetExternalTarget(qConfig.QueueARN)
|
||||
if targetLog != nil {
|
||||
targetLog.WithFields(logrus.Fields{
|
||||
"Key": path.Join(bucketName, objectName),
|
||||
"EventType": eventType,
|
||||
"Records": nEvent,
|
||||
}).Info()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func eventNotifyForBucketListeners(eventType, objectName, bucketName string,
|
||||
nEvent []NotificationEvent) {
|
||||
lCfgs := globalEventNotifier.GetBucketListenerConfig(bucketName)
|
||||
if lCfgs == nil {
|
||||
return
|
||||
}
|
||||
// Validate if the event and object match listener configs
|
||||
for _, lcfg := range lCfgs {
|
||||
ruleMatch := filterRuleMatch(objectName, lcfg.TopicConfig.Filter.Key.FilterRules)
|
||||
eventMatch := eventMatch(eventType, lcfg.TopicConfig.Events)
|
||||
if eventMatch && ruleMatch {
|
||||
targetLog := globalEventNotifier.GetInternalTarget(
|
||||
lcfg.TopicConfig.TopicARN)
|
||||
if targetLog != nil && targetLog.log != nil {
|
||||
targetLog.log.WithFields(logrus.Fields{
|
||||
"Key": path.Join(bucketName, objectName),
|
||||
"EventType": eventType,
|
||||
"Records": nEvent,
|
||||
}).Info()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// eventNotify notifies an event to relevant targets based on their
|
||||
// bucket configuration (notifications and listeners).
|
||||
func eventNotify(event eventData) {
|
||||
if globalEventNotifier == nil {
|
||||
return
|
||||
}
|
||||
// Notifies a new event.
|
||||
// List of events reported through this function are
|
||||
// - s3:ObjectCreated:Put
|
||||
// - s3:ObjectCreated:Post
|
||||
// - s3:ObjectCreated:Copy
|
||||
// - s3:ObjectCreated:CompleteMultipartUpload
|
||||
// - s3:ObjectRemoved:Delete
|
||||
|
||||
// Event type.
|
||||
eventType := event.Type.String()
|
||||
|
||||
// Object name.
|
||||
objectName := event.ObjInfo.Name
|
||||
|
||||
// Save the notification event to be sent.
|
||||
notificationEvent := []NotificationEvent{newNotificationEvent(event)}
|
||||
|
||||
// Notify external targets.
|
||||
eventNotifyForBucketNotifications(eventType, objectName, event.Bucket, notificationEvent)
|
||||
|
||||
// Notify internal targets.
|
||||
eventNotifyForBucketListeners(eventType, objectName, event.Bucket, notificationEvent)
|
||||
}
|
||||
|
||||
// loads notification config if any for a given bucket, returns
|
||||
// structured notification config.
|
||||
func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationConfig, error) {
|
||||
// Construct the notification config path.
|
||||
ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
|
||||
|
||||
var buffer bytes.Buffer
|
||||
err := objAPI.GetObject(minioMetaBucket, ncPath, 0, -1, &buffer, "") // Read everything.
|
||||
if err != nil {
|
||||
// 'notification.xml' not found return
|
||||
// 'errNoSuchNotifications'. This is default when no
|
||||
// bucket notifications are found on the bucket.
|
||||
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
|
||||
return nil, errors.Trace(errNoSuchNotifications)
|
||||
}
|
||||
errorIf(err, "Unable to load bucket-notification for bucket %s", bucket)
|
||||
// Returns error for other errors.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if `notifications.xml` is empty we should return NoSuchNotifications.
|
||||
if buffer.Len() == 0 {
|
||||
return nil, errors.Trace(errNoSuchNotifications)
|
||||
}
|
||||
|
||||
// Unmarshal notification bytes.
|
||||
notificationConfigBytes := buffer.Bytes()
|
||||
notificationCfg := ¬ificationConfig{}
|
||||
// Unmarshal notification bytes only if we read data.
|
||||
if err = xml.Unmarshal(notificationConfigBytes, notificationCfg); err != nil {
|
||||
return nil, errors.Trace(err)
|
||||
}
|
||||
|
||||
// Return success.
|
||||
return notificationCfg, nil
|
||||
}
|
||||
|
||||
// loads notification config if any for a given bucket, returns
|
||||
// structured notification config.
|
||||
func loadListenerConfig(bucket string, objAPI ObjectLayer) ([]listenerConfig, error) {
|
||||
// in single node mode, there are no peers, so in this case
|
||||
// there is no configuration to load, as any previously
|
||||
// connected listen clients have been disconnected
|
||||
if !globalIsDistXL {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Construct the notification config path.
|
||||
lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
|
||||
|
||||
var buffer bytes.Buffer
|
||||
err := objAPI.GetObject(minioMetaBucket, lcPath, 0, -1, &buffer, "")
|
||||
if err != nil {
|
||||
// 'listener.json' not found return
|
||||
// 'errNoSuchNotifications'. This is default when no
|
||||
// bucket listeners are found on the bucket
|
||||
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
|
||||
return nil, errors.Trace(errNoSuchNotifications)
|
||||
}
|
||||
errorIf(err, "Unable to load bucket-listeners for bucket %s", bucket)
|
||||
// Returns error for other errors.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if `listener.json` is empty we should return NoSuchNotifications.
|
||||
if buffer.Len() == 0 {
|
||||
return nil, errors.Trace(errNoSuchNotifications)
|
||||
}
|
||||
|
||||
var lCfg []listenerConfig
|
||||
lConfigBytes := buffer.Bytes()
|
||||
if err = json.Unmarshal(lConfigBytes, &lCfg); err != nil {
|
||||
errorIf(err, "Unable to unmarshal listener config from JSON.")
|
||||
return nil, errors.Trace(err)
|
||||
}
|
||||
|
||||
// Return success.
|
||||
return lCfg, nil
|
||||
}
|
||||
|
||||
func persistNotificationConfig(bucket string, ncfg *notificationConfig, obj ObjectLayer) error {
|
||||
// marshal to xml
|
||||
buf, err := xml.Marshal(ncfg)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to marshal notification configuration into XML")
|
||||
return err
|
||||
}
|
||||
|
||||
// build path
|
||||
ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
|
||||
|
||||
// write object to path
|
||||
hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf))
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to write bucket notification configuration.")
|
||||
return err
|
||||
}
|
||||
_, err = obj.PutObject(minioMetaBucket, ncPath, hashReader, nil)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to write bucket notification configuration.")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Persists validated listener config to object layer.
|
||||
func persistListenerConfig(bucket string, lcfg []listenerConfig, obj ObjectLayer) error {
|
||||
buf, err := json.Marshal(lcfg)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to marshal listener config to JSON.")
|
||||
return err
|
||||
}
|
||||
|
||||
// build path
|
||||
lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
|
||||
|
||||
// write object to path
|
||||
hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf))
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to write bucket listener configuration to object layer.")
|
||||
return err
|
||||
}
|
||||
|
||||
// write object to path
|
||||
_, err = obj.PutObject(minioMetaBucket, lcPath, hashReader, nil)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to write bucket listener configuration to object layer.")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Removes notification.xml for a given bucket, only used during DeleteBucket.
|
||||
func removeNotificationConfig(bucket string, objAPI ObjectLayer) error {
|
||||
// Verify bucket is valid.
|
||||
if !IsValidBucketName(bucket) {
|
||||
return BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
|
||||
ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
|
||||
|
||||
return objAPI.DeleteObject(minioMetaBucket, ncPath)
|
||||
}
|
||||
|
||||
// Remove listener configuration from storage layer. Used when a bucket is deleted.
|
||||
func removeListenerConfig(bucket string, objAPI ObjectLayer) error {
|
||||
// make the path
|
||||
lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
|
||||
|
||||
return objAPI.DeleteObject(minioMetaBucket, lcPath)
|
||||
}
|
||||
|
||||
// Loads both notification and listener config.
|
||||
func loadNotificationAndListenerConfig(bucketName string, objAPI ObjectLayer) (nCfg *notificationConfig, lCfg []listenerConfig, err error) {
|
||||
// Loads notification config if any.
|
||||
nCfg, err = loadNotificationConfig(bucketName, objAPI)
|
||||
if err != nil && !errors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Loads listener config if any.
|
||||
lCfg, err = loadListenerConfig(bucketName, objAPI)
|
||||
if err != nil && !errors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
|
||||
return nil, nil, err
|
||||
}
|
||||
return nCfg, lCfg, nil
|
||||
}
|
||||
|
||||
// loads all bucket notifications if present.
|
||||
func loadAllBucketNotifications(objAPI ObjectLayer) (map[string]*notificationConfig, map[string][]listenerConfig, error) {
|
||||
// List buckets to proceed loading all notification configuration.
|
||||
buckets, err := objAPI.ListBuckets()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
nConfigs := make(map[string]*notificationConfig)
|
||||
lConfigs := make(map[string][]listenerConfig)
|
||||
|
||||
// Loads all bucket notifications.
|
||||
for _, bucket := range buckets {
|
||||
// Load persistent notification and listener configurations
|
||||
// a given bucket name.
|
||||
nConfigs[bucket.Name], lConfigs[bucket.Name], err = loadNotificationAndListenerConfig(bucket.Name, objAPI)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Success.
|
||||
return nConfigs, lConfigs, nil
|
||||
}
|
||||
|
||||
// addQueueTarget - calls newTargetFunc function and adds its returned value to queueTargets
|
||||
func addQueueTarget(queueTargets map[string]*logrus.Logger,
|
||||
accountID, queueType string,
|
||||
newTargetFunc func(string) (*logrus.Logger, error)) (string, error) {
|
||||
|
||||
// Construct the queue ARN for AMQP.
|
||||
queueARN := minioSqs + globalServerConfig.GetRegion() + ":" + accountID + ":" + queueType
|
||||
|
||||
// Queue target if already initialized we move to the next ARN.
|
||||
if _, ok := queueTargets[queueARN]; ok {
|
||||
return queueARN, nil
|
||||
}
|
||||
|
||||
// Using accountID we can now initialize a new AMQP logrus instance.
|
||||
logger, err := newTargetFunc(accountID)
|
||||
if err == nil {
|
||||
queueTargets[queueARN] = logger
|
||||
}
|
||||
|
||||
return queueARN, err
|
||||
}
|
||||
|
||||
// Loads all queue targets, initializes each queueARNs depending on their config.
|
||||
// Each instance of queueARN registers its own logrus to communicate with the
|
||||
// queue service. QueueARN once initialized is not initialized again for the
|
||||
// same queueARN, instead previous connection is used.
|
||||
func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
|
||||
queueTargets := make(map[string]*logrus.Logger)
|
||||
// Load all amqp targets, initialize their respective loggers.
|
||||
for accountID, amqpN := range globalServerConfig.Notify.GetAMQP() {
|
||||
if !amqpN.Enable {
|
||||
continue
|
||||
}
|
||||
|
||||
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeAMQP, newAMQPNotify); err != nil {
|
||||
if _, ok := err.(net.Error); ok {
|
||||
err = &net.OpError{
|
||||
Op: "Connecting to " + queueARN,
|
||||
Net: "tcp",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Load all mqtt targets, initialize their respective loggers.
|
||||
for accountID, mqttN := range globalServerConfig.Notify.GetMQTT() {
|
||||
if !mqttN.Enable {
|
||||
continue
|
||||
}
|
||||
|
||||
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeMQTT, newMQTTNotify); err != nil {
|
||||
if _, ok := err.(net.Error); ok {
|
||||
err = &net.OpError{
|
||||
Op: "Connecting to " + queueARN,
|
||||
Net: "tcp",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Load all nats targets, initialize their respective loggers.
|
||||
for accountID, natsN := range globalServerConfig.Notify.GetNATS() {
|
||||
if !natsN.Enable {
|
||||
continue
|
||||
}
|
||||
|
||||
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeNATS, newNATSNotify); err != nil {
|
||||
if _, ok := err.(net.Error); ok {
|
||||
err = &net.OpError{
|
||||
Op: "Connecting to " + queueARN,
|
||||
Net: "tcp",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Load redis targets, initialize their respective loggers.
|
||||
for accountID, redisN := range globalServerConfig.Notify.GetRedis() {
|
||||
if !redisN.Enable {
|
||||
continue
|
||||
}
|
||||
|
||||
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeRedis, newRedisNotify); err != nil {
|
||||
if _, ok := err.(net.Error); ok {
|
||||
err = &net.OpError{
|
||||
Op: "Connecting to " + queueARN,
|
||||
Net: "tcp",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Load Webhook targets, initialize their respective loggers.
|
||||
for accountID, webhookN := range globalServerConfig.Notify.GetWebhook() {
|
||||
if !webhookN.Enable {
|
||||
continue
|
||||
}
|
||||
if _, err := addQueueTarget(queueTargets, accountID, queueTypeWebhook, newWebhookNotify); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Load elastic targets, initialize their respective loggers.
|
||||
for accountID, elasticN := range globalServerConfig.Notify.GetElasticSearch() {
|
||||
if !elasticN.Enable {
|
||||
continue
|
||||
}
|
||||
|
||||
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeElastic, newElasticNotify); err != nil {
|
||||
if _, ok := err.(net.Error); ok {
|
||||
err = &net.OpError{
|
||||
Op: "Connecting to " + queueARN,
|
||||
Net: "tcp",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Load PostgreSQL targets, initialize their respective loggers.
|
||||
for accountID, pgN := range globalServerConfig.Notify.GetPostgreSQL() {
|
||||
if !pgN.Enable {
|
||||
continue
|
||||
}
|
||||
|
||||
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypePostgreSQL, newPostgreSQLNotify); err != nil {
|
||||
if _, ok := err.(net.Error); ok {
|
||||
err = &net.OpError{
|
||||
Op: "Connecting to " + queueARN,
|
||||
Net: "tcp",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Load MySQL targets, initialize their respective loggers.
|
||||
for accountID, msqlN := range globalServerConfig.Notify.GetMySQL() {
|
||||
if !msqlN.Enable {
|
||||
continue
|
||||
}
|
||||
|
||||
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeMySQL, newMySQLNotify); err != nil {
|
||||
if _, ok := err.(net.Error); ok {
|
||||
err = &net.OpError{
|
||||
Op: "Connecting to " + queueARN,
|
||||
Net: "tcp",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Load Kafka targets, initialize their respective loggers.
|
||||
for accountID, kafkaN := range globalServerConfig.Notify.GetKafka() {
|
||||
if !kafkaN.Enable {
|
||||
continue
|
||||
}
|
||||
|
||||
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeKafka, newKafkaNotify); err != nil {
|
||||
if _, ok := err.(net.Error); ok {
|
||||
err = &net.OpError{
|
||||
Op: "Connecting to " + queueARN,
|
||||
Net: "tcp",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Successfully initialized queue targets.
|
||||
return queueTargets, nil
|
||||
}
|
||||
|
||||
// globalEventNotifier is the global instance of the event notification
// queue; it is populated by initEventNotifier() and read by eventNotify().
var globalEventNotifier *eventNotifier
|
||||
|
||||
// Initialize event notifier.
|
||||
func initEventNotifier(objAPI ObjectLayer) error {
|
||||
if objAPI == nil {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
// Read all saved bucket notifications.
|
||||
nConfigs, lConfigs, err := loadAllBucketNotifications(objAPI)
|
||||
if err != nil {
|
||||
errorIf(err, "Error loading bucket notifications - %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Initializes all queue targets.
|
||||
queueTargets, err := loadAllQueueTargets()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Initialize internal listener targets
|
||||
listenTargets := make(map[string]*listenerLogger)
|
||||
for _, listeners := range lConfigs {
|
||||
for _, listener := range listeners {
|
||||
ln, err := newListenerLogger(
|
||||
listener.TopicConfig.TopicARN,
|
||||
listener.TargetServer,
|
||||
)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to initialize listener target logger.")
|
||||
//TODO: improve error
|
||||
return fmt.Errorf("Error initializing listner target logger - %v", err)
|
||||
}
|
||||
listenTargets[listener.TopicConfig.TopicARN] = ln
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize event notifier queue.
|
||||
globalEventNotifier = &eventNotifier{
|
||||
external: externalNotifier{
|
||||
notificationConfigs: nConfigs,
|
||||
targets: queueTargets,
|
||||
rwMutex: &sync.RWMutex{},
|
||||
},
|
||||
internal: internalNotifier{
|
||||
rwMutex: &sync.RWMutex{},
|
||||
targets: listenTargets,
|
||||
listenerConfigs: lConfigs,
|
||||
connectedListeners: make(map[string]*listenChan),
|
||||
},
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,586 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
)
|
||||
|
||||
// Test InitEventNotifier with faulty disks: after writing a valid
// notification.xml, every storage disk is replaced with one that always
// fails, so initEventNotifier must surface errFaultyDisk.
func TestInitEventNotifierFaultyDisks(t *testing.T) {
	// Prepare for tests
	rootPath, err := newTestConfig(globalMinioDefaultRegion)
	if err != nil {
		t.Fatalf("Init Test config failed")
	}
	// remove the root directory after the test ends.
	defer os.RemoveAll(rootPath)

	disks, err := getRandomDisks(16)
	if err != nil {
		t.Fatal("Unable to create directories for FS backend. ", err)
	}
	defer removeRoots(disks)
	obj, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
	if err != nil {
		t.Fatal("Unable to initialize FS backend.", err)
	}

	bucketName := "bucket"
	if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
		t.Fatal("Unexpected error:", err)
	}

	// Need the concrete XL type below to reach its storageDisks field.
	xl := obj.(*xlObjects)

	listenARN := "arn:minio:sns:us-east-1:1:listen"
	queueARN := "arn:minio:sqs:us-east-1:1:redis"

	// Write a notification.xml in the disk
	notificationXML := "<NotificationConfiguration>"
	notificationXML += "<TopicConfiguration><Event>s3:ObjectRemoved:*</Event><Event>s3:ObjectRemoved:*</Event><Topic>" + listenARN + "</Topic></TopicConfiguration>"
	notificationXML += "<QueueConfiguration><Event>s3:ObjectRemoved:*</Event><Event>s3:ObjectRemoved:*</Event><Queue>" + queueARN + "</Queue></QueueConfiguration>"
	notificationXML += "</NotificationConfiguration>"
	size := int64(len([]byte(notificationXML)))
	reader := bytes.NewReader([]byte(notificationXML))
	bucketConfigPath := bucketConfigPrefix + "/" + bucketName + "/" + bucketNotificationConfig
	if _, err := xl.PutObject(minioMetaBucket, bucketConfigPath, mustGetHashReader(t, reader, size, "", ""), nil); err != nil {
		t.Fatal("Unexpected error:", err)
	}

	// Replace every backing disk with a "naughty" disk that fails all
	// operations with errFaultyDisk.
	for i, d := range xl.storageDisks {
		xl.storageDisks[i] = newNaughtyDisk(d, nil, errFaultyDisk)
	}
	// Test initEventNotifier() with faulty disks; repeat to make sure the
	// failure is stable across calls.
	for i := 1; i <= 3; i++ {
		if err := initEventNotifier(xl); errors.Cause(err) != errFaultyDisk {
			t.Fatal("Unexpected error:", err)
		}
	}
}
|
||||
|
||||
// InitEventNotifierWithPostgreSQL - tests InitEventNotifier when PostgreSQL is not prepared
|
||||
func TestInitEventNotifierWithPostgreSQL(t *testing.T) {
|
||||
// initialize the server and obtain the credentials and root.
|
||||
// credentials are necessary to sign the HTTP request.
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("Init Test config failed")
|
||||
}
|
||||
// remove the root directory after the test ends.
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
disks, err := getRandomDisks(1)
|
||||
defer os.RemoveAll(disks[0])
|
||||
if err != nil {
|
||||
t.Fatal("Unable to create directories for FS backend. ", err)
|
||||
}
|
||||
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to initialize FS backend.", err)
|
||||
}
|
||||
|
||||
globalServerConfig.Notify.SetPostgreSQLByID("1", postgreSQLNotify{Enable: true})
|
||||
if err := initEventNotifier(fs); err == nil {
|
||||
t.Fatal("PostgreSQL config didn't fail.")
|
||||
}
|
||||
}
|
||||
|
||||
// InitEventNotifierWithNATS - tests InitEventNotifier when NATS is not prepared
|
||||
func TestInitEventNotifierWithNATS(t *testing.T) {
|
||||
// initialize the server and obtain the credentials and root.
|
||||
// credentials are necessary to sign the HTTP request.
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("Init Test config failed")
|
||||
}
|
||||
// remove the root directory after the test ends.
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
disks, err := getRandomDisks(1)
|
||||
defer os.RemoveAll(disks[0])
|
||||
if err != nil {
|
||||
t.Fatal("Unable to create directories for FS backend. ", err)
|
||||
}
|
||||
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to initialize FS backend.", err)
|
||||
}
|
||||
|
||||
globalServerConfig.Notify.SetNATSByID("1", natsNotify{Enable: true})
|
||||
if err := initEventNotifier(fs); err == nil {
|
||||
t.Fatal("NATS config didn't fail.")
|
||||
}
|
||||
}
|
||||
|
||||
// InitEventNotifierWithWebHook - tests InitEventNotifier when WebHook is not prepared
|
||||
func TestInitEventNotifierWithWebHook(t *testing.T) {
|
||||
// initialize the server and obtain the credentials and root.
|
||||
// credentials are necessary to sign the HTTP request.
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("Init Test config failed")
|
||||
}
|
||||
// remove the root directory after the test ends.
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
disks, err := getRandomDisks(1)
|
||||
defer os.RemoveAll(disks[0])
|
||||
if err != nil {
|
||||
t.Fatal("Unable to create directories for FS backend. ", err)
|
||||
}
|
||||
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to initialize FS backend.", err)
|
||||
}
|
||||
|
||||
globalServerConfig.Notify.SetWebhookByID("1", webhookNotify{Enable: true})
|
||||
if err := initEventNotifier(fs); err == nil {
|
||||
t.Fatal("WebHook config didn't fail.")
|
||||
}
|
||||
}
|
||||
|
||||
// InitEventNotifierWithAMQP - tests InitEventNotifier when AMQP is not prepared
|
||||
func TestInitEventNotifierWithAMQP(t *testing.T) {
|
||||
// initialize the server and obtain the credentials and root.
|
||||
// credentials are necessary to sign the HTTP request.
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("Init Test config failed")
|
||||
}
|
||||
// remove the root directory after the test ends.
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
disks, err := getRandomDisks(1)
|
||||
defer os.RemoveAll(disks[0])
|
||||
if err != nil {
|
||||
t.Fatal("Unable to create directories for FS backend. ", err)
|
||||
}
|
||||
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to initialize FS backend.", err)
|
||||
}
|
||||
|
||||
globalServerConfig.Notify.SetAMQPByID("1", amqpNotify{Enable: true})
|
||||
if err := initEventNotifier(fs); err == nil {
|
||||
t.Fatal("AMQP config didn't fail.")
|
||||
}
|
||||
}
|
||||
|
||||
// InitEventNotifierWithElasticSearch - test InitEventNotifier when ElasticSearch is not ready
|
||||
func TestInitEventNotifierWithElasticSearch(t *testing.T) {
|
||||
// initialize the server and obtain the credentials and root.
|
||||
// credentials are necessary to sign the HTTP request.
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("Init Test config failed")
|
||||
}
|
||||
// remove the root directory after the test ends.
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
disks, err := getRandomDisks(1)
|
||||
defer os.RemoveAll(disks[0])
|
||||
if err != nil {
|
||||
t.Fatal("Unable to create directories for FS backend. ", err)
|
||||
}
|
||||
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to initialize FS backend.", err)
|
||||
}
|
||||
|
||||
globalServerConfig.Notify.SetElasticSearchByID("1", elasticSearchNotify{Enable: true})
|
||||
if err := initEventNotifier(fs); err == nil {
|
||||
t.Fatal("ElasticSearch config didn't fail.")
|
||||
}
|
||||
}
|
||||
|
||||
// InitEventNotifierWithRedis - test InitEventNotifier when Redis is not ready
|
||||
func TestInitEventNotifierWithRedis(t *testing.T) {
|
||||
// initialize the server and obtain the credentials and root.
|
||||
// credentials are necessary to sign the HTTP request.
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("Init Test config failed")
|
||||
}
|
||||
// remove the root directory after the test ends.
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
disks, err := getRandomDisks(1)
|
||||
defer os.RemoveAll(disks[0])
|
||||
if err != nil {
|
||||
t.Fatal("Unable to create directories for FS backend. ", err)
|
||||
}
|
||||
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to initialize FS backend.", err)
|
||||
}
|
||||
|
||||
globalServerConfig.Notify.SetRedisByID("1", redisNotify{Enable: true})
|
||||
if err := initEventNotifier(fs); err == nil {
|
||||
t.Fatal("Redis config didn't fail.")
|
||||
}
|
||||
}
|
||||
|
||||
// TestPeerRPCServerData bundles a running test peers RPC server with the
// backend type it was started with; used by notification tests that need
// peer RPC machinery.
type TestPeerRPCServerData struct {
	serverType string     // backend type passed to StartTestPeersRPCServer (e.g. "XL")
	testServer TestServer // the running test server and its resources
}
|
||||
|
||||
// Setup starts a test peers RPC server and points the global host/port/addr
// variables and the S3 peer clients at it. Must be paired with TearDown.
func (s *TestPeerRPCServerData) Setup(t *testing.T) {
	s.testServer = StartTestPeersRPCServer(t, s.serverType)

	// setup port and minio addr from the listener the server actually bound.
	host, port := mustSplitHostPort(s.testServer.Server.Listener.Addr().String())
	globalMinioHost = host
	globalMinioPort = port
	globalMinioAddr = getEndpointsLocalAddr(s.testServer.endpoints)

	// initialize the peer client(s)
	initGlobalS3Peers(s.testServer.Disks)
}
|
||||
|
||||
func (s *TestPeerRPCServerData) TearDown() {
|
||||
s.testServer.Stop()
|
||||
_ = os.RemoveAll(s.testServer.Root)
|
||||
for _, d := range s.testServer.Disks {
|
||||
_ = os.RemoveAll(d.Path)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSetNGetBucketNotification verifies that a notification config set on a
// bucket can be read back and round-trips unchanged.
func TestSetNGetBucketNotification(t *testing.T) {
	s := TestPeerRPCServerData{serverType: "XL"}

	// setup and teardown
	s.Setup(t)
	defer s.TearDown()

	bucketName := getRandomBucketName()

	obj := s.testServer.Obj
	if err := initEventNotifier(obj); err != nil {
		t.Fatal("Unexpected error:", err)
	}

	// Set an empty notification config, then read it back.
	globalEventNotifier.SetBucketNotificationConfig(bucketName, &notificationConfig{})
	nConfig := globalEventNotifier.GetBucketNotificationConfig(bucketName)
	if nConfig == nil {
		t.Errorf("Notification expected to be set, but notification not set.")
	}

	if !reflect.DeepEqual(nConfig, &notificationConfig{}) {
		t.Errorf("Mismatching notification configs.")
	}
}
|
||||
|
||||
// TestInitEventNotifier persists sample notification and listener configs to
// a bucket, runs initEventNotifier, and verifies both configs (and their
// targets) are loaded back as expected.
func TestInitEventNotifier(t *testing.T) {
	// Save and restore the distributed-mode flag mutated below.
	currentIsDistXL := globalIsDistXL
	defer func() {
		globalIsDistXL = currentIsDistXL
	}()

	s := TestPeerRPCServerData{serverType: "XL"}

	// setup and teardown
	s.Setup(t)
	defer s.TearDown()

	// test if empty object layer arg. returns expected error.
	if err := initEventNotifier(nil); err == nil || err != errInvalidArgument {
		t.Fatalf("initEventNotifier returned unexpected error value - %v", err)
	}

	obj := s.testServer.Obj
	bucketName := getRandomBucketName()
	// declare sample configs
	filterRules := []filterRule{
		{
			Name:  "prefix",
			Value: "minio",
		},
		{
			Name:  "suffix",
			Value: "*.jpg",
		},
	}
	sampleSvcCfg := ServiceConfig{
		[]string{"s3:ObjectRemoved:*", "s3:ObjectCreated:*"},
		filterStruct{
			keyFilter{filterRules},
		},
		"1",
	}
	sampleNotifCfg := notificationConfig{
		QueueConfigs: []queueConfig{
			{
				ServiceConfig: sampleSvcCfg,
				QueueARN:      "testqARN",
			},
		},
	}
	sampleListenCfg := []listenerConfig{
		{
			TopicConfig: topicConfig{ServiceConfig: sampleSvcCfg,
				TopicARN: "testlARN"},
			TargetServer: globalMinioAddr,
		},
	}

	// create bucket
	if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
		t.Fatal("Unexpected error:", err)
	}

	// bucket is created, now writing should not give errors.
	if err := persistNotificationConfig(bucketName, &sampleNotifCfg, obj); err != nil {
		t.Fatal("Unexpected error:", err)
	}

	if err := persistListenerConfig(bucketName, sampleListenCfg, obj); err != nil {
		t.Fatal("Unexpected error:", err)
	}

	// needed to load listener config from disk for testing (in
	// single peer mode, the listener config is ignored, but here
	// we want to test the loading from disk too.)
	globalIsDistXL = true

	// test event notifier init
	if err := initEventNotifier(obj); err != nil {
		t.Fatal("Unexpected error:", err)
	}

	// fetch bucket configs and verify
	ncfg := globalEventNotifier.GetBucketNotificationConfig(bucketName)
	if ncfg == nil {
		t.Error("Bucket notification was not present for ", bucketName)
	}
	if len(ncfg.QueueConfigs) != 1 || ncfg.QueueConfigs[0].QueueARN != "testqARN" {
		t.Error("Unexpected bucket notification found - ", *ncfg)
	}
	// The queue target was never enabled in the server config, so no
	// external logger should exist for it.
	if globalEventNotifier.GetExternalTarget("testqARN") != nil {
		t.Error("A logger was not expected to be found as it was not enabled in the config.")
	}

	lcfg := globalEventNotifier.GetBucketListenerConfig(bucketName)
	if lcfg == nil {
		t.Error("Bucket listener was not present for ", bucketName)
	}
	if len(lcfg) != 1 || lcfg[0].TargetServer != globalMinioAddr || lcfg[0].TopicConfig.TopicARN != "testlARN" {
		t.Error("Unexpected listener config found - ", lcfg[0])
	}
	if globalEventNotifier.GetInternalTarget("testlARN") == nil {
		t.Error("A listen logger was not found.")
	}
}
|
||||
|
||||
func TestListenBucketNotification(t *testing.T) {
|
||||
currentIsDistXL := globalIsDistXL
|
||||
defer func() {
|
||||
globalIsDistXL = currentIsDistXL
|
||||
}()
|
||||
|
||||
s := TestPeerRPCServerData{serverType: "XL"}
|
||||
// setup and teardown
|
||||
s.Setup(t)
|
||||
defer s.TearDown()
|
||||
|
||||
// test initialisation
|
||||
obj := s.testServer.Obj
|
||||
|
||||
bucketName := "bucket"
|
||||
objectName := "object"
|
||||
|
||||
// Create the bucket to listen on
|
||||
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
|
||||
t.Fatal("Unexpected error:", err)
|
||||
}
|
||||
|
||||
listenARN := fmt.Sprintf("%s:%s:1:%s-%s",
|
||||
minioTopic,
|
||||
globalServerConfig.GetRegion(),
|
||||
snsTypeMinio,
|
||||
s.testServer.Server.Listener.Addr(),
|
||||
)
|
||||
lcfg := listenerConfig{
|
||||
TopicConfig: topicConfig{
|
||||
ServiceConfig{
|
||||
[]string{"s3:ObjectRemoved:*", "s3:ObjectCreated:*"},
|
||||
filterStruct{},
|
||||
"0",
|
||||
},
|
||||
listenARN,
|
||||
},
|
||||
TargetServer: globalMinioAddr,
|
||||
}
|
||||
|
||||
// write listener config to storage layer
|
||||
lcfgs := []listenerConfig{lcfg}
|
||||
if err := persistListenerConfig(bucketName, lcfgs, obj); err != nil {
|
||||
t.Fatalf("Test Setup error: %v", err)
|
||||
}
|
||||
|
||||
// needed to load listener config from disk for testing (in
|
||||
// single peer mode, the listener config is ingored, but here
|
||||
// we want to test the loading from disk too.)
|
||||
globalIsDistXL = true
|
||||
|
||||
// Init event notifier
|
||||
if err := initEventNotifier(obj); err != nil {
|
||||
t.Fatal("Unexpected error:", err)
|
||||
}
|
||||
|
||||
// Check if the config is loaded
|
||||
listenerCfg := globalEventNotifier.GetBucketListenerConfig(bucketName)
|
||||
if listenerCfg == nil {
|
||||
t.Fatal("Cannot load bucket listener config")
|
||||
}
|
||||
if len(listenerCfg) != 1 {
|
||||
t.Fatal("Listener config is not correctly loaded. Exactly one listener config is expected")
|
||||
}
|
||||
|
||||
// Check if topic ARN is correct
|
||||
if listenerCfg[0].TopicConfig.TopicARN != listenARN {
|
||||
t.Fatal("Configured topic ARN is incorrect.")
|
||||
}
|
||||
|
||||
// Create a new notification event channel.
|
||||
nListenCh := newListenChan()
|
||||
// Add events channel for listener.
|
||||
if err := globalEventNotifier.AddListenerChan(listenARN, nListenCh); err != nil {
|
||||
t.Fatalf("Test Setup error: %v", err)
|
||||
}
|
||||
// Remove listen channel after the writer has closed or the
|
||||
// client disconnected.
|
||||
defer globalEventNotifier.RemoveListenerChan(listenARN)
|
||||
|
||||
// Fire an event notification
|
||||
go eventNotify(eventData{
|
||||
Type: ObjectRemovedDelete,
|
||||
Bucket: bucketName,
|
||||
ObjInfo: ObjectInfo{
|
||||
Bucket: bucketName,
|
||||
Name: objectName,
|
||||
},
|
||||
ReqParams: map[string]string{
|
||||
"sourceIPAddress": "localhost:1337",
|
||||
},
|
||||
})
|
||||
|
||||
// Wait for the event notification here, if nothing is received within 30 seconds,
|
||||
// test error will be fired
|
||||
select {
|
||||
case n := <-nListenCh.dataCh:
|
||||
// Check that received event
|
||||
if len(n) == 0 {
|
||||
t.Fatal("Unexpected error occurred")
|
||||
}
|
||||
if n[0].S3.Object.Key != objectName {
|
||||
t.Fatalf("Received wrong object name in notification, expected %s, received %s", n[0].S3.Object.Key, objectName)
|
||||
}
|
||||
case <-time.After(3 * time.Second):
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestAddRemoveBucketListenerConfig(t *testing.T) {
|
||||
s := TestPeerRPCServerData{serverType: "XL"}
|
||||
|
||||
// setup and teardown
|
||||
s.Setup(t)
|
||||
defer s.TearDown()
|
||||
|
||||
// test code
|
||||
obj := s.testServer.Obj
|
||||
if err := initEventNotifier(obj); err != nil {
|
||||
t.Fatalf("Failed to initialize event notifier: %v", err)
|
||||
}
|
||||
|
||||
// Make a bucket to store topicConfigs.
|
||||
randBucket := getRandomBucketName()
|
||||
if err := obj.MakeBucketWithLocation(randBucket, ""); err != nil {
|
||||
t.Fatalf("Failed to make bucket %s", randBucket)
|
||||
}
|
||||
|
||||
// Add a topicConfig to an empty notificationConfig.
|
||||
accountID := fmt.Sprintf("%d", UTCNow().UnixNano())
|
||||
accountARN := fmt.Sprintf(
|
||||
"arn:minio:sqs:%s:%s:listen-%s",
|
||||
globalServerConfig.GetRegion(),
|
||||
accountID,
|
||||
globalMinioAddr,
|
||||
)
|
||||
|
||||
// Make topic configuration
|
||||
filterRules := []filterRule{
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: "minio",
|
||||
},
|
||||
{
|
||||
Name: "suffix",
|
||||
Value: "*.jpg",
|
||||
},
|
||||
}
|
||||
sampleTopicCfg := topicConfig{
|
||||
TopicARN: accountARN,
|
||||
ServiceConfig: ServiceConfig{
|
||||
[]string{"s3:ObjectRemoved:*", "s3:ObjectCreated:*"},
|
||||
filterStruct{
|
||||
keyFilter{filterRules},
|
||||
},
|
||||
"sns-" + accountID,
|
||||
},
|
||||
}
|
||||
sampleListenerCfg := &listenerConfig{
|
||||
TopicConfig: sampleTopicCfg,
|
||||
TargetServer: globalMinioAddr,
|
||||
}
|
||||
testCases := []struct {
|
||||
lCfg *listenerConfig
|
||||
expectedErr error
|
||||
}{
|
||||
{sampleListenerCfg, nil},
|
||||
{nil, errInvalidArgument},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
err := AddBucketListenerConfig(randBucket, test.lCfg, obj)
|
||||
if err != test.expectedErr {
|
||||
t.Errorf(
|
||||
"Test %d: Failed with error %v, expected to fail with %v",
|
||||
i+1, err, test.expectedErr,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// test remove listener actually removes a listener
|
||||
RemoveBucketListenerConfig(randBucket, sampleListenerCfg, obj)
|
||||
// since it does not return errors we fetch the config and
|
||||
// check
|
||||
lcSlice := globalEventNotifier.GetBucketListenerConfig(randBucket)
|
||||
if len(lcSlice) != 0 {
|
||||
t.Errorf("Remove Listener Config Test: did not remove listener config - %v",
|
||||
lcSlice)
|
||||
}
|
||||
}
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -169,8 +169,8 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
|
|||
return nil, fmt.Errorf("Unable to load all bucket policies. %s", err)
|
||||
}
|
||||
|
||||
// Initialize a new event notifier.
|
||||
if err = initEventNotifier(fs); err != nil {
|
||||
// Initialize notification system.
|
||||
if err = globalNotificationSys.Init(fs); err != nil {
|
||||
return nil, fmt.Errorf("Unable to initialize event notification. %s", err)
|
||||
}
|
||||
|
||||
|
|
|
@ -162,6 +162,10 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
|
|||
|
||||
initNSLock(false) // Enable local namespace lock.
|
||||
|
||||
// Initialize notification system.
|
||||
globalNotificationSys, err = NewNotificationSys(globalServerConfig, EndpointList{})
|
||||
fatalIf(err, "Unable to initialize notification system.")
|
||||
|
||||
newObject, err := gw.NewGatewayLayer(globalServerConfig.GetCredential())
|
||||
fatalIf(err, "Unable to initialize gateway layer")
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -114,8 +114,7 @@ var (
|
|||
// Holds the host that was passed using --address
|
||||
globalMinioHost = ""
|
||||
|
||||
// Peer communication struct
|
||||
globalS3Peers = s3Peers{}
|
||||
globalNotificationSys *NotificationSys
|
||||
|
||||
// CA root certificates, a nil value means system certs pool will be used
|
||||
globalRootCAs *x509.CertPool
|
||||
|
@ -160,9 +159,6 @@ var (
|
|||
globalOperationTimeout = newDynamicTimeout(10*time.Minute /*30*/, 600*time.Second) // default timeout for general ops
|
||||
globalHealingTimeout = newDynamicTimeout(30*time.Minute /*1*/, 30*time.Minute) // timeout for healing related ops
|
||||
|
||||
// Keep connection active for clients actively using ListenBucketNotification.
|
||||
globalSNSConnAlive = 5 * time.Second // Send a whitespace every 5 seconds.
|
||||
|
||||
// Storage classes
|
||||
// Set to indicate if storage class is set up
|
||||
globalIsStorageClass bool
|
||||
|
|
|
@ -0,0 +1,634 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
xerrors "github.com/minio/minio/pkg/errors"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
// NotificationSys - notification system.
|
||||
type NotificationSys struct {
|
||||
sync.RWMutex
|
||||
targetList *event.TargetList
|
||||
bucketRulesMap map[string]event.RulesMap
|
||||
bucketRemoteTargetRulesMap map[string]map[event.TargetID]event.RulesMap
|
||||
peerRPCClientMap map[xnet.Host]*PeerRPCClient
|
||||
}
|
||||
|
||||
// GetARNList - returns available ARNs.
|
||||
func (sys *NotificationSys) GetARNList() []string {
|
||||
arns := []string{}
|
||||
region := globalServerConfig.GetRegion()
|
||||
for _, targetID := range sys.targetList.List() {
|
||||
arns = append(arns, targetID.ToARN(region).String())
|
||||
}
|
||||
|
||||
return arns
|
||||
}
|
||||
|
||||
// GetPeerRPCClient - returns PeerRPCClient of addr.
|
||||
func (sys *NotificationSys) GetPeerRPCClient(addr xnet.Host) *PeerRPCClient {
|
||||
return sys.peerRPCClientMap[addr]
|
||||
}
|
||||
|
||||
// DeleteBucket - calls DeleteBucket RPC call on all peers.
|
||||
func (sys *NotificationSys) DeleteBucket(bucketName string) map[xnet.Host]error {
|
||||
errors := make(map[xnet.Host]error)
|
||||
var wg sync.WaitGroup
|
||||
for addr, client := range sys.peerRPCClientMap {
|
||||
wg.Add(1)
|
||||
go func(addr xnet.Host, client *PeerRPCClient) {
|
||||
defer wg.Done()
|
||||
if err := client.DeleteBucket(bucketName); err != nil {
|
||||
errors[addr] = err
|
||||
}
|
||||
}(addr, client)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// UpdateBucketPolicy - calls UpdateBucketPolicy RPC call on all peers.
|
||||
func (sys *NotificationSys) UpdateBucketPolicy(bucketName string) map[xnet.Host]error {
|
||||
errors := make(map[xnet.Host]error)
|
||||
var wg sync.WaitGroup
|
||||
for addr, client := range sys.peerRPCClientMap {
|
||||
wg.Add(1)
|
||||
go func(addr xnet.Host, client *PeerRPCClient) {
|
||||
defer wg.Done()
|
||||
if err := client.UpdateBucketPolicy(bucketName); err != nil {
|
||||
errors[addr] = err
|
||||
}
|
||||
}(addr, client)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// PutBucketNotification - calls PutBucketNotification RPC call on all peers.
|
||||
func (sys *NotificationSys) PutBucketNotification(bucketName string, rulesMap event.RulesMap) map[xnet.Host]error {
|
||||
errors := make(map[xnet.Host]error)
|
||||
var wg sync.WaitGroup
|
||||
for addr, client := range sys.peerRPCClientMap {
|
||||
wg.Add(1)
|
||||
go func(addr xnet.Host, client *PeerRPCClient, rulesMap event.RulesMap) {
|
||||
defer wg.Done()
|
||||
if err := client.PutBucketNotification(bucketName, rulesMap); err != nil {
|
||||
errors[addr] = err
|
||||
}
|
||||
}(addr, client, rulesMap.Clone())
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// ListenBucketNotification - calls ListenBucketNotification RPC call on all peers.
|
||||
func (sys *NotificationSys) ListenBucketNotification(bucketName string, eventNames []event.Name, pattern string, targetID event.TargetID, localPeer xnet.Host) map[xnet.Host]error {
|
||||
errors := make(map[xnet.Host]error)
|
||||
var wg sync.WaitGroup
|
||||
for addr, client := range sys.peerRPCClientMap {
|
||||
wg.Add(1)
|
||||
go func(addr xnet.Host, client *PeerRPCClient) {
|
||||
defer wg.Done()
|
||||
if err := client.ListenBucketNotification(bucketName, eventNames, pattern, targetID, localPeer); err != nil {
|
||||
errors[addr] = err
|
||||
}
|
||||
}(addr, client)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// AddRemoteTarget - adds event rules map, HTTP/PeerRPC client target to bucket name.
|
||||
func (sys *NotificationSys) AddRemoteTarget(bucketName string, target event.Target, rulesMap event.RulesMap) error {
|
||||
if err := sys.targetList.Add(target); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
targetMap := sys.bucketRemoteTargetRulesMap[bucketName]
|
||||
if targetMap == nil {
|
||||
targetMap = make(map[event.TargetID]event.RulesMap)
|
||||
}
|
||||
targetMap[target.ID()] = rulesMap.Clone()
|
||||
sys.bucketRemoteTargetRulesMap[bucketName] = targetMap
|
||||
sys.Unlock()
|
||||
|
||||
sys.AddRulesMap(bucketName, rulesMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoteTargetExist - checks whether given target ID is a HTTP/PeerRPC client target or not.
|
||||
func (sys *NotificationSys) RemoteTargetExist(bucketName string, targetID event.TargetID) bool {
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
targetMap, ok := sys.bucketRemoteTargetRulesMap[bucketName]
|
||||
if ok {
|
||||
_, ok = targetMap[targetID]
|
||||
}
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
// initListeners - initializes PeerRPC clients available in listener.json.
|
||||
func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string) error {
|
||||
// listener.json is available/applicable only in DistXL mode.
|
||||
if !globalIsDistXL {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct path to listener.json for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
|
||||
transactionConfigFile := configFile + ".transaction"
|
||||
|
||||
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
|
||||
// and configFile, take a transaction lock to avoid data race between readConfig()
|
||||
// and saveConfig().
|
||||
objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
|
||||
if err := objLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer objLock.Unlock()
|
||||
|
||||
reader, err := readConfig(objAPI, configFile)
|
||||
if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
|
||||
return err
|
||||
}
|
||||
|
||||
listenerList := []ListenBucketNotificationArgs{}
|
||||
if reader != nil {
|
||||
if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
|
||||
errorIf(err, "Unable to parse listener.json.")
|
||||
return xerrors.Trace(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(listenerList) == 0 {
|
||||
// Nothing to initialize for empty listener list.
|
||||
return nil
|
||||
}
|
||||
|
||||
activeListenerList := []ListenBucketNotificationArgs{}
|
||||
for _, args := range listenerList {
|
||||
var found bool
|
||||
if found, err = isLocalHost(args.Addr.Name); err != nil {
|
||||
errorIf(err, "unable to check address %v is local host", args.Addr)
|
||||
return err
|
||||
}
|
||||
if found {
|
||||
// As this function is called at startup, skip HTTP listener to this host.
|
||||
continue
|
||||
}
|
||||
|
||||
rpcClient := sys.GetPeerRPCClient(args.Addr)
|
||||
if rpcClient == nil {
|
||||
return fmt.Errorf("unable to find PeerRPCClient by address %v in listener.json for bucket %v", args.Addr, bucketName)
|
||||
}
|
||||
|
||||
var exist bool
|
||||
if exist, err = rpcClient.RemoteTargetExist(bucketName, args.TargetID); err != nil {
|
||||
return err
|
||||
}
|
||||
if !exist {
|
||||
// Skip previously connected HTTP listener which is not found in remote peer.
|
||||
continue
|
||||
}
|
||||
|
||||
target := NewPeerRPCClientTarget(bucketName, args.TargetID, rpcClient)
|
||||
rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID())
|
||||
if err = sys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
|
||||
return err
|
||||
}
|
||||
activeListenerList = append(activeListenerList, args)
|
||||
}
|
||||
|
||||
data, err := json.Marshal(activeListenerList)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return saveConfig(objAPI, configFile, data)
|
||||
}
|
||||
|
||||
// Init - initializes notification system from notification.xml and listener.json of all buckets.
|
||||
func (sys *NotificationSys) Init(objAPI ObjectLayer) error {
|
||||
if objAPI == nil {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
buckets, err := objAPI.ListBuckets()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, bucket := range buckets {
|
||||
config, err := readNotificationConfig(objAPI, bucket.Name)
|
||||
if err != nil {
|
||||
if !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
|
||||
errorIf(err, "Unable to load notification configuration of bucket %v", bucket.Name)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
sys.AddRulesMap(bucket.Name, config.ToRulesMap())
|
||||
}
|
||||
|
||||
if err = sys.initListeners(objAPI, bucket.Name); err != nil {
|
||||
errorIf(err, "Unable to initialize HTTP listener for bucket %v", bucket.Name)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddRulesMap - adds rules map for bucket name.
|
||||
func (sys *NotificationSys) AddRulesMap(bucketName string, rulesMap event.RulesMap) {
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
rulesMap = rulesMap.Clone()
|
||||
|
||||
for _, targetRulesMap := range sys.bucketRemoteTargetRulesMap[bucketName] {
|
||||
rulesMap.Add(targetRulesMap)
|
||||
}
|
||||
|
||||
rulesMap.Add(sys.bucketRulesMap[bucketName])
|
||||
sys.bucketRulesMap[bucketName] = rulesMap
|
||||
}
|
||||
|
||||
// RemoveRulesMap - removes rules map for bucket name.
|
||||
func (sys *NotificationSys) RemoveRulesMap(bucketName string, rulesMap event.RulesMap) {
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
sys.bucketRulesMap[bucketName].Remove(rulesMap)
|
||||
if len(sys.bucketRulesMap[bucketName]) == 0 {
|
||||
delete(sys.bucketRulesMap, bucketName)
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveNotification - removes all notification configuration for bucket name.
|
||||
func (sys *NotificationSys) RemoveNotification(bucketName string) {
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
delete(sys.bucketRulesMap, bucketName)
|
||||
|
||||
for targetID := range sys.bucketRemoteTargetRulesMap[bucketName] {
|
||||
sys.targetList.Remove(targetID)
|
||||
delete(sys.bucketRemoteTargetRulesMap[bucketName], targetID)
|
||||
}
|
||||
|
||||
delete(sys.bucketRemoteTargetRulesMap, bucketName)
|
||||
}
|
||||
|
||||
// RemoveAllRemoteTargets - closes and removes all HTTP/PeerRPC client targets.
|
||||
func (sys *NotificationSys) RemoveAllRemoteTargets() {
|
||||
for _, targetMap := range sys.bucketRemoteTargetRulesMap {
|
||||
for targetID := range targetMap {
|
||||
sys.targetList.Remove(targetID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveRemoteTarget - closes and removes target by target ID.
|
||||
func (sys *NotificationSys) RemoveRemoteTarget(bucketName string, targetID event.TargetID) {
|
||||
for id, err := range sys.targetList.Remove(targetID) {
|
||||
errorIf(err, "unable to close target ID %v", id)
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
if _, ok := sys.bucketRemoteTargetRulesMap[bucketName]; ok {
|
||||
delete(sys.bucketRemoteTargetRulesMap[bucketName], targetID)
|
||||
if len(sys.bucketRemoteTargetRulesMap[bucketName]) == 0 {
|
||||
delete(sys.bucketRemoteTargetRulesMap, bucketName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (sys *NotificationSys) send(bucketName string, eventData event.Event, targetIDs ...event.TargetID) map[event.TargetID]error {
|
||||
errMap := sys.targetList.Send(eventData, targetIDs...)
|
||||
for targetID := range errMap {
|
||||
if sys.RemoteTargetExist(bucketName, targetID) {
|
||||
sys.RemoveRemoteTarget(bucketName, targetID)
|
||||
}
|
||||
}
|
||||
|
||||
return errMap
|
||||
}
|
||||
|
||||
// Send - sends event data to all matching targets.
|
||||
func (sys *NotificationSys) Send(args eventArgs) map[event.TargetID]error {
|
||||
sys.RLock()
|
||||
targetIDSet := sys.bucketRulesMap[args.BucketName].Match(args.EventName, args.Object.Name)
|
||||
sys.RUnlock()
|
||||
if len(targetIDSet) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
targetIDs := targetIDSet.ToSlice()
|
||||
return sys.send(args.BucketName, args.ToEvent(), targetIDs...)
|
||||
}
|
||||
|
||||
// NewNotificationSys - creates new notification system object.
|
||||
func NewNotificationSys(config *serverConfig, endpoints EndpointList) (*NotificationSys, error) {
|
||||
targetList, err := getNotificationTargets(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
peerRPCClientMap := makeRemoteRPCClients(endpoints)
|
||||
|
||||
// bucketRulesMap/bucketRemoteTargetRulesMap are initialized by NotificationSys.Init()
|
||||
return &NotificationSys{
|
||||
targetList: targetList,
|
||||
bucketRulesMap: make(map[string]event.RulesMap),
|
||||
bucketRemoteTargetRulesMap: make(map[string]map[event.TargetID]event.RulesMap),
|
||||
peerRPCClientMap: peerRPCClientMap,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type eventArgs struct {
|
||||
EventName event.Name
|
||||
BucketName string
|
||||
Object ObjectInfo
|
||||
ReqParams map[string]string
|
||||
Host string
|
||||
Port string
|
||||
UserAgent string
|
||||
}
|
||||
|
||||
// ToEvent - converts to notification event.
|
||||
func (args eventArgs) ToEvent() event.Event {
|
||||
getOriginEndpoint := func() string {
|
||||
host := globalMinioHost
|
||||
if host == "" {
|
||||
// FIXME: Send FQDN or hostname of this machine than sending IP address.
|
||||
host = localIP4.ToSlice()[0]
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s://%s:%s", getURLScheme(globalIsSSL), host, globalMinioPort)
|
||||
}
|
||||
|
||||
creds := globalServerConfig.GetCredential()
|
||||
eventTime := UTCNow()
|
||||
uniqueID := fmt.Sprintf("%X", eventTime.UnixNano())
|
||||
|
||||
newEvent := event.Event{
|
||||
EventVersion: "2.0",
|
||||
EventSource: "minio:s3",
|
||||
AwsRegion: globalServerConfig.GetRegion(),
|
||||
EventTime: eventTime.Format(event.AMZTimeFormat),
|
||||
EventName: args.EventName,
|
||||
UserIdentity: event.Identity{creds.AccessKey},
|
||||
RequestParameters: args.ReqParams,
|
||||
ResponseElements: map[string]string{
|
||||
"x-amz-request-id": uniqueID,
|
||||
"x-minio-origin-endpoint": getOriginEndpoint(), // Minio specific custom elements.
|
||||
},
|
||||
S3: event.Metadata{
|
||||
SchemaVersion: "1.0",
|
||||
ConfigurationID: "Config",
|
||||
Bucket: event.Bucket{
|
||||
Name: args.BucketName,
|
||||
OwnerIdentity: event.Identity{creds.AccessKey},
|
||||
ARN: bucketARNPrefix + args.BucketName,
|
||||
},
|
||||
Object: event.Object{
|
||||
Key: url.QueryEscape(args.Object.Name),
|
||||
VersionID: "1",
|
||||
Sequencer: uniqueID,
|
||||
},
|
||||
},
|
||||
Source: event.Source{
|
||||
Host: args.Host,
|
||||
Port: args.Port,
|
||||
UserAgent: args.UserAgent,
|
||||
},
|
||||
}
|
||||
|
||||
if args.EventName != event.ObjectRemovedDelete {
|
||||
newEvent.S3.Object.ETag = args.Object.ETag
|
||||
newEvent.S3.Object.Size = args.Object.Size
|
||||
newEvent.S3.Object.ContentType = args.Object.ContentType
|
||||
newEvent.S3.Object.UserMetadata = args.Object.UserDefined
|
||||
}
|
||||
|
||||
return newEvent
|
||||
}
|
||||
|
||||
func sendEvent(args eventArgs) {
|
||||
// globalNotificationSys is not initialized in gateway mode.
|
||||
if globalNotificationSys == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for targetID, err := range globalNotificationSys.Send(args) {
|
||||
errorIf(err, "unable to send event %v of bucket: %v, object: %v to target %v",
|
||||
args.EventName, args.BucketName, args.Object.Name, targetID)
|
||||
}
|
||||
}
|
||||
|
||||
func saveConfig(objAPI ObjectLayer, configFile string, data []byte) error {
|
||||
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = objAPI.PutObject(minioMetaBucket, configFile, hashReader, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
func readConfig(objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) {
|
||||
var buffer bytes.Buffer
|
||||
// Read entire content by setting size to -1
|
||||
err := objAPI.GetObject(minioMetaBucket, configFile, 0, -1, &buffer, "")
|
||||
if err != nil {
|
||||
// Ignore if err is ObjectNotFound or IncompleteBody when bucket is not configured with notification
|
||||
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
|
||||
return nil, xerrors.Trace(errNoSuchNotifications)
|
||||
}
|
||||
errorIf(err, "Unable to read file %v", configFile)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return NoSuchNotifications on empty content.
|
||||
if buffer.Len() == 0 {
|
||||
return nil, xerrors.Trace(errNoSuchNotifications)
|
||||
}
|
||||
|
||||
return &buffer, nil
|
||||
}
|
||||
|
||||
func readNotificationConfig(objAPI ObjectLayer, bucketName string) (*event.Config, error) {
|
||||
// Construct path to notification.xml for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig)
|
||||
|
||||
// Get read lock.
|
||||
objLock := globalNSMutex.NewNSLock(minioMetaBucket, configFile)
|
||||
if err := objLock.GetRLock(globalOperationTimeout); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer objLock.RUnlock()
|
||||
|
||||
reader, err := readConfig(objAPI, configFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return event.ParseConfig(reader, globalServerConfig.GetRegion(), globalNotificationSys.targetList)
|
||||
}
|
||||
|
||||
func saveNotificationConfig(objAPI ObjectLayer, bucketName string, config *event.Config) error {
|
||||
data, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig)
|
||||
|
||||
// Get write lock.
|
||||
objLock := globalNSMutex.NewNSLock(minioMetaBucket, configFile)
|
||||
if err := objLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer objLock.Unlock()
|
||||
|
||||
return saveConfig(objAPI, configFile, data)
|
||||
}
|
||||
|
||||
// SaveListener - saves HTTP client currently listening for events to listener.json.
|
||||
func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name, pattern string, targetID event.TargetID, addr xnet.Host) error {
|
||||
// listener.json is available/applicable only in DistXL mode.
|
||||
if !globalIsDistXL {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct path to listener.json for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
|
||||
transactionConfigFile := configFile + ".transaction"
|
||||
|
||||
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
|
||||
// and configFile, take a transaction lock to avoid data race between readConfig()
|
||||
// and saveConfig().
|
||||
objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
|
||||
if err := objLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer objLock.Unlock()
|
||||
|
||||
reader, err := readConfig(objAPI, configFile)
|
||||
if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
|
||||
return err
|
||||
}
|
||||
|
||||
listenerList := []ListenBucketNotificationArgs{}
|
||||
if reader != nil {
|
||||
if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
|
||||
errorIf(err, "Unable to parse listener.json.")
|
||||
return xerrors.Trace(err)
|
||||
}
|
||||
}
|
||||
|
||||
listenerList = append(listenerList, ListenBucketNotificationArgs{
|
||||
EventNames: eventNames,
|
||||
Pattern: pattern,
|
||||
TargetID: targetID,
|
||||
Addr: addr,
|
||||
})
|
||||
|
||||
data, err := json.Marshal(listenerList)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return saveConfig(objAPI, configFile, data)
|
||||
}
|
||||
|
||||
// RemoveListener - removes HTTP client currently listening for events from listener.json.
|
||||
func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.TargetID, addr xnet.Host) error {
|
||||
// listener.json is available/applicable only in DistXL mode.
|
||||
if !globalIsDistXL {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct path to listener.json for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
|
||||
transactionConfigFile := configFile + ".transaction"
|
||||
|
||||
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
|
||||
// and configFile, take a transaction lock to avoid data race between readConfig()
|
||||
// and saveConfig().
|
||||
objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
|
||||
if err := objLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer objLock.Unlock()
|
||||
|
||||
reader, err := readConfig(objAPI, configFile)
|
||||
if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
|
||||
return err
|
||||
}
|
||||
|
||||
listenerList := []ListenBucketNotificationArgs{}
|
||||
if reader != nil {
|
||||
if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
|
||||
errorIf(err, "Unable to parse listener.json.")
|
||||
return xerrors.Trace(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(listenerList) == 0 {
|
||||
// Nothing to remove.
|
||||
return nil
|
||||
}
|
||||
|
||||
activeListenerList := []ListenBucketNotificationArgs{}
|
||||
for _, args := range listenerList {
|
||||
if args.TargetID == targetID && args.Addr.Equal(addr) {
|
||||
// Skip if matches
|
||||
continue
|
||||
}
|
||||
|
||||
activeListenerList = append(activeListenerList, args)
|
||||
}
|
||||
|
||||
data, err := json.Marshal(activeListenerList)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return saveConfig(objAPI, configFile, data)
|
||||
}
|
|
@ -1,347 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Notifier represents collection of supported notification queues.
|
||||
type notifier struct {
|
||||
AMQP amqpConfigs `json:"amqp"`
|
||||
NATS natsConfigs `json:"nats"`
|
||||
ElasticSearch elasticSearchConfigs `json:"elasticsearch"`
|
||||
Redis redisConfigs `json:"redis"`
|
||||
PostgreSQL postgreSQLConfigs `json:"postgresql"`
|
||||
Kafka kafkaConfigs `json:"kafka"`
|
||||
Webhook webhookConfigs `json:"webhook"`
|
||||
MySQL mySQLConfigs `json:"mysql"`
|
||||
MQTT mqttConfigs `json:"mqtt"`
|
||||
// Add new notification queues. IMPORTANT: When new queues are
|
||||
// added, update `serverConfig.ConfigDiff()` to reflect the
|
||||
// change.
|
||||
}
|
||||
|
||||
type amqpConfigs map[string]amqpNotify
|
||||
|
||||
func (a amqpConfigs) Clone() amqpConfigs {
|
||||
a2 := make(amqpConfigs, len(a))
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
func (a amqpConfigs) Validate() error {
|
||||
for k, v := range a {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("AMQP [%s] configuration invalid: %s", k, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type mqttConfigs map[string]mqttNotify
|
||||
|
||||
func (a mqttConfigs) Clone() mqttConfigs {
|
||||
a2 := make(mqttConfigs, len(a))
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
func (a mqttConfigs) Validate() error {
|
||||
for k, v := range a {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("MQTT [%s] configuration invalid: %s", k, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type natsConfigs map[string]natsNotify
|
||||
|
||||
func (a natsConfigs) Clone() natsConfigs {
|
||||
a2 := make(natsConfigs, len(a))
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
func (a natsConfigs) Validate() error {
|
||||
for k, v := range a {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("NATS [%s] configuration invalid: %s", k, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type elasticSearchConfigs map[string]elasticSearchNotify
|
||||
|
||||
func (a elasticSearchConfigs) Clone() elasticSearchConfigs {
|
||||
a2 := make(elasticSearchConfigs, len(a))
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
func (a elasticSearchConfigs) Validate() error {
|
||||
for k, v := range a {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("ElasticSearch [%s] configuration invalid: %s", k, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type redisConfigs map[string]redisNotify
|
||||
|
||||
func (a redisConfigs) Clone() redisConfigs {
|
||||
a2 := make(redisConfigs, len(a))
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
func (a redisConfigs) Validate() error {
|
||||
for k, v := range a {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("Redis [%s] configuration invalid: %s", k, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type postgreSQLConfigs map[string]postgreSQLNotify
|
||||
|
||||
func (a postgreSQLConfigs) Clone() postgreSQLConfigs {
|
||||
a2 := make(postgreSQLConfigs, len(a))
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
func (a postgreSQLConfigs) Validate() error {
|
||||
for k, v := range a {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("PostgreSQL [%s] configuration invalid: %s", k, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type kafkaConfigs map[string]kafkaNotify
|
||||
|
||||
func (a kafkaConfigs) Clone() kafkaConfigs {
|
||||
a2 := make(kafkaConfigs, len(a))
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
func (a kafkaConfigs) Validate() error {
|
||||
for k, v := range a {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("Kafka [%s] configuration invalid: %s", k, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type webhookConfigs map[string]webhookNotify
|
||||
|
||||
func (a webhookConfigs) Clone() webhookConfigs {
|
||||
a2 := make(webhookConfigs, len(a))
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
func (a webhookConfigs) Validate() error {
|
||||
for k, v := range a {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("Webhook [%s] configuration invalid: %s", k, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type mySQLConfigs map[string]mySQLNotify
|
||||
|
||||
func (a mySQLConfigs) Clone() mySQLConfigs {
|
||||
a2 := make(mySQLConfigs, len(a))
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
func (a mySQLConfigs) Validate() error {
|
||||
for k, v := range a {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("MySQL [%s] configuration invalid: %s", k, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *notifier) Validate() error {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
if err := n.AMQP.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := n.NATS.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := n.ElasticSearch.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := n.Redis.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := n.PostgreSQL.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := n.Kafka.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := n.Webhook.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := n.MySQL.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
return n.MQTT.Validate()
|
||||
}
|
||||
|
||||
func (n *notifier) SetAMQPByID(accountID string, amqpn amqpNotify) {
|
||||
n.AMQP[accountID] = amqpn
|
||||
}
|
||||
|
||||
func (n *notifier) GetAMQP() map[string]amqpNotify {
|
||||
return n.AMQP.Clone()
|
||||
}
|
||||
|
||||
func (n *notifier) GetAMQPByID(accountID string) amqpNotify {
|
||||
return n.AMQP[accountID]
|
||||
}
|
||||
|
||||
func (n *notifier) SetMQTTByID(accountID string, mqttn mqttNotify) {
|
||||
n.MQTT[accountID] = mqttn
|
||||
}
|
||||
|
||||
func (n *notifier) GetMQTT() map[string]mqttNotify {
|
||||
return n.MQTT.Clone()
|
||||
}
|
||||
|
||||
func (n *notifier) GetMQTTByID(accountID string) mqttNotify {
|
||||
return n.MQTT[accountID]
|
||||
}
|
||||
|
||||
func (n *notifier) SetNATSByID(accountID string, natsn natsNotify) {
|
||||
n.NATS[accountID] = natsn
|
||||
}
|
||||
|
||||
func (n *notifier) GetNATS() map[string]natsNotify {
|
||||
return n.NATS.Clone()
|
||||
}
|
||||
|
||||
func (n *notifier) GetNATSByID(accountID string) natsNotify {
|
||||
return n.NATS[accountID]
|
||||
}
|
||||
|
||||
func (n *notifier) SetElasticSearchByID(accountID string, es elasticSearchNotify) {
|
||||
n.ElasticSearch[accountID] = es
|
||||
}
|
||||
|
||||
func (n *notifier) GetElasticSearchByID(accountID string) elasticSearchNotify {
|
||||
return n.ElasticSearch[accountID]
|
||||
}
|
||||
|
||||
func (n *notifier) GetElasticSearch() map[string]elasticSearchNotify {
|
||||
return n.ElasticSearch.Clone()
|
||||
}
|
||||
|
||||
func (n *notifier) SetRedisByID(accountID string, r redisNotify) {
|
||||
n.Redis[accountID] = r
|
||||
}
|
||||
|
||||
func (n *notifier) GetRedis() map[string]redisNotify {
|
||||
return n.Redis.Clone()
|
||||
}
|
||||
|
||||
func (n *notifier) GetRedisByID(accountID string) redisNotify {
|
||||
return n.Redis[accountID]
|
||||
}
|
||||
|
||||
func (n *notifier) GetWebhook() map[string]webhookNotify {
|
||||
return n.Webhook.Clone()
|
||||
}
|
||||
|
||||
func (n *notifier) GetWebhookByID(accountID string) webhookNotify {
|
||||
return n.Webhook[accountID]
|
||||
}
|
||||
|
||||
func (n *notifier) SetWebhookByID(accountID string, pgn webhookNotify) {
|
||||
n.Webhook[accountID] = pgn
|
||||
}
|
||||
|
||||
func (n *notifier) SetPostgreSQLByID(accountID string, pgn postgreSQLNotify) {
|
||||
n.PostgreSQL[accountID] = pgn
|
||||
}
|
||||
|
||||
func (n *notifier) GetPostgreSQL() map[string]postgreSQLNotify {
|
||||
return n.PostgreSQL.Clone()
|
||||
}
|
||||
|
||||
func (n *notifier) GetPostgreSQLByID(accountID string) postgreSQLNotify {
|
||||
return n.PostgreSQL[accountID]
|
||||
}
|
||||
|
||||
func (n *notifier) SetMySQLByID(accountID string, pgn mySQLNotify) {
|
||||
n.MySQL[accountID] = pgn
|
||||
}
|
||||
|
||||
func (n *notifier) GetMySQL() map[string]mySQLNotify {
|
||||
return n.MySQL.Clone()
|
||||
}
|
||||
|
||||
func (n *notifier) GetMySQLByID(accountID string) mySQLNotify {
|
||||
return n.MySQL[accountID]
|
||||
}
|
||||
|
||||
func (n *notifier) SetKafkaByID(accountID string, kn kafkaNotify) {
|
||||
n.Kafka[accountID] = kn
|
||||
}
|
||||
|
||||
func (n *notifier) GetKafka() map[string]kafkaNotify {
|
||||
return n.Kafka.Clone()
|
||||
}
|
||||
|
||||
func (n *notifier) GetKafkaByID(accountID string) kafkaNotify {
|
||||
return n.Kafka[accountID]
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
258
cmd/notifiers.go
258
cmd/notifiers.go
|
@ -1,258 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/minio/minio/pkg/wildcard"
|
||||
)
|
||||
|
||||
// SQS type.
|
||||
const (
|
||||
// Minio sqs ARN prefix.
|
||||
minioSqs = "arn:minio:sqs:"
|
||||
|
||||
// Static string indicating queue type 'amqp'.
|
||||
queueTypeAMQP = "amqp"
|
||||
// Static string indicating queue type 'mqtt'.
|
||||
queueTypeMQTT = "mqtt"
|
||||
// Static string indicating queue type 'nats'.
|
||||
queueTypeNATS = "nats"
|
||||
// Static string indicating queue type 'elasticsearch'.
|
||||
queueTypeElastic = "elasticsearch"
|
||||
// Static string indicating queue type 'redis'.
|
||||
queueTypeRedis = "redis"
|
||||
// Static string indicating queue type 'postgresql'.
|
||||
queueTypePostgreSQL = "postgresql"
|
||||
// Static string indicating queue type 'mysql'.
|
||||
queueTypeMySQL = "mysql"
|
||||
// Static string indicating queue type 'kafka'.
|
||||
queueTypeKafka = "kafka"
|
||||
// Static string for Webhooks
|
||||
queueTypeWebhook = "webhook"
|
||||
|
||||
// Notifier format value constants
|
||||
formatNamespace = "namespace"
|
||||
formatAccess = "access"
|
||||
)
|
||||
|
||||
// Topic type.
|
||||
const (
|
||||
// Minio topic ARN prefix.
|
||||
minioTopic = "arn:minio:sns:"
|
||||
|
||||
// Static string indicating sns type 'listen'.
|
||||
snsTypeMinio = "listen"
|
||||
)
|
||||
|
||||
var errNotifyNotEnabled = errors.New("requested notifier not enabled")
|
||||
|
||||
// Returns true if queueArn is for an AMQP queue.
|
||||
func isAMQPQueue(sqsArn arnSQS) bool {
|
||||
if sqsArn.Type != queueTypeAMQP {
|
||||
return false
|
||||
}
|
||||
amqpL := globalServerConfig.Notify.GetAMQPByID(sqsArn.AccountID)
|
||||
if !amqpL.Enable {
|
||||
return false
|
||||
}
|
||||
// Connect to amqp server to validate.
|
||||
amqpC, err := dialAMQP(amqpL)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to connect to amqp service. %#v", amqpL)
|
||||
return false
|
||||
}
|
||||
defer amqpC.conn.Close()
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if mqttARN is for an MQTT queue.
|
||||
func isMQTTQueue(sqsArn arnSQS) bool {
|
||||
if sqsArn.Type != queueTypeMQTT {
|
||||
return false
|
||||
}
|
||||
mqttL := globalServerConfig.Notify.GetMQTTByID(sqsArn.AccountID)
|
||||
if !mqttL.Enable {
|
||||
return false
|
||||
}
|
||||
// Connect to mqtt server to validate.
|
||||
mqttC, err := dialMQTT(mqttL)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to connect to mqtt service. %#v", mqttL)
|
||||
return false
|
||||
}
|
||||
defer mqttC.Client.Disconnect(250)
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if natsArn is for an NATS queue.
|
||||
func isNATSQueue(sqsArn arnSQS) bool {
|
||||
if sqsArn.Type != queueTypeNATS {
|
||||
return false
|
||||
}
|
||||
natsL := globalServerConfig.Notify.GetNATSByID(sqsArn.AccountID)
|
||||
if !natsL.Enable {
|
||||
return false
|
||||
}
|
||||
// Connect to nats server to validate.
|
||||
natsC, err := dialNATS(natsL, true)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to connect to nats service. %#v", natsL)
|
||||
return false
|
||||
}
|
||||
closeNATS(natsC)
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if queueArn is for an Webhook queue
|
||||
func isWebhookQueue(sqsArn arnSQS) bool {
|
||||
if sqsArn.Type != queueTypeWebhook {
|
||||
return false
|
||||
}
|
||||
rNotify := globalServerConfig.Notify.GetWebhookByID(sqsArn.AccountID)
|
||||
return rNotify.Enable
|
||||
}
|
||||
|
||||
// Returns true if queueArn is for an Redis queue.
|
||||
func isRedisQueue(sqsArn arnSQS) bool {
|
||||
if sqsArn.Type != queueTypeRedis {
|
||||
return false
|
||||
}
|
||||
rNotify := globalServerConfig.Notify.GetRedisByID(sqsArn.AccountID)
|
||||
if !rNotify.Enable {
|
||||
return false
|
||||
}
|
||||
// Connect to redis server to validate.
|
||||
rPool, err := dialRedis(rNotify)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to connect to redis service. %#v", rNotify)
|
||||
return false
|
||||
}
|
||||
defer rPool.Close()
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if queueArn is for an ElasticSearch queue.
|
||||
func isElasticQueue(sqsArn arnSQS) bool {
|
||||
if sqsArn.Type != queueTypeElastic {
|
||||
return false
|
||||
}
|
||||
esNotify := globalServerConfig.Notify.GetElasticSearchByID(sqsArn.AccountID)
|
||||
if !esNotify.Enable {
|
||||
return false
|
||||
}
|
||||
elasticC, err := dialElastic(esNotify)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to connect to elasticsearch service %#v", esNotify)
|
||||
return false
|
||||
}
|
||||
defer elasticC.Stop()
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if queueArn is for PostgreSQL.
|
||||
func isPostgreSQLQueue(sqsArn arnSQS) bool {
|
||||
if sqsArn.Type != queueTypePostgreSQL {
|
||||
return false
|
||||
}
|
||||
pgNotify := globalServerConfig.Notify.GetPostgreSQLByID(sqsArn.AccountID)
|
||||
if !pgNotify.Enable {
|
||||
return false
|
||||
}
|
||||
pgC, err := dialPostgreSQL(pgNotify)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to connect to PostgreSQL server %#v", pgNotify)
|
||||
return false
|
||||
}
|
||||
defer pgC.Close()
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if queueArn is for MySQL.
|
||||
func isMySQLQueue(sqsArn arnSQS) bool {
|
||||
if sqsArn.Type != queueTypeMySQL {
|
||||
return false
|
||||
}
|
||||
msqlNotify := globalServerConfig.Notify.GetMySQLByID(sqsArn.AccountID)
|
||||
if !msqlNotify.Enable {
|
||||
return false
|
||||
}
|
||||
myC, err := dialMySQL(msqlNotify)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to connect to MySQL server %#v", msqlNotify)
|
||||
return false
|
||||
}
|
||||
defer myC.Close()
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if queueArn is for Kafka.
|
||||
func isKafkaQueue(sqsArn arnSQS) bool {
|
||||
if sqsArn.Type != queueTypeKafka {
|
||||
return false
|
||||
}
|
||||
kafkaNotifyCfg := globalServerConfig.Notify.GetKafkaByID(sqsArn.AccountID)
|
||||
if !kafkaNotifyCfg.Enable {
|
||||
return false
|
||||
}
|
||||
kafkaC, err := dialKafka(kafkaNotifyCfg)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to dial Kafka server %#v", kafkaNotifyCfg)
|
||||
return false
|
||||
}
|
||||
defer kafkaC.Close()
|
||||
return true
|
||||
}
|
||||
|
||||
// Match function matches wild cards in 'pattern' for events.
|
||||
func eventMatch(eventType string, events []string) (ok bool) {
|
||||
for _, event := range events {
|
||||
ok = wildcard.MatchSimple(event, eventType)
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
// Filter rule match, matches an object against the filter rules.
|
||||
func filterRuleMatch(object string, frs []filterRule) bool {
|
||||
var prefixMatch, suffixMatch = true, true
|
||||
for _, fr := range frs {
|
||||
if isValidFilterNamePrefix(fr.Name) {
|
||||
prefixMatch = hasPrefix(object, fr.Value)
|
||||
} else if isValidFilterNameSuffix(fr.Name) {
|
||||
suffixMatch = hasSuffix(object, fr.Value)
|
||||
}
|
||||
}
|
||||
return prefixMatch && suffixMatch
|
||||
}
|
||||
|
||||
// A type to represent dynamic error generation functions for
|
||||
// notifications.
|
||||
type notificationErrorFactoryFunc func(string, ...interface{}) error
|
||||
|
||||
// A function to build dynamic error generation functions for
|
||||
// notifications by setting an error prefix string.
|
||||
func newNotificationErrorFactory(prefix string) notificationErrorFactoryFunc {
|
||||
return func(msg string, a ...interface{}) error {
|
||||
s := fmt.Sprintf(msg, a...)
|
||||
return fmt.Errorf("%s: %s", prefix, s)
|
||||
}
|
||||
}
|
|
@ -1,214 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import "testing"
|
||||
|
||||
// Tests for event filter rules.
|
||||
func TestFilterMatch(t *testing.T) {
|
||||
testCases := []struct {
|
||||
objectName string
|
||||
rules []filterRule
|
||||
expectedRuleMatch bool
|
||||
}{
|
||||
// Prefix matches for a parent.
|
||||
{
|
||||
objectName: "test/test1/object.txt",
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: "test",
|
||||
},
|
||||
},
|
||||
expectedRuleMatch: true,
|
||||
},
|
||||
// Prefix matches for the object.
|
||||
{
|
||||
objectName: "test/test1/object.txt",
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: "test/test1/object",
|
||||
},
|
||||
},
|
||||
expectedRuleMatch: true,
|
||||
},
|
||||
// Prefix doesn't match.
|
||||
{
|
||||
objectName: "test/test1/object.txt",
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: "test/test1/object/",
|
||||
},
|
||||
},
|
||||
expectedRuleMatch: false,
|
||||
},
|
||||
// Suffix matches.
|
||||
{
|
||||
objectName: "test/test1/object.txt",
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "suffix",
|
||||
Value: ".txt",
|
||||
},
|
||||
},
|
||||
expectedRuleMatch: true,
|
||||
},
|
||||
// Suffix doesn't match but prefix matches.
|
||||
{
|
||||
objectName: "test/test1/object.txt",
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "suffix",
|
||||
Value: ".jpg",
|
||||
},
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: "test/test1",
|
||||
},
|
||||
},
|
||||
expectedRuleMatch: false,
|
||||
},
|
||||
// Prefix doesn't match but suffix matches.
|
||||
{
|
||||
objectName: "test/test2/object.jpg",
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "suffix",
|
||||
Value: ".jpg",
|
||||
},
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: "test/test1",
|
||||
},
|
||||
},
|
||||
expectedRuleMatch: false,
|
||||
},
|
||||
// Suffix and prefix doesn't match.
|
||||
{
|
||||
objectName: "test/test2/object.jpg",
|
||||
rules: []filterRule{
|
||||
{
|
||||
Name: "suffix",
|
||||
Value: ".txt",
|
||||
},
|
||||
{
|
||||
Name: "prefix",
|
||||
Value: "test/test1",
|
||||
},
|
||||
},
|
||||
expectedRuleMatch: false,
|
||||
},
|
||||
}
|
||||
|
||||
// .. Validate all cases.
|
||||
for i, testCase := range testCases {
|
||||
ruleMatch := filterRuleMatch(testCase.objectName, testCase.rules)
|
||||
if ruleMatch != testCase.expectedRuleMatch {
|
||||
t.Errorf("Test %d: Expected %t, got %t", i+1, testCase.expectedRuleMatch, ruleMatch)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests all event match.
|
||||
func TestEventMatch(t *testing.T) {
|
||||
testCases := []struct {
|
||||
eventName EventName
|
||||
events []string
|
||||
match bool
|
||||
}{
|
||||
// Valid object created PUT event.
|
||||
{
|
||||
eventName: ObjectCreatedPut,
|
||||
events: []string{
|
||||
"s3:ObjectCreated:Put",
|
||||
},
|
||||
match: true,
|
||||
},
|
||||
// Valid object removed DELETE event.
|
||||
{
|
||||
eventName: ObjectRemovedDelete,
|
||||
events: []string{
|
||||
"s3:ObjectRemoved:Delete",
|
||||
},
|
||||
match: true,
|
||||
},
|
||||
// Invalid events fails to match with empty events.
|
||||
{
|
||||
eventName: ObjectRemovedDelete,
|
||||
events: []string{""},
|
||||
match: false,
|
||||
},
|
||||
// Invalid events fails to match with valid events.
|
||||
{
|
||||
eventName: ObjectCreatedCompleteMultipartUpload,
|
||||
events: []string{
|
||||
"s3:ObjectRemoved:*",
|
||||
},
|
||||
match: false,
|
||||
},
|
||||
// Valid events wild card match.
|
||||
{
|
||||
eventName: ObjectCreatedPut,
|
||||
events: []string{
|
||||
"s3:ObjectCreated:*",
|
||||
},
|
||||
match: true,
|
||||
},
|
||||
// Valid events wild card match.
|
||||
{
|
||||
eventName: ObjectCreatedPost,
|
||||
events: []string{
|
||||
"s3:ObjectCreated:*",
|
||||
},
|
||||
match: true,
|
||||
},
|
||||
// Valid events wild card match.
|
||||
{
|
||||
eventName: ObjectCreatedCopy,
|
||||
events: []string{
|
||||
"s3:ObjectCreated:*",
|
||||
},
|
||||
match: true,
|
||||
},
|
||||
// Valid events wild card match.
|
||||
{
|
||||
eventName: ObjectCreatedCompleteMultipartUpload,
|
||||
events: []string{
|
||||
"s3:ObjectCreated:*",
|
||||
},
|
||||
match: true,
|
||||
},
|
||||
// Valid events wild card match.
|
||||
{
|
||||
eventName: ObjectCreatedPut,
|
||||
events: []string{
|
||||
"s3:ObjectCreated:*",
|
||||
"s3:ObjectRemoved:*",
|
||||
},
|
||||
match: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
ok := eventMatch(testCase.eventName.String(), testCase.events)
|
||||
if testCase.match != ok {
|
||||
t.Errorf("Test %d: Expected \"%t\", got \"%t\"", i+1, testCase.match, ok)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,192 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/streadway/amqp"
|
||||
)
|
||||
|
||||
// amqpNotify - represents logrus compatible AMQP hook.
|
||||
// All fields represent AMQP configuration details.
|
||||
type amqpNotify struct {
|
||||
Enable bool `json:"enable"`
|
||||
URL string `json:"url"`
|
||||
Exchange string `json:"exchange"`
|
||||
RoutingKey string `json:"routingKey"`
|
||||
ExchangeType string `json:"exchangeType"`
|
||||
DeliveryMode uint8 `json:"deliveryMode"`
|
||||
Mandatory bool `json:"mandatory"`
|
||||
Immediate bool `json:"immediate"`
|
||||
Durable bool `json:"durable"`
|
||||
Internal bool `json:"internal"`
|
||||
NoWait bool `json:"noWait"`
|
||||
AutoDeleted bool `json:"autoDeleted"`
|
||||
}
|
||||
|
||||
func (a *amqpNotify) Validate() error {
|
||||
if !a.Enable {
|
||||
return nil
|
||||
}
|
||||
if _, err := checkURL(a.URL); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// amqpConn implements a reconnecting amqp conn extending *amqp.Connection,
|
||||
// also provides additional protection for such a mutation.
|
||||
type amqpConn struct {
|
||||
sync.Mutex
|
||||
conn *amqp.Connection
|
||||
params amqpNotify
|
||||
}
|
||||
|
||||
// dialAMQP - dials and returns an amqpConnection instance,
|
||||
// for sending notifications. Returns error if amqp logger
|
||||
// is not enabled.
|
||||
func dialAMQP(amqpL amqpNotify) (*amqpConn, error) {
|
||||
if !amqpL.Enable {
|
||||
return nil, errNotifyNotEnabled
|
||||
}
|
||||
conn, err := amqp.Dial(amqpL.URL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &amqpConn{
|
||||
conn: conn,
|
||||
params: amqpL,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newAMQPNotify(accountID string) (*logrus.Logger, error) {
|
||||
amqpL := globalServerConfig.Notify.GetAMQPByID(accountID)
|
||||
|
||||
// Connect to amqp server.
|
||||
amqpC, err := dialAMQP(amqpL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
amqpLog := logrus.New()
|
||||
|
||||
// Disable writing to console.
|
||||
amqpLog.Out = ioutil.Discard
|
||||
|
||||
// Add a amqp hook.
|
||||
amqpLog.Hooks.Add(amqpC)
|
||||
|
||||
// Set default JSON formatter.
|
||||
amqpLog.Formatter = new(logrus.JSONFormatter)
|
||||
|
||||
// Successfully enabled all AMQPs.
|
||||
return amqpLog, nil
|
||||
}
|
||||
|
||||
// Returns true if the error represents a closed
|
||||
// network error.
|
||||
func isAMQPClosedNetworkErr(err error) bool {
|
||||
// Any other error other than connection closed, return.
|
||||
if neterr, ok := err.(*net.OpError); ok &&
|
||||
neterr.Err.Error() == "use of closed network connection" {
|
||||
return true
|
||||
} else if err == amqp.ErrClosed {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Channel is a wrapper implementation of amqp.Connection.Channel()
|
||||
// which implements transparent reconnection.
|
||||
func (q *amqpConn) Channel() (*amqp.Channel, error) {
|
||||
q.Lock()
|
||||
ch, err := q.conn.Channel()
|
||||
q.Unlock()
|
||||
if err != nil {
|
||||
if !isAMQPClosedNetworkErr(err) {
|
||||
return nil, err
|
||||
}
|
||||
// Attempt to connect again.
|
||||
var conn *amqp.Connection
|
||||
conn, err = amqp.Dial(q.params.URL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ch, err = conn.Channel()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
q.Lock()
|
||||
q.conn = conn
|
||||
q.Unlock()
|
||||
}
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
// Fire is called when an event should be sent to the message broker.
|
||||
func (q *amqpConn) Fire(entry *logrus.Entry) error {
|
||||
ch, err := q.Channel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ch.Close()
|
||||
|
||||
err = ch.ExchangeDeclare(
|
||||
q.params.Exchange,
|
||||
q.params.ExchangeType,
|
||||
q.params.Durable,
|
||||
q.params.AutoDeleted,
|
||||
q.params.Internal,
|
||||
q.params.NoWait,
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
body, err := entry.String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ch.Publish(
|
||||
q.params.Exchange,
|
||||
q.params.RoutingKey,
|
||||
q.params.Mandatory,
|
||||
q.params.Immediate,
|
||||
amqp.Publishing{
|
||||
ContentType: "application/json",
|
||||
DeliveryMode: q.params.DeliveryMode,
|
||||
Body: []byte(body),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Levels is available logging levels.
|
||||
func (q *amqpConn) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
}
|
||||
}
|
|
@ -1,57 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/streadway/amqp"
|
||||
)
|
||||
|
||||
// Tests for is closed network error.
|
||||
func TestIsClosedNetworkErr(t *testing.T) {
|
||||
testCases := []struct {
|
||||
err error
|
||||
success bool
|
||||
}{
|
||||
{
|
||||
err: amqp.ErrClosed,
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
err: &net.OpError{Err: errors.New("use of closed network connection")},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
err: nil,
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
err: errors.New("testing error"),
|
||||
success: false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
ok := isAMQPClosedNetworkErr(testCase.err)
|
||||
if ok != testCase.success {
|
||||
t.Errorf("Test %d: Expected %t, got %t", i+1, testCase.success, ok)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,183 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"gopkg.in/olivere/elastic.v5"
|
||||
)
|
||||
|
||||
var (
|
||||
esErrFunc = newNotificationErrorFactory("Elasticsearch")
|
||||
|
||||
errESFormat = esErrFunc(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess)
|
||||
errESIndex = esErrFunc("Index name was not specified in the configuration.")
|
||||
)
|
||||
|
||||
// elasticQueue is a elasticsearch event notification queue.
|
||||
type elasticSearchNotify struct {
|
||||
Enable bool `json:"enable"`
|
||||
Format string `json:"format"`
|
||||
URL string `json:"url"`
|
||||
Index string `json:"index"`
|
||||
}
|
||||
|
||||
func (e *elasticSearchNotify) Validate() error {
|
||||
if !e.Enable {
|
||||
return nil
|
||||
}
|
||||
if e.Format != formatNamespace && e.Format != formatAccess {
|
||||
return errESFormat
|
||||
}
|
||||
if _, err := checkURL(e.URL); err != nil {
|
||||
return err
|
||||
}
|
||||
if e.Index == "" {
|
||||
return errESIndex
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type elasticClient struct {
|
||||
*elastic.Client
|
||||
params elasticSearchNotify
|
||||
}
|
||||
|
||||
// Connects to elastic search instance at URL.
|
||||
func dialElastic(esNotify elasticSearchNotify) (*elastic.Client, error) {
|
||||
if !esNotify.Enable {
|
||||
return nil, errNotifyNotEnabled
|
||||
}
|
||||
return elastic.NewClient(
|
||||
elastic.SetURL(esNotify.URL),
|
||||
elastic.SetSniff(false),
|
||||
elastic.SetMaxRetries(10),
|
||||
)
|
||||
}
|
||||
|
||||
// newElasticNotify builds a logrus logger whose hook forwards event
// notifications to the Elasticsearch target identified by accountID.
// The destination index is created if it does not already exist.
func newElasticNotify(accountID string) (*logrus.Logger, error) {
	esNotify := globalServerConfig.Notify.GetElasticSearchByID(accountID)

	// Dial to elastic search.
	client, err := dialElastic(esNotify)
	if err != nil {
		return nil, esErrFunc("Error dialing the server: %v", err)
	}

	// Use the IndexExists service to check if a specified index exists.
	exists, err := client.IndexExists(esNotify.Index).
		Do(context.Background())
	if err != nil {
		return nil, esErrFunc("Error checking if index exists: %v", err)
	}
	// Index does not exist, attempt to create it.
	if !exists {
		var createIndex *elastic.IndicesCreateResult
		createIndex, err = client.CreateIndex(esNotify.Index).
			Do(context.Background())
		if err != nil {
			return nil, esErrFunc("Error creating index `%s`: %v",
				esNotify.Index, err)
		}
		// The cluster must acknowledge creation before we proceed.
		if !createIndex.Acknowledged {
			return nil, esErrFunc("Index not created")
		}
	}

	elasticCl := elasticClient{
		Client: client,
		params: esNotify,
	}

	elasticSearchLog := logrus.New()

	// Disable writing to console.
	elasticSearchLog.Out = ioutil.Discard

	// Add a elasticSearch hook.
	elasticSearchLog.Hooks.Add(elasticCl)

	// Set default JSON formatter.
	elasticSearchLog.Formatter = new(logrus.JSONFormatter)

	// Success, elastic search successfully initialized.
	return elasticSearchLog, nil
}
|
||||
|
||||
// Fire is required to implement logrus hook. It mirrors the event into
// the Elasticsearch index: in namespace format the document keyed by
// the object name is indexed or (on object removal) deleted; in access
// format a timestamped document is appended. Entries without string
// "EventType"/"Key" fields are silently ignored.
func (q elasticClient) Fire(entry *logrus.Entry) (err error) {
	// Reflect on eventType and Key on their native type.
	entryStr, ok := entry.Data["EventType"].(string)
	if !ok {
		return nil
	}
	keyStr, ok := entry.Data["Key"].(string)
	if !ok {
		return nil
	}

	switch q.params.Format {
	case formatNamespace:
		// If event matches as delete, we purge the previous index.
		if eventMatch(entryStr, []string{"s3:ObjectRemoved:*"}) {
			_, err = q.Client.Delete().Index(q.params.Index).
				Type("event").Id(keyStr).Do(context.Background())
			break
		} // else we update elastic index or create a new one.
		_, err = q.Client.Index().Index(q.params.Index).
			Type("event").
			BodyJson(map[string]interface{}{
				"Records": entry.Data["Records"],
			}).Id(keyStr).Do(context.Background())
	case formatAccess:
		// eventTime is taken from the first entry in the
		// records.
		events, ok := entry.Data["Records"].([]NotificationEvent)
		if !ok {
			return esErrFunc("Unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
		}
		var eventTime time.Time
		eventTime, err = time.Parse(timeFormatAMZ, events[0].EventTime)
		if err != nil {
			return esErrFunc("Unable to parse event time \"%s\": %v",
				events[0].EventTime, err)
		}
		// Extract event time in milliseconds for Elasticsearch.
		eventTimeStr := fmt.Sprintf("%d", eventTime.UnixNano()/1000000)
		_, err = q.Client.Index().Index(q.params.Index).Type("event").
			Timestamp(eventTimeStr).
			BodyJson(map[string]interface{}{
				"Records": entry.Data["Records"],
			}).Do(context.Background())
	}
	if err != nil {
		return esErrFunc("Error inserting/deleting entry: %v", err)
	}
	return nil
}
|
||||
|
||||
// Required for logrus hook implementation
|
||||
func (q elasticClient) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
}
|
||||
}
|
|
@ -1,150 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
|
||||
sarama "gopkg.in/Shopify/sarama.v1"
|
||||
)
|
||||
|
||||
var (
	// kkErrFunc formats errors with a "Kafka" prefix so failures are
	// attributed to this notification target.
	kkErrFunc = newNotificationErrorFactory("Kafka")
)
|
||||
|
||||
// kafkaNotify holds the configuration of the Kafka server/cluster to
// send notifications to.
type kafkaNotify struct {
	// Flag to enable/disable this notification from configuration
	// file.
	Enable bool `json:"enable"`

	// List of Kafka brokers in `host:port` format.
	Brokers []string `json:"brokers"`

	// Topic to which event notifications should be sent.
	Topic string `json:"topic"`
}
|
||||
|
||||
func (k *kafkaNotify) Validate() error {
|
||||
if !k.Enable {
|
||||
return nil
|
||||
}
|
||||
if len(k.Brokers) == 0 {
|
||||
return kkErrFunc("No broker(s) specified.")
|
||||
}
|
||||
// Validate all specified brokers.
|
||||
for _, brokerAddr := range k.Brokers {
|
||||
if _, _, err := net.SplitHostPort(brokerAddr); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// kafkaConn contains the active connection to the Kafka cluster and
// the topic to send event notifications to.
type kafkaConn struct {
	producer sarama.SyncProducer // synchronous producer used by Fire
	topic    string              // destination topic for all events
}
|
||||
|
||||
// dialKafka connects a synchronous producer to the configured broker
// list. Returns errNotifyNotEnabled for a disabled target, and an
// error when the topic is missing or the producer cannot be started.
func dialKafka(kn kafkaNotify) (kc kafkaConn, e error) {
	if !kn.Enable {
		return kc, errNotifyNotEnabled
	}

	if kn.Topic == "" {
		return kc, kkErrFunc(
			"Topic was not specified in configuration")
	}

	config := sarama.NewConfig()
	// Wait for all in-sync replicas to ack the message
	config.Producer.RequiredAcks = sarama.WaitForAll
	// Retry up to 10 times to produce the message
	config.Producer.Retry.Max = 10
	// SyncProducer requires success notifications to be returned.
	config.Producer.Return.Successes = true

	p, err := sarama.NewSyncProducer(kn.Brokers, config)
	if err != nil {
		return kc, kkErrFunc("Failed to start producer: %v", err)
	}

	return kafkaConn{p, kn.Topic}, nil
}
|
||||
|
||||
func newKafkaNotify(accountID string) (*logrus.Logger, error) {
|
||||
kafkaNotifyCfg := globalServerConfig.Notify.GetKafkaByID(accountID)
|
||||
|
||||
// Try connecting to the configured Kafka broker(s).
|
||||
kc, err := dialKafka(kafkaNotifyCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Configure kafkaConn object as a Hook in logrus.
|
||||
kafkaLog := logrus.New()
|
||||
kafkaLog.Out = ioutil.Discard
|
||||
kafkaLog.Formatter = new(logrus.JSONFormatter)
|
||||
kafkaLog.Hooks.Add(kc)
|
||||
|
||||
return kafkaLog, nil
|
||||
}
|
||||
|
||||
// Close shuts down the underlying Kafka producer. The close error is
// deliberately discarded - callers have no way to act on it.
func (kC kafkaConn) Close() {
	_ = kC.producer.Close()
}
|
||||
|
||||
// Fire - to implement logrus.Hook interface. Publishes the JSON
// serialized entry to the configured topic, using the event's "Key"
// field as the Kafka message key. Entries whose "Key" is not a string
// are rejected with an error.
func (kC kafkaConn) Fire(entry *logrus.Entry) error {
	body, err := entry.Reader()
	if err != nil {
		return err
	}

	// Extract the key of the event as a string
	keyStr, ok := entry.Data["Key"].(string)
	if !ok {
		return kkErrFunc("Unable to convert event key %v to string.",
			entry.Data["Key"])
	}

	// Construct message to send to Kafka
	msg := sarama.ProducerMessage{
		Topic: kC.topic,
		Key:   sarama.StringEncoder(keyStr),
		Value: sarama.ByteEncoder(body.Bytes()),
	}

	// Attempt sending the message to Kafka
	_, _, err = kC.producer.SendMessage(&msg)
	if err != nil {
		return kkErrFunc("Error sending event to Kafka - %v", err)
	}
	return nil
}
|
||||
|
||||
// Levels - to implement logrus.Hook interface
|
||||
func (kC kafkaConn) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
}
|
||||
}
|
|
@ -1,80 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// listenerConn identifies one bucket-listen subscription: the peer
// address events are forwarded to, the listener's ARN, and the client
// used to deliver them.
type listenerConn struct {
	TargetAddr  string          // network address of the listening peer
	ListenerARN string          // ARN identifying this listener
	BMSClient   BucketMetaState // client used to send events to the peer
}
|
||||
|
||||
// listenerLogger couples a logrus logger with the connection its hook
// forwards events over.
type listenerLogger struct {
	log   *logrus.Logger
	lconn listenerConn
}
|
||||
|
||||
func newListenerLogger(listenerArn, targetAddr string) (*listenerLogger, error) {
|
||||
bmsClient := globalS3Peers.GetPeerClient(targetAddr)
|
||||
if bmsClient == nil {
|
||||
return nil, fmt.Errorf(
|
||||
"Peer %s was not initialized, unexpected error",
|
||||
targetAddr,
|
||||
)
|
||||
}
|
||||
lc := listenerConn{
|
||||
TargetAddr: targetAddr,
|
||||
ListenerARN: listenerArn,
|
||||
BMSClient: bmsClient,
|
||||
}
|
||||
|
||||
lcLog := logrus.New()
|
||||
|
||||
lcLog.Out = ioutil.Discard
|
||||
|
||||
lcLog.Formatter = new(logrus.JSONFormatter)
|
||||
|
||||
lcLog.Hooks.Add(lc)
|
||||
|
||||
return &listenerLogger{lcLog, lc}, nil
|
||||
}
|
||||
|
||||
// Fire sends the event records to the target server via an RPC client
// call. Entries whose "Records" field is not a []NotificationEvent are
// silently discarded.
func (lc listenerConn) Fire(entry *logrus.Entry) error {
	notificationEvent, ok := entry.Data["Records"].([]NotificationEvent)
	if !ok {
		// If the record is not of the expected type, silently
		// discard.
		return nil
	}

	// Send Event RPC call and return error
	arg := EventArgs{Event: notificationEvent, Arn: lc.ListenerARN}
	return lc.BMSClient.SendEvent(&arg)
}
|
||||
|
||||
func (lc listenerConn) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
}
|
||||
}
|
|
@ -1,123 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
MQTT "github.com/eclipse/paho.mqtt.golang"
|
||||
)
|
||||
|
||||
// mqttNotify holds the user-supplied configuration of a single MQTT
// notification target.
type mqttNotify struct {
	Enable   bool   `json:"enable"`   // target is used only when true
	Broker   string `json:"broker"`   // broker URL to publish to
	Topic    string `json:"topic"`    // destination topic
	QoS      int    `json:"qos"`      // MQTT quality-of-service level
	ClientID string `json:"clientId"` // client identifier for the broker
	User     string `json:"username"` // optional auth user name
	Password string `json:"password"` // optional auth password
}
|
||||
|
||||
func (m *mqttNotify) Validate() error {
|
||||
if !m.Enable {
|
||||
return nil
|
||||
}
|
||||
if _, err := checkURL(m.Broker); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mqttConn couples the connected MQTT client with the configuration it
// was built from; it implements logrus.Hook.
type mqttConn struct {
	params mqttNotify // config used by Fire for topic and QoS
	Client MQTT.Client
}
|
||||
|
||||
// dialMQTT connects to the configured MQTT broker and returns the
// wrapped client. Returns errNotifyNotEnabled when the target is
// disabled.
func dialMQTT(mqttL mqttNotify) (mc mqttConn, e error) {
	if !mqttL.Enable {
		return mc, errNotifyNotEnabled
	}
	connOpts := &MQTT.ClientOptions{
		ClientID:             mqttL.ClientID,
		CleanSession:         true,
		Username:             mqttL.User,
		Password:             mqttL.Password,
		MaxReconnectInterval: 1 * time.Second,
		KeepAlive:            30 * time.Second,
		TLSConfig:            tls.Config{RootCAs: globalRootCAs},
	}
	connOpts.AddBroker(mqttL.Broker)
	client := MQTT.NewClient(connOpts)
	// Connect synchronously; Wait blocks until the attempt finishes.
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		return mc, token.Error()
	}
	return mqttConn{Client: client, params: mqttL}, nil
}
|
||||
|
||||
func newMQTTNotify(accountID string) (*logrus.Logger, error) {
|
||||
mqttL := globalServerConfig.Notify.GetMQTTByID(accountID)
|
||||
|
||||
//connect to MQTT Server
|
||||
mqttC, err := dialMQTT(mqttL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mqttLog := logrus.New()
|
||||
|
||||
// Disable writing to console.
|
||||
mqttLog.Out = ioutil.Discard
|
||||
|
||||
// Add a mqtt hook.
|
||||
mqttLog.Hooks.Add(mqttC)
|
||||
|
||||
// Set default JSON formatter
|
||||
mqttLog.Formatter = new(logrus.JSONFormatter)
|
||||
|
||||
// successfully enabled all MQTTs
|
||||
return mqttLog, nil
|
||||
}
|
||||
|
||||
// Fire if called when an event should be sent to the message broker.
|
||||
func (q mqttConn) Fire(entry *logrus.Entry) error {
|
||||
body, err := entry.String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !q.Client.IsConnected() {
|
||||
if token := q.Client.Connect(); token.Wait() && token.Error() != nil {
|
||||
return token.Error()
|
||||
}
|
||||
}
|
||||
token := q.Client.Publish(q.params.Topic, byte(q.params.QoS), false, body)
|
||||
if token.Wait() && token.Error() != nil {
|
||||
return token.Error()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Levels is available logging levels.
|
||||
func (q mqttConn) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
}
|
||||
}
|
|
@ -1,337 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// MySQL Notifier implementation. Two formats, "namespace" and
|
||||
// "access" are supported.
|
||||
//
|
||||
// * Namespace format
|
||||
//
|
||||
// On each create or update object event in Minio Object storage
|
||||
// server, a row is created or updated in the table in MySQL. On each
|
||||
// object removal, the corresponding row is deleted from the table.
|
||||
//
|
||||
// A table with a specific structure (column names, column types, and
|
||||
// primary key/uniqueness constraint) is used. The user may set the
|
||||
// table name in the configuration. A sample SQL command that creates
|
||||
// a command with the required structure is:
|
||||
//
|
||||
// CREATE TABLE myminio (
|
||||
// key_name VARCHAR(2048),
|
||||
// value JSONB,
|
||||
// PRIMARY KEY (key_name),
|
||||
// );
|
||||
//
|
||||
// MySQL's "INSERT ... ON DUPLICATE ..." feature (UPSERT) is used
|
||||
// here. The implementation has been tested with MySQL Ver 14.14
|
||||
// Distrib 5.7.17.
|
||||
//
|
||||
// * Access format
|
||||
//
|
||||
// On each event, a row is appended to the configured table. There is
|
||||
// no deletion or modification of existing rows.
|
||||
//
|
||||
// A different table schema is used for this format. A sample SQL
|
||||
// commant that creates a table with the required structure is:
|
||||
//
|
||||
// CREATE TABLE myminio (
|
||||
// event_time TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
// event_data JSONB
|
||||
// );
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/go-sql-driver/mysql"
|
||||
)
|
||||
|
||||
// SQL templates for the MySQL notifier. %s is replaced by the
// configured table name via fmt.Sprintf before use.
const (
	// Queries for format=namespace mode.
	upsertRowForNSMySQL = `INSERT INTO %s (key_name, value)
VALUES (?, ?)
ON DUPLICATE KEY UPDATE value=VALUES(value);
`
	deleteRowForNSMySQL = ` DELETE FROM %s
WHERE key_name = ?;`
	createTableForNSMySQL = `CREATE TABLE %s (
key_name VARCHAR(2048),
value JSON,
PRIMARY KEY (key_name)
);`

	// Queries for format=access mode.
	insertRowForAccessMySQL = `INSERT INTO %s (event_time, event_data)
VALUES (?, ?);`
	createTableForAccessMySQL = `CREATE TABLE %s (
event_time DATETIME NOT NULL,
event_data JSON
);`

	// Query to check if a table already exists.
	tableExistsMySQL = `SELECT 1 FROM %s;`
)
|
||||
|
||||
var (
	// mysqlErrFunc prefixes error messages with "MySQL" so failures
	// are attributed to this notification target.
	mysqlErrFunc = newNotificationErrorFactory("MySQL")

	// errMysqlFormat is returned when the configured format is neither
	// "namespace" nor "access".
	errMysqlFormat = mysqlErrFunc(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess)
	// errMysqlTable is returned when no table name is configured.
	errMysqlTable = mysqlErrFunc("Table was not specified in the configuration.")
)
|
||||
|
||||
// mySQLNotify holds the user-supplied configuration of a single MySQL
// notification target.
type mySQLNotify struct {
	Enable bool `json:"enable"` // target is used only when true

	Format string `json:"format"` // formatNamespace or formatAccess

	// pass data-source-name connection string in config
	// directly. This string is formatted according to
	// https://github.com/go-sql-driver/mysql#dsn-data-source-name
	DsnString string `json:"dsnString"`
	// specifying a table name is required.
	Table string `json:"table"`

	// uses the values below if no connection string is specified
	// - however the connection string method offers more
	// flexibility.
	Host     string `json:"host"`
	Port     string `json:"port"`
	User     string `json:"user"`
	Password string `json:"password"`
	Database string `json:"database"`
}
|
||||
|
||||
func (m *mySQLNotify) Validate() error {
|
||||
if !m.Enable {
|
||||
return nil
|
||||
}
|
||||
if m.Format != formatNamespace && m.Format != formatAccess {
|
||||
return errMysqlFormat
|
||||
}
|
||||
if m.DsnString == "" {
|
||||
if _, err := checkURL(m.Host); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if m.Table == "" {
|
||||
return errMysqlTable
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mySQLConn bundles an open database handle with the target's table,
// format and the prepared statements used by Fire.
type mySQLConn struct {
	dsnStr        string               // DSN the connection was opened with
	table         string               // destination table name
	format        string               // formatNamespace or formatAccess
	preparedStmts map[string]*sql.Stmt // keyed "upsertRow"/"deleteRow"/"insertRow"
	*sql.DB
}
|
||||
|
||||
// dialMySQL opens and pings a MySQL connection for the given target,
// creates the destination table if it is missing, and prepares the
// statements matching the configured format. Returns
// errNotifyNotEnabled when the target is disabled.
//
// NOTE(review): the db handle (and any already-prepared statements)
// is not closed on the error paths below - confirm callers treat
// these errors as fatal, otherwise this leaks connections.
func dialMySQL(msql mySQLNotify) (mc mySQLConn, e error) {
	if !msql.Enable {
		return mc, errNotifyNotEnabled
	}

	dsnStr := msql.DsnString
	// check if connection string is specified
	if dsnStr == "" {
		// build from other parameters
		config := mysql.Config{
			User:   msql.User,
			Passwd: msql.Password,
			Net:    "tcp",
			Addr:   msql.Host + ":" + msql.Port,
			DBName: msql.Database,
		}
		dsnStr = config.FormatDSN()
	}

	db, err := sql.Open("mysql", dsnStr)
	if err != nil {
		return mc, mysqlErrFunc(
			"Connection opening failure (dsnStr=%s): %v",
			dsnStr, err)
	}

	// ping to check that server is actually reachable.
	err = db.Ping()
	if err != nil {
		return mc, mysqlErrFunc(
			"Ping to server failed with: %v", err)
	}

	// check that table exists - if not, create it.
	_, err = db.Exec(fmt.Sprintf(tableExistsMySQL, msql.Table))
	if err != nil {
		createStmt := createTableForNSMySQL
		if msql.Format == formatAccess {
			createStmt = createTableForAccessMySQL
		}

		// most likely, table does not exist. try to create it:
		_, errCreate := db.Exec(fmt.Sprintf(createStmt, msql.Table))
		if errCreate != nil {
			// failed to create the table. error out.
			return mc, mysqlErrFunc(
				"'Select' failed with %v, then 'Create Table' failed with %v",
				err, errCreate,
			)
		}
	}

	// create prepared statements
	stmts := make(map[string]*sql.Stmt)
	switch msql.Format {
	case formatNamespace:
		// insert or update statement
		stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNSMySQL,
			msql.Table))
		if err != nil {
			return mc, mysqlErrFunc("create UPSERT prepared statement failed with: %v", err)
		}
		// delete statement
		stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNSMySQL,
			msql.Table))
		if err != nil {
			return mc, mysqlErrFunc("create DELETE prepared statement failed with: %v", err)
		}
	case formatAccess:
		// insert statement
		stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccessMySQL,
			msql.Table))
		if err != nil {
			return mc, mysqlErrFunc(
				"create INSERT prepared statement failed with: %v", err)
		}

	}
	return mySQLConn{dsnStr, msql.Table, msql.Format, stmts, db}, nil
}
|
||||
|
||||
func newMySQLNotify(accountID string) (*logrus.Logger, error) {
|
||||
mysqlNotify := globalServerConfig.Notify.GetMySQLByID(accountID)
|
||||
|
||||
// Dial mysql
|
||||
myC, err := dialMySQL(mysqlNotify)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mySQLLog := logrus.New()
|
||||
|
||||
mySQLLog.Out = ioutil.Discard
|
||||
|
||||
mySQLLog.Formatter = new(logrus.JSONFormatter)
|
||||
|
||||
mySQLLog.Hooks.Add(myC)
|
||||
|
||||
return mySQLLog, nil
|
||||
}
|
||||
|
||||
func (myC mySQLConn) Close() {
|
||||
// first close all prepared statements
|
||||
for _, v := range myC.preparedStmts {
|
||||
_ = v.Close()
|
||||
}
|
||||
// close db connection
|
||||
_ = myC.DB.Close()
|
||||
}
|
||||
|
||||
func (myC mySQLConn) Fire(entry *logrus.Entry) error {
|
||||
// get event type by trying to convert to string
|
||||
entryEventType, ok := entry.Data["EventType"].(string)
|
||||
if !ok {
|
||||
// ignore event if converting EventType to string
|
||||
// fails.
|
||||
return nil
|
||||
}
|
||||
|
||||
jsonEncoder := func(d interface{}) ([]byte, error) {
|
||||
value, err := json.Marshal(map[string]interface{}{
|
||||
"Records": d,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, mysqlErrFunc(
|
||||
"Unable to encode event %v to JSON: %v", d, err)
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
switch myC.format {
|
||||
case formatNamespace:
|
||||
// Check for event delete
|
||||
if eventMatch(entryEventType, []string{"s3:ObjectRemoved:*"}) {
|
||||
// delete row from the table
|
||||
_, err := myC.preparedStmts["deleteRow"].Exec(entry.Data["Key"])
|
||||
if err != nil {
|
||||
return mysqlErrFunc(
|
||||
"Error deleting event with key = %v - got mysql error - %v",
|
||||
entry.Data["Key"], err,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
value, err := jsonEncoder(entry.Data["Records"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// upsert row into the table
|
||||
_, err = myC.preparedStmts["upsertRow"].Exec(entry.Data["Key"], value)
|
||||
if err != nil {
|
||||
return mysqlErrFunc(
|
||||
"Unable to upsert event with Key=%v and Value=%v - got mysql error - %v",
|
||||
entry.Data["Key"], entry.Data["Records"], err,
|
||||
)
|
||||
}
|
||||
}
|
||||
case formatAccess:
|
||||
// eventTime is taken from the first entry in the
|
||||
// records.
|
||||
events, ok := entry.Data["Records"].([]NotificationEvent)
|
||||
if !ok {
|
||||
return mysqlErrFunc("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
|
||||
}
|
||||
eventTime, err := time.Parse(timeFormatAMZ, events[0].EventTime)
|
||||
if err != nil {
|
||||
return mysqlErrFunc("unable to parse event time \"%s\": %v",
|
||||
events[0].EventTime, err)
|
||||
}
|
||||
|
||||
value, err := jsonEncodeEventData(entry.Data["Records"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = myC.preparedStmts["insertRow"].Exec(eventTime, value)
|
||||
if err != nil {
|
||||
return mysqlErrFunc("Unable to insert event with value=%v: %v",
|
||||
value, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (myC mySQLConn) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
}
|
||||
}
|
|
@ -1,196 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/nats-io/go-nats-streaming"
|
||||
"github.com/nats-io/nats"
|
||||
)
|
||||
|
||||
// natsNotifyStreaming contains specific options related to connection
// to a NATS streaming server.
type natsNotifyStreaming struct {
	Enable             bool   `json:"enable"`             // use streaming rather than plain NATS
	ClusterID          string `json:"clusterID"`          // streaming cluster to connect to
	ClientID           string `json:"clientID"`           // client identity; random UUID when empty
	Async              bool   `json:"async"`              // publish asynchronously when true
	MaxPubAcksInflight int    `json:"maxPubAcksInflight"` // cap on unacked publishes; applied only when > 0
}
|
||||
|
||||
// natsNotify - represents logrus compatible NATS hook.
// All fields represent NATS configuration details.
type natsNotify struct {
	Enable       bool                `json:"enable"`       // target is used only when true
	Address      string              `json:"address"`      // server address in host:port form
	Subject      string              `json:"subject"`      // subject events are published to
	Username     string              `json:"username"`     // optional auth user
	Password     string              `json:"password"`     // optional auth password
	Token        string              `json:"token"`        // optional auth token (plain NATS path only)
	Secure       bool                `json:"secure"`       // use TLS when true
	PingInterval int64               `json:"pingInterval"` // NOTE(review): not applied in dialNATS - confirm intended
	Streaming    natsNotifyStreaming `json:"streaming"`    // NATS streaming sub-configuration
}
|
||||
|
||||
func (n *natsNotify) Validate() error {
|
||||
if !n.Enable {
|
||||
return nil
|
||||
}
|
||||
if _, _, err := net.SplitHostPort(n.Address); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// natsIOConn abstracts connection to any type of NATS server. Exactly
// one of natsConn (plain) or stanConn (streaming) is populated,
// selected by params.Streaming.Enable.
type natsIOConn struct {
	params   natsNotify
	natsConn *nats.Conn
	stanConn stan.Conn
}
|
||||
|
||||
// dialNATS - dials and returns an natsIOConn instance,
// for sending notifications. Returns error if nats logger
// is not enabled. When Streaming.Enable is set a NATS streaming
// connection is made, otherwise a plain NATS connection. testDial
// appends "-test" to the client ID so a test connection does not
// collide with an already-registered streaming client.
func dialNATS(natsL natsNotify, testDial bool) (nioc natsIOConn, e error) {
	if !natsL.Enable {
		return nioc, errNotifyNotEnabled
	}

	// Construct natsIOConn which holds all NATS connection information
	conn := natsIOConn{params: natsL}

	if natsL.Streaming.Enable {
		// Construct scheme to differentiate between clear and TLS connections
		scheme := "nats"
		if natsL.Secure {
			scheme = "tls"
		}
		// Construct address URL
		addressURL := scheme + "://" + natsL.Username + ":" + natsL.Password + "@" + natsL.Address
		// Fetch the user-supplied client ID and provide a random one if not provided
		clientID := natsL.Streaming.ClientID
		if clientID == "" {
			clientID = mustGetUUID()
		}
		// Add test suffix to clientID to avoid clientID already registered error
		if testDial {
			clientID += "-test"
		}
		connOpts := []stan.Option{
			stan.NatsURL(addressURL),
		}
		// Setup MaxPubAcksInflight parameter
		if natsL.Streaming.MaxPubAcksInflight > 0 {
			connOpts = append(connOpts,
				stan.MaxPubAcksInflight(natsL.Streaming.MaxPubAcksInflight))
		}
		// Do the real connection to the NATS server
		sc, err := stan.Connect(natsL.Streaming.ClusterID, clientID, connOpts...)
		if err != nil {
			return nioc, err
		}
		// Save the created connection
		conn.stanConn = sc
	} else {
		// Configure and connect to NATS server
		natsC := nats.DefaultOptions
		natsC.Url = "nats://" + natsL.Address
		natsC.User = natsL.Username
		natsC.Password = natsL.Password
		natsC.Token = natsL.Token
		natsC.Secure = natsL.Secure
		// Do the real connection
		nc, err := natsC.Connect()
		if err != nil {
			return nioc, err
		}
		// Save the created connection
		conn.natsConn = nc
	}
	return conn, nil
}
|
||||
|
||||
// closeNATS - close the underlying NATS connection
|
||||
func closeNATS(conn natsIOConn) {
|
||||
if conn.params.Streaming.Enable {
|
||||
conn.stanConn.Close()
|
||||
} else {
|
||||
conn.natsConn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func newNATSNotify(accountID string) (*logrus.Logger, error) {
|
||||
natsL := globalServerConfig.Notify.GetNATSByID(accountID)
|
||||
|
||||
// Connect to nats server.
|
||||
natsC, err := dialNATS(natsL, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
natsLog := logrus.New()
|
||||
|
||||
// Disable writing to console.
|
||||
natsLog.Out = ioutil.Discard
|
||||
|
||||
// Add a nats hook.
|
||||
natsLog.Hooks.Add(natsC)
|
||||
|
||||
// Set default JSON formatter.
|
||||
natsLog.Formatter = new(logrus.JSONFormatter)
|
||||
|
||||
// Successfully enabled all NATSs.
|
||||
return natsLog, nil
|
||||
}
|
||||
|
||||
// Fire is called when an event should be sent to the message broker
|
||||
func (n natsIOConn) Fire(entry *logrus.Entry) error {
|
||||
body, err := entry.Reader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n.params.Streaming.Enable {
|
||||
// Streaming flag is enabled, publish the log synchronously or asynchronously
|
||||
// depending on the user supplied parameter
|
||||
if n.params.Streaming.Async {
|
||||
_, err = n.stanConn.PublishAsync(n.params.Subject, body.Bytes(), nil)
|
||||
} else {
|
||||
err = n.stanConn.Publish(n.params.Subject, body.Bytes())
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Publish the log
|
||||
err = n.natsConn.Publish(n.params.Subject, body.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Levels is available logging levels.
|
||||
func (n natsIOConn) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
}
|
||||
}
|
|
@ -1,353 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// PostgreSQL Notifier implementation. Two formats, "namespace" and
|
||||
// "access" are supported.
|
||||
//
|
||||
// * Namespace format
|
||||
//
|
||||
// On each create or update object event in Minio Object storage
|
||||
// server, a row is created or updated in the table in Postgres. On
|
||||
// each object removal, the corresponding row is deleted from the
|
||||
// table.
|
||||
//
|
||||
// A table with a specific structure (column names, column types, and
|
||||
// primary key/uniqueness constraint) is used. The user may set the
|
||||
// table name in the configuration. A sample SQL command that creates
|
||||
// a table with the required structure is:
|
||||
//
|
||||
// CREATE TABLE myminio (
|
||||
// key VARCHAR PRIMARY KEY,
|
||||
// value JSONB
|
||||
// );
|
||||
//
|
||||
// PostgreSQL's "INSERT ... ON CONFLICT ... DO UPDATE ..." feature
|
||||
// (UPSERT) is used here, so the minimum version of PostgreSQL
|
||||
// required is 9.5.
|
||||
//
|
||||
// * Access format
|
||||
//
|
||||
// On each event, a row is appended to the configured table. There is
|
||||
// no deletion or modification of existing rows.
|
||||
//
|
||||
// A different table schema is used for this format. A sample SQL
|
||||
// commant that creates a table with the required structure is:
|
||||
//
|
||||
// CREATE TABLE myminio (
|
||||
// event_time TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
// event_data JSONB
|
||||
// );
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
|
||||
// Register postgres driver
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
const (
|
||||
// Queries for format=namespace mode. Here the `key` column is
|
||||
// the bucket and object of the event. When objects are
|
||||
// deleted, the corresponding row is deleted in the
|
||||
// table. When objects are created or over-written, rows are
|
||||
// inserted or updated respectively in the table.
|
||||
upsertRowForNS = `INSERT INTO %s (key, value)
|
||||
VALUES ($1, $2)
|
||||
ON CONFLICT (key)
|
||||
DO UPDATE SET value = EXCLUDED.value;`
|
||||
deleteRowForNS = ` DELETE FROM %s
|
||||
WHERE key = $1;`
|
||||
createTableForNS = `CREATE TABLE %s (
|
||||
key VARCHAR PRIMARY KEY,
|
||||
value JSONB
|
||||
);`
|
||||
|
||||
// Queries for format=access mode. Here the `event_time`
|
||||
// column of the table, stores the time at which the event
|
||||
// occurred in the Minio server.
|
||||
insertRowForAccess = `INSERT INTO %s (event_time, event_data)
|
||||
VALUES ($1, $2);`
|
||||
createTableForAccess = `CREATE TABLE %s (
|
||||
event_time TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
event_data JSONB
|
||||
);`
|
||||
|
||||
// Query to check if a table already exists.
|
||||
tableExists = `SELECT 1 FROM %s;`
|
||||
)
|
||||
|
||||
var (
|
||||
pgErrFunc = newNotificationErrorFactory("PostgreSQL")
|
||||
|
||||
errPGFormatError = pgErrFunc(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess)
|
||||
errPGTableError = pgErrFunc("Table was not specified in the configuration.")
|
||||
)
|
||||
|
||||
type postgreSQLNotify struct {
|
||||
Enable bool `json:"enable"`
|
||||
|
||||
Format string `json:"format"`
|
||||
|
||||
// Pass connection string in config directly. This string is
|
||||
// formatted according to
|
||||
// https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters
|
||||
ConnectionString string `json:"connectionString"`
|
||||
// specifying a table name is required.
|
||||
Table string `json:"table"`
|
||||
|
||||
// The values below, if non-empty are appended to
|
||||
// ConnectionString above. Default values are shown in
|
||||
// comments below (implicitly used by the library).
|
||||
Host string `json:"host"` // default: localhost
|
||||
Port string `json:"port"` // default: 5432
|
||||
User string `json:"user"` // default: user running minio
|
||||
Password string `json:"password"` // default: no password
|
||||
Database string `json:"database"` // default: same as user
|
||||
}
|
||||
|
||||
func (p *postgreSQLNotify) Validate() error {
|
||||
if !p.Enable {
|
||||
return nil
|
||||
}
|
||||
if p.Format != formatNamespace && p.Format != formatAccess {
|
||||
return errPGFormatError
|
||||
}
|
||||
if p.ConnectionString == "" {
|
||||
if _, err := checkURL(p.Host); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if p.Table == "" {
|
||||
return errPGTableError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type pgConn struct {
|
||||
connStr string
|
||||
table string
|
||||
format string
|
||||
preparedStmts map[string]*sql.Stmt
|
||||
*sql.DB
|
||||
}
|
||||
|
||||
func dialPostgreSQL(pgN postgreSQLNotify) (pc pgConn, e error) {
|
||||
if !pgN.Enable {
|
||||
return pc, errNotifyNotEnabled
|
||||
}
|
||||
|
||||
// collect connection params
|
||||
params := []string{pgN.ConnectionString}
|
||||
if pgN.Host != "" {
|
||||
params = append(params, "host="+pgN.Host)
|
||||
}
|
||||
if pgN.Port != "" {
|
||||
params = append(params, "port="+pgN.Port)
|
||||
}
|
||||
if pgN.User != "" {
|
||||
params = append(params, "user="+pgN.User)
|
||||
}
|
||||
if pgN.Password != "" {
|
||||
params = append(params, "password="+pgN.Password)
|
||||
}
|
||||
if pgN.Database != "" {
|
||||
params = append(params, "dbname="+pgN.Database)
|
||||
}
|
||||
connStr := strings.Join(params, " ")
|
||||
|
||||
db, err := sql.Open("postgres", connStr)
|
||||
if err != nil {
|
||||
return pc, pgErrFunc(
|
||||
"Connection opening failure (connectionString=%s): %v",
|
||||
connStr, err)
|
||||
}
|
||||
|
||||
// ping to check that server is actually reachable.
|
||||
err = db.Ping()
|
||||
if err != nil {
|
||||
return pc, pgErrFunc("Ping to server failed with: %v",
|
||||
err)
|
||||
}
|
||||
|
||||
// check that table exists - if not, create it.
|
||||
_, err = db.Exec(fmt.Sprintf(tableExists, pgN.Table))
|
||||
if err != nil {
|
||||
createStmt := createTableForNS
|
||||
if pgN.Format == formatAccess {
|
||||
createStmt = createTableForAccess
|
||||
}
|
||||
|
||||
// most likely, table does not exist. try to create it:
|
||||
_, errCreate := db.Exec(fmt.Sprintf(createStmt, pgN.Table))
|
||||
if errCreate != nil {
|
||||
// failed to create the table. error out.
|
||||
return pc, pgErrFunc(
|
||||
"'Select' failed with %v, then 'Create Table' failed with %v",
|
||||
err, errCreate,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// create prepared statements
|
||||
stmts := make(map[string]*sql.Stmt)
|
||||
switch pgN.Format {
|
||||
case formatNamespace:
|
||||
// insert or update statement
|
||||
stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNS,
|
||||
pgN.Table))
|
||||
if err != nil {
|
||||
return pc, pgErrFunc(
|
||||
"create UPSERT prepared statement failed with: %v", err)
|
||||
}
|
||||
// delete statement
|
||||
stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNS,
|
||||
pgN.Table))
|
||||
if err != nil {
|
||||
return pc, pgErrFunc(
|
||||
"create DELETE prepared statement failed with: %v", err)
|
||||
}
|
||||
case formatAccess:
|
||||
// insert statement
|
||||
stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccess,
|
||||
pgN.Table))
|
||||
if err != nil {
|
||||
return pc, pgErrFunc(
|
||||
"create INSERT prepared statement failed with: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return pgConn{connStr, pgN.Table, pgN.Format, stmts, db}, nil
|
||||
}
|
||||
|
||||
func newPostgreSQLNotify(accountID string) (*logrus.Logger, error) {
|
||||
pgNotify := globalServerConfig.Notify.GetPostgreSQLByID(accountID)
|
||||
|
||||
// Dial postgres
|
||||
pgC, err := dialPostgreSQL(pgNotify)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pgLog := logrus.New()
|
||||
|
||||
pgLog.Out = ioutil.Discard
|
||||
|
||||
pgLog.Formatter = new(logrus.JSONFormatter)
|
||||
|
||||
pgLog.Hooks.Add(pgC)
|
||||
|
||||
return pgLog, nil
|
||||
}
|
||||
|
||||
func (pgC pgConn) Close() {
|
||||
// first close all prepared statements
|
||||
for _, v := range pgC.preparedStmts {
|
||||
_ = v.Close()
|
||||
}
|
||||
// close db connection
|
||||
_ = pgC.DB.Close()
|
||||
}
|
||||
|
||||
func jsonEncodeEventData(d interface{}) ([]byte, error) {
|
||||
// json encode the value for the row
|
||||
value, err := json.Marshal(map[string]interface{}{
|
||||
"Records": d,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, pgErrFunc(
|
||||
"Unable to encode event %v to JSON: %v", d, err)
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func (pgC pgConn) Fire(entry *logrus.Entry) error {
|
||||
// get event type by trying to convert to string
|
||||
entryEventType, ok := entry.Data["EventType"].(string)
|
||||
if !ok {
|
||||
// ignore event if converting EventType to string
|
||||
// fails.
|
||||
return nil
|
||||
}
|
||||
|
||||
switch pgC.format {
|
||||
case formatNamespace:
|
||||
// Check for event delete
|
||||
if eventMatch(entryEventType, []string{"s3:ObjectRemoved:*"}) {
|
||||
// delete row from the table
|
||||
_, err := pgC.preparedStmts["deleteRow"].Exec(entry.Data["Key"])
|
||||
if err != nil {
|
||||
return pgErrFunc(
|
||||
"Error deleting event with key=%v: %v",
|
||||
entry.Data["Key"], err,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
value, err := jsonEncodeEventData(entry.Data["Records"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// upsert row into the table
|
||||
_, err = pgC.preparedStmts["upsertRow"].Exec(entry.Data["Key"], value)
|
||||
if err != nil {
|
||||
return pgErrFunc(
|
||||
"Unable to upsert event with key=%v and value=%v: %v",
|
||||
entry.Data["Key"], entry.Data["Records"], err,
|
||||
)
|
||||
}
|
||||
}
|
||||
case formatAccess:
|
||||
// eventTime is taken from the first entry in the
|
||||
// records.
|
||||
events, ok := entry.Data["Records"].([]NotificationEvent)
|
||||
if !ok {
|
||||
return pgErrFunc("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
|
||||
}
|
||||
eventTime, err := time.Parse(timeFormatAMZ, events[0].EventTime)
|
||||
if err != nil {
|
||||
return pgErrFunc("unable to parse event time \"%s\": %v",
|
||||
events[0].EventTime, err)
|
||||
}
|
||||
|
||||
value, err := jsonEncodeEventData(entry.Data["Records"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = pgC.preparedStmts["insertRow"].Exec(eventTime, value)
|
||||
if err != nil {
|
||||
return pgErrFunc("Unable to insert event with value=%v: %v",
|
||||
value, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pgC pgConn) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
}
|
||||
}
|
|
@ -1,222 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/garyburd/redigo/redis"
|
||||
)
|
||||
|
||||
var (
|
||||
redisErrFunc = newNotificationErrorFactory("Redis")
|
||||
|
||||
errRedisFormat = redisErrFunc(`"format" value is invalid - it must be one of "access" or "namespace".`)
|
||||
errRedisKeyError = redisErrFunc("Key was not specified in the configuration.")
|
||||
)
|
||||
|
||||
// redisNotify to send logs to Redis server
|
||||
type redisNotify struct {
|
||||
Enable bool `json:"enable"`
|
||||
Format string `json:"format"`
|
||||
Addr string `json:"address"`
|
||||
Password string `json:"password"`
|
||||
Key string `json:"key"`
|
||||
}
|
||||
|
||||
func (r *redisNotify) Validate() error {
|
||||
if !r.Enable {
|
||||
return nil
|
||||
}
|
||||
if r.Format != formatNamespace && r.Format != formatAccess {
|
||||
return errRedisFormat
|
||||
}
|
||||
if _, _, err := net.SplitHostPort(r.Addr); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.Key == "" {
|
||||
return errRedisKeyError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type redisConn struct {
|
||||
*redis.Pool
|
||||
params redisNotify
|
||||
}
|
||||
|
||||
// Dial a new connection to redis instance at addr, optionally with a
|
||||
// password if any.
|
||||
func dialRedis(rNotify redisNotify) (*redis.Pool, error) {
|
||||
// Return error if redis not enabled.
|
||||
if !rNotify.Enable {
|
||||
return nil, errNotifyNotEnabled
|
||||
}
|
||||
|
||||
addr := rNotify.Addr
|
||||
password := rNotify.Password
|
||||
rPool := &redis.Pool{
|
||||
MaxIdle: 3,
|
||||
IdleTimeout: 240 * time.Second, // Time 2minutes.
|
||||
Dial: func() (redis.Conn, error) {
|
||||
c, err := redis.Dial("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if password != "" {
|
||||
if _, derr := c.Do("AUTH", password); derr != nil {
|
||||
c.Close()
|
||||
return nil, derr
|
||||
}
|
||||
}
|
||||
return c, err
|
||||
},
|
||||
TestOnBorrow: func(c redis.Conn, t time.Time) error {
|
||||
_, err := c.Do("PING")
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
// Test if connection with REDIS can be established.
|
||||
rConn := rPool.Get()
|
||||
defer rConn.Close()
|
||||
|
||||
// Check connection.
|
||||
_, err := rConn.Do("PING")
|
||||
if err != nil {
|
||||
return nil, redisErrFunc("Error connecting to server: %v", err)
|
||||
}
|
||||
|
||||
// Test that Key is of desired type
|
||||
reply, err := redis.String(rConn.Do("TYPE", rNotify.Key))
|
||||
if err != nil {
|
||||
return nil, redisErrFunc("Error getting type of Key=%s: %v",
|
||||
rNotify.Key, err)
|
||||
}
|
||||
if reply != "none" {
|
||||
expectedType := "hash"
|
||||
if rNotify.Format == formatAccess {
|
||||
expectedType = "list"
|
||||
}
|
||||
if reply != expectedType {
|
||||
return nil, redisErrFunc(
|
||||
"Key=%s has type %s, but we expect it to be a %s",
|
||||
rNotify.Key, reply, expectedType)
|
||||
}
|
||||
}
|
||||
|
||||
// Return pool.
|
||||
return rPool, nil
|
||||
}
|
||||
|
||||
func newRedisNotify(accountID string) (*logrus.Logger, error) {
|
||||
rNotify := globalServerConfig.Notify.GetRedisByID(accountID)
|
||||
|
||||
// Dial redis.
|
||||
rPool, err := dialRedis(rNotify)
|
||||
if err != nil {
|
||||
return nil, redisErrFunc("Error dialing server: %v", err)
|
||||
}
|
||||
|
||||
rrConn := redisConn{
|
||||
Pool: rPool,
|
||||
params: rNotify,
|
||||
}
|
||||
|
||||
redisLog := logrus.New()
|
||||
|
||||
redisLog.Out = ioutil.Discard
|
||||
|
||||
// Set default JSON formatter.
|
||||
redisLog.Formatter = new(logrus.JSONFormatter)
|
||||
|
||||
redisLog.Hooks.Add(rrConn)
|
||||
|
||||
// Success, redis enabled.
|
||||
return redisLog, nil
|
||||
}
|
||||
|
||||
// Fire is called when an event should be sent to the message broker.
|
||||
func (r redisConn) Fire(entry *logrus.Entry) error {
|
||||
rConn := r.Pool.Get()
|
||||
defer rConn.Close()
|
||||
|
||||
// Fetch event type upon reflecting on its original type.
|
||||
entryStr, ok := entry.Data["EventType"].(string)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch r.params.Format {
|
||||
case formatNamespace:
|
||||
// Match the event if its a delete request, attempt to delete the key
|
||||
if eventMatch(entryStr, []string{"s3:ObjectRemoved:*"}) {
|
||||
_, err := rConn.Do("HDEL", r.params.Key, entry.Data["Key"])
|
||||
if err != nil {
|
||||
return redisErrFunc("Error deleting entry: %v",
|
||||
err)
|
||||
}
|
||||
return nil
|
||||
} // else save this as new entry or update any existing ones.
|
||||
|
||||
value, err := json.Marshal(map[string]interface{}{
|
||||
"Records": entry.Data["Records"],
|
||||
})
|
||||
if err != nil {
|
||||
return redisErrFunc(
|
||||
"Unable to encode event %v to JSON: %v",
|
||||
entry.Data["Records"], err)
|
||||
}
|
||||
_, err = rConn.Do("HSET", r.params.Key, entry.Data["Key"],
|
||||
value)
|
||||
if err != nil {
|
||||
return redisErrFunc("Error updating hash entry: %v",
|
||||
err)
|
||||
}
|
||||
case formatAccess:
|
||||
// eventTime is taken from the first entry in the
|
||||
// records.
|
||||
events, ok := entry.Data["Records"].([]NotificationEvent)
|
||||
if !ok {
|
||||
return redisErrFunc("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"])
|
||||
}
|
||||
eventTime := events[0].EventTime
|
||||
|
||||
listEntry := []interface{}{eventTime, entry.Data["Records"]}
|
||||
jsonValue, err := json.Marshal(listEntry)
|
||||
if err != nil {
|
||||
return redisErrFunc("JSON encoding error: %v", err)
|
||||
}
|
||||
_, err = rConn.Do("RPUSH", r.params.Key, jsonValue)
|
||||
if err != nil {
|
||||
return redisErrFunc("Error appending to Redis list: %v",
|
||||
err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Required for logrus hook implementation
|
||||
func (r redisConn) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
}
|
||||
}
|
|
@ -1,212 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
type webhookNotify struct {
|
||||
Enable bool `json:"enable"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
}
|
||||
|
||||
func (w *webhookNotify) Validate() error {
|
||||
if !w.Enable {
|
||||
return nil
|
||||
}
|
||||
if _, err := checkURL(w.Endpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type httpConn struct {
|
||||
*http.Client
|
||||
Endpoint string
|
||||
}
|
||||
|
||||
// isNetErrorIgnored - is network error ignored.
|
||||
func isNetErrorIgnored(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
if strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
|
||||
return true
|
||||
}
|
||||
switch err.(type) {
|
||||
case net.Error:
|
||||
switch e := err.(type) {
|
||||
case *net.DNSError, *net.OpError, net.UnknownNetworkError:
|
||||
return true
|
||||
case *url.Error:
|
||||
// Fixes https://github.com/minio/minio/issues/4050
|
||||
switch e.Err.(type) {
|
||||
case *net.DNSError, *net.OpError, net.UnknownNetworkError:
|
||||
return true
|
||||
}
|
||||
// For a URL error, where it replies back "connection closed"
|
||||
// retry again.
|
||||
if strings.Contains(err.Error(), "Connection closed by foreign host") {
|
||||
return true
|
||||
}
|
||||
default:
|
||||
if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
|
||||
// If error is - tlsHandshakeTimeoutError, retry.
|
||||
return true
|
||||
} else if strings.Contains(err.Error(), "i/o timeout") {
|
||||
// If error is - tcp timeoutError, retry.
|
||||
return true
|
||||
} else if strings.Contains(err.Error(), "connection timed out") {
|
||||
// If err is a net.Dial timeout, retry.
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Lookup endpoint address by successfully POSTting
|
||||
// empty body.
|
||||
func lookupEndpoint(urlStr string) error {
|
||||
req, err := http.NewRequest("POST", urlStr, bytes.NewReader([]byte("")))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Timeout: 1 * time.Second,
|
||||
Transport: &http.Transport{
|
||||
// Need to close connection after usage.
|
||||
DisableKeepAlives: true,
|
||||
},
|
||||
}
|
||||
|
||||
// Set content-length to zero as there is no payload.
|
||||
req.ContentLength = 0
|
||||
|
||||
// Set proper server user-agent.
|
||||
req.Header.Set("User-Agent", globalServerUserAgent)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
if isNetErrorIgnored(err) {
|
||||
errorIf(err, "Unable to lookup webhook endpoint %s", urlStr)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
// HTTP status OK/NoContent.
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
|
||||
return fmt.Errorf("Unable to lookup webhook endpoint %s response(%s)", urlStr, resp.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Initializes new webhook logrus notifier.
|
||||
func newWebhookNotify(accountID string) (*logrus.Logger, error) {
|
||||
rNotify := globalServerConfig.Notify.GetWebhookByID(accountID)
|
||||
if rNotify.Endpoint == "" {
|
||||
return nil, errInvalidArgument
|
||||
}
|
||||
|
||||
if err := lookupEndpoint(rNotify.Endpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conn := httpConn{
|
||||
// Configure aggressive timeouts for client posts.
|
||||
Client: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{RootCAs: globalRootCAs},
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 5 * time.Second,
|
||||
KeepAlive: 5 * time.Second,
|
||||
}).DialContext,
|
||||
TLSHandshakeTimeout: 3 * time.Second,
|
||||
ResponseHeaderTimeout: 3 * time.Second,
|
||||
ExpectContinueTimeout: 2 * time.Second,
|
||||
},
|
||||
},
|
||||
Endpoint: rNotify.Endpoint,
|
||||
}
|
||||
|
||||
notifyLog := logrus.New()
|
||||
notifyLog.Out = ioutil.Discard
|
||||
|
||||
// Set default JSON formatter.
|
||||
notifyLog.Formatter = new(logrus.JSONFormatter)
|
||||
|
||||
notifyLog.Hooks.Add(conn)
|
||||
|
||||
// Success
|
||||
return notifyLog, nil
|
||||
}
|
||||
|
||||
// Fire is called when an event should be sent to the message broker.
|
||||
func (n httpConn) Fire(entry *logrus.Entry) error {
|
||||
body, err := entry.Reader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", n.Endpoint, body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set content-type.
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
// Set proper server user-agent.
|
||||
req.Header.Set("User-Agent", globalServerUserAgent)
|
||||
|
||||
// Initiate the http request.
|
||||
resp, err := n.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Make sure to close the response body so the connection can be re-used.
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK &&
|
||||
resp.StatusCode != http.StatusAccepted &&
|
||||
resp.StatusCode != http.StatusContinue {
|
||||
return fmt.Errorf("Unable to send event %s", resp.Status)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Levels are Required for logrus hook implementation
|
||||
func (httpConn) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
}
|
||||
}
|
|
@ -1,119 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Custom post handler to handle POST requests.
|
||||
type postHandler struct{}
|
||||
|
||||
func (p postHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != "POST" {
|
||||
http.Error(w, fmt.Sprintf("Unexpected method %s", r.Method), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
io.Copy(w, r.Body)
|
||||
}
|
||||
|
||||
type errorHandler struct{}
|
||||
|
||||
func (e errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, fmt.Sprintf("Unexpected method %s", r.Method), http.StatusBadRequest)
|
||||
}
|
||||
|
||||
// Tests web hook initialization.
|
||||
func TestNewWebHookNotify(t *testing.T) {
|
||||
root, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
|
||||
server := httptest.NewServer(postHandler{})
|
||||
defer server.Close()
|
||||
|
||||
_, err = newWebhookNotify("1")
|
||||
if err == nil {
|
||||
t.Fatal("Unexpected should fail")
|
||||
}
|
||||
|
||||
globalServerConfig.Notify.SetWebhookByID("10", webhookNotify{Enable: true, Endpoint: server.URL})
|
||||
_, err = newWebhookNotify("10")
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected should not fail with lookupEndpoint", err)
|
||||
}
|
||||
|
||||
globalServerConfig.Notify.SetWebhookByID("15", webhookNotify{Enable: true, Endpoint: "http://%"})
|
||||
_, err = newWebhookNotify("15")
|
||||
if err == nil {
|
||||
t.Fatal("Unexpected should fail with invalid URL escape")
|
||||
}
|
||||
|
||||
globalServerConfig.Notify.SetWebhookByID("20", webhookNotify{Enable: true, Endpoint: server.URL})
|
||||
webhook, err := newWebhookNotify("20")
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected shouldn't fail", err)
|
||||
}
|
||||
|
||||
webhook.WithFields(logrus.Fields{
|
||||
"Key": path.Join("bucket", "object"),
|
||||
"EventType": "s3:ObjectCreated:Put",
|
||||
}).Info()
|
||||
}
|
||||
|
||||
// Add tests for lookup endpoint.
|
||||
func TestLookupEndpoint(t *testing.T) {
|
||||
server := httptest.NewServer(errorHandler{})
|
||||
defer server.Close()
|
||||
|
||||
testCases := []struct {
|
||||
endpoint string
|
||||
err error
|
||||
}{
|
||||
// Ignore endpoints which don't exist.
|
||||
{
|
||||
endpoint: "http://unknown",
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
endpoint: "%%%",
|
||||
err: errors.New("parse %%%: invalid URL escape \"%%%\""),
|
||||
},
|
||||
{
|
||||
endpoint: server.URL,
|
||||
err: fmt.Errorf("Unable to lookup webhook endpoint %s response(400 Bad Request)", server.URL),
|
||||
},
|
||||
}
|
||||
for _, test := range testCases {
|
||||
if err := lookupEndpoint(test.endpoint); err != nil {
|
||||
if err.Error() != test.err.Error() {
|
||||
t.Errorf("Expected %s, got %s", test.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -89,19 +89,11 @@ func deleteBucketMetadata(bucket string, objAPI ObjectLayer) {
|
|||
// Delete bucket access policy, if present - ignore any errors.
|
||||
_ = removeBucketPolicy(bucket, objAPI)
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketPolicy(bucket)
|
||||
|
||||
// Delete notification config, if present - ignore any errors.
|
||||
_ = removeNotificationConfig(bucket, objAPI)
|
||||
_ = removeNotificationConfig(objAPI, bucket)
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketNotification(bucket, nil)
|
||||
// Delete listener config, if present - ignore any errors.
|
||||
_ = removeListenerConfig(bucket, objAPI)
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketListener(bucket, []listenerConfig{})
|
||||
_ = removeListenerConfig(objAPI, bucket)
|
||||
}
|
||||
|
||||
// House keeping code for FS/XL and distributed Minio setup.
|
||||
|
@ -194,3 +186,23 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
|
|||
err := delFunc(retainSlash(pathJoin(dirPath)))
|
||||
return err
|
||||
}
|
||||
|
||||
// Removes notification.xml for a given bucket, only used during DeleteBucket.
|
||||
func removeNotificationConfig(objAPI ObjectLayer, bucket string) error {
|
||||
// Verify bucket is valid.
|
||||
if !IsValidBucketName(bucket) {
|
||||
return BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
|
||||
ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
|
||||
|
||||
return objAPI.DeleteObject(minioMetaBucket, ncPath)
|
||||
}
|
||||
|
||||
// Remove listener configuration from storage layer. Used when a bucket is deleted.
|
||||
func removeListenerConfig(objAPI ObjectLayer, bucket string) error {
|
||||
// make the path
|
||||
lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
|
||||
|
||||
return objAPI.DeleteObject(minioMetaBucket, lcPath)
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -21,6 +21,8 @@ import (
|
|||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/event"
|
||||
)
|
||||
|
||||
// Validates the preconditions for CopyObjectPart, returns true if CopyObjectPart
|
||||
|
@ -240,10 +242,10 @@ func deleteObject(obj ObjectLayer, bucket, object string, r *http.Request) (err
|
|||
host, port, _ := net.SplitHostPort(r.RemoteAddr)
|
||||
|
||||
// Notify object deleted event.
|
||||
eventNotify(eventData{
|
||||
Type: ObjectRemovedDelete,
|
||||
Bucket: bucket,
|
||||
ObjInfo: ObjectInfo{
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectRemovedDelete,
|
||||
BucketName: bucket,
|
||||
Object: ObjectInfo{
|
||||
Name: object,
|
||||
},
|
||||
ReqParams: extractReqParams(r),
|
||||
|
|
|
@ -32,6 +32,7 @@ import (
|
|||
|
||||
mux "github.com/gorilla/mux"
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/handlers"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/ioutil"
|
||||
|
@ -191,10 +192,10 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
|
|||
}
|
||||
|
||||
// Notify object accessed via a GET request.
|
||||
eventNotify(eventData{
|
||||
Type: ObjectAccessedGet,
|
||||
Bucket: bucket,
|
||||
ObjInfo: objInfo,
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectAccessedGet,
|
||||
BucketName: bucket,
|
||||
Object: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
|
@ -267,10 +268,10 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
|
|||
}
|
||||
|
||||
// Notify object accessed via a HEAD request.
|
||||
eventNotify(eventData{
|
||||
Type: ObjectAccessedHead,
|
||||
Bucket: bucket,
|
||||
ObjInfo: objInfo,
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectAccessedHead,
|
||||
BucketName: bucket,
|
||||
Object: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
|
@ -523,10 +524,10 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
|
|||
}
|
||||
|
||||
// Notify object created event.
|
||||
eventNotify(eventData{
|
||||
Type: ObjectCreatedCopy,
|
||||
Bucket: dstBucket,
|
||||
ObjInfo: objInfo,
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectCreatedCopy,
|
||||
BucketName: dstBucket,
|
||||
Object: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
|
@ -706,10 +707,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
|
|||
}
|
||||
|
||||
// Notify object created event.
|
||||
eventNotify(eventData{
|
||||
Type: ObjectCreatedPut,
|
||||
Bucket: bucket,
|
||||
ObjInfo: objInfo,
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectCreatedPut,
|
||||
BucketName: bucket,
|
||||
Object: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
|
@ -1303,10 +1304,10 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
|
|||
}
|
||||
|
||||
// Notify object created event.
|
||||
eventNotify(eventData{
|
||||
Type: ObjectCreatedCompleteMultipartUpload,
|
||||
Bucket: bucket,
|
||||
ObjInfo: objInfo,
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectCreatedCompleteMultipartUpload,
|
||||
BucketName: bucket,
|
||||
Object: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -478,11 +478,8 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
|
|||
bytesData := bytes.Repeat([]byte{'a'}, bytesDataLen)
|
||||
oneKData := bytes.Repeat([]byte("a"), 1*humanize.KiByte)
|
||||
|
||||
err := initEventNotifier(obj)
|
||||
if err != nil {
|
||||
t.Fatalf("[%s] - Failed to initialize event notifiers <ERROR> %v", instanceType, err)
|
||||
var err error
|
||||
|
||||
}
|
||||
type streamFault int
|
||||
const (
|
||||
None streamFault = iota
|
||||
|
@ -787,12 +784,7 @@ func TestAPIPutObjectHandler(t *testing.T) {
|
|||
func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
// register event notifier.
|
||||
err := initEventNotifier(obj)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal("Notifier initialization failed.")
|
||||
}
|
||||
var err error
|
||||
objectName := "test-object"
|
||||
// byte data for PutObject.
|
||||
bytesData := generateBytesData(6 * humanize.KiByte)
|
||||
|
@ -1041,11 +1033,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
|
|||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
objectName := "test-object"
|
||||
// register event notifier.
|
||||
err := initEventNotifier(obj)
|
||||
if err != nil {
|
||||
t.Fatalf("Initializing event notifiers failed")
|
||||
}
|
||||
var err error
|
||||
|
||||
// set of byte data for PutObject.
|
||||
// object has to be created before running tests for Copy Object.
|
||||
|
@ -1156,11 +1144,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
|
|||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
objectName := "test-object"
|
||||
// register event notifier.
|
||||
err := initEventNotifier(obj)
|
||||
if err != nil {
|
||||
t.Fatalf("Initializing event notifiers failed")
|
||||
}
|
||||
var err error
|
||||
|
||||
// set of byte data for PutObject.
|
||||
// object has to be created before running tests for Copy Object.
|
||||
|
@ -1488,11 +1472,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
|
|||
objectName := "test-object"
|
||||
// object used for anonymous HTTP request test.
|
||||
anonObject := "anon-object"
|
||||
// register event notifier.
|
||||
err := initEventNotifier(obj)
|
||||
if err != nil {
|
||||
t.Fatalf("Initializing event notifiers failed")
|
||||
}
|
||||
var err error
|
||||
|
||||
// set of byte data for PutObject.
|
||||
// object has to be created before running tests for Copy Object.
|
||||
|
@ -2110,12 +2090,6 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
|
|||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
var err error
|
||||
// register event notifier.
|
||||
err = initEventNotifier(obj)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal("Notifier initialization failed.")
|
||||
}
|
||||
|
||||
// object used for the test.
|
||||
objectName := "test-object-new-multipart"
|
||||
|
@ -2465,12 +2439,6 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
|
|||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
var err error
|
||||
// register event notifier.
|
||||
err = initEventNotifier(obj)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal("Notifier initialization failed.")
|
||||
}
|
||||
|
||||
// object used for the test.
|
||||
objectName := "test-object-new-multipart"
|
||||
|
@ -2633,12 +2601,7 @@ func TestAPIDeleteObjectHandler(t *testing.T) {
|
|||
func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
// register event notifier.
|
||||
err := initEventNotifier(obj)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal("Notifier initialization failed.")
|
||||
}
|
||||
var err error
|
||||
|
||||
objectName := "test-object"
|
||||
// Object used for anonymous API request test.
|
||||
|
|
|
@ -0,0 +1,311 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
xerrors "github.com/minio/minio/pkg/errors"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
const s3Path = "/s3/remote"
|
||||
|
||||
// PeerRPCReceiver - Peer RPC receiver for peer RPC server.
|
||||
type PeerRPCReceiver struct {
|
||||
AuthRPCServer
|
||||
}
|
||||
|
||||
// DeleteBucketArgs - delete bucket RPC arguments.
|
||||
type DeleteBucketArgs struct {
|
||||
AuthRPCArgs
|
||||
BucketName string
|
||||
}
|
||||
|
||||
// DeleteBucket - handles delete bucket RPC call which removes all values of given bucket in global NotificationSys object.
|
||||
func (receiver *PeerRPCReceiver) DeleteBucket(args *DeleteBucketArgs, reply *AuthRPCArgs) error {
|
||||
globalNotificationSys.RemoveNotification(args.BucketName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateBucketPolicyArgs - update bucket policy RPC arguments.
|
||||
type UpdateBucketPolicyArgs struct {
|
||||
AuthRPCArgs
|
||||
BucketName string
|
||||
}
|
||||
|
||||
// UpdateBucketPolicy - handles update bucket policy RPC call which sets bucket policies to given bucket in global BucketPolicies object.
|
||||
func (receiver *PeerRPCReceiver) UpdateBucketPolicy(args *UpdateBucketPolicyArgs, reply *AuthRPCArgs) error {
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil {
|
||||
// If the object layer is just coming up then it will load the policy from the disk.
|
||||
return nil
|
||||
}
|
||||
return objectAPI.RefreshBucketPolicy(args.BucketName)
|
||||
}
|
||||
|
||||
// PutBucketNotificationArgs - put bucket notification RPC arguments.
|
||||
type PutBucketNotificationArgs struct {
|
||||
AuthRPCArgs
|
||||
BucketName string
|
||||
RulesMap event.RulesMap
|
||||
}
|
||||
|
||||
// PutBucketNotification - handles put bucket notification RPC call which adds rules to given bucket to global NotificationSys object.
|
||||
func (receiver *PeerRPCReceiver) PutBucketNotification(args *PutBucketNotificationArgs, reply *AuthRPCReply) error {
|
||||
if err := args.IsAuthenticated(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
globalNotificationSys.AddRulesMap(args.BucketName, args.RulesMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListenBucketNotificationArgs - listen bucket notification RPC arguments.
|
||||
type ListenBucketNotificationArgs struct {
|
||||
AuthRPCArgs `json:"-"`
|
||||
BucketName string `json:"-"`
|
||||
EventNames []event.Name `json:"eventNames"`
|
||||
Pattern string `json:"pattern"`
|
||||
TargetID event.TargetID `json:"targetId"`
|
||||
Addr xnet.Host `json:"addr"`
|
||||
}
|
||||
|
||||
// ListenBucketNotification - handles listen bucket notification RPC call. It creates PeerRPCClient target which pushes requested events to target in remote peer.
|
||||
func (receiver *PeerRPCReceiver) ListenBucketNotification(args *ListenBucketNotificationArgs, reply *AuthRPCReply) error {
|
||||
if err := args.IsAuthenticated(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rpcClient := globalNotificationSys.GetPeerRPCClient(args.Addr)
|
||||
if rpcClient == nil {
|
||||
return fmt.Errorf("unable to find PeerRPCClient for provided address %v. This happens only if remote and this minio run with different set of endpoints", args.Addr)
|
||||
}
|
||||
|
||||
target := NewPeerRPCClientTarget(args.BucketName, args.TargetID, rpcClient)
|
||||
rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID())
|
||||
if err := globalNotificationSys.AddRemoteTarget(args.BucketName, target, rulesMap); err != nil {
|
||||
errorIf(err, "Unable to add PeerRPCClientTarget %v to globalNotificationSys.targetList.", target)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoteTargetExistArgs - remote target ID exist RPC arguments.
|
||||
type RemoteTargetExistArgs struct {
|
||||
AuthRPCArgs
|
||||
BucketName string
|
||||
TargetID event.TargetID
|
||||
}
|
||||
|
||||
// RemoteTargetExistReply - remote target ID exist RPC reply.
|
||||
type RemoteTargetExistReply struct {
|
||||
AuthRPCReply
|
||||
Exist bool
|
||||
}
|
||||
|
||||
// RemoteTargetExist - handles target ID exist RPC call which checks whether given target ID is a HTTP client target or not.
|
||||
func (receiver *PeerRPCReceiver) RemoteTargetExist(args *RemoteTargetExistArgs, reply *RemoteTargetExistReply) error {
|
||||
reply.Exist = globalNotificationSys.RemoteTargetExist(args.BucketName, args.TargetID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendEventArgs - send event RPC arguments.
|
||||
type SendEventArgs struct {
|
||||
AuthRPCArgs
|
||||
Event event.Event
|
||||
TargetID event.TargetID
|
||||
BucketName string
|
||||
}
|
||||
|
||||
// SendEventReply - send event RPC reply.
|
||||
type SendEventReply struct {
|
||||
AuthRPCReply
|
||||
Error error
|
||||
}
|
||||
|
||||
// SendEvent - handles send event RPC call which sends given event to target by given target ID.
|
||||
func (receiver *PeerRPCReceiver) SendEvent(args *SendEventArgs, reply *SendEventReply) error {
|
||||
if err := args.IsAuthenticated(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var err error
|
||||
if errMap := globalNotificationSys.send(args.BucketName, args.Event, args.TargetID); len(errMap) != 0 {
|
||||
var found bool
|
||||
if err, found = errMap[args.TargetID]; !found {
|
||||
// errMap must be zero or one element map because we sent to only one target ID.
|
||||
panic(fmt.Errorf("error for target %v not found in error map %+v", args.TargetID, errMap))
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
errorIf(err, "unable to send event %v to target %v", args.Event, args.TargetID)
|
||||
}
|
||||
|
||||
reply.Error = err
|
||||
return nil
|
||||
}
|
||||
|
||||
// registerS3PeerRPCRouter - creates and registers Peer RPC server and its router.
|
||||
func registerS3PeerRPCRouter(router *mux.Router) error {
|
||||
peerRPCServer := newRPCServer()
|
||||
if err := peerRPCServer.RegisterName("Peer", &PeerRPCReceiver{}); err != nil {
|
||||
return xerrors.Trace(err)
|
||||
}
|
||||
|
||||
subrouter := router.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
|
||||
subrouter.Path(s3Path).Handler(peerRPCServer)
|
||||
return nil
|
||||
}
|
||||
|
||||
// PeerRPCClient - peer RPC client talks to peer RPC server.
|
||||
type PeerRPCClient struct {
|
||||
*AuthRPCClient
|
||||
}
|
||||
|
||||
// DeleteBucket - calls delete bucket RPC.
|
||||
func (rpcClient *PeerRPCClient) DeleteBucket(bucketName string) error {
|
||||
args := DeleteBucketArgs{BucketName: bucketName}
|
||||
reply := AuthRPCReply{}
|
||||
return rpcClient.Call("Peer.DeleteBucket", &args, &reply)
|
||||
}
|
||||
|
||||
// UpdateBucketPolicy - calls update bucket policy RPC.
|
||||
func (rpcClient *PeerRPCClient) UpdateBucketPolicy(bucketName string) error {
|
||||
args := UpdateBucketPolicyArgs{
|
||||
BucketName: bucketName,
|
||||
}
|
||||
reply := AuthRPCReply{}
|
||||
return rpcClient.Call("Peer.UpdateBucketPolicy", &args, &reply)
|
||||
}
|
||||
|
||||
// PutBucketNotification - calls put bukcet notification RPC.
|
||||
func (rpcClient *PeerRPCClient) PutBucketNotification(bucketName string, rulesMap event.RulesMap) error {
|
||||
args := PutBucketNotificationArgs{
|
||||
BucketName: bucketName,
|
||||
RulesMap: rulesMap,
|
||||
}
|
||||
reply := AuthRPCReply{}
|
||||
return rpcClient.Call("Peer.PutBucketNotification", &args, &reply)
|
||||
}
|
||||
|
||||
// ListenBucketNotification - calls listen bucket notification RPC.
|
||||
func (rpcClient *PeerRPCClient) ListenBucketNotification(bucketName string, eventNames []event.Name,
|
||||
pattern string, targetID event.TargetID, addr xnet.Host) error {
|
||||
args := ListenBucketNotificationArgs{
|
||||
BucketName: bucketName,
|
||||
EventNames: eventNames,
|
||||
Pattern: pattern,
|
||||
TargetID: targetID,
|
||||
Addr: addr,
|
||||
}
|
||||
reply := AuthRPCReply{}
|
||||
return rpcClient.Call("Peer.ListenBucketNotification", &args, &reply)
|
||||
}
|
||||
|
||||
// RemoteTargetExist - calls remote target ID exist RPC.
|
||||
func (rpcClient *PeerRPCClient) RemoteTargetExist(bucketName string, targetID event.TargetID) (bool, error) {
|
||||
args := RemoteTargetExistArgs{
|
||||
BucketName: bucketName,
|
||||
TargetID: targetID,
|
||||
}
|
||||
|
||||
reply := RemoteTargetExistReply{}
|
||||
if err := rpcClient.Call("Peer.RemoteTargetExist", &args, &reply); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return reply.Exist, nil
|
||||
}
|
||||
|
||||
// SendEvent - calls send event RPC.
|
||||
func (rpcClient *PeerRPCClient) SendEvent(bucketName string, targetID, remoteTargetID event.TargetID, eventData event.Event) error {
|
||||
args := SendEventArgs{
|
||||
BucketName: bucketName,
|
||||
TargetID: remoteTargetID,
|
||||
Event: eventData,
|
||||
}
|
||||
reply := SendEventReply{}
|
||||
if err := rpcClient.Call("Peer.SendEvent", &args, &reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if reply.Error != nil {
|
||||
errorIf(reply.Error, "unable to send event %v to rpc target %v of bucket %v", args, targetID, bucketName)
|
||||
globalNotificationSys.RemoveRemoteTarget(bucketName, targetID)
|
||||
}
|
||||
|
||||
return reply.Error
|
||||
}
|
||||
|
||||
// makeRemoteRPCClients - creates Peer RPCClients for given endpoint list.
|
||||
func makeRemoteRPCClients(endpoints EndpointList) map[xnet.Host]*PeerRPCClient {
|
||||
peerRPCClientMap := make(map[xnet.Host]*PeerRPCClient)
|
||||
|
||||
cred := globalServerConfig.GetCredential()
|
||||
serviceEndpoint := path.Join(minioReservedBucketPath, s3Path)
|
||||
for _, hostStr := range GetRemotePeers(endpoints) {
|
||||
host := xnet.MustParseHost(hostStr)
|
||||
peerRPCClientMap[*host] = &PeerRPCClient{newAuthRPCClient(authConfig{
|
||||
accessKey: cred.AccessKey,
|
||||
secretKey: cred.SecretKey,
|
||||
serverAddr: hostStr,
|
||||
serviceEndpoint: serviceEndpoint,
|
||||
secureConn: globalIsSSL,
|
||||
serviceName: "Peer",
|
||||
})}
|
||||
}
|
||||
|
||||
return peerRPCClientMap
|
||||
}
|
||||
|
||||
// PeerRPCClientTarget - RPCClient is an event.Target which sends event to target of remote peer.
|
||||
type PeerRPCClientTarget struct {
|
||||
id event.TargetID
|
||||
remoteTargetID event.TargetID
|
||||
rpcClient *PeerRPCClient
|
||||
bucketName string
|
||||
}
|
||||
|
||||
// ID - returns target ID.
|
||||
func (target *PeerRPCClientTarget) ID() event.TargetID {
|
||||
return target.id
|
||||
}
|
||||
|
||||
// Send - sends event to remote peer by making RPC call.
|
||||
func (target *PeerRPCClientTarget) Send(eventData event.Event) error {
|
||||
return target.rpcClient.SendEvent(target.bucketName, target.id, target.remoteTargetID, eventData)
|
||||
}
|
||||
|
||||
// Close - does nothing and available for interface compatibility.
|
||||
func (target *PeerRPCClientTarget) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewPeerRPCClientTarget - creates RPCClient target with given target ID available in remote peer.
|
||||
func NewPeerRPCClientTarget(bucketName string, targetID event.TargetID, rpcClient *PeerRPCClient) *PeerRPCClientTarget {
|
||||
return &PeerRPCClientTarget{
|
||||
id: event.TargetID{targetID.ID, targetID.Name + "+" + mustGetUUID()},
|
||||
remoteTargetID: targetID,
|
||||
bucketName: bucketName,
|
||||
rpcClient: rpcClient,
|
||||
}
|
||||
}
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -123,12 +123,6 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
|
|||
}
|
||||
defer os.RemoveAll(root)
|
||||
|
||||
// Register event notifier.
|
||||
err = initEventNotifier(obj)
|
||||
if err != nil {
|
||||
t.Fatalf("Initializing event notifiers failed")
|
||||
}
|
||||
|
||||
// get random bucket name.
|
||||
bucketName := getRandomBucketName()
|
||||
|
||||
|
@ -431,12 +425,6 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
|
|||
}
|
||||
defer os.RemoveAll(root)
|
||||
|
||||
// Register event notifier.
|
||||
err = initEventNotifier(obj)
|
||||
if err != nil {
|
||||
t.Fatalf("Initializing event notifiers failed")
|
||||
}
|
||||
|
||||
// get random bucket name.
|
||||
bucketName := getRandomBucketName()
|
||||
|
||||
|
|
|
@ -1,182 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio-go/pkg/set"
|
||||
)
|
||||
|
||||
// s3Peer structs contains the address of a peer in the cluster, and
|
||||
// its BucketMetaState interface objects.
|
||||
type s3Peer struct {
|
||||
// address in `host:port` format
|
||||
addr string
|
||||
// BucketMetaState client interface
|
||||
bmsClient BucketMetaState
|
||||
}
|
||||
|
||||
// type representing all peers in the cluster
|
||||
type s3Peers []s3Peer
|
||||
|
||||
// makeS3Peers makes an s3Peers struct value from the given urls
|
||||
// slice. The urls slice is assumed to be non-empty and free of nil
|
||||
// values.
|
||||
func makeS3Peers(endpoints EndpointList) (s3PeerList s3Peers) {
|
||||
localAddr := GetLocalPeer(endpoints)
|
||||
s3PeerList = append(s3PeerList, s3Peer{
|
||||
localAddr,
|
||||
&localBucketMetaState{ObjectAPI: newObjectLayerFn},
|
||||
})
|
||||
|
||||
hostSet := set.CreateStringSet(localAddr)
|
||||
cred := globalServerConfig.GetCredential()
|
||||
serviceEndpoint := path.Join(minioReservedBucketPath, s3Path)
|
||||
for _, host := range GetRemotePeers(endpoints) {
|
||||
if hostSet.Contains(host) {
|
||||
continue
|
||||
}
|
||||
hostSet.Add(host)
|
||||
s3PeerList = append(s3PeerList, s3Peer{
|
||||
addr: host,
|
||||
bmsClient: &remoteBucketMetaState{
|
||||
newAuthRPCClient(authConfig{
|
||||
accessKey: cred.AccessKey,
|
||||
secretKey: cred.SecretKey,
|
||||
serverAddr: host,
|
||||
serviceEndpoint: serviceEndpoint,
|
||||
secureConn: globalIsSSL,
|
||||
serviceName: "S3",
|
||||
})},
|
||||
})
|
||||
}
|
||||
return s3PeerList
|
||||
}
|
||||
|
||||
// initGlobalS3Peers - initialize globalS3Peers by passing in
|
||||
// endpoints - intended to be called early in program start-up.
|
||||
func initGlobalS3Peers(endpoints EndpointList) {
|
||||
globalS3Peers = makeS3Peers(endpoints)
|
||||
}
|
||||
|
||||
// GetPeerClient - fetch BucketMetaState interface by peer address
|
||||
func (s3p s3Peers) GetPeerClient(peer string) BucketMetaState {
|
||||
for _, p := range s3p {
|
||||
if p.addr == peer {
|
||||
return p.bmsClient
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendUpdate sends bucket metadata updates to all given peer
|
||||
// indices. The update calls are sent in parallel, and errors are
|
||||
// returned per peer in an array. The returned error arrayslice is
|
||||
// always as long as s3p.peers.addr.
|
||||
//
|
||||
// The input peerIndex slice can be nil if the update is to be sent to
|
||||
// all peers. This is the common case.
|
||||
//
|
||||
// The updates are sent via a type implementing the BucketMetaState
|
||||
// interface. This makes sure that the local node is directly updated,
|
||||
// and remote nodes are updated via RPC calls.
|
||||
func (s3p s3Peers) SendUpdate(peerIndex []int, args BucketUpdater) []error {
|
||||
|
||||
// peer error array
|
||||
errs := make([]error, len(s3p))
|
||||
|
||||
// Start a wait group and make RPC requests to peers.
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Function that sends update to peer at `index`
|
||||
sendUpdateToPeer := func(index int) {
|
||||
defer wg.Done()
|
||||
errs[index] = args.BucketUpdate(s3p[index].bmsClient)
|
||||
}
|
||||
|
||||
// Special (but common) case of peerIndex == nil, implies send
|
||||
// update to all peers.
|
||||
if peerIndex == nil {
|
||||
for idx := 0; idx < len(s3p); idx++ {
|
||||
wg.Add(1)
|
||||
go sendUpdateToPeer(idx)
|
||||
}
|
||||
} else {
|
||||
// Send update only to given peer indices.
|
||||
for _, idx := range peerIndex {
|
||||
// check idx is in array bounds.
|
||||
if !(idx >= 0 && idx < len(s3p)) {
|
||||
errorIf(
|
||||
fmt.Errorf("Bad peer index %d input to SendUpdate()", idx),
|
||||
"peerIndex out of bounds",
|
||||
)
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go sendUpdateToPeer(idx)
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for requests to complete and return
|
||||
wg.Wait()
|
||||
return errs
|
||||
}
|
||||
|
||||
// S3PeersUpdateBucketNotification - Sends Update Bucket notification
|
||||
// request to all peers. Currently we log an error and continue.
|
||||
func S3PeersUpdateBucketNotification(bucket string, ncfg *notificationConfig) {
|
||||
setBNPArgs := &SetBucketNotificationPeerArgs{Bucket: bucket, NCfg: ncfg}
|
||||
errs := globalS3Peers.SendUpdate(nil, setBNPArgs)
|
||||
for idx, err := range errs {
|
||||
errorIf(
|
||||
err,
|
||||
"Error sending update bucket notification to %s - %v",
|
||||
globalS3Peers[idx].addr, err,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// S3PeersUpdateBucketListener - Sends Update Bucket listeners request
|
||||
// to all peers. Currently we log an error and continue.
|
||||
func S3PeersUpdateBucketListener(bucket string, lcfg []listenerConfig) {
|
||||
setBLPArgs := &SetBucketListenerPeerArgs{Bucket: bucket, LCfg: lcfg}
|
||||
errs := globalS3Peers.SendUpdate(nil, setBLPArgs)
|
||||
for idx, err := range errs {
|
||||
errorIf(
|
||||
err,
|
||||
"Error sending update bucket listener to %s - %v",
|
||||
globalS3Peers[idx].addr, err,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// S3PeersUpdateBucketPolicy - Sends update bucket policy request to
|
||||
// all peers. Currently we log an error and continue.
|
||||
func S3PeersUpdateBucketPolicy(bucket string) {
|
||||
setBPPArgs := &SetBucketPolicyPeerArgs{Bucket: bucket}
|
||||
errs := globalS3Peers.SendUpdate(nil, setBPPArgs)
|
||||
for idx, err := range errs {
|
||||
errorIf(
|
||||
err,
|
||||
"Error sending update bucket policy to %s - %v",
|
||||
globalS3Peers[idx].addr, err,
|
||||
)
|
||||
}
|
||||
}
|
|
@ -1,62 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Validates makeS3Peers, fetches all peers based on list of storage
|
||||
// endpoints.
|
||||
func TestMakeS3Peers(t *testing.T) {
|
||||
// Initialize configuration
|
||||
root, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("%s", err)
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
|
||||
// test cases
|
||||
testCases := []struct {
|
||||
gMinioAddr string
|
||||
eps EndpointList
|
||||
peers []string
|
||||
}{
|
||||
{"127.0.0.1:9000", mustGetNewEndpointList("/mnt/disk1"), []string{"127.0.0.1:9000"}},
|
||||
{"example.org:9000", mustGetNewEndpointList("http://example.org:9000/d1", "http://example.com:9000/d1", "http://example.net:9000/d1", "http://example.edu:9000/d1"), []string{"example.org:9000", "example.com:9000", "example.edu:9000", "example.net:9000"}},
|
||||
}
|
||||
|
||||
getPeersHelper := func(s3p s3Peers) []string {
|
||||
r := []string{}
|
||||
for _, p := range s3p {
|
||||
r = append(r, p.addr)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// execute tests
|
||||
for i, testCase := range testCases {
|
||||
globalMinioAddr = testCase.gMinioAddr
|
||||
s3peers := makeS3Peers(testCase.eps)
|
||||
referencePeers := getPeersHelper(s3peers)
|
||||
if !reflect.DeepEqual(testCase.peers, referencePeers) {
|
||||
t.Errorf("Test %d: Expected %v, got %v", i+1, testCase.peers, referencePeers)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
router "github.com/gorilla/mux"
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
s3Path = "/s3/remote"
|
||||
)
|
||||
|
||||
type s3PeerAPIHandlers struct {
|
||||
AuthRPCServer
|
||||
bms BucketMetaState
|
||||
}
|
||||
|
||||
func registerS3PeerRPCRouter(mux *router.Router) error {
|
||||
s3PeerHandlers := &s3PeerAPIHandlers{
|
||||
AuthRPCServer{},
|
||||
&localBucketMetaState{
|
||||
ObjectAPI: newObjectLayerFn,
|
||||
},
|
||||
}
|
||||
|
||||
s3PeerRPCServer := newRPCServer()
|
||||
err := s3PeerRPCServer.RegisterName("S3", s3PeerHandlers)
|
||||
if err != nil {
|
||||
return errors.Trace(err)
|
||||
}
|
||||
|
||||
s3PeerRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
|
||||
s3PeerRouter.Path(s3Path).Handler(s3PeerRPCServer)
|
||||
return nil
|
||||
}
|
|
@ -1,115 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
// SetBucketNotificationPeerArgs - Arguments collection to SetBucketNotificationPeer RPC
|
||||
// call
|
||||
type SetBucketNotificationPeerArgs struct {
|
||||
// For Auth
|
||||
AuthRPCArgs
|
||||
|
||||
Bucket string
|
||||
|
||||
// Notification config for the given bucket.
|
||||
NCfg *notificationConfig
|
||||
}
|
||||
|
||||
// BucketUpdate - implements bucket notification updates,
|
||||
// the underlying operation is a network call updates all
|
||||
// the peers participating in bucket notification.
|
||||
func (s *SetBucketNotificationPeerArgs) BucketUpdate(client BucketMetaState) error {
|
||||
return client.UpdateBucketNotification(s)
|
||||
}
|
||||
|
||||
func (s3 *s3PeerAPIHandlers) SetBucketNotificationPeer(args *SetBucketNotificationPeerArgs, reply *AuthRPCReply) error {
|
||||
if err := args.IsAuthenticated(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s3.bms.UpdateBucketNotification(args)
|
||||
}
|
||||
|
||||
// SetBucketListenerPeerArgs - Arguments collection to SetBucketListenerPeer RPC call
|
||||
type SetBucketListenerPeerArgs struct {
|
||||
// For Auth
|
||||
AuthRPCArgs
|
||||
|
||||
Bucket string
|
||||
|
||||
// Listener config for a given bucket.
|
||||
LCfg []listenerConfig
|
||||
}
|
||||
|
||||
// BucketUpdate - implements bucket listener updates,
|
||||
// the underlying operation is a network call updates all
|
||||
// the peers participating in listen bucket notification.
|
||||
func (s *SetBucketListenerPeerArgs) BucketUpdate(client BucketMetaState) error {
|
||||
return client.UpdateBucketListener(s)
|
||||
}
|
||||
|
||||
func (s3 *s3PeerAPIHandlers) SetBucketListenerPeer(args *SetBucketListenerPeerArgs, reply *AuthRPCReply) error {
|
||||
if err := args.IsAuthenticated(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s3.bms.UpdateBucketListener(args)
|
||||
}
|
||||
|
||||
// EventArgs - Arguments collection for Event RPC call
type EventArgs struct {
	// For Auth
	AuthRPCArgs

	// event being sent
	Event []NotificationEvent

	// client that it is meant for
	Arn string
}
|
||||
|
||||
// submit an event to the receiving server.
// Event - RPC handler that authenticates the caller and forwards the
// notification event to this peer's bucket metadata state for delivery.
func (s3 *s3PeerAPIHandlers) Event(args *EventArgs, reply *AuthRPCReply) error {
	// Reject unauthenticated RPC calls.
	if err := args.IsAuthenticated(); err != nil {
		return err
	}

	return s3.bms.SendEvent(args)
}
|
||||
|
||||
// SetBucketPolicyPeerArgs - Arguments collection for SetBucketPolicyPeer RPC call
type SetBucketPolicyPeerArgs struct {
	// For Auth
	AuthRPCArgs

	// Name of the bucket whose policy changed.
	Bucket string
}

// BucketUpdate - implements bucket policy updates,
// the underlying operation is a network call updates all
// the peers participating for new set/unset policies.
func (s *SetBucketPolicyPeerArgs) BucketUpdate(client BucketMetaState) error {
	// The args value itself carries both auth and payload to the peer.
	return client.UpdateBucketPolicy(s)
}
|
||||
|
||||
// tell receiving server to update a bucket policy
// SetBucketPolicyPeer - RPC handler that authenticates the caller and applies
// the bucket policy update to this peer's bucket metadata state.
func (s3 *s3PeerAPIHandlers) SetBucketPolicyPeer(args *SetBucketPolicyPeerArgs, reply *AuthRPCReply) error {
	// Reject unauthenticated RPC calls.
	if err := args.IsAuthenticated(); err != nil {
		return err
	}

	return s3.bms.UpdateBucketPolicy(args)
}
|
|
@ -1,108 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestRPCS3PeerSuite - holds the running test server, the client auth
// configuration pointed at it, and the backing disks to clean up.
type TestRPCS3PeerSuite struct {
	testServer   TestServer // test RPC server started by SetUpSuite
	testAuthConf authConfig // client credentials/endpoint for the server
	disks        []string   // disk paths removed on teardown
}
|
||||
|
||||
// Set up the suite and start the test server.
func (s *TestRPCS3PeerSuite) SetUpSuite(t *testing.T) {
	s.testServer, s.disks = StartTestS3PeerRPCServer(t)
	// Point the client auth config at the started server's S3 peer endpoint.
	s.testAuthConf = authConfig{
		serverAddr:      s.testServer.Server.Listener.Addr().String(),
		accessKey:       s.testServer.AccessKey,
		secretKey:       s.testServer.SecretKey,
		serviceEndpoint: path.Join(minioReservedBucketPath, s3Path),
		serviceName:     "S3",
	}
}
|
||||
|
||||
// TearDownSuite - stops the test server and removes the disks and config
// root it created.
func (s *TestRPCS3PeerSuite) TearDownSuite(t *testing.T) {
	s.testServer.Stop()
	removeRoots(s.disks)
	os.RemoveAll(s.testServer.Root)
}
|
||||
|
||||
// TestS3PeerRPC - entry point: runs the S3 peer RPC checks with suite
// setup and teardown around them.
func TestS3PeerRPC(t *testing.T) {
	// setup
	s := &TestRPCS3PeerSuite{}
	s.SetUpSuite(t)

	// run test
	s.testS3PeerRPC(t)

	// teardown
	s.TearDownSuite(t)
}
|
||||
|
||||
// Test S3 RPC handlers
|
||||
func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) {
|
||||
// Validate for invalid token.
|
||||
args := AuthRPCArgs{}
|
||||
rclient := newAuthRPCClient(s.testAuthConf)
|
||||
defer rclient.Close()
|
||||
|
||||
if err := rclient.Login(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rclient.authToken = "garbage"
|
||||
err := rclient.Call("S3.SetBucketNotificationPeer", &args, &AuthRPCReply{})
|
||||
if err != nil {
|
||||
if err.Error() != errInvalidToken.Error() {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Check bucket notification call works.
|
||||
BNPArgs := SetBucketNotificationPeerArgs{Bucket: "bucket", NCfg: ¬ificationConfig{}}
|
||||
client := newAuthRPCClient(s.testAuthConf)
|
||||
defer client.Close()
|
||||
err = client.Call("S3.SetBucketNotificationPeer", &BNPArgs, &AuthRPCReply{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check bucket listener update call works.
|
||||
BLPArgs := SetBucketListenerPeerArgs{Bucket: "bucket", LCfg: nil}
|
||||
err = client.Call("S3.SetBucketListenerPeer", &BLPArgs, &AuthRPCReply{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BPPArgs := SetBucketPolicyPeerArgs{Bucket: "bucket"}
|
||||
err = client.Call("S3.SetBucketPolicyPeer", &BPPArgs, &AuthRPCReply{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check event send event call works.
|
||||
evArgs := EventArgs{Event: nil, Arn: "localhost:9000"}
|
||||
err = client.Call("S3.Event", &evArgs, &AuthRPCReply{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -223,8 +223,9 @@ func serverMain(ctx *cli.Context) {
|
|||
handler, err = configureServerHandler(globalEndpoints)
|
||||
fatalIf(err, "Unable to configure one of server's RPC services.")
|
||||
|
||||
// Initialize S3 Peers inter-node communication only in distributed setup.
|
||||
initGlobalS3Peers(globalEndpoints)
|
||||
// Initialize notification system.
|
||||
globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints)
|
||||
fatalIf(err, "Unable to initialize notification system.")
|
||||
|
||||
// Initialize Admin Peers inter-node communication only in distributed setup.
|
||||
initGlobalAdminPeers(globalEndpoints)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -125,19 +125,16 @@ func printServerCommonMsg(apiEndpoints []string) {
|
|||
|
||||
// Prints bucket notification configurations.
|
||||
func printEventNotifiers() {
|
||||
if globalEventNotifier == nil {
|
||||
// In case initEventNotifier() was not done or failed.
|
||||
return
|
||||
}
|
||||
// Get all configured external notification targets
|
||||
externalTargets := globalEventNotifier.GetAllExternalTargets()
|
||||
if len(externalTargets) == 0 {
|
||||
arns := globalNotificationSys.GetARNList()
|
||||
if len(arns) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
arnMsg := colorBlue("SQS ARNs: ")
|
||||
for queueArn := range externalTargets {
|
||||
arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(queueArn), 1), queueArn))
|
||||
for _, arn := range arns {
|
||||
arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(arn), 1), arn))
|
||||
}
|
||||
|
||||
log.Println(arnMsg)
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -75,13 +75,10 @@ func verifyError(c *check, response *http.Response, code, description string, st
|
|||
|
||||
func runAllTests(suite *TestSuiteCommon, c *check) {
|
||||
suite.SetUpSuite(c)
|
||||
suite.TestBucketSQSNotificationWebHook(c)
|
||||
suite.TestObjectDir(c)
|
||||
suite.TestBucketSQSNotificationAMQP(c)
|
||||
suite.TestBucketPolicy(c)
|
||||
suite.TestDeleteBucket(c)
|
||||
suite.TestDeleteBucketNotEmpty(c)
|
||||
suite.TestListenBucketNotificationHandler(c)
|
||||
suite.TestDeleteMultipleObjects(c)
|
||||
suite.TestDeleteObject(c)
|
||||
suite.TestNonExistentBucket(c)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -38,6 +38,10 @@ func handleSignals() {
|
|||
stopProcess := func() bool {
|
||||
var err, oerr error
|
||||
|
||||
if globalNotificationSys != nil {
|
||||
globalNotificationSys.RemoveAllRemoteTargets()
|
||||
}
|
||||
|
||||
err = globalHTTPServer.Shutdown()
|
||||
errorIf(err, "Unable to shutdown http server")
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -351,7 +351,10 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
|
|||
globalMinioHost = host
|
||||
globalMinioPort = port
|
||||
globalMinioAddr = getEndpointsLocalAddr(testServer.Disks)
|
||||
initGlobalS3Peers(testServer.Disks)
|
||||
globalNotificationSys, err = NewNotificationSys(globalServerConfig, testServer.Disks)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize queue configuration")
|
||||
}
|
||||
|
||||
return testServer
|
||||
}
|
||||
|
@ -511,11 +514,6 @@ func resetGlobalNSLock() {
|
|||
}
|
||||
}
|
||||
|
||||
// reset Global event notifier.
|
||||
func resetGlobalEventnotify() {
|
||||
globalEventNotifier = nil
|
||||
}
|
||||
|
||||
func resetGlobalEndpoints() {
|
||||
globalEndpoints = EndpointList{}
|
||||
}
|
||||
|
@ -558,8 +556,6 @@ func resetTestGlobals() {
|
|||
resetGlobalConfig()
|
||||
// Reset global NSLock.
|
||||
resetGlobalNSLock()
|
||||
// Reset global event notifier.
|
||||
resetGlobalEventnotify()
|
||||
// Reset global endpoints.
|
||||
resetGlobalEndpoints()
|
||||
// Reset global isXL flag.
|
||||
|
@ -1637,18 +1633,6 @@ func getCompleteMultipartUploadURL(endPoint, bucketName, objectName, uploadID st
|
|||
return makeTestTargetURL(endPoint, bucketName, objectName, queryValue)
|
||||
}
|
||||
|
||||
// return URL for put bucket notification.
|
||||
func getPutBucketNotificationURL(endPoint, bucketName string) string {
|
||||
return getGetBucketNotificationURL(endPoint, bucketName)
|
||||
}
|
||||
|
||||
// return URL for get bucket notification.
|
||||
func getGetBucketNotificationURL(endPoint, bucketName string) string {
|
||||
queryValue := url.Values{}
|
||||
queryValue.Set("notification", "")
|
||||
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
|
||||
}
|
||||
|
||||
// return URL for listen bucket notification.
|
||||
func getListenBucketNotificationURL(endPoint, bucketName string, prefixes, suffixes, events []string) string {
|
||||
queryValue := url.Values{}
|
||||
|
@ -1720,7 +1704,7 @@ func newTestObjectLayer(endpoints EndpointList) (newObject ObjectLayer, err erro
|
|||
}
|
||||
|
||||
// Initialize a new event notifier.
|
||||
if err = initEventNotifier(xl); err != nil {
|
||||
if globalNotificationSys, err = NewNotificationSys(globalServerConfig, endpoints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -36,6 +36,7 @@ import (
|
|||
"github.com/minio/minio/browser"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
)
|
||||
|
||||
|
@ -568,10 +569,10 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
// Notify object created event.
|
||||
eventNotify(eventData{
|
||||
Type: ObjectCreatedPut,
|
||||
Bucket: bucket,
|
||||
ObjInfo: objInfo,
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectCreatedPut,
|
||||
BucketName: bucket,
|
||||
Object: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
})
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -41,6 +41,21 @@ import (
|
|||
"github.com/minio/minio/pkg/hash"
|
||||
)
|
||||
|
||||
// Implement a dummy flush writer.
|
||||
type flushWriter struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
// Flush writer is a dummy writer compatible with http.Flusher and http.ResponseWriter.
|
||||
func (f *flushWriter) Flush() {}
|
||||
func (f *flushWriter) Write(b []byte) (n int, err error) { return f.Writer.Write(b) }
|
||||
func (f *flushWriter) Header() http.Header { return http.Header{} }
|
||||
func (f *flushWriter) WriteHeader(code int) {}
|
||||
|
||||
func newFlushWriter(writer io.Writer) http.ResponseWriter {
|
||||
return &flushWriter{writer}
|
||||
}
|
||||
|
||||
// Tests private function writeWebErrorResponse.
|
||||
func TestWriteWebErrorResponse(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
|
|
|
@ -228,9 +228,9 @@ func newXLSets(endpoints EndpointList, format *formatXLV2, setCount int, drivesP
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize a new event notifier.
|
||||
if err := initEventNotifier(s); err != nil {
|
||||
return nil, err
|
||||
// Initialize notification system.
|
||||
if err = globalNotificationSys.Init(s); err != nil {
|
||||
return nil, fmt.Errorf("Unable to initialize event notification. %s", err)
|
||||
}
|
||||
|
||||
// Start the disk monitoring and connect routine.
|
||||
|
|
|
@ -0,0 +1,83 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ARN - SQS resource name representation.
type ARN struct {
	// TargetID identifies the notification target (ID and name).
	TargetID
	// region is the region segment of the ARN; may be empty.
	region string
}
|
||||
|
||||
// String - returns string representation.
|
||||
func (arn ARN) String() string {
|
||||
if arn.TargetID.ID == "" && arn.TargetID.Name == "" && arn.region == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
return "arn:minio:sqs:" + arn.region + ":" + arn.TargetID.String()
|
||||
}
|
||||
|
||||
// MarshalXML - encodes to XML data.
|
||||
func (arn ARN) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
return e.EncodeElement(arn.String(), start)
|
||||
}
|
||||
|
||||
// UnmarshalXML - decodes XML data.
|
||||
func (arn *ARN) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var s string
|
||||
if err := d.DecodeElement(&s, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parsedARN, err := parseARN(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*arn = *parsedARN
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseARN - parses string to ARN.
|
||||
func parseARN(s string) (*ARN, error) {
|
||||
// ARN must be in the format of arn:minio:sqs:<REGION>:<ID>:<TYPE>
|
||||
if !strings.HasPrefix(s, "arn:minio:sqs:") {
|
||||
return nil, &ErrInvalidARN{s}
|
||||
}
|
||||
|
||||
tokens := strings.Split(s, ":")
|
||||
if len(tokens) != 6 {
|
||||
return nil, &ErrInvalidARN{s}
|
||||
}
|
||||
|
||||
if tokens[4] == "" || tokens[5] == "" {
|
||||
return nil, &ErrInvalidARN{s}
|
||||
}
|
||||
|
||||
return &ARN{
|
||||
region: tokens[3],
|
||||
TargetID: TargetID{
|
||||
ID: tokens[4],
|
||||
Name: tokens[5],
|
||||
},
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,129 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestARNString - table test for ARN.String over zero-value, region-less
// and regioned ARNs.
func TestARNString(t *testing.T) {
	testCases := []struct {
		arn            ARN
		expectedResult string
	}{
		{ARN{}, ""},
		{ARN{TargetID{"1", "webhook"}, ""}, "arn:minio:sqs::1:webhook"},
		{ARN{TargetID{"1", "webhook"}, "us-east-1"}, "arn:minio:sqs:us-east-1:1:webhook"},
	}

	for i, testCase := range testCases {
		result := testCase.arn.String()

		if result != testCase.expectedResult {
			t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
		}
	}
}
||||
|
||||
// TestARNMarshalXML - table test for XML encoding of ARNs, including the
// zero value which encodes as an empty element.
func TestARNMarshalXML(t *testing.T) {
	testCases := []struct {
		arn          ARN
		expectedData []byte
		expectErr    bool
	}{
		{ARN{}, []byte("<ARN></ARN>"), false},
		{ARN{TargetID{"1", "webhook"}, ""}, []byte("<ARN>arn:minio:sqs::1:webhook</ARN>"), false},
		{ARN{TargetID{"1", "webhook"}, "us-east-1"}, []byte("<ARN>arn:minio:sqs:us-east-1:1:webhook</ARN>"), false},
	}

	for i, testCase := range testCases {
		data, err := xml.Marshal(testCase.arn)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}

		if !testCase.expectErr {
			if !reflect.DeepEqual(data, testCase.expectedData) {
				t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data))
			}
		}
	}
}
|
||||
|
||||
// TestARNUnmarshalXML - table test for XML decoding of ARNs; empty and
// incomplete ARNs must fail, valid ones must round-trip.
func TestARNUnmarshalXML(t *testing.T) {
	testCases := []struct {
		data        []byte
		expectedARN *ARN
		expectErr   bool
	}{
		{[]byte("<ARN></ARN>"), nil, true},
		{[]byte("<ARN>arn:minio:sqs:::</ARN>"), nil, true},
		{[]byte("<ARN>arn:minio:sqs::1:webhook</ARN>"), &ARN{TargetID{"1", "webhook"}, ""}, false},
		{[]byte("<ARN>arn:minio:sqs:us-east-1:1:webhook</ARN>"), &ARN{TargetID{"1", "webhook"}, "us-east-1"}, false},
	}

	for i, testCase := range testCases {
		arn := &ARN{}
		err := xml.Unmarshal(testCase.data, &arn)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}

		if !testCase.expectErr {
			if *arn != *testCase.expectedARN {
				t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedARN, arn)
			}
		}
	}
}
|
||||
|
||||
// TestParseARN - table test for parseARN: wrong service/partition, missing
// ID or type, and extra tokens must fail; well-formed ARNs must parse.
func TestParseARN(t *testing.T) {
	testCases := []struct {
		s           string
		expectedARN *ARN
		expectErr   bool
	}{
		{"", nil, true},
		{"arn:minio:sqs:::", nil, true},
		{"arn:minio:sqs::1:webhook:remote", nil, true},
		{"arn:aws:sqs::1:webhook", nil, true},
		{"arn:minio:sns::1:webhook", nil, true},
		{"arn:minio:sqs::1:webhook", &ARN{TargetID{"1", "webhook"}, ""}, false},
		{"arn:minio:sqs:us-east-1:1:webhook", &ARN{TargetID{"1", "webhook"}, "us-east-1"}, false},
	}

	for i, testCase := range testCases {
		arn, err := parseARN(testCase.s)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}

		if !testCase.expectErr {
			if *arn != *testCase.expectedARN {
				t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedARN, arn)
			}
		}
	}
}
|
|
@ -0,0 +1,292 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/minio/minio-go/pkg/set"
|
||||
)
|
||||
|
||||
// ValidateFilterRuleValue - checks if given value is filter rule value or not.
|
||||
func ValidateFilterRuleValue(value string) error {
|
||||
for _, segment := range strings.Split(value, "/") {
|
||||
if segment == "." || segment == ".." {
|
||||
return &ErrInvalidFilterValue{value}
|
||||
}
|
||||
}
|
||||
|
||||
if len(value) <= 1024 && utf8.ValidString(value) && !strings.Contains(value, `\`) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &ErrInvalidFilterValue{value}
|
||||
}
|
||||
|
||||
// FilterRule - represents elements inside <FilterRule>...</FilterRule>
|
||||
type FilterRule struct {
|
||||
Name string `xml:"Name"`
|
||||
Value string `xml:"Value"`
|
||||
}
|
||||
|
||||
// UnmarshalXML - decodes XML data.
|
||||
func (filter *FilterRule) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
// Make subtype to avoid recursive UnmarshalXML().
|
||||
type filterRule FilterRule
|
||||
rule := filterRule{}
|
||||
if err := d.DecodeElement(&rule, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rule.Name != "prefix" && rule.Name != "suffix" {
|
||||
return &ErrInvalidFilterName{rule.Name}
|
||||
}
|
||||
|
||||
if err := ValidateFilterRuleValue(filter.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*filter = FilterRule(rule)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilterRuleList - represents multiple <FilterRule>...</FilterRule>
|
||||
type FilterRuleList struct {
|
||||
Rules []FilterRule `xml:"FilterRule,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalXML - decodes XML data.
|
||||
func (ruleList *FilterRuleList) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
// Make subtype to avoid recursive UnmarshalXML().
|
||||
type filterRuleList FilterRuleList
|
||||
rules := filterRuleList{}
|
||||
if err := d.DecodeElement(&rules, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// FilterRuleList must have only one prefix and/or suffix.
|
||||
nameSet := set.NewStringSet()
|
||||
for _, rule := range rules.Rules {
|
||||
if nameSet.Contains(rule.Name) {
|
||||
if rule.Name == "prefix" {
|
||||
return &ErrFilterNamePrefix{}
|
||||
}
|
||||
|
||||
return &ErrFilterNameSuffix{}
|
||||
}
|
||||
|
||||
nameSet.Add(rule.Name)
|
||||
}
|
||||
|
||||
*ruleList = FilterRuleList(rules)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Pattern - returns pattern using prefix and suffix values.
|
||||
func (ruleList FilterRuleList) Pattern() string {
|
||||
var prefix string
|
||||
var suffix string
|
||||
|
||||
for _, rule := range ruleList.Rules {
|
||||
switch rule.Name {
|
||||
case "prefix":
|
||||
prefix = rule.Value
|
||||
case "suffix":
|
||||
suffix = rule.Value
|
||||
}
|
||||
}
|
||||
|
||||
return NewPattern(prefix, suffix)
|
||||
}
|
||||
|
||||
// S3Key - represents elements inside <S3Key>...</S3Key>
type S3Key struct {
	// RuleList holds the prefix/suffix filter rules.
	RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key,omitempty"`
}

// common - represents common elements inside <QueueConfiguration>, <CloudFunctionConfiguration>
// and <TopicConfiguration>
type common struct {
	ID     string `xml:"Id" json:"Id"`         // configuration identifier
	Filter S3Key  `xml:"Filter" json:"Filter"` // object key filters
	Events []Name `xml:"Event" json:"Event"`   // subscribed event names
}
|
||||
|
||||
// Queue - represents elements inside <QueueConfiguration>
|
||||
type Queue struct {
|
||||
common
|
||||
ARN ARN `xml:"Queue"`
|
||||
}
|
||||
|
||||
// UnmarshalXML - decodes XML data.
|
||||
func (q *Queue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
// Make subtype to avoid recursive UnmarshalXML().
|
||||
type queue Queue
|
||||
parsedQueue := queue{}
|
||||
if err := d.DecodeElement(&parsedQueue, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(parsedQueue.Events) == 0 {
|
||||
return errors.New("missing event name(s)")
|
||||
}
|
||||
|
||||
eventStringSet := set.NewStringSet()
|
||||
for _, eventName := range parsedQueue.Events {
|
||||
if eventStringSet.Contains(eventName.String()) {
|
||||
return &ErrDuplicateEventName{eventName}
|
||||
}
|
||||
|
||||
eventStringSet.Add(eventName.String())
|
||||
}
|
||||
|
||||
*q = Queue(parsedQueue)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate - checks whether queue has valid values or not.
|
||||
func (q Queue) Validate(region string, targetList *TargetList) error {
|
||||
if region != "" && q.ARN.region != region {
|
||||
return &ErrUnknownRegion{q.ARN.region}
|
||||
}
|
||||
|
||||
if !targetList.Exists(q.ARN.TargetID) {
|
||||
return &ErrARNNotFound{q.ARN}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRegion - sets region value to queue's ARN.
|
||||
func (q *Queue) SetRegion(region string) {
|
||||
q.ARN.region = region
|
||||
}
|
||||
|
||||
// ToRulesMap - converts Queue to RulesMap
|
||||
func (q Queue) ToRulesMap() RulesMap {
|
||||
pattern := q.Filter.RuleList.Pattern()
|
||||
return NewRulesMap(q.Events, pattern, q.ARN.TargetID)
|
||||
}
|
||||
|
||||
// Unused. Available for completion.
type lambda struct {
	common
	ARN string `xml:"CloudFunction"`
}

// Unused. Available for completion.
type topic struct {
	common
	ARN string `xml:"Topic" json:"Topic"`
}

// Config - notification configuration described in
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
type Config struct {
	XMLName   xml.Name `xml:"NotificationConfiguration"`
	QueueList []Queue  `xml:"QueueConfiguration,omitempty"`
	// Lambda and topic configurations are decoded only so UnmarshalXML can
	// reject them as unsupported.
	LambdaList []lambda `xml:"CloudFunctionConfiguration,omitempty"`
	TopicList  []topic  `xml:"TopicConfiguration,omitempty"`
}
|
||||
|
||||
// UnmarshalXML - decodes XML data.
|
||||
func (conf *Config) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
// Make subtype to avoid recursive UnmarshalXML().
|
||||
type config Config
|
||||
parsedConfig := config{}
|
||||
if err := d.DecodeElement(&parsedConfig, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(parsedConfig.QueueList) == 0 {
|
||||
return errors.New("missing queue configuration(s)")
|
||||
}
|
||||
|
||||
for i, q1 := range parsedConfig.QueueList[:len(parsedConfig.QueueList)-1] {
|
||||
for _, q2 := range parsedConfig.QueueList[i+1:] {
|
||||
if reflect.DeepEqual(q1, q2) {
|
||||
return &ErrDuplicateQueueConfiguration{q1}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(parsedConfig.LambdaList) > 0 || len(parsedConfig.TopicList) > 0 {
|
||||
return &ErrUnsupportedConfiguration{}
|
||||
}
|
||||
|
||||
*conf = Config(parsedConfig)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate - checks whether config has valid values or not.
|
||||
func (conf Config) Validate(region string, targetList *TargetList) error {
|
||||
for _, queue := range conf.QueueList {
|
||||
if err := queue.Validate(region, targetList); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: Need to discuss/check why same ARN cannot be used in another queue configuration.
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRegion - sets region to all queue configuration.
|
||||
func (conf *Config) SetRegion(region string) {
|
||||
for i := range conf.QueueList {
|
||||
conf.QueueList[i].SetRegion(region)
|
||||
}
|
||||
}
|
||||
|
||||
// ToRulesMap - converts all queue configuration to RulesMap.
|
||||
func (conf *Config) ToRulesMap() RulesMap {
|
||||
rulesMap := make(RulesMap)
|
||||
|
||||
for _, queue := range conf.QueueList {
|
||||
rulesMap.Add(queue.ToRulesMap())
|
||||
}
|
||||
|
||||
return rulesMap
|
||||
}
|
||||
|
||||
// ParseConfig - parses data in reader to notification configuration.
|
||||
func ParseConfig(reader io.Reader, region string, targetList *TargetList) (*Config, error) {
|
||||
var config Config
|
||||
if err := xml.NewDecoder(reader).Decode(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(config.QueueList) == 0 {
|
||||
return nil, errors.New("missing queue configuration(s)")
|
||||
}
|
||||
|
||||
if err := config.Validate(region, targetList); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.SetRegion(region)
|
||||
|
||||
return &config, nil
|
||||
}
|
|
@ -0,0 +1,961 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestValidateFilterRuleValue checks rejection of dot path segments,
// parent references, over-long values, invalid UTF-8 and backslashes,
// while a normal UTF-8 value is accepted.
func TestValidateFilterRuleValue(t *testing.T) {
	testCases := []struct {
		value     string
		expectErr bool
	}{
		{"foo/.", true},
		{"../foo", true},
		// Over-long value (repeated "foo/bar/baz"); presumably exceeds a
		// length limit enforced by ValidateFilterRuleValue — TODO confirm.
		{`foo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/baz`, true},
		// Invalid UTF-8 bytes.
		{string([]byte{0xff, 0xfe, 0xfd}), true},
		{`foo\bar`, true},
		{"Hello/世界", false},
	}

	for i, testCase := range testCases {
		err := ValidateFilterRuleValue(testCase.value)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}
|
||||
|
||||
// TestFilterRuleUnmarshalXML verifies FilterRule XML decoding: missing or
// empty Name/Value, unknown rule names ("Prefix", "ends") are rejected;
// lowercase "prefix"/"suffix" with valid values decode successfully.
func TestFilterRuleUnmarshalXML(t *testing.T) {
	testCases := []struct {
		data           []byte
		expectedResult *FilterRule
		expectErr      bool
	}{
		{[]byte(`<FilterRule></FilterRule>`), nil, true},
		{[]byte(`<FilterRule><Name></Name></FilterRule>`), nil, true},
		{[]byte(`<FilterRule><Value></Value></FilterRule>`), nil, true},
		{[]byte(`<FilterRule><Name></Name><Value></Value></FilterRule>`), nil, true},
		// Rule names are case sensitive: "Prefix" is invalid.
		{[]byte(`<FilterRule><Name>Prefix</Name><Value>Hello/世界</Value></FilterRule>`), nil, true},
		{[]byte(`<FilterRule><Name>ends</Name><Value>foo/bar</Value></FilterRule>`), nil, true},
		{[]byte(`<FilterRule><Name>prefix</Name><Value>Hello/世界</Value></FilterRule>`), &FilterRule{"prefix", "Hello/世界"}, false},
		{[]byte(`<FilterRule><Name>suffix</Name><Value>foo/bar</Value></FilterRule>`), &FilterRule{"suffix", "foo/bar"}, false},
	}

	for i, testCase := range testCases {
		result := &FilterRule{}
		err := xml.Unmarshal(testCase.data, result)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}

		if !testCase.expectErr {
			if !reflect.DeepEqual(result, testCase.expectedResult) {
				t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
			}
		}
	}
}
|
||||
|
||||
// TestFilterRuleListUnmarshalXML verifies FilterRuleList XML decoding:
// duplicate prefix rules or duplicate suffix rules in one list are
// rejected; a single prefix, a single suffix, or one of each is accepted.
func TestFilterRuleListUnmarshalXML(t *testing.T) {
	testCases := []struct {
		data           []byte
		expectedResult *FilterRuleList
		expectErr      bool
	}{
		// Two suffix rules — invalid.
		{[]byte(`<S3Key><FilterRule><Name>suffix</Name><Value>Hello/世界</Value></FilterRule><FilterRule><Name>suffix</Name><Value>foo/bar</Value></FilterRule></S3Key>`), nil, true},
		// Two prefix rules — invalid.
		{[]byte(`<S3Key><FilterRule><Name>prefix</Name><Value>Hello/世界</Value></FilterRule><FilterRule><Name>prefix</Name><Value>foo/bar</Value></FilterRule></S3Key>`), nil, true},
		{[]byte(`<S3Key><FilterRule><Name>prefix</Name><Value>Hello/世界</Value></FilterRule></S3Key>`), &FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}}}, false},
		{[]byte(`<S3Key><FilterRule><Name>suffix</Name><Value>foo/bar</Value></FilterRule></S3Key>`), &FilterRuleList{[]FilterRule{{"suffix", "foo/bar"}}}, false},
		{[]byte(`<S3Key><FilterRule><Name>prefix</Name><Value>Hello/世界</Value></FilterRule><FilterRule><Name>suffix</Name><Value>foo/bar</Value></FilterRule></S3Key>`), &FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}, {"suffix", "foo/bar"}}}, false},
	}

	for i, testCase := range testCases {
		result := &FilterRuleList{}
		err := xml.Unmarshal(testCase.data, result)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}

		if !testCase.expectErr {
			if !reflect.DeepEqual(result, testCase.expectedResult) {
				t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
			}
		}
	}
}
|
||||
|
||||
func TestFilterRuleListPattern(t *testing.T) {
|
||||
testCases := []struct {
|
||||
filterRuleList FilterRuleList
|
||||
expectedResult string
|
||||
}{
|
||||
{FilterRuleList{}, ""},
|
||||
{FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}}}, "Hello/世界*"},
|
||||
{FilterRuleList{[]FilterRule{{"suffix", "foo/bar"}}}, "*foo/bar"},
|
||||
{FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}, {"suffix", "foo/bar"}}}, "Hello/世界*foo/bar"},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.filterRuleList.Pattern()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestQueueUnmarshalXML verifies Queue XML decoding; a configuration that
// lists the same event name twice must fail to decode.
func TestQueueUnmarshalXML(t *testing.T) {
	// Valid: empty filter, three distinct wildcard events.
	dataCase1 := []byte(`
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>`)

	// Valid: prefix+suffix filter rules with a single event.
	dataCase2 := []byte(`
<QueueConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>`)

	// Invalid: "s3:ObjectCreated:Put" appears twice.
	dataCase3 := []byte(`
<QueueConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>`)

	testCases := []struct {
		data      []byte
		expectErr bool
	}{
		{dataCase1, false},
		{dataCase2, false},
		{dataCase3, true},
	}

	for i, testCase := range testCases {
		err := xml.Unmarshal(testCase.data, &Queue{})
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}
|
||||
|
||||
// TestQueueValidate verifies Queue.Validate against region mismatches and
// missing targets: a nil/empty target list fails, a matching target with
// a matching (or empty) region passes.
func TestQueueValidate(t *testing.T) {
	var data []byte
	data = []byte(`
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>`)
	queue1 := &Queue{}
	if err := xml.Unmarshal(data, queue1); err != nil {
		panic(err)
	}

	data = []byte(`
<QueueConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>`)
	queue2 := &Queue{}
	if err := xml.Unmarshal(data, queue2); err != nil {
		panic(err)
	}

	// queue3 carries a different ARN region (eu-west-2); validated below
	// with an empty region argument.
	data = []byte(`
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:eu-west-2:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>`)
	queue3 := &Queue{}
	if err := xml.Unmarshal(data, queue3); err != nil {
		panic(err)
	}

	// Empty target list vs. one containing the webhook target "1".
	targetList1 := NewTargetList()

	targetList2 := NewTargetList()
	if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil {
		panic(err)
	}

	testCases := []struct {
		queue      *Queue
		region     string
		targetList *TargetList
		expectErr  bool
	}{
		{queue1, "eu-west-1", nil, true},
		{queue2, "us-east-1", targetList1, true},
		{queue3, "", targetList2, false},
		{queue2, "us-east-1", targetList2, false},
	}

	for i, testCase := range testCases {
		err := testCase.queue.Validate(testCase.region, testCase.targetList)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}
|
||||
|
||||
// TestQueueSetRegion verifies that Queue.SetRegion overwrites the ARN's
// region with the given value, including setting it to empty and filling
// in a region-less ARN.
func TestQueueSetRegion(t *testing.T) {
	var data []byte
	data = []byte(`
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>`)
	queue1 := &Queue{}
	if err := xml.Unmarshal(data, queue1); err != nil {
		panic(err)
	}

	// queue2's ARN has no region component ("arn:minio:sqs::1:webhook").
	data = []byte(`
<QueueConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs::1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>`)
	queue2 := &Queue{}
	if err := xml.Unmarshal(data, queue2); err != nil {
		panic(err)
	}

	testCases := []struct {
		queue          *Queue
		region         string
		expectedResult ARN
	}{
		{queue1, "eu-west-1", ARN{TargetID{"1", "webhook"}, "eu-west-1"}},
		{queue1, "", ARN{TargetID{"1", "webhook"}, ""}},
		{queue2, "us-east-1", ARN{TargetID{"1", "webhook"}, "us-east-1"}},
		{queue2, "", ARN{TargetID{"1", "webhook"}, ""}},
	}

	for i, testCase := range testCases {
		testCase.queue.SetRegion(testCase.region)
		result := testCase.queue.ARN

		if !reflect.DeepEqual(result, testCase.expectedResult) {
			t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
		}
	}
}
|
||||
|
||||
// TestQueueToRulesMap verifies Queue.ToRulesMap: an empty filter maps to
// pattern "*"; prefix "images/" plus suffix "jpg" maps to "images/*jpg".
func TestQueueToRulesMap(t *testing.T) {
	var data []byte
	data = []byte(`
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>`)
	queueCase1 := &Queue{}
	if err := xml.Unmarshal(data, queueCase1); err != nil {
		panic(err)
	}

	data = []byte(`
<QueueConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>`)
	queueCase2 := &Queue{}
	if err := xml.Unmarshal(data, queueCase2); err != nil {
		panic(err)
	}

	rulesMapCase1 := NewRulesMap([]Name{ObjectAccessedAll, ObjectCreatedAll, ObjectRemovedAll}, "*", TargetID{"1", "webhook"})
	rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedPut}, "images/*jpg", TargetID{"1", "webhook"})

	testCases := []struct {
		queue          *Queue
		expectedResult RulesMap
	}{
		{queueCase1, rulesMapCase1},
		{queueCase2, rulesMapCase2},
	}

	for i, testCase := range testCases {
		result := testCase.queue.ToRulesMap()

		if !reflect.DeepEqual(result, testCase.expectedResult) {
			t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
		}
	}
}
|
||||
|
||||
// TestConfigUnmarshalXML verifies Config XML decoding; topic and cloud
// function configurations (dataCase4) are unsupported and must fail.
func TestConfigUnmarshalXML(t *testing.T) {
	dataCase1 := []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)

	dataCase2 := []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)

	// Two queue configurations in one document — valid.
	dataCase3 := []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
<QueueConfiguration>
<Id>2</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)

	// Contains CloudFunction and Topic configurations — must be rejected.
	dataCase4 := []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
<CloudFunctionConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>suffix</Name>
<Value>.jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Cloudcode>arn:aws:lambda:us-west-2:444455556666:cloud-function-A</Cloudcode>
<Event>s3:ObjectCreated:Put</Event>
</CloudFunctionConfiguration>
<TopicConfiguration>
<Topic>arn:aws:sns:us-west-2:444455556666:sns-notification-one</Topic>
<Event>s3:ObjectCreated:*</Event>
</TopicConfiguration>
</NotificationConfiguration>
`)
	testCases := []struct {
		data      []byte
		expectErr bool
	}{
		{dataCase1, false},
		{dataCase2, false},
		{dataCase3, false},
		{dataCase4, true},
	}

	for i, testCase := range testCases {
		err := xml.Unmarshal(testCase.data, &Config{})
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}
|
||||
|
||||
// TestConfigValidate verifies Config.Validate across region and target
// list combinations, mirroring TestQueueValidate at the config level.
func TestConfigValidate(t *testing.T) {
	var data []byte
	data = []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)
	config1 := &Config{}
	if err := xml.Unmarshal(data, config1); err != nil {
		panic(err)
	}

	data = []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)
	config2 := &Config{}
	if err := xml.Unmarshal(data, config2); err != nil {
		panic(err)
	}

	data = []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
<QueueConfiguration>
<Id>2</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)
	config3 := &Config{}
	if err := xml.Unmarshal(data, config3); err != nil {
		panic(err)
	}

	// Empty target list vs. one containing the webhook target "1".
	targetList1 := NewTargetList()

	targetList2 := NewTargetList()
	if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil {
		panic(err)
	}

	testCases := []struct {
		config     *Config
		region     string
		targetList *TargetList
		expectErr  bool
	}{
		{config1, "eu-west-1", nil, true},
		{config2, "us-east-1", targetList1, true},
		{config3, "", targetList2, false},
		{config2, "us-east-1", targetList2, false},
	}

	for i, testCase := range testCases {
		err := testCase.config.Validate(testCase.region, testCase.targetList)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}
|
||||
|
||||
// TestConfigSetRegion verifies Config.SetRegion stamps the given region
// onto the ARN of every queue configuration in the config.
func TestConfigSetRegion(t *testing.T) {
	var data []byte
	data = []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)
	config1 := &Config{}
	if err := xml.Unmarshal(data, config1); err != nil {
		panic(err)
	}

	// config2 uses a region-less ARN ("arn:minio:sqs::1:webhook").
	data = []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs::1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)
	config2 := &Config{}
	if err := xml.Unmarshal(data, config2); err != nil {
		panic(err)
	}

	// config3 has two queue configurations (webhook and amqp targets).
	data = []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
<QueueConfiguration>
<Id>2</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:2:amqp</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)
	config3 := &Config{}
	if err := xml.Unmarshal(data, config3); err != nil {
		panic(err)
	}

	testCases := []struct {
		config         *Config
		region         string
		expectedResult []ARN
	}{
		{config1, "eu-west-1", []ARN{{TargetID{"1", "webhook"}, "eu-west-1"}}},
		{config1, "", []ARN{{TargetID{"1", "webhook"}, ""}}},
		{config2, "us-east-1", []ARN{{TargetID{"1", "webhook"}, "us-east-1"}}},
		{config2, "", []ARN{{TargetID{"1", "webhook"}, ""}}},
		{config3, "us-east-1", []ARN{{TargetID{"1", "webhook"}, "us-east-1"}, {TargetID{"2", "amqp"}, "us-east-1"}}},
		{config3, "", []ARN{{TargetID{"1", "webhook"}, ""}, {TargetID{"2", "amqp"}, ""}}},
	}

	for i, testCase := range testCases {
		testCase.config.SetRegion(testCase.region)
		// Collect the resulting ARNs in queue-list order for comparison.
		result := []ARN{}
		for _, queue := range testCase.config.QueueList {
			result = append(result, queue.ARN)
		}

		if !reflect.DeepEqual(result, testCase.expectedResult) {
			t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
		}
	}
}
|
||||
|
||||
// TestConfigToRulesMap verifies Config.ToRulesMap merges the rules of all
// queue configurations into a single RulesMap.
func TestConfigToRulesMap(t *testing.T) {
	var data []byte
	data = []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)
	config1 := &Config{}
	if err := xml.Unmarshal(data, config1); err != nil {
		panic(err)
	}

	data = []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs::1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)
	config2 := &Config{}
	if err := xml.Unmarshal(data, config2); err != nil {
		panic(err)
	}

	data = []byte(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
<QueueConfiguration>
<Id>2</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:2:amqp</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)
	config3 := &Config{}
	if err := xml.Unmarshal(data, config3); err != nil {
		panic(err)
	}

	rulesMapCase1 := NewRulesMap([]Name{ObjectAccessedAll, ObjectCreatedAll, ObjectRemovedAll}, "*", TargetID{"1", "webhook"})

	rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedPut}, "images/*jpg", TargetID{"1", "webhook"})

	// Expected merge of both queue configurations in config3.
	rulesMapCase3 := NewRulesMap([]Name{ObjectAccessedAll, ObjectCreatedAll, ObjectRemovedAll}, "*", TargetID{"1", "webhook"})
	rulesMapCase3.add([]Name{ObjectCreatedPut}, "images/*jpg", TargetID{"2", "amqp"})

	testCases := []struct {
		config         *Config
		expectedResult RulesMap
	}{
		{config1, rulesMapCase1},
		{config2, rulesMapCase2},
		{config3, rulesMapCase3},
	}

	for i, testCase := range testCases {
		result := testCase.config.ToRulesMap()

		if !reflect.DeepEqual(result, testCase.expectedResult) {
			t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
		}
	}
}
|
||||
|
||||
// TestParseConfig verifies ParseConfig end to end: decode, validation
// against region/target list, and rejection of unsupported topic and
// cloud function configurations. Readers are rewound before each case
// because some readers are reused across cases.
func TestParseConfig(t *testing.T) {
	reader1 := strings.NewReader(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)

	reader2 := strings.NewReader(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)

	reader3 := strings.NewReader(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
<QueueConfiguration>
<Id>2</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>prefix</Name>
<Value>images/</Value>
</FilterRule>
<FilterRule>
<Name>suffix</Name>
<Value>jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectCreated:Put</Event>
</QueueConfiguration>
</NotificationConfiguration>
`)

	// Unsupported topic and cloud function configurations.
	reader4 := strings.NewReader(`
<NotificationConfiguration>
<QueueConfiguration>
<Id>1</Id>
<Filter></Filter>
<Queue>arn:minio:sqs:us-east-1:1:webhook</Queue>
<Event>s3:ObjectAccessed:*</Event>
<Event>s3:ObjectCreated:*</Event>
<Event>s3:ObjectRemoved:*</Event>
</QueueConfiguration>
<CloudFunctionConfiguration>
<Id>1</Id>
<Filter>
<S3Key>
<FilterRule>
<Name>suffix</Name>
<Value>.jpg</Value>
</FilterRule>
</S3Key>
</Filter>
<Cloudcode>arn:aws:lambda:us-west-2:444455556666:cloud-function-A</Cloudcode>
<Event>s3:ObjectCreated:Put</Event>
</CloudFunctionConfiguration>
<TopicConfiguration>
<Topic>arn:aws:sns:us-west-2:444455556666:sns-notification-one</Topic>
<Event>s3:ObjectCreated:*</Event>
</TopicConfiguration>
</NotificationConfiguration>
`)

	// Empty target list vs. one containing the webhook target "1".
	targetList1 := NewTargetList()

	targetList2 := NewTargetList()
	if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil {
		panic(err)
	}

	testCases := []struct {
		reader     *strings.Reader
		region     string
		targetList *TargetList
		expectErr  bool
	}{
		{reader1, "eu-west-1", nil, true},
		{reader2, "us-east-1", targetList1, true},
		{reader4, "us-east-1", targetList1, true},
		{reader3, "", targetList2, false},
		{reader2, "us-east-1", targetList2, false},
	}

	for i, testCase := range testCases {
		// Rewind: reader2 is consumed by an earlier case.
		if _, err := testCase.reader.Seek(0, 0); err != nil {
			panic(err)
		}
		_, err := ParseConfig(testCase.reader, testCase.region, testCase.targetList)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}
|
|
@ -0,0 +1,152 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// IsEventError - checks whether given error is event error or not.
|
||||
func IsEventError(err error) bool {
|
||||
switch err.(type) {
|
||||
case ErrInvalidFilterName, *ErrInvalidFilterName:
|
||||
return true
|
||||
case ErrFilterNamePrefix, *ErrFilterNamePrefix:
|
||||
return true
|
||||
case ErrFilterNameSuffix, *ErrFilterNameSuffix:
|
||||
return true
|
||||
case ErrInvalidFilterValue, *ErrInvalidFilterValue:
|
||||
return true
|
||||
case ErrDuplicateEventName, *ErrDuplicateEventName:
|
||||
return true
|
||||
case ErrUnsupportedConfiguration, *ErrUnsupportedConfiguration:
|
||||
return true
|
||||
case ErrDuplicateQueueConfiguration, *ErrDuplicateQueueConfiguration:
|
||||
return true
|
||||
case ErrUnknownRegion, *ErrUnknownRegion:
|
||||
return true
|
||||
case ErrARNNotFound, *ErrARNNotFound:
|
||||
return true
|
||||
case ErrInvalidARN, *ErrInvalidARN:
|
||||
return true
|
||||
case ErrInvalidEventName, *ErrInvalidEventName:
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// ErrInvalidFilterName - invalid filter name error.
type ErrInvalidFilterName struct {
	FilterName string
}

// Error returns a message naming the rejected filter rule name.
func (err ErrInvalidFilterName) Error() string {
	return "invalid filter name '" + err.FilterName + "'"
}
|
||||
|
||||
// ErrFilterNamePrefix - more than one prefix usage error.
// Returned when a filter rule list contains more than one "prefix" rule.
type ErrFilterNamePrefix struct{}

// Error returns the fixed, human-readable message for this error.
func (err ErrFilterNamePrefix) Error() string {
	// The message is constant; the previous fmt.Sprintf call with no
	// format verbs was redundant (staticcheck S1039).
	return "more than one prefix in filter rule"
}
|
||||
|
||||
// ErrFilterNameSuffix - more than one suffix usage error.
// Returned when a filter rule list contains more than one "suffix" rule.
type ErrFilterNameSuffix struct{}

// Error returns the fixed, human-readable message for this error.
func (err ErrFilterNameSuffix) Error() string {
	// The message is constant; the previous fmt.Sprintf call with no
	// format verbs was redundant (staticcheck S1039).
	return "more than one suffix in filter rule"
}
|
||||
|
||||
// ErrInvalidFilterValue - invalid filter value error.
// Returned when a filter rule carries a value that cannot be used.
type ErrInvalidFilterValue struct {
	FilterValue string
}

// Error returns a message embedding the offending filter value.
func (err ErrInvalidFilterValue) Error() string {
	return fmt.Sprintf("invalid filter value '%s'", err.FilterValue)
}
|
||||
|
||||
// ErrDuplicateEventName - duplicate event name error.
// Returned when the same event name appears more than once in a
// notification configuration.
type ErrDuplicateEventName struct {
	// EventName - the repeated event name. Name implements Stringer,
	// so %v below renders its S3-style form (e.g. "s3:ObjectCreated:*").
	EventName Name
}

// Error returns a message embedding the duplicated event name.
func (err ErrDuplicateEventName) Error() string {
	return fmt.Sprintf("duplicate event name '%v' found", err.EventName)
}
|
||||
|
||||
// ErrUnsupportedConfiguration - unsupported configuration error.
// Returned when a notification configuration contains topic or cloud
// function entries, which this package does not support.
type ErrUnsupportedConfiguration struct{}

// Error returns the fixed, human-readable message for this error.
func (err ErrUnsupportedConfiguration) Error() string {
	return "topic or cloud function configuration is not supported"
}
|
||||
|
||||
// ErrDuplicateQueueConfiguration - duplicate queue configuration error.
|
||||
type ErrDuplicateQueueConfiguration struct {
|
||||
Queue Queue
|
||||
}
|
||||
|
||||
func (err ErrDuplicateQueueConfiguration) Error() string {
|
||||
var message string
|
||||
if data, xerr := xml.Marshal(err.Queue); xerr != nil {
|
||||
message = fmt.Sprintf("%+v", err.Queue)
|
||||
} else {
|
||||
message = string(data)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("duplicate queue configuration %v", message)
|
||||
}
|
||||
|
||||
// ErrUnknownRegion - unknown region error.
// Returned when a region in an ARN does not match any known region.
type ErrUnknownRegion struct {
	Region string
}

// Error returns a message embedding the unknown region.
func (err ErrUnknownRegion) Error() string {
	return fmt.Sprintf("unknown region '%s'", err.Region)
}
|
||||
|
||||
// ErrARNNotFound - ARN not found error.
// Returned when a configured ARN does not resolve to a known target.
type ErrARNNotFound struct {
	// ARN - the ARN that could not be resolved. Rendered with %v below;
	// the exact text depends on the ARN type's formatting.
	ARN ARN
}

// Error returns a message embedding the missing ARN.
func (err ErrARNNotFound) Error() string {
	return fmt.Sprintf("ARN '%v' not found", err.ARN)
}
|
||||
|
||||
// ErrInvalidARN - invalid ARN error.
// Returned when an ARN string cannot be parsed.
type ErrInvalidARN struct {
	ARN string
}

// Error returns a message embedding the malformed ARN string.
func (err ErrInvalidARN) Error() string {
	return fmt.Sprintf("invalid ARN '%s'", err.ARN)
}
|
||||
|
||||
// ErrInvalidEventName - invalid event name error.
// Returned by ParseName for strings that match no known event type.
type ErrInvalidEventName struct {
	Name string
}

// Error returns a message embedding the unrecognized event name string.
func (err ErrInvalidEventName) Error() string {
	return fmt.Sprintf("invalid event name '%s'", err.Name)
}
|
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
const (
	// NamespaceFormat - namespace log format used in some event targets.
	NamespaceFormat = "namespace"

	// AccessFormat - access log format used in some event targets.
	AccessFormat = "access"

	// AMZTimeFormat - event time format. This is a Go reference-time
	// layout (see time package); it renders timestamps in the
	// RFC3339-like, seconds-precision UTC form "2006-01-02T15:04:05Z".
	AMZTimeFormat = "2006-01-02T15:04:05Z"
)
|
||||
|
||||
// Identity represents access key who caused the event.
type Identity struct {
	// PrincipalID - identifier of the requester, serialized under the
	// S3 notification schema's "principalId" key.
	PrincipalID string `json:"principalId"`
}

// Bucket represents bucket metadata of the event.
type Bucket struct {
	Name          string   `json:"name"`
	OwnerIdentity Identity `json:"ownerIdentity"`
	ARN           string   `json:"arn"`
}

// Object represents object metadata of the event.
// Fields tagged omitempty are dropped from the JSON when zero-valued.
type Object struct {
	Key          string            `json:"key"`
	Size         int64             `json:"size,omitempty"`
	ETag         string            `json:"eTag,omitempty"`
	ContentType  string            `json:"contentType,omitempty"`
	UserMetadata map[string]string `json:"userMetadata,omitempty"`
	VersionID    string            `json:"versionId,omitempty"`
	// Sequencer has no omitempty: it is always serialized, even if empty.
	Sequencer string `json:"sequencer"`
}

// Metadata represents event metadata.
type Metadata struct {
	SchemaVersion   string `json:"s3SchemaVersion"`
	ConfigurationID string `json:"configurationId"`
	Bucket          Bucket `json:"bucket"`
	Object          Object `json:"object"`
}

// Source represents client information who triggered the event.
type Source struct {
	Host      string `json:"host"`
	Port      string `json:"port"`
	UserAgent string `json:"userAgent"`
}
|
||||
|
||||
// Event represents event notification information defined in
// http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html.
type Event struct {
	EventVersion string `json:"eventVersion"`
	EventSource  string `json:"eventSource"`
	AwsRegion    string `json:"awsRegion"`
	// EventTime is a string timestamp; presumably formatted with
	// AMZTimeFormat — confirm at the call site that populates it.
	EventTime         string            `json:"eventTime"`
	EventName         Name              `json:"eventName"`
	UserIdentity      Identity          `json:"userIdentity"`
	RequestParameters map[string]string `json:"requestParameters"`
	ResponseElements  map[string]string `json:"responseElements"`
	S3                Metadata          `json:"s3"`
	Source            Source            `json:"source"`
}

// Log represents event information for some event targets.
type Log struct {
	EventName Name
	Key       string
	Records   []Event
}
|
|
@ -0,0 +1,152 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
)
|
||||
|
||||
// Name - event type enum.
// Refer http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
type Name int

// Values of Name
const (
	// Starting at 1 + iota leaves the zero value unused, so an
	// uninitialized Name never equals a real event type (its String()
	// form is "").
	ObjectAccessedAll Name = 1 + iota
	ObjectAccessedGet
	ObjectAccessedHead
	ObjectCreatedAll
	ObjectCreatedCompleteMultipartUpload
	ObjectCreatedCopy
	ObjectCreatedPost
	ObjectCreatedPut
	ObjectRemovedAll
	ObjectRemovedDelete
)
|
||||
|
||||
// Expand - returns expanded values of abbreviated event type.
|
||||
func (name Name) Expand() []Name {
|
||||
switch name {
|
||||
case ObjectAccessedAll:
|
||||
return []Name{ObjectAccessedGet, ObjectAccessedHead}
|
||||
case ObjectCreatedAll:
|
||||
return []Name{ObjectCreatedCompleteMultipartUpload, ObjectCreatedCopy, ObjectCreatedPost, ObjectCreatedPut}
|
||||
case ObjectRemovedAll:
|
||||
return []Name{ObjectRemovedDelete}
|
||||
default:
|
||||
return []Name{name}
|
||||
}
|
||||
}
|
||||
|
||||
// String - returns string representation of event type.
// The zero Name (and any value outside the declared constants) maps to
// the empty string.
func (name Name) String() string {
	switch name {
	case ObjectAccessedAll:
		return "s3:ObjectAccessed:*"
	case ObjectAccessedGet:
		return "s3:ObjectAccessed:Get"
	case ObjectAccessedHead:
		return "s3:ObjectAccessed:Head"
	case ObjectCreatedAll:
		return "s3:ObjectCreated:*"
	case ObjectCreatedCompleteMultipartUpload:
		return "s3:ObjectCreated:CompleteMultipartUpload"
	case ObjectCreatedCopy:
		return "s3:ObjectCreated:Copy"
	case ObjectCreatedPost:
		return "s3:ObjectCreated:Post"
	case ObjectCreatedPut:
		return "s3:ObjectCreated:Put"
	case ObjectRemovedAll:
		return "s3:ObjectRemoved:*"
	case ObjectRemovedDelete:
		return "s3:ObjectRemoved:Delete"
	}

	return ""
}
|
||||
|
||||
// MarshalXML - encodes to XML data.
// The element body is the S3-style string form (see Name.String); the
// zero Name therefore encodes as an empty element.
func (name Name) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(name.String(), start)
}
|
||||
|
||||
// UnmarshalXML - decodes XML data.
|
||||
func (name *Name) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var s string
|
||||
if err := d.DecodeElement(&s, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eventName, err := ParseName(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*name = eventName
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes to JSON data.
// Serializes as a JSON string in the S3-style form (see Name.String);
// the zero Name encodes as "".
func (name Name) MarshalJSON() ([]byte, error) {
	return json.Marshal(name.String())
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data.
|
||||
func (name *Name) UnmarshalJSON(data []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eventName, err := ParseName(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*name = eventName
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseName - parses string to Name.
// This is the inverse of Name.String; any unrecognized string (including
// the empty string) yields the zero Name and an *ErrInvalidEventName.
func ParseName(s string) (Name, error) {
	switch s {
	case "s3:ObjectAccessed:*":
		return ObjectAccessedAll, nil
	case "s3:ObjectAccessed:Get":
		return ObjectAccessedGet, nil
	case "s3:ObjectAccessed:Head":
		return ObjectAccessedHead, nil
	case "s3:ObjectCreated:*":
		return ObjectCreatedAll, nil
	case "s3:ObjectCreated:CompleteMultipartUpload":
		return ObjectCreatedCompleteMultipartUpload, nil
	case "s3:ObjectCreated:Copy":
		return ObjectCreatedCopy, nil
	case "s3:ObjectCreated:Post":
		return ObjectCreatedPost, nil
	case "s3:ObjectCreated:Put":
		return ObjectCreatedPut, nil
	case "s3:ObjectRemoved:*":
		return ObjectRemovedAll, nil
	case "s3:ObjectRemoved:Delete":
		return ObjectRemovedDelete, nil
	default:
		return 0, &ErrInvalidEventName{s}
	}
}
|
|
@ -0,0 +1,220 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNameExpand(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name Name
|
||||
expectedResult []Name
|
||||
}{
|
||||
{ObjectAccessedAll, []Name{ObjectAccessedGet, ObjectAccessedHead}},
|
||||
{ObjectCreatedAll, []Name{ObjectCreatedCompleteMultipartUpload, ObjectCreatedCopy, ObjectCreatedPost, ObjectCreatedPut}},
|
||||
{ObjectRemovedAll, []Name{ObjectRemovedDelete}},
|
||||
{ObjectAccessedHead, []Name{ObjectAccessedHead}},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.name.Expand()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNameString(t *testing.T) {
|
||||
var blankName Name
|
||||
|
||||
testCases := []struct {
|
||||
name Name
|
||||
expectedResult string
|
||||
}{
|
||||
{ObjectAccessedAll, "s3:ObjectAccessed:*"},
|
||||
{ObjectAccessedGet, "s3:ObjectAccessed:Get"},
|
||||
{ObjectAccessedHead, "s3:ObjectAccessed:Head"},
|
||||
{ObjectCreatedAll, "s3:ObjectCreated:*"},
|
||||
{ObjectCreatedCompleteMultipartUpload, "s3:ObjectCreated:CompleteMultipartUpload"},
|
||||
{ObjectCreatedCopy, "s3:ObjectCreated:Copy"},
|
||||
{ObjectCreatedPost, "s3:ObjectCreated:Post"},
|
||||
{ObjectCreatedPut, "s3:ObjectCreated:Put"},
|
||||
{ObjectRemovedAll, "s3:ObjectRemoved:*"},
|
||||
{ObjectRemovedDelete, "s3:ObjectRemoved:Delete"},
|
||||
{blankName, ""},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.name.String()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNameMarshalXML(t *testing.T) {
|
||||
var blankName Name
|
||||
|
||||
testCases := []struct {
|
||||
name Name
|
||||
expectedData []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{ObjectAccessedAll, []byte("<Name>s3:ObjectAccessed:*</Name>"), false},
|
||||
{ObjectRemovedDelete, []byte("<Name>s3:ObjectRemoved:Delete</Name>"), false},
|
||||
{blankName, []byte("<Name></Name>"), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
data, err := xml.Marshal(testCase.name)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(data, testCase.expectedData) {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNameUnmarshalXML(t *testing.T) {
|
||||
var blankName Name
|
||||
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedName Name
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte("<Name>s3:ObjectAccessed:*</Name>"), ObjectAccessedAll, false},
|
||||
{[]byte("<Name>s3:ObjectRemoved:Delete</Name>"), ObjectRemovedDelete, false},
|
||||
{[]byte("<Name></Name>"), blankName, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var name Name
|
||||
err := xml.Unmarshal(testCase.data, &name)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(name, testCase.expectedName) {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedName, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNameMarshalJSON(t *testing.T) {
|
||||
var blankName Name
|
||||
|
||||
testCases := []struct {
|
||||
name Name
|
||||
expectedData []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{ObjectAccessedAll, []byte(`"s3:ObjectAccessed:*"`), false},
|
||||
{ObjectRemovedDelete, []byte(`"s3:ObjectRemoved:Delete"`), false},
|
||||
{blankName, []byte(`""`), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
data, err := json.Marshal(testCase.name)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(data, testCase.expectedData) {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNameUnmarshalJSON(t *testing.T) {
|
||||
var blankName Name
|
||||
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedName Name
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`"s3:ObjectAccessed:*"`), ObjectAccessedAll, false},
|
||||
{[]byte(`"s3:ObjectRemoved:Delete"`), ObjectRemovedDelete, false},
|
||||
{[]byte(`""`), blankName, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var name Name
|
||||
err := json.Unmarshal(testCase.data, &name)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(name, testCase.expectedName) {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedName, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseName(t *testing.T) {
|
||||
var blankName Name
|
||||
|
||||
testCases := []struct {
|
||||
s string
|
||||
expectedName Name
|
||||
expectErr bool
|
||||
}{
|
||||
{"s3:ObjectAccessed:*", ObjectAccessedAll, false},
|
||||
{"s3:ObjectRemoved:Delete", ObjectRemovedDelete, false},
|
||||
{"", blankName, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
name, err := ParseName(testCase.s)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(name, testCase.expectedName) {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedName, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,102 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/pkg/wildcard"
|
||||
)
|
||||
|
||||
// NewPattern - create new pattern for prefix/suffix.
// The prefix is always made to end in '*' and the suffix to start with
// '*', then the two are joined and any "**" produced at the seam (or
// already present) is collapsed to a single '*'.
func NewPattern(prefix, suffix string) (pattern string) {
	if prefix != "" {
		pattern = prefix
		if !strings.HasSuffix(pattern, "*") {
			pattern += "*"
		}
	}

	if suffix != "" {
		if strings.HasPrefix(suffix, "*") {
			pattern += suffix
		} else {
			pattern += "*" + suffix
		}
	}

	return strings.Replace(pattern, "**", "*", -1)
}
|
||||
|
||||
// Rules - event rules. Maps a wildcard object-name pattern (built with
// NewPattern and matched in Match) to the set of targets subscribed to it.
type Rules map[string]TargetIDSet
|
||||
|
||||
// Add - adds pattern and target ID.
// Repeated calls for the same pattern accumulate targets: the new ID is
// unioned with whatever set is already stored. rules[pattern] may be the
// zero TargetIDSet for a new pattern; Union is expected to treat that as
// empty (defined in the TargetIDSet implementation, not shown here).
func (rules Rules) Add(pattern string, targetID TargetID) {
	rules[pattern] = NewTargetIDSet(targetID).Union(rules[pattern])
}
|
||||
|
||||
// Match - returns TargetIDSet matching object name in rules.
|
||||
func (rules Rules) Match(objectName string) TargetIDSet {
|
||||
targetIDs := NewTargetIDSet()
|
||||
|
||||
for pattern, targetIDSet := range rules {
|
||||
if wildcard.MatchSimple(pattern, objectName) {
|
||||
targetIDs = targetIDs.Union(targetIDSet)
|
||||
}
|
||||
}
|
||||
|
||||
return targetIDs
|
||||
}
|
||||
|
||||
// Clone - returns copy of this rules.
|
||||
func (rules Rules) Clone() Rules {
|
||||
rulesCopy := make(Rules)
|
||||
|
||||
for pattern, targetIDSet := range rules {
|
||||
rulesCopy[pattern] = targetIDSet.Clone()
|
||||
}
|
||||
|
||||
return rulesCopy
|
||||
}
|
||||
|
||||
// Union - returns union with given rules as new rules.
|
||||
func (rules Rules) Union(rules2 Rules) Rules {
|
||||
nrules := rules.Clone()
|
||||
|
||||
for pattern, targetIDSet := range rules2 {
|
||||
nrules[pattern] = nrules[pattern].Union(targetIDSet)
|
||||
}
|
||||
|
||||
return nrules
|
||||
}
|
||||
|
||||
// Difference - returns diffrence with given rules as new rules.
|
||||
func (rules Rules) Difference(rules2 Rules) Rules {
|
||||
nrules := make(Rules)
|
||||
|
||||
for pattern, targetIDSet := range rules {
|
||||
if nv := targetIDSet.Difference(rules2[pattern]); len(nv) > 0 {
|
||||
nrules[pattern] = nv
|
||||
}
|
||||
}
|
||||
|
||||
return nrules
|
||||
}
|
|
@ -0,0 +1,275 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewPattern(t *testing.T) {
|
||||
testCases := []struct {
|
||||
prefix string
|
||||
suffix string
|
||||
expectedResult string
|
||||
}{
|
||||
{"", "", ""},
|
||||
{"*", "", "*"},
|
||||
{"", "*", "*"},
|
||||
{"images/", "", "images/*"},
|
||||
{"images/*", "", "images/*"},
|
||||
{"", "jpg", "*jpg"},
|
||||
{"", "*jpg", "*jpg"},
|
||||
{"images/", "jpg", "images/*jpg"},
|
||||
{"images/*", "jpg", "images/*jpg"},
|
||||
{"images/", "*jpg", "images/*jpg"},
|
||||
{"images/*", "*jpg", "images/*jpg"},
|
||||
{"201*/images/", "jpg", "201*/images/*jpg"},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := NewPattern(testCase.prefix, testCase.suffix)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRulesAdd(t *testing.T) {
|
||||
rulesCase1 := make(Rules)
|
||||
|
||||
rulesCase2 := make(Rules)
|
||||
rulesCase2.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase3 := make(Rules)
|
||||
rulesCase3.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase4 := make(Rules)
|
||||
rulesCase4.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase5 := make(Rules)
|
||||
|
||||
rulesCase6 := make(Rules)
|
||||
rulesCase6.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase7 := make(Rules)
|
||||
rulesCase7.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase8 := make(Rules)
|
||||
rulesCase8.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
|
||||
testCases := []struct {
|
||||
rules Rules
|
||||
pattern string
|
||||
targetID TargetID
|
||||
expectedResult int
|
||||
}{
|
||||
{rulesCase1, NewPattern("*", ""), TargetID{"1", "webhook"}, 1},
|
||||
{rulesCase2, NewPattern("*", ""), TargetID{"2", "amqp"}, 2},
|
||||
{rulesCase3, NewPattern("2010*", ""), TargetID{"1", "webhook"}, 1},
|
||||
{rulesCase4, NewPattern("*", ""), TargetID{"1", "webhook"}, 2},
|
||||
{rulesCase5, NewPattern("", "*.jpg"), TargetID{"1", "webhook"}, 1},
|
||||
{rulesCase6, NewPattern("", "*"), TargetID{"2", "amqp"}, 2},
|
||||
{rulesCase7, NewPattern("", "*.jpg"), TargetID{"1", "webhook"}, 1},
|
||||
{rulesCase8, NewPattern("", "*.jpg"), TargetID{"1", "webhook"}, 2},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
testCase.rules.Add(testCase.pattern, testCase.targetID)
|
||||
result := len(testCase.rules)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRulesMatch(t *testing.T) {
|
||||
rulesCase1 := make(Rules)
|
||||
|
||||
rulesCase2 := make(Rules)
|
||||
rulesCase2.Add(NewPattern("*", "*"), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase3 := make(Rules)
|
||||
rulesCase3.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
rulesCase3.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"})
|
||||
|
||||
rulesCase4 := make(Rules)
|
||||
rulesCase4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
|
||||
testCases := []struct {
|
||||
rules Rules
|
||||
objectName string
|
||||
expectedResult TargetIDSet
|
||||
}{
|
||||
{rulesCase1, "photos.jpg", NewTargetIDSet()},
|
||||
{rulesCase2, "photos.jpg", NewTargetIDSet(TargetID{"1", "webhook"})},
|
||||
{rulesCase3, "2010/photos.jpg", NewTargetIDSet(TargetID{"1", "webhook"})},
|
||||
{rulesCase4, "2000/photos.jpg", NewTargetIDSet()},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.rules.Match(testCase.objectName)
|
||||
|
||||
if !reflect.DeepEqual(testCase.expectedResult, result) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRulesClone(t *testing.T) {
|
||||
rulesCase1 := make(Rules)
|
||||
|
||||
rulesCase2 := make(Rules)
|
||||
rulesCase2.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase3 := make(Rules)
|
||||
rulesCase3.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"})
|
||||
|
||||
testCases := []struct {
|
||||
rules Rules
|
||||
prefix string
|
||||
targetID TargetID
|
||||
}{
|
||||
{rulesCase1, "2010*", TargetID{"1", "webhook"}},
|
||||
{rulesCase2, "2000*", TargetID{"2", "amqp"}},
|
||||
{rulesCase3, "2010*", TargetID{"1", "webhook"}},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.rules.Clone()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.rules) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.rules, result)
|
||||
}
|
||||
|
||||
result.Add(NewPattern(testCase.prefix, ""), testCase.targetID)
|
||||
if reflect.DeepEqual(result, testCase.rules) {
|
||||
t.Fatalf("test %v: result: expected: not equal, got: equal", i+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRulesUnion(t *testing.T) {
|
||||
rulesCase1 := make(Rules)
|
||||
rules2Case1 := make(Rules)
|
||||
expectedResultCase1 := make(Rules)
|
||||
|
||||
rulesCase2 := make(Rules)
|
||||
rules2Case2 := make(Rules)
|
||||
rules2Case2.Add(NewPattern("*", ""), TargetID{"1", "webhook"})
|
||||
expectedResultCase2 := make(Rules)
|
||||
expectedResultCase2.Add(NewPattern("*", ""), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase3 := make(Rules)
|
||||
rulesCase3.Add(NewPattern("", "*"), TargetID{"1", "webhook"})
|
||||
rules2Case3 := make(Rules)
|
||||
expectedResultCase3 := make(Rules)
|
||||
expectedResultCase3.Add(NewPattern("", "*"), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase4 := make(Rules)
|
||||
rulesCase4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
rules2Case4 := make(Rules)
|
||||
rules2Case4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
expectedResultCase4 := make(Rules)
|
||||
expectedResultCase4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase5 := make(Rules)
|
||||
rulesCase5.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
rulesCase5.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"})
|
||||
rules2Case5 := make(Rules)
|
||||
rules2Case5.Add(NewPattern("*", ""), TargetID{"1", "webhook"})
|
||||
expectedResultCase5 := make(Rules)
|
||||
expectedResultCase5.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
expectedResultCase5.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"})
|
||||
expectedResultCase5.Add(NewPattern("*", ""), TargetID{"1", "webhook"})
|
||||
|
||||
testCases := []struct {
|
||||
rules Rules
|
||||
rules2 Rules
|
||||
expectedResult Rules
|
||||
}{
|
||||
{rulesCase1, rules2Case1, expectedResultCase1},
|
||||
{rulesCase2, rules2Case2, expectedResultCase2},
|
||||
{rulesCase3, rules2Case3, expectedResultCase3},
|
||||
{rulesCase4, rules2Case4, expectedResultCase4},
|
||||
{rulesCase5, rules2Case5, expectedResultCase5},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.rules.Union(testCase.rules2)
|
||||
|
||||
if !reflect.DeepEqual(testCase.expectedResult, result) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRulesDifference(t *testing.T) {
|
||||
rulesCase1 := make(Rules)
|
||||
rules2Case1 := make(Rules)
|
||||
expectedResultCase1 := make(Rules)
|
||||
|
||||
rulesCase2 := make(Rules)
|
||||
rules2Case2 := make(Rules)
|
||||
rules2Case2.Add(NewPattern("*", "*"), TargetID{"1", "webhook"})
|
||||
expectedResultCase2 := make(Rules)
|
||||
|
||||
rulesCase3 := make(Rules)
|
||||
rulesCase3.Add(NewPattern("*", "*"), TargetID{"1", "webhook"})
|
||||
rules2Case3 := make(Rules)
|
||||
expectedResultCase3 := make(Rules)
|
||||
expectedResultCase3.Add(NewPattern("*", "*"), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase4 := make(Rules)
|
||||
rulesCase4.Add(NewPattern("*", "*"), TargetID{"1", "webhook"})
|
||||
rules2Case4 := make(Rules)
|
||||
rules2Case4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
rules2Case4.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"})
|
||||
expectedResultCase4 := make(Rules)
|
||||
expectedResultCase4.Add(NewPattern("*", "*"), TargetID{"1", "webhook"})
|
||||
|
||||
rulesCase5 := make(Rules)
|
||||
rulesCase5.Add(NewPattern("*", ""), TargetID{"1", "webhook"})
|
||||
rulesCase5.Add(NewPattern("", "*"), TargetID{"2", "amqp"})
|
||||
rules2Case5 := make(Rules)
|
||||
rules2Case5.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"})
|
||||
rules2Case5.Add(NewPattern("", "*"), TargetID{"2", "amqp"})
|
||||
expectedResultCase5 := make(Rules)
|
||||
expectedResultCase5.Add(NewPattern("*", ""), TargetID{"1", "webhook"})
|
||||
|
||||
testCases := []struct {
|
||||
rules Rules
|
||||
rules2 Rules
|
||||
expectedResult Rules
|
||||
}{
|
||||
{rulesCase1, rules2Case1, expectedResultCase1},
|
||||
{rulesCase2, rules2Case2, expectedResultCase2},
|
||||
{rulesCase3, rules2Case3, expectedResultCase3},
|
||||
{rulesCase4, rules2Case4, expectedResultCase4},
|
||||
{rulesCase5, rules2Case5, expectedResultCase5},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.rules.Difference(testCase.rules2)
|
||||
|
||||
if !reflect.DeepEqual(testCase.expectedResult, result) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
// RulesMap - map of rules for every event name.
|
||||
type RulesMap map[Name]Rules
|
||||
|
||||
// add - adds event names, prefixes, suffixes and target ID to rules map.
|
||||
func (rulesMap RulesMap) add(eventNames []Name, pattern string, targetID TargetID) {
|
||||
rules := make(Rules)
|
||||
rules.Add(pattern, targetID)
|
||||
|
||||
for _, eventName := range eventNames {
|
||||
for _, name := range eventName.Expand() {
|
||||
rulesMap[name] = rulesMap[name].Union(rules)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clone - returns copy of this rules map.
|
||||
func (rulesMap RulesMap) Clone() RulesMap {
|
||||
rulesMapCopy := make(RulesMap)
|
||||
|
||||
for eventName, rules := range rulesMap {
|
||||
rulesMapCopy[eventName] = rules.Clone()
|
||||
}
|
||||
|
||||
return rulesMapCopy
|
||||
}
|
||||
|
||||
// Add - adds given rules map.
|
||||
func (rulesMap RulesMap) Add(rulesMap2 RulesMap) {
|
||||
for eventName, rules := range rulesMap2 {
|
||||
rulesMap[eventName] = rules.Union(rulesMap[eventName])
|
||||
}
|
||||
}
|
||||
|
||||
// Remove - removes given rules map.
|
||||
func (rulesMap RulesMap) Remove(rulesMap2 RulesMap) {
|
||||
for eventName, rules := range rulesMap {
|
||||
if nr := rules.Difference(rulesMap2[eventName]); len(nr) != 0 {
|
||||
rulesMap[eventName] = nr
|
||||
} else {
|
||||
delete(rulesMap, eventName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Match - returns TargetIDSet matching object name and event name in rules map.
// A lookup for an unregistered event name yields the zero-value Rules, so
// Match is safe to call for any event name.
func (rulesMap RulesMap) Match(eventName Name, objectName string) TargetIDSet {
	return rulesMap[eventName].Match(objectName)
}
|
||||
|
||||
// NewRulesMap - creates new rules map with given values.
|
||||
func NewRulesMap(eventNames []Name, pattern string, targetID TargetID) RulesMap {
|
||||
// If pattern is empty, add '*' wildcard to match all.
|
||||
if pattern == "" {
|
||||
pattern = "*"
|
||||
}
|
||||
|
||||
rulesMap := make(RulesMap)
|
||||
rulesMap.add(eventNames, pattern, targetID)
|
||||
return rulesMap
|
||||
}
|
|
@ -0,0 +1,182 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRulesMapClone(t *testing.T) {
|
||||
rulesMapCase1 := make(RulesMap)
|
||||
rulesMapToAddCase1 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
|
||||
rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
rulesMapToAddCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"})
|
||||
|
||||
rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"})
|
||||
rulesMapToAddCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
|
||||
testCases := []struct {
|
||||
rulesMap RulesMap
|
||||
rulesMapToAdd RulesMap
|
||||
}{
|
||||
{rulesMapCase1, rulesMapToAddCase1},
|
||||
{rulesMapCase2, rulesMapToAddCase2},
|
||||
{rulesMapCase3, rulesMapToAddCase3},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.rulesMap.Clone()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.rulesMap) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.rulesMap, result)
|
||||
}
|
||||
|
||||
result.Add(testCase.rulesMapToAdd)
|
||||
if reflect.DeepEqual(result, testCase.rulesMap) {
|
||||
t.Fatalf("test %v: result: expected: not equal, got: equal", i+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRulesMapAdd(t *testing.T) {
|
||||
rulesMapCase1 := make(RulesMap)
|
||||
rulesMapToAddCase1 := make(RulesMap)
|
||||
expectedResultCase1 := make(RulesMap)
|
||||
|
||||
rulesMapCase2 := make(RulesMap)
|
||||
rulesMapToAddCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
expectedResultCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
|
||||
rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
rulesMapToAddCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"})
|
||||
expectedResultCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"})
|
||||
expectedResultCase3.add([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
|
||||
testCases := []struct {
|
||||
rulesMap RulesMap
|
||||
rulesMapToAdd RulesMap
|
||||
expectedResult RulesMap
|
||||
}{
|
||||
{rulesMapCase1, rulesMapToAddCase1, expectedResultCase1},
|
||||
{rulesMapCase2, rulesMapToAddCase2, expectedResultCase2},
|
||||
{rulesMapCase3, rulesMapToAddCase3, expectedResultCase3},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
testCase.rulesMap.Add(testCase.rulesMapToAdd)
|
||||
|
||||
if !reflect.DeepEqual(testCase.rulesMap, testCase.expectedResult) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, testCase.rulesMap)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRulesMapRemove(t *testing.T) {
|
||||
rulesMapCase1 := make(RulesMap)
|
||||
rulesMapToAddCase1 := make(RulesMap)
|
||||
expectedResultCase1 := make(RulesMap)
|
||||
|
||||
rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
rulesMapToAddCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
expectedResultCase2 := make(RulesMap)
|
||||
|
||||
rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"})
|
||||
rulesMapCase3.add([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
rulesMapToAddCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"})
|
||||
expectedResultCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
|
||||
testCases := []struct {
|
||||
rulesMap RulesMap
|
||||
rulesMapToAdd RulesMap
|
||||
expectedResult RulesMap
|
||||
}{
|
||||
{rulesMapCase1, rulesMapToAddCase1, expectedResultCase1},
|
||||
{rulesMapCase2, rulesMapToAddCase2, expectedResultCase2},
|
||||
{rulesMapCase3, rulesMapToAddCase3, expectedResultCase3},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
testCase.rulesMap.Remove(testCase.rulesMapToAdd)
|
||||
|
||||
if !reflect.DeepEqual(testCase.rulesMap, testCase.expectedResult) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, testCase.rulesMap)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRulesMapMatch(t *testing.T) {
|
||||
rulesMapCase1 := make(RulesMap)
|
||||
|
||||
rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"})
|
||||
|
||||
rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"})
|
||||
|
||||
rulesMapCase4 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"})
|
||||
rulesMapCase4.add([]Name{ObjectCreatedAll}, "*", TargetID{"2", "amqp"})
|
||||
|
||||
testCases := []struct {
|
||||
rulesMap RulesMap
|
||||
eventName Name
|
||||
objectName string
|
||||
expectedResult TargetIDSet
|
||||
}{
|
||||
{rulesMapCase1, ObjectCreatedPut, "2010/photo.jpg", NewTargetIDSet()},
|
||||
{rulesMapCase2, ObjectCreatedPut, "2010/photo.jpg", NewTargetIDSet(TargetID{"1", "webhook"})},
|
||||
{rulesMapCase3, ObjectCreatedPut, "2000/photo.png", NewTargetIDSet()},
|
||||
{rulesMapCase4, ObjectCreatedPut, "2000/photo.png", NewTargetIDSet(TargetID{"2", "amqp"})},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.rulesMap.Match(testCase.eventName, testCase.objectName)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewRulesMap(t *testing.T) {
|
||||
rulesMapCase1 := make(RulesMap)
|
||||
rulesMapCase1.add([]Name{ObjectAccessedGet, ObjectAccessedHead}, "*", TargetID{"1", "webhook"})
|
||||
|
||||
rulesMapCase2 := make(RulesMap)
|
||||
rulesMapCase2.add([]Name{ObjectAccessedGet, ObjectAccessedHead, ObjectCreatedPut}, "*", TargetID{"1", "webhook"})
|
||||
|
||||
rulesMapCase3 := make(RulesMap)
|
||||
rulesMapCase3.add([]Name{ObjectRemovedDelete}, "2010*.jpg", TargetID{"1", "webhook"})
|
||||
|
||||
testCases := []struct {
|
||||
eventNames []Name
|
||||
pattern string
|
||||
targetID TargetID
|
||||
expectedResult RulesMap
|
||||
}{
|
||||
{[]Name{ObjectAccessedAll}, "", TargetID{"1", "webhook"}, rulesMapCase1},
|
||||
{[]Name{ObjectAccessedAll, ObjectCreatedPut}, "", TargetID{"1", "webhook"}, rulesMapCase2},
|
||||
{[]Name{ObjectRemovedDelete}, "2010*.jpg", TargetID{"1", "webhook"}, rulesMapCase3},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := NewRulesMap(testCase.eventNames, testCase.pattern, testCase.targetID)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,150 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package target
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"net/url"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
"github.com/streadway/amqp"
|
||||
)
|
||||
|
||||
// AMQPArgs - AMQP target arguments.
// Field names mirror the JSON keys used in the server configuration.
type AMQPArgs struct {
	Enable       bool     `json:"enable"`
	URL          xnet.URL `json:"url"`
	Exchange     string   `json:"exchange"`
	RoutingKey   string   `json:"routingKey"`
	ExchangeType string   `json:"exchangeType"`
	DeliveryMode uint8    `json:"deliveryMode"`
	Mandatory    bool     `json:"mandatory"`
	Immediate    bool     `json:"immediate"`
	Durable      bool     `json:"durable"`
	Internal     bool     `json:"internal"`
	NoWait       bool     `json:"noWait"`
	AutoDeleted  bool     `json:"autoDeleted"`
}

// AMQPTarget - AMQP target.
type AMQPTarget struct {
	id   event.TargetID
	args AMQPArgs
	// conn is re-dialed by channel() when the broker connection drops;
	// connMutex serializes that check-and-redial.
	conn      *amqp.Connection
	connMutex sync.Mutex
}

// ID - returns TargetID.
func (target *AMQPTarget) ID() event.TargetID {
	return target.id
}
|
||||
|
||||
func (target *AMQPTarget) channel() (*amqp.Channel, error) {
|
||||
isAMQPClosedErr := func(err error) bool {
|
||||
if err == amqp.ErrClosed {
|
||||
return true
|
||||
}
|
||||
|
||||
if nerr, ok := err.(*net.OpError); ok {
|
||||
return (nerr.Err.Error() == "use of closed network connection")
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
target.connMutex.Lock()
|
||||
defer target.connMutex.Unlock()
|
||||
|
||||
ch, err := target.conn.Channel()
|
||||
if err == nil {
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
if !isAMQPClosedErr(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var conn *amqp.Connection
|
||||
if conn, err = amqp.Dial(target.args.URL.String()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ch, err = conn.Channel(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
target.conn = conn
|
||||
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
// Send - sends event to AMQP.
|
||||
func (target *AMQPTarget) Send(eventData event.Event) error {
|
||||
ch, err := target.channel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = ch.Close()
|
||||
}()
|
||||
|
||||
objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := eventData.S3.Bucket.Name + "/" + objectName
|
||||
|
||||
data, err := json.Marshal(event.Log{eventData.EventName, key, []event.Event{eventData}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = ch.ExchangeDeclare(target.args.Exchange, target.args.ExchangeType, target.args.Durable,
|
||||
target.args.AutoDeleted, target.args.Internal, target.args.NoWait, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ch.Publish(target.args.Exchange, target.args.RoutingKey, target.args.Mandatory,
|
||||
target.args.Immediate, amqp.Publishing{
|
||||
ContentType: "application/json",
|
||||
DeliveryMode: target.args.DeliveryMode,
|
||||
Body: data,
|
||||
})
|
||||
}
|
||||
|
||||
// Close - does nothing and available for interface compatibility.
|
||||
func (target *AMQPTarget) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewAMQPTarget - creates new AMQP target.
|
||||
func NewAMQPTarget(id string, args AMQPArgs) (*AMQPTarget, error) {
|
||||
conn, err := amqp.Dial(args.URL.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &AMQPTarget{
|
||||
id: event.TargetID{id, "amqp"},
|
||||
args: args,
|
||||
conn: conn,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,132 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package target
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5"
|
||||
)
|
||||
|
||||
// ElasticsearchArgs - Elasticsearch target arguments.
// Format selects between namespace (upsert/delete per object) and access
// (append-only) document layouts.
type ElasticsearchArgs struct {
	Enable bool     `json:"enable"`
	Format string   `json:"format"`
	URL    xnet.URL `json:"url"`
	Index  string   `json:"index"`
}

// ElasticsearchTarget - Elasticsearch target.
type ElasticsearchTarget struct {
	id     event.TargetID
	args   ElasticsearchArgs
	client *elastic.Client
}

// ID - returns target ID.
func (target *ElasticsearchTarget) ID() event.TargetID {
	return target.id
}
|
||||
|
||||
// Send - sends event to Elasticsearch.
|
||||
func (target *ElasticsearchTarget) Send(eventData event.Event) (err error) {
|
||||
var key string
|
||||
|
||||
remove := func() error {
|
||||
_, err := target.client.Delete().Index(target.args.Index).Type("event").Id(key).Do(context.Background())
|
||||
return err
|
||||
}
|
||||
|
||||
update := func() error {
|
||||
_, err := target.client.Index().Index(target.args.Index).Type("event").BodyJson(map[string]interface{}{"Records": []event.Event{eventData}}).Id(key).Do(context.Background())
|
||||
return err
|
||||
}
|
||||
|
||||
add := func() error {
|
||||
eventTime, err := time.Parse(event.AMZTimeFormat, eventData.EventTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eventTimeMS := fmt.Sprintf("%d", eventTime.UnixNano()/1000000)
|
||||
_, err = target.client.Index().Index(target.args.Index).Type("event").Timestamp(eventTimeMS).BodyJson(map[string]interface{}{"Records": []event.Event{eventData}}).Do(context.Background())
|
||||
return err
|
||||
}
|
||||
|
||||
if target.args.Format == event.NamespaceFormat {
|
||||
objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
key = eventData.S3.Bucket.Name + "/" + objectName
|
||||
if eventData.EventName == event.ObjectRemovedDelete {
|
||||
err = remove()
|
||||
} else {
|
||||
err = update()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if target.args.Format == event.AccessFormat {
|
||||
return add()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close - does nothing and available for interface compatibility.
|
||||
func (target *ElasticsearchTarget) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewElasticsearchTarget - creates new Elasticsearch target.
|
||||
func NewElasticsearchTarget(id string, args ElasticsearchArgs) (*ElasticsearchTarget, error) {
|
||||
client, err := elastic.NewClient(elastic.SetURL(args.URL.String()), elastic.SetSniff(false), elastic.SetMaxRetries(10))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
exists, err := client.IndexExists(args.Index).Do(context.Background())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
var createIndex *elastic.IndicesCreateResult
|
||||
if createIndex, err = client.CreateIndex(args.Index).Do(context.Background()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !createIndex.Acknowledged {
|
||||
return nil, fmt.Errorf("index %v not created", args.Index)
|
||||
}
|
||||
}
|
||||
|
||||
return &ElasticsearchTarget{
|
||||
id: event.TargetID{id, "elasticsearch"},
|
||||
args: args,
|
||||
client: client,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,141 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package target
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
"github.com/skyrings/skyring-common/tools/uuid"
|
||||
)
|
||||
|
||||
// HTTPClientTarget - HTTP client target.
type HTTPClientTarget struct {
	id event.TargetID
	w  http.ResponseWriter
	// eventCh carries serialized events to the streaming goroutine.
	eventCh chan []byte
	// DoneCh is closed when the streaming goroutine exits.
	DoneCh chan struct{}
	// stopCh is closed by Close() to ask the goroutine to stop.
	stopCh chan struct{}
	// isStopped/isRunning are manipulated atomically; non-zero isRunning
	// means the streaming goroutine has exited.
	isStopped uint32
	isRunning uint32
}

// ID - returns target ID.
func (target HTTPClientTarget) ID() event.TargetID {
	return target.id
}
|
||||
|
||||
func (target *HTTPClientTarget) start() {
|
||||
go func() {
|
||||
defer func() {
|
||||
atomic.AddUint32(&target.isRunning, 1)
|
||||
|
||||
// Close DoneCh to indicate we are done.
|
||||
close(target.DoneCh)
|
||||
}()
|
||||
|
||||
write := func(event []byte) error {
|
||||
if _, err := target.w.Write(event); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
target.w.(http.Flusher).Flush()
|
||||
return nil
|
||||
}
|
||||
|
||||
for {
|
||||
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
|
||||
select {
|
||||
case <-target.stopCh:
|
||||
// We are asked to stop.
|
||||
return
|
||||
case event, ok := <-target.eventCh:
|
||||
if !ok {
|
||||
// Got read error. Exit the goroutine.
|
||||
return
|
||||
}
|
||||
if err := write(event); err != nil {
|
||||
// Got write error to the client. Exit the goroutine.
|
||||
return
|
||||
}
|
||||
case <-keepAliveTicker.C:
|
||||
if err := write([]byte(" ")); err != nil {
|
||||
// Got write error to the client. Exit the goroutine.
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Send - sends event to HTTP client.
|
||||
func (target *HTTPClientTarget) Send(eventData event.Event) error {
|
||||
if atomic.LoadUint32(&target.isRunning) != 0 {
|
||||
return errors.New("closed http connection")
|
||||
}
|
||||
|
||||
data, err := json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data = append(data, byte('\n'))
|
||||
|
||||
select {
|
||||
case target.eventCh <- data:
|
||||
return nil
|
||||
case <-target.DoneCh:
|
||||
return errors.New("error in sending event")
|
||||
}
|
||||
}
|
||||
|
||||
// Close - closes underneath goroutine.
|
||||
func (target *HTTPClientTarget) Close() error {
|
||||
atomic.AddUint32(&target.isStopped, 1)
|
||||
if atomic.LoadUint32(&target.isStopped) == 1 {
|
||||
close(target.stopCh)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func mustGetNewUUID() string {
|
||||
uuid, err := uuid.New()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("%s. Unable to generate random UUID", err))
|
||||
}
|
||||
|
||||
return uuid.String()
|
||||
}
|
||||
|
||||
// NewHTTPClientTarget - creates new HTTP client target.
|
||||
func NewHTTPClientTarget(host xnet.Host, w http.ResponseWriter) *HTTPClientTarget {
|
||||
c := &HTTPClientTarget{
|
||||
id: event.TargetID{"httpclient" + "+" + mustGetNewUUID() + "+" + host.Name, host.Port.String()},
|
||||
w: w,
|
||||
eventCh: make(chan []byte),
|
||||
DoneCh: make(chan struct{}),
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
c.start()
|
||||
return c
|
||||
}
|
|
@ -0,0 +1,97 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package target
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
|
||||
sarama "gopkg.in/Shopify/sarama.v1"
|
||||
)
|
||||
|
||||
// KafkaArgs - Kafka target arguments.
type KafkaArgs struct {
	Enable  bool        `json:"enable"`
	Brokers []xnet.Host `json:"brokers"`
	Topic   string      `json:"topic"`
}

// KafkaTarget - Kafka target.
type KafkaTarget struct {
	id       event.TargetID
	args     KafkaArgs
	producer sarama.SyncProducer
}

// ID - returns target ID.
func (target *KafkaTarget) ID() event.TargetID {
	return target.id
}
|
||||
|
||||
// Send - sends event to Kafka.
|
||||
func (target *KafkaTarget) Send(eventData event.Event) error {
|
||||
objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := eventData.S3.Bucket.Name + "/" + objectName
|
||||
|
||||
data, err := json.Marshal(event.Log{eventData.EventName, key, []event.Event{eventData}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msg := sarama.ProducerMessage{
|
||||
Topic: target.args.Topic,
|
||||
Key: sarama.StringEncoder(key),
|
||||
Value: sarama.ByteEncoder(data),
|
||||
}
|
||||
_, _, err = target.producer.SendMessage(&msg)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Close - closes underneath kafka connection.
|
||||
func (target *KafkaTarget) Close() error {
|
||||
return target.producer.Close()
|
||||
}
|
||||
|
||||
// NewKafkaTarget - creates new Kafka target.
|
||||
func NewKafkaTarget(id string, args KafkaArgs) (*KafkaTarget, error) {
|
||||
config := sarama.NewConfig()
|
||||
config.Producer.RequiredAcks = sarama.WaitForAll
|
||||
config.Producer.Retry.Max = 10
|
||||
config.Producer.Return.Successes = true
|
||||
|
||||
brokers := []string{}
|
||||
for _, broker := range args.Brokers {
|
||||
brokers = append(brokers, broker.String())
|
||||
}
|
||||
producer, err := sarama.NewSyncProducer(brokers, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &KafkaTarget{
|
||||
id: event.TargetID{id, "kafka"},
|
||||
args: args,
|
||||
producer: producer,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,117 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package target
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
// MQTTArgs - MQTT target arguments.
// RootCAs is excluded from JSON (populated programmatically for TLS).
type MQTTArgs struct {
	Enable               bool           `json:"enable"`
	Broker               xnet.URL       `json:"broker"`
	Topic                string         `json:"topic"`
	QoS                  byte           `json:"qos"`
	ClientID             string         `json:"clientId"`
	User                 string         `json:"username"`
	Password             string         `json:"password"`
	MaxReconnectInterval time.Duration  `json:"reconnectInterval"`
	KeepAlive            time.Duration  `json:"keepAliveInterval"`
	RootCAs              *x509.CertPool `json:"-"`
}

// MQTTTarget - MQTT target.
type MQTTTarget struct {
	id     event.TargetID
	args   MQTTArgs
	client mqtt.Client
}

// ID - returns target ID.
func (target *MQTTTarget) ID() event.TargetID {
	return target.id
}
|
||||
|
||||
// Send - sends event to MQTT.
|
||||
func (target *MQTTTarget) Send(eventData event.Event) error {
|
||||
if !target.client.IsConnected() {
|
||||
token := target.client.Connect()
|
||||
if token.Wait() {
|
||||
if err := token.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := eventData.S3.Bucket.Name + "/" + objectName
|
||||
|
||||
data, err := json.Marshal(event.Log{eventData.EventName, key, []event.Event{eventData}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
token := target.client.Publish(target.args.Topic, target.args.QoS, false, string(data))
|
||||
|
||||
if token.Wait() {
|
||||
return token.Error()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close - does nothing and available for interface compatibility.
|
||||
func (target *MQTTTarget) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewMQTTTarget - creates new MQTT target.
|
||||
func NewMQTTTarget(id string, args MQTTArgs) (*MQTTTarget, error) {
|
||||
options := &mqtt.ClientOptions{
|
||||
ClientID: args.ClientID,
|
||||
CleanSession: true,
|
||||
Username: args.User,
|
||||
Password: args.Password,
|
||||
MaxReconnectInterval: args.MaxReconnectInterval,
|
||||
KeepAlive: args.KeepAlive,
|
||||
TLSConfig: tls.Config{RootCAs: args.RootCAs},
|
||||
}
|
||||
options.AddBroker(args.Broker.String())
|
||||
|
||||
client := mqtt.NewClient(options)
|
||||
token := client.Connect()
|
||||
if token.Wait() && token.Error() != nil {
|
||||
return nil, token.Error()
|
||||
}
|
||||
|
||||
return &MQTTTarget{
|
||||
id: event.TargetID{id, "mqtt"},
|
||||
args: args,
|
||||
client: client,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,226 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// MySQL Notifier implementation. Two formats, "namespace" and
|
||||
// "access" are supported.
|
||||
//
|
||||
// * Namespace format
|
||||
//
|
||||
// On each create or update object event in Minio Object storage
|
||||
// server, a row is created or updated in the table in MySQL. On each
|
||||
// object removal, the corresponding row is deleted from the table.
|
||||
//
|
||||
// A table with a specific structure (column names, column types, and
|
||||
// primary key/uniqueness constraint) is used. The user may set the
|
||||
// table name in the configuration. A sample SQL command that creates
|
||||
// a table with the required structure is:
|
||||
//
|
||||
// CREATE TABLE myminio (
|
||||
// key_name VARCHAR(2048),
|
||||
//     value JSON,
//     PRIMARY KEY (key_name)
|
||||
// );
|
||||
//
|
||||
// MySQL's "INSERT ... ON DUPLICATE ..." feature (UPSERT) is used
|
||||
// here. The implementation has been tested with MySQL Ver 14.14
|
||||
// Distrib 5.7.17.
|
||||
//
|
||||
// * Access format
|
||||
//
|
||||
// On each event, a row is appended to the configured table. There is
|
||||
// no deletion or modification of existing rows.
|
||||
//
|
||||
// A different table schema is used for this format. A sample SQL
|
||||
// commant that creates a table with the required structure is:
|
||||
//
|
||||
// CREATE TABLE myminio (
|
||||
// event_time TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
// event_data JSONB
|
||||
// );
|
||||
|
||||
package target
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/go-sql-driver/mysql"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
const (
	// Probe query; fails when the configured table does not yet exist.
	mysqlTableExists = `SELECT 1 FROM %s;`
	// Table schema for "namespace" format: one row per object key.
	mysqlCreateNamespaceTable = `CREATE TABLE %s (key_name VARCHAR(2048), value JSON, PRIMARY KEY (key_name));`
	// Table schema for "access" format: append-only event log.
	mysqlCreateAccessTable = `CREATE TABLE %s (event_time DATETIME NOT NULL, event_data JSON);`

	// Namespace format statements: upsert/delete keyed by object name.
	mysqlUpdateRow = `INSERT INTO %s (key_name, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value);`
	mysqlDeleteRow = `DELETE FROM %s WHERE key_name = ?;`
	// Access format statement: append one event row.
	mysqlInsertRow = `INSERT INTO %s (event_time, event_data) VALUES (?, ?);`
)
|
||||
|
||||
// MySQLArgs - MySQL target arguments.
type MySQLArgs struct {
	Enable   bool     `json:"enable"`    // whether this target is active
	Format   string   `json:"format"`    // "namespace" or "access" (see file header comment)
	DSN      string   `json:"dsnString"` // full DSN; when empty it is built from the fields below
	Table    string   `json:"table"`     // destination table name
	Host     xnet.URL `json:"host"`
	Port     string   `json:"port"`
	User     string   `json:"user"`
	Password string   `json:"password"`
	Database string   `json:"database"`
}
|
||||
|
||||
// MySQLTarget - MySQL target.
type MySQLTarget struct {
	id   event.TargetID
	args MySQLArgs
	// Statements prepared in NewMySQLTarget; only the ones matching
	// args.Format are non-nil.
	updateStmt *sql.Stmt
	deleteStmt *sql.Stmt
	insertStmt *sql.Stmt
	db         *sql.DB
}
|
||||
|
||||
// ID - returns target ID.
func (target *MySQLTarget) ID() event.TargetID {
	return target.id
}
|
||||
|
||||
// Send - sends event to MySQL.
|
||||
func (target *MySQLTarget) Send(eventData event.Event) error {
|
||||
if target.args.Format == event.NamespaceFormat {
|
||||
objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := eventData.S3.Bucket.Name + "/" + objectName
|
||||
|
||||
if eventData.EventName == event.ObjectRemovedDelete {
|
||||
_, err = target.deleteStmt.Exec(key)
|
||||
} else {
|
||||
var data []byte
|
||||
if data, err = json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = target.updateStmt.Exec(key, data)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if target.args.Format == event.AccessFormat {
|
||||
eventTime, err := time.Parse(event.AMZTimeFormat, eventData.EventTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = target.insertStmt.Exec(eventTime, data)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close - closes underneath connections to MySQL database.
|
||||
func (target *MySQLTarget) Close() error {
|
||||
if target.updateStmt != nil {
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = target.updateStmt.Close()
|
||||
}
|
||||
|
||||
if target.deleteStmt != nil {
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = target.deleteStmt.Close()
|
||||
}
|
||||
|
||||
if target.insertStmt != nil {
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = target.insertStmt.Close()
|
||||
}
|
||||
|
||||
return target.db.Close()
|
||||
}
|
||||
|
||||
// NewMySQLTarget - creates new MySQL target.
|
||||
func NewMySQLTarget(id string, args MySQLArgs) (*MySQLTarget, error) {
|
||||
if args.DSN == "" {
|
||||
config := mysql.Config{
|
||||
User: args.User,
|
||||
Passwd: args.Password,
|
||||
Net: "tcp",
|
||||
Addr: args.Host.String() + ":" + args.Port,
|
||||
DBName: args.Database,
|
||||
}
|
||||
|
||||
args.DSN = config.FormatDSN()
|
||||
}
|
||||
|
||||
db, err := sql.Open("mysql", args.DSN)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = db.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err = db.Exec(fmt.Sprintf(mysqlTableExists, args.Table)); err != nil {
|
||||
createStmt := mysqlCreateNamespaceTable
|
||||
if args.Format == event.AccessFormat {
|
||||
createStmt = mysqlCreateAccessTable
|
||||
}
|
||||
|
||||
if _, err = db.Exec(fmt.Sprintf(createStmt, args.Table)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var updateStmt, deleteStmt, insertStmt *sql.Stmt
|
||||
switch args.Format {
|
||||
case event.NamespaceFormat:
|
||||
// insert or update statement
|
||||
if updateStmt, err = db.Prepare(fmt.Sprintf(mysqlUpdateRow, args.Table)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// delete statement
|
||||
if deleteStmt, err = db.Prepare(fmt.Sprintf(mysqlDeleteRow, args.Table)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case event.AccessFormat:
|
||||
// insert statement
|
||||
if insertStmt, err = db.Prepare(fmt.Sprintf(mysqlInsertRow, args.Table)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &MySQLTarget{
|
||||
id: event.TargetID{id, "mysql"},
|
||||
args: args,
|
||||
updateStmt: updateStmt,
|
||||
deleteStmt: deleteStmt,
|
||||
insertStmt: insertStmt,
|
||||
db: db,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,143 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package target
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
"github.com/nats-io/go-nats-streaming"
|
||||
"github.com/nats-io/nats"
|
||||
)
|
||||
|
||||
// NATSArgs - NATS target arguments.
type NATSArgs struct {
	Enable       bool      `json:"enable"`
	Address      xnet.Host `json:"address"`
	Subject      string    `json:"subject"` // subject events are published to
	Username     string    `json:"username"`
	Password     string    `json:"password"`
	Token        string    `json:"token"`
	Secure       bool      `json:"secure"` // use TLS when connecting
	PingInterval int64     `json:"pingInterval"`
	// Streaming enables NATS Streaming (STAN) instead of core NATS.
	Streaming struct {
		Enable             bool   `json:"enable"`
		ClusterID          string `json:"clusterID"`
		ClientID           string `json:"clientID"` // generated if empty
		Async              bool   `json:"async"`    // use PublishAsync
		MaxPubAcksInflight int    `json:"maxPubAcksInflight"`
	} `json:"streaming"`
}
|
||||
|
||||
// NATSTarget - NATS target.
// Exactly one of natsConn/stanConn is non-nil, depending on whether
// streaming was enabled in the arguments.
type NATSTarget struct {
	id       event.TargetID
	args     NATSArgs
	natsConn *nats.Conn
	stanConn stan.Conn
}
|
||||
|
||||
// ID - returns target ID.
func (target *NATSTarget) ID() event.TargetID {
	return target.id
}
|
||||
|
||||
// Send - sends event to NATS.
|
||||
func (target *NATSTarget) Send(eventData event.Event) (err error) {
|
||||
objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := eventData.S3.Bucket.Name + "/" + objectName
|
||||
|
||||
data, err := json.Marshal(event.Log{eventData.EventName, key, []event.Event{eventData}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if target.stanConn != nil {
|
||||
if target.args.Streaming.Async {
|
||||
_, err = target.stanConn.PublishAsync(target.args.Subject, data, nil)
|
||||
} else {
|
||||
err = target.stanConn.Publish(target.args.Subject, data)
|
||||
}
|
||||
} else {
|
||||
err = target.natsConn.Publish(target.args.Subject, data)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Close - closes underneath connections to NATS server.
// The streaming connection's close error (if any) is returned; closing
// the core NATS connection has no error to report.
func (target *NATSTarget) Close() (err error) {
	if target.stanConn != nil {
		err = target.stanConn.Close()
	}

	if target.natsConn != nil {
		target.natsConn.Close()
	}

	return err
}
|
||||
|
||||
// NewNATSTarget - creates new NATS target.
|
||||
func NewNATSTarget(id string, args NATSArgs) (*NATSTarget, error) {
|
||||
var natsConn *nats.Conn
|
||||
var stanConn stan.Conn
|
||||
var err error
|
||||
|
||||
if args.Streaming.Enable {
|
||||
scheme := "nats"
|
||||
if args.Secure {
|
||||
scheme = "tls"
|
||||
}
|
||||
addressURL := scheme + "://" + args.Username + ":" + args.Password + "@" + args.Address.String()
|
||||
|
||||
clientID := args.Streaming.ClientID
|
||||
if clientID == "" {
|
||||
clientID = mustGetNewUUID()
|
||||
}
|
||||
|
||||
connOpts := []stan.Option{stan.NatsURL(addressURL)}
|
||||
if args.Streaming.MaxPubAcksInflight > 0 {
|
||||
connOpts = append(connOpts, stan.MaxPubAcksInflight(args.Streaming.MaxPubAcksInflight))
|
||||
}
|
||||
|
||||
stanConn, err = stan.Connect(args.Streaming.ClusterID, clientID, connOpts...)
|
||||
} else {
|
||||
options := nats.DefaultOptions
|
||||
options.Url = "nats://" + args.Address.String()
|
||||
options.User = args.Username
|
||||
options.Password = args.Password
|
||||
options.Token = args.Token
|
||||
options.Secure = args.Secure
|
||||
natsConn, err = options.Connect()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &NATSTarget{
|
||||
id: event.TargetID{id, "nats"},
|
||||
args: args,
|
||||
stanConn: stanConn,
|
||||
natsConn: natsConn,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,233 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// PostgreSQL Notifier implementation. Two formats, "namespace" and
|
||||
// "access" are supported.
|
||||
//
|
||||
// * Namespace format
|
||||
//
|
||||
// On each create or update object event in Minio Object storage
|
||||
// server, a row is created or updated in the table in Postgres. On
|
||||
// each object removal, the corresponding row is deleted from the
|
||||
// table.
|
||||
//
|
||||
// A table with a specific structure (column names, column types, and
|
||||
// primary key/uniqueness constraint) is used. The user may set the
|
||||
// table name in the configuration. A sample SQL command that creates
|
||||
// a table with the required structure is:
|
||||
//
|
||||
// CREATE TABLE myminio (
|
||||
// key VARCHAR PRIMARY KEY,
|
||||
// value JSONB
|
||||
// );
|
||||
//
|
||||
// PostgreSQL's "INSERT ... ON CONFLICT ... DO UPDATE ..." feature
|
||||
// (UPSERT) is used here, so the minimum version of PostgreSQL
|
||||
// required is 9.5.
|
||||
//
|
||||
// * Access format
|
||||
//
|
||||
// On each event, a row is appended to the configured table. There is
|
||||
// no deletion or modification of existing rows.
|
||||
//
|
||||
// A different table schema is used for this format. A sample SQL
|
||||
// commant that creates a table with the required structure is:
|
||||
//
|
||||
// CREATE TABLE myminio (
|
||||
// event_time TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
// event_data JSONB
|
||||
// );
|
||||
|
||||
package target
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
_ "github.com/lib/pq" // Register postgres driver
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
const (
	// Probe query; fails when the configured table does not yet exist.
	psqlTableExists = `SELECT 1 FROM %s;`
	// Table schema for "namespace" format: one row per object key.
	psqlCreateNamespaceTable = `CREATE TABLE %s (key VARCHAR PRIMARY KEY, value JSONB);`
	// Table schema for "access" format: append-only event log.
	psqlCreateAccessTable = `CREATE TABLE %s (event_time TIMESTAMP WITH TIME ZONE NOT NULL, event_data JSONB);`

	// Namespace format statements: upsert/delete keyed by object name.
	psqlUpdateRow = `INSERT INTO %s (key, value) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value;`
	psqlDeleteRow = `DELETE FROM %s WHERE key = $1;`
	// Access format statement: append one event row.
	psqlInsertRow = `INSERT INTO %s (event_time, event_data) VALUES ($1, $2);`
)
|
||||
|
||||
// PostgreSQLArgs - PostgreSQL target arguments.
type PostgreSQLArgs struct {
	Enable           bool     `json:"enable"`
	Format           string   `json:"format"` // "namespace" or "access" (see file header comment)
	ConnectionString string   `json:"connectionString"`
	Table            string   `json:"table"`    // destination table name
	Host             xnet.URL `json:"host"`     // default: localhost
	Port             string   `json:"port"`     // default: 5432
	User             string   `json:"user"`     // default: user running minio
	Password         string   `json:"password"` // default: no password
	Database         string   `json:"database"` // default: same as user
}
|
||||
|
||||
// PostgreSQLTarget - PostgreSQL target.
type PostgreSQLTarget struct {
	id   event.TargetID
	args PostgreSQLArgs
	// Statements prepared in NewPostgreSQLTarget; only the ones matching
	// args.Format are non-nil.
	updateStmt *sql.Stmt
	deleteStmt *sql.Stmt
	insertStmt *sql.Stmt
	db         *sql.DB
}
|
||||
|
||||
// ID - returns target ID.
func (target *PostgreSQLTarget) ID() event.TargetID {
	return target.id
}
|
||||
|
||||
// Send - sends event to PostgreSQL.
|
||||
func (target *PostgreSQLTarget) Send(eventData event.Event) error {
|
||||
if target.args.Format == event.NamespaceFormat {
|
||||
objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := eventData.S3.Bucket.Name + "/" + objectName
|
||||
|
||||
if eventData.EventName == event.ObjectRemovedDelete {
|
||||
_, err = target.deleteStmt.Exec(key)
|
||||
} else {
|
||||
var data []byte
|
||||
if data, err = json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = target.updateStmt.Exec(key, data)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if target.args.Format == event.AccessFormat {
|
||||
eventTime, err := time.Parse(event.AMZTimeFormat, eventData.EventTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = target.insertStmt.Exec(eventTime, data)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close - closes underneath connections to PostgreSQL database.
|
||||
func (target *PostgreSQLTarget) Close() error {
|
||||
if target.updateStmt != nil {
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = target.updateStmt.Close()
|
||||
}
|
||||
|
||||
if target.deleteStmt != nil {
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = target.deleteStmt.Close()
|
||||
}
|
||||
|
||||
if target.insertStmt != nil {
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = target.insertStmt.Close()
|
||||
}
|
||||
|
||||
return target.db.Close()
|
||||
}
|
||||
|
||||
// NewPostgreSQLTarget - creates new PostgreSQL target.
|
||||
func NewPostgreSQLTarget(id string, args PostgreSQLArgs) (*PostgreSQLTarget, error) {
|
||||
params := []string{args.ConnectionString}
|
||||
if !args.Host.IsEmpty() {
|
||||
params = append(params, "host="+args.Host.String())
|
||||
}
|
||||
if args.Port != "" {
|
||||
params = append(params, "port="+args.Port)
|
||||
}
|
||||
if args.User != "" {
|
||||
params = append(params, "user="+args.User)
|
||||
}
|
||||
if args.Password != "" {
|
||||
params = append(params, "password="+args.Password)
|
||||
}
|
||||
if args.Database != "" {
|
||||
params = append(params, "dbname="+args.Database)
|
||||
}
|
||||
connStr := strings.Join(params, " ")
|
||||
|
||||
db, err := sql.Open("postgres", connStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = db.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err = db.Exec(fmt.Sprintf(psqlTableExists, args.Table)); err != nil {
|
||||
createStmt := psqlCreateNamespaceTable
|
||||
if args.Format == event.AccessFormat {
|
||||
createStmt = psqlCreateAccessTable
|
||||
}
|
||||
|
||||
if _, err = db.Exec(fmt.Sprintf(createStmt, args.Table)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var updateStmt, deleteStmt, insertStmt *sql.Stmt
|
||||
switch args.Format {
|
||||
case event.NamespaceFormat:
|
||||
// insert or update statement
|
||||
if updateStmt, err = db.Prepare(fmt.Sprintf(psqlUpdateRow, args.Table)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// delete statement
|
||||
if deleteStmt, err = db.Prepare(fmt.Sprintf(psqlDeleteRow, args.Table)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case event.AccessFormat:
|
||||
// insert statement
|
||||
if insertStmt, err = db.Prepare(fmt.Sprintf(psqlInsertRow, args.Table)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &PostgreSQLTarget{
|
||||
id: event.TargetID{id, "postgresql"},
|
||||
args: args,
|
||||
updateStmt: updateStmt,
|
||||
deleteStmt: deleteStmt,
|
||||
insertStmt: insertStmt,
|
||||
db: db,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,156 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package target
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
// RedisArgs - Redis target arguments.
type RedisArgs struct {
	Enable   bool      `json:"enable"`
	Format   string    `json:"format"`  // "namespace" (hash) or "access" (list)
	Addr     xnet.Host `json:"address"` // host:port of the Redis server
	Password string    `json:"password"`
	Key      string    `json:"key"` // Redis key holding the hash/list
}
|
||||
|
||||
// RedisTarget - Redis target.
type RedisTarget struct {
	id   event.TargetID
	args RedisArgs
	pool *redis.Pool // connection pool; a connection is borrowed per Send
}
|
||||
|
||||
// ID - returns target ID.
func (target *RedisTarget) ID() event.TargetID {
	return target.id
}
|
||||
|
||||
// Send - sends event to Redis.
|
||||
func (target *RedisTarget) Send(eventData event.Event) error {
|
||||
conn := target.pool.Get()
|
||||
defer func() {
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = conn.Close()
|
||||
}()
|
||||
|
||||
if target.args.Format == event.NamespaceFormat {
|
||||
objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := eventData.S3.Bucket.Name + "/" + objectName
|
||||
|
||||
if eventData.EventName == event.ObjectRemovedDelete {
|
||||
_, err = conn.Do("HDEL", target.args.Key, key)
|
||||
} else {
|
||||
var data []byte
|
||||
if data, err = json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = conn.Do("HSET", target.args.Key, key, data)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if target.args.Format == event.AccessFormat {
|
||||
data, err := json.Marshal([]interface{}{eventData.EventTime, []event.Event{eventData}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = conn.Do("RPUSH", target.args.Key, data)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close - does nothing and available for interface compatibility.
|
||||
func (target *RedisTarget) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewRedisTarget - creates new Redis target.
|
||||
func NewRedisTarget(id string, args RedisArgs) (*RedisTarget, error) {
|
||||
pool := &redis.Pool{
|
||||
MaxIdle: 3,
|
||||
IdleTimeout: 2 * 60 * time.Second,
|
||||
Dial: func() (redis.Conn, error) {
|
||||
conn, err := redis.Dial("tcp", args.Addr.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if args.Password == "" {
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
if _, err = conn.Do("AUTH", args.Password); err != nil {
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
},
|
||||
TestOnBorrow: func(c redis.Conn, t time.Time) error {
|
||||
_, err := c.Do("PING")
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
conn := pool.Get()
|
||||
defer func() {
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = conn.Close()
|
||||
}()
|
||||
|
||||
if _, err := conn.Do("PING"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
typeAvailable, err := redis.String(conn.Do("TYPE", args.Key))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if typeAvailable != "none" {
|
||||
expectedType := "hash"
|
||||
if args.Format == event.AccessFormat {
|
||||
expectedType = "list"
|
||||
}
|
||||
|
||||
if typeAvailable != expectedType {
|
||||
return nil, fmt.Errorf("expected type %v does not match with available type %v", expectedType, typeAvailable)
|
||||
}
|
||||
}
|
||||
|
||||
return &RedisTarget{
|
||||
id: event.TargetID{id, "redis"},
|
||||
args: args,
|
||||
pool: pool,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,113 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package target
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/event"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
// WebhookArgs - Webhook target arguments.
type WebhookArgs struct {
	Enable   bool           `json:"enable"`
	Endpoint xnet.URL       `json:"endpoint"` // URL events are POSTed to
	RootCAs  *x509.CertPool `json:"-"`        // optional CA pool for TLS verification
}
|
||||
|
||||
// WebhookTarget - Webhook target.
type WebhookTarget struct {
	id         event.TargetID
	args       WebhookArgs
	httpClient *http.Client // configured once in NewWebhookTarget
}
|
||||
|
||||
// ID - returns target ID.
|
||||
func (target WebhookTarget) ID() event.TargetID {
|
||||
return target.id
|
||||
}
|
||||
|
||||
// Send - sends event to Webhook.
|
||||
func (target *WebhookTarget) Send(eventData event.Event) error {
|
||||
objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := eventData.S3.Bucket.Name + "/" + objectName
|
||||
|
||||
data, err := json.Marshal(event.Log{eventData.EventName, key, []event.Event{eventData}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", target.args.Endpoint.String(), bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// req.Header.Set("User-Agent", globalServerUserAgent)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := target.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// FIXME: log returned error. ignore time being.
|
||||
_ = resp.Body.Close()
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK, http.StatusAccepted, http.StatusContinue:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("sending event failed with %v", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
// Close - does nothing and available for interface compatibility.
// The HTTP client holds no resources that require explicit release.
func (target *WebhookTarget) Close() error {
	return nil
}
|
||||
|
||||
// NewWebhookTarget - creates new Webhook target.
|
||||
func NewWebhookTarget(id string, args WebhookArgs) *WebhookTarget {
|
||||
return &WebhookTarget{
|
||||
id: event.TargetID{id, "webhook"},
|
||||
args: args,
|
||||
httpClient: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{RootCAs: args.RootCAs},
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 5 * time.Second,
|
||||
KeepAlive: 5 * time.Second,
|
||||
}).DialContext,
|
||||
TLSHandshakeTimeout: 3 * time.Second,
|
||||
ResponseHeaderTimeout: 3 * time.Second,
|
||||
ExpectContinueTimeout: 2 * time.Second,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
|
@ -0,0 +1,73 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TargetID - holds identification and name strings of notification target.
// Serialized as "<ID>:<Name>" (see String and MarshalJSON).
type TargetID struct {
	ID   string
	Name string
}
|
||||
|
||||
// String - returns string representation in "<ID>:<Name>" form.
// Note: the result is not parseable by parseTargetID when Name itself
// contains a ':' (the unit tests codify this limitation).
func (tid TargetID) String() string {
	return tid.ID + ":" + tid.Name
}
|
||||
|
||||
// ToARN - converts to ARN scoped to the given region.
func (tid TargetID) ToARN(region string) ARN {
	return ARN{TargetID: tid, region: region}
}
|
||||
|
||||
// MarshalJSON - encodes to JSON data as the quoted "<ID>:<Name>" string.
func (tid TargetID) MarshalJSON() ([]byte, error) {
	return json.Marshal(tid.String())
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data.
|
||||
func (tid *TargetID) UnmarshalJSON(data []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
targetID, err := parseTargetID(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*tid = *targetID
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseTargetID - parses string to TargetID.
|
||||
func parseTargetID(s string) (*TargetID, error) {
|
||||
tokens := strings.Split(s, ":")
|
||||
if len(tokens) != 2 {
|
||||
return nil, fmt.Errorf("invalid TargetID format '%v'", s)
|
||||
}
|
||||
|
||||
return &TargetID{
|
||||
ID: tokens[0],
|
||||
Name: tokens[1],
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,117 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestTargetDString verifies TargetID.String output, including the zero
// value (":") and a Name containing a colon (not round-trippable).
func TestTargetDString(t *testing.T) {
	testCases := []struct {
		tid            TargetID
		expectedResult string
	}{
		{TargetID{}, ":"},
		{TargetID{"1", "webhook"}, "1:webhook"},
		{TargetID{"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531", "localhost:55638"}, "httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531:localhost:55638"},
	}

	for i, testCase := range testCases {
		result := testCase.tid.String()

		if result != testCase.expectedResult {
			t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
		}
	}
}
|
||||
|
||||
// TestTargetDToARN verifies ToARN embeds the TargetID and region as given,
// including the empty region.
func TestTargetDToARN(t *testing.T) {
	tid := TargetID{"1", "webhook"}
	testCases := []struct {
		tid         TargetID
		region      string
		expectedARN ARN
	}{
		{tid, "", ARN{TargetID: tid, region: ""}},
		{tid, "us-east-1", ARN{TargetID: tid, region: "us-east-1"}},
	}

	for i, testCase := range testCases {
		arn := testCase.tid.ToARN(testCase.region)

		if arn != testCase.expectedARN {
			t.Fatalf("test %v: ARN: expected: %v, got: %v", i+1, testCase.expectedARN, arn)
		}
	}
}
|
||||
|
||||
// TestTargetDMarshalJSON verifies MarshalJSON produces the quoted
// "<ID>:<Name>" string for each case; no case currently expects an error.
func TestTargetDMarshalJSON(t *testing.T) {
	testCases := []struct {
		tid          TargetID
		expectedData []byte
		expectErr    bool
	}{
		{TargetID{}, []byte(`":"`), false},
		{TargetID{"1", "webhook"}, []byte(`"1:webhook"`), false},
		{TargetID{"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531", "localhost:55638"}, []byte(`"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531:localhost:55638"`), false},
	}

	for i, testCase := range testCases {
		data, err := testCase.tid.MarshalJSON()
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}

		if !testCase.expectErr {
			if !reflect.DeepEqual(data, testCase.expectedData) {
				t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data))
			}
		}
	}
}
|
||||
|
||||
// TestTargetDUnmarshalJSON verifies UnmarshalJSON: strings without exactly
// one ':' fail (including a Name containing a colon — String output is not
// always round-trippable), valid forms populate the receiver.
func TestTargetDUnmarshalJSON(t *testing.T) {
	testCases := []struct {
		data             []byte
		expectedTargetID *TargetID
		expectErr        bool
	}{
		{[]byte(`""`), nil, true},
		{[]byte(`"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531:localhost:55638"`), nil, true},
		{[]byte(`":"`), &TargetID{}, false},
		{[]byte(`"1:webhook"`), &TargetID{"1", "webhook"}, false},
	}

	for i, testCase := range testCases {
		targetID := &TargetID{}
		err := targetID.UnmarshalJSON(testCase.data)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}

		if !testCase.expectErr {
			if *targetID != *testCase.expectedTargetID {
				t.Fatalf("test %v: TargetID: expected: %v, got: %v", i+1, testCase.expectedTargetID, targetID)
			}
		}
	}
}
|
|
@ -0,0 +1,82 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import "fmt"
|
||||
|
||||
// TargetIDSet - Set representation of TargetIDs.
|
||||
type TargetIDSet map[TargetID]struct{}
|
||||
|
||||
// ToSlice - returns TargetID slice from TargetIDSet.
|
||||
func (set TargetIDSet) ToSlice() []TargetID {
|
||||
keys := make([]TargetID, 0, len(set))
|
||||
for k := range set {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// String - returns string representation.
|
||||
func (set TargetIDSet) String() string {
|
||||
return fmt.Sprintf("%v", set.ToSlice())
|
||||
}
|
||||
|
||||
// Clone - returns copy of this set.
|
||||
func (set TargetIDSet) Clone() TargetIDSet {
|
||||
setCopy := NewTargetIDSet()
|
||||
for k, v := range set {
|
||||
setCopy[k] = v
|
||||
}
|
||||
return setCopy
|
||||
}
|
||||
|
||||
// add - adds TargetID to the set.
|
||||
func (set TargetIDSet) add(targetID TargetID) {
|
||||
set[targetID] = struct{}{}
|
||||
}
|
||||
|
||||
// Union - returns union with given set as new set.
|
||||
func (set TargetIDSet) Union(sset TargetIDSet) TargetIDSet {
|
||||
nset := set.Clone()
|
||||
|
||||
for k := range sset {
|
||||
nset.add(k)
|
||||
}
|
||||
|
||||
return nset
|
||||
}
|
||||
|
||||
// Difference - returns diffrence with given set as new set.
|
||||
func (set TargetIDSet) Difference(sset TargetIDSet) TargetIDSet {
|
||||
nset := NewTargetIDSet()
|
||||
for k := range set {
|
||||
if _, ok := sset[k]; !ok {
|
||||
nset.add(k)
|
||||
}
|
||||
}
|
||||
|
||||
return nset
|
||||
}
|
||||
|
||||
// NewTargetIDSet - creates new TargetID set with given TargetIDs.
|
||||
func NewTargetIDSet(targetIDs ...TargetID) TargetIDSet {
|
||||
set := make(TargetIDSet)
|
||||
for _, targetID := range targetIDs {
|
||||
set.add(targetID)
|
||||
}
|
||||
return set
|
||||
}
|
|
@ -0,0 +1,159 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestTargetIDSetToSlice(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set TargetIDSet
|
||||
expectedResult []TargetID
|
||||
}{
|
||||
{NewTargetIDSet(), []TargetID{}},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}), []TargetID{{"1", "webhook"}}},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"}), []TargetID{{"1", "webhook"}, {"2", "amqp"}}},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.ToSlice()
|
||||
|
||||
if len(result) != len(testCase.expectedResult) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
|
||||
for _, targetID1 := range result {
|
||||
var found bool
|
||||
for _, targetID2 := range testCase.expectedResult {
|
||||
if reflect.DeepEqual(targetID1, targetID2) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetIDSetString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set TargetIDSet
|
||||
expectedResult string
|
||||
}{
|
||||
{NewTargetIDSet(), "[]"},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}), "[1:webhook]"},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.String()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetIDSetClone(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set TargetIDSet
|
||||
targetIDToAdd TargetID
|
||||
}{
|
||||
{NewTargetIDSet(), TargetID{"1", "webhook"}},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}), TargetID{"2", "webhook"}},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"}), TargetID{"2", "webhook"}},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.Clone()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.set) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.set, result)
|
||||
}
|
||||
|
||||
result.add(testCase.targetIDToAdd)
|
||||
if reflect.DeepEqual(result, testCase.set) {
|
||||
t.Fatalf("test %v: result: expected: not equal, got: equal", i+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetIDSetUnion(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set TargetIDSet
|
||||
setToAdd TargetIDSet
|
||||
expectedResult TargetIDSet
|
||||
}{
|
||||
{NewTargetIDSet(), NewTargetIDSet(), NewTargetIDSet()},
|
||||
{NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"})},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"})},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"2", "amqp"}), NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"})},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"})},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.Union(testCase.setToAdd)
|
||||
|
||||
if !reflect.DeepEqual(testCase.expectedResult, result) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetIDSetDifference(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set TargetIDSet
|
||||
setToRemove TargetIDSet
|
||||
expectedResult TargetIDSet
|
||||
}{
|
||||
{NewTargetIDSet(), NewTargetIDSet(), NewTargetIDSet()},
|
||||
{NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet()},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"})},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"2", "amqp"}), NewTargetIDSet(TargetID{"1", "webhook"})},
|
||||
{NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet()},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.Difference(testCase.setToRemove)
|
||||
|
||||
if !reflect.DeepEqual(testCase.expectedResult, result) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTargetIDSet(t *testing.T) {
|
||||
testCases := []struct {
|
||||
targetIDs []TargetID
|
||||
expectedResult TargetIDSet
|
||||
}{
|
||||
{[]TargetID{}, NewTargetIDSet()},
|
||||
{[]TargetID{{"1", "webhook"}}, NewTargetIDSet(TargetID{"1", "webhook"})},
|
||||
{[]TargetID{{"1", "webhook"}, {"2", "amqp"}}, NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"})},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := NewTargetIDSet(testCase.targetIDs...)
|
||||
|
||||
if !reflect.DeepEqual(testCase.expectedResult, result) {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,127 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Target - event target interface
|
||||
type Target interface {
|
||||
ID() TargetID
|
||||
Send(Event) error
|
||||
Close() error
|
||||
}
|
||||
|
||||
// TargetList - holds list of targets indexed by target ID.
|
||||
type TargetList struct {
|
||||
sync.RWMutex
|
||||
targets map[TargetID]Target
|
||||
}
|
||||
|
||||
// Add - adds unique target to target list.
|
||||
func (list *TargetList) Add(target Target) error {
|
||||
list.Lock()
|
||||
defer list.Unlock()
|
||||
|
||||
if _, ok := list.targets[target.ID()]; ok {
|
||||
return fmt.Errorf("target %v already exists", target.ID())
|
||||
}
|
||||
|
||||
list.targets[target.ID()] = target
|
||||
return nil
|
||||
}
|
||||
|
||||
// Exists - checks whether target by target ID exists or not.
|
||||
func (list *TargetList) Exists(id TargetID) bool {
|
||||
list.RLock()
|
||||
defer list.RUnlock()
|
||||
|
||||
_, found := list.targets[id]
|
||||
return found
|
||||
}
|
||||
|
||||
// Remove - closes and removes targets by given target IDs.
|
||||
func (list *TargetList) Remove(ids ...TargetID) map[TargetID]error {
|
||||
list.Lock()
|
||||
defer list.Unlock()
|
||||
|
||||
errors := make(map[TargetID]error)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, id := range ids {
|
||||
if target, ok := list.targets[id]; ok {
|
||||
wg.Add(1)
|
||||
go func(id TargetID, target Target) {
|
||||
defer wg.Done()
|
||||
if err := target.Close(); err != nil {
|
||||
errors[id] = err
|
||||
}
|
||||
}(id, target)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
for _, id := range ids {
|
||||
delete(list.targets, id)
|
||||
}
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// List - returns available target IDs.
|
||||
func (list *TargetList) List() []TargetID {
|
||||
list.RLock()
|
||||
defer list.RUnlock()
|
||||
|
||||
keys := []TargetID{}
|
||||
for k := range list.targets {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
// Send - sends events to targets identified by target IDs.
|
||||
func (list *TargetList) Send(event Event, targetIDs ...TargetID) map[TargetID]error {
|
||||
list.Lock()
|
||||
defer list.Unlock()
|
||||
|
||||
errors := make(map[TargetID]error)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, id := range targetIDs {
|
||||
if target, ok := list.targets[id]; ok {
|
||||
wg.Add(1)
|
||||
go func(id TargetID, target Target) {
|
||||
defer wg.Done()
|
||||
if err := target.Send(event); err != nil {
|
||||
errors[id] = err
|
||||
}
|
||||
}(id, target)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// NewTargetList - creates TargetList.
|
||||
func NewTargetList() *TargetList {
|
||||
return &TargetList{targets: make(map[TargetID]Target)}
|
||||
}
|
|
@ -0,0 +1,272 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ExampleTarget struct {
|
||||
id TargetID
|
||||
sendErr bool
|
||||
closeErr bool
|
||||
}
|
||||
|
||||
func (target ExampleTarget) ID() TargetID {
|
||||
return target.id
|
||||
}
|
||||
|
||||
func (target ExampleTarget) Send(eventData Event) error {
|
||||
b := make([]byte, 1)
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
time.Sleep(time.Duration(b[0]) * time.Millisecond)
|
||||
|
||||
if target.sendErr {
|
||||
return errors.New("send error")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (target ExampleTarget) Close() error {
|
||||
if target.closeErr {
|
||||
return errors.New("close error")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestTargetListAdd(t *testing.T) {
|
||||
targetListCase1 := NewTargetList()
|
||||
|
||||
targetListCase2 := NewTargetList()
|
||||
if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
targetListCase3 := NewTargetList()
|
||||
if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
targetList *TargetList
|
||||
target Target
|
||||
expectedResult []TargetID
|
||||
expectErr bool
|
||||
}{
|
||||
{targetListCase1, &ExampleTarget{TargetID{"1", "webhook"}, false, false}, []TargetID{{"1", "webhook"}}, false},
|
||||
{targetListCase2, &ExampleTarget{TargetID{"1", "webhook"}, false, false}, []TargetID{{"2", "testcase"}, {"1", "webhook"}}, false},
|
||||
{targetListCase3, &ExampleTarget{TargetID{"3", "testcase"}, false, false}, nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
err := testCase.targetList.Add(testCase.target)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
result := testCase.targetList.List()
|
||||
|
||||
if len(result) != len(testCase.expectedResult) {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
|
||||
for _, targetID1 := range result {
|
||||
var found bool
|
||||
for _, targetID2 := range testCase.expectedResult {
|
||||
if reflect.DeepEqual(targetID1, targetID2) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetListExists(t *testing.T) {
|
||||
targetListCase1 := NewTargetList()
|
||||
|
||||
targetListCase2 := NewTargetList()
|
||||
if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
targetListCase3 := NewTargetList()
|
||||
if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
targetList *TargetList
|
||||
targetID TargetID
|
||||
expectedResult bool
|
||||
}{
|
||||
{targetListCase1, TargetID{"1", "webhook"}, false},
|
||||
{targetListCase2, TargetID{"1", "webhook"}, false},
|
||||
{targetListCase3, TargetID{"3", "testcase"}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.targetList.Exists(testCase.targetID)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetListRemove(t *testing.T) {
|
||||
targetListCase1 := NewTargetList()
|
||||
|
||||
targetListCase2 := NewTargetList()
|
||||
if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
targetListCase3 := NewTargetList()
|
||||
if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, true}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
targetList *TargetList
|
||||
targetID TargetID
|
||||
expectErr bool
|
||||
}{
|
||||
{targetListCase1, TargetID{"1", "webhook"}, false},
|
||||
{targetListCase2, TargetID{"1", "webhook"}, false},
|
||||
{targetListCase3, TargetID{"3", "testcase"}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
errors := testCase.targetList.Remove(testCase.targetID)
|
||||
err := errors[testCase.targetID]
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetListList(t *testing.T) {
|
||||
targetListCase1 := NewTargetList()
|
||||
|
||||
targetListCase2 := NewTargetList()
|
||||
if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
targetListCase3 := NewTargetList()
|
||||
if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := targetListCase3.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
targetList *TargetList
|
||||
expectedResult []TargetID
|
||||
}{
|
||||
{targetListCase1, []TargetID{}},
|
||||
{targetListCase2, []TargetID{{"2", "testcase"}}},
|
||||
{targetListCase3, []TargetID{{"3", "testcase"}, {"1", "webhook"}}},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.targetList.List()
|
||||
|
||||
if len(result) != len(testCase.expectedResult) {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
|
||||
for _, targetID1 := range result {
|
||||
var found bool
|
||||
for _, targetID2 := range testCase.expectedResult {
|
||||
if reflect.DeepEqual(targetID1, targetID2) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetListSend(t *testing.T) {
|
||||
targetListCase1 := NewTargetList()
|
||||
|
||||
targetListCase2 := NewTargetList()
|
||||
if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
targetListCase3 := NewTargetList()
|
||||
if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
targetListCase4 := NewTargetList()
|
||||
if err := targetListCase4.Add(&ExampleTarget{TargetID{"4", "testcase"}, true, false}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
targetList *TargetList
|
||||
targetID TargetID
|
||||
expectErr bool
|
||||
}{
|
||||
{targetListCase1, TargetID{"1", "webhook"}, false},
|
||||
{targetListCase2, TargetID{"1", "non-existent"}, false},
|
||||
{targetListCase3, TargetID{"3", "testcase"}, false},
|
||||
{targetListCase4, TargetID{"4", "testcase"}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
errors := testCase.targetList.Send(Event{}, testCase.targetID)
|
||||
err := errors[testCase.targetID]
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTargetList(t *testing.T) {
|
||||
if result := NewTargetList(); result == nil {
|
||||
t.Fatalf("test: result: expected: <non-nil>, got: <nil>")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,150 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package net
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var hostLabelRegexp = regexp.MustCompile("^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?$")
|
||||
|
||||
// Host - holds network host IP/name and its port.
|
||||
type Host struct {
|
||||
Name string
|
||||
Port Port
|
||||
IsPortSet bool
|
||||
}
|
||||
|
||||
// IsEmpty - returns whether Host is empty or not
|
||||
func (host Host) IsEmpty() bool {
|
||||
return host.Name == ""
|
||||
}
|
||||
|
||||
// String - returns string representation of Host.
|
||||
func (host Host) String() string {
|
||||
if !host.IsPortSet {
|
||||
return host.Name
|
||||
}
|
||||
|
||||
return host.Name + ":" + host.Port.String()
|
||||
}
|
||||
|
||||
// Equal - checks whether given host is equal or not.
|
||||
func (host Host) Equal(compHost Host) bool {
|
||||
return host.String() == compHost.String()
|
||||
}
|
||||
|
||||
// MarshalJSON - converts Host into JSON data
|
||||
func (host Host) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(host.String())
|
||||
}
|
||||
|
||||
// UnmarshalJSON - parses data into Host.
|
||||
func (host *Host) UnmarshalJSON(data []byte) (err error) {
|
||||
var s string
|
||||
if err = json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Allow empty string
|
||||
if s == "" {
|
||||
*host = Host{}
|
||||
return nil
|
||||
}
|
||||
|
||||
var h *Host
|
||||
if h, err = ParseHost(s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*host = *h
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseHost - parses string into Host
|
||||
func ParseHost(s string) (*Host, error) {
|
||||
isValidHost := func(host string) bool {
|
||||
if host == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// host is not a valid IPv4 or IPv6 address
|
||||
// host may be a hostname
|
||||
// refer https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names
|
||||
// why checks are done like below
|
||||
if len(host) < 1 || len(host) > 253 {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, label := range strings.Split(host, ".") {
|
||||
if len(label) < 1 || len(label) > 63 {
|
||||
return false
|
||||
}
|
||||
|
||||
if !hostLabelRegexp.MatchString(label) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
var port Port
|
||||
var isPortSet bool
|
||||
host, portStr, err := net.SplitHostPort(s)
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), "missing port in address") {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
host = s
|
||||
portStr = ""
|
||||
} else {
|
||||
if port, err = ParsePort(portStr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
isPortSet = true
|
||||
}
|
||||
|
||||
if !isValidHost(host) {
|
||||
return nil, errors.New("invalid hostname")
|
||||
}
|
||||
|
||||
return &Host{
|
||||
Name: host,
|
||||
Port: port,
|
||||
IsPortSet: isPortSet,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MustParseHost - parses given string to Host, else panics.
|
||||
func MustParseHost(s string) *Host {
|
||||
host, err := ParseHost(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return host
|
||||
}
|
|
@ -0,0 +1,236 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package net
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestHostIsEmpty(t *testing.T) {
|
||||
testCases := []struct {
|
||||
host Host
|
||||
expectedResult bool
|
||||
}{
|
||||
{Host{"", 0, false}, true},
|
||||
{Host{"", 0, true}, true},
|
||||
{Host{"play", 9000, false}, false},
|
||||
{Host{"play", 9000, true}, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.host.IsEmpty()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
host Host
|
||||
expectedStr string
|
||||
}{
|
||||
{Host{"", 0, false}, ""},
|
||||
{Host{"", 0, true}, ":0"},
|
||||
{Host{"play", 9000, false}, "play"},
|
||||
{Host{"play", 9000, true}, "play:9000"},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
str := testCase.host.String()
|
||||
|
||||
if str != testCase.expectedStr {
|
||||
t.Fatalf("test %v: string: expected: %v, got: %v", i+1, testCase.expectedStr, str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostEqual(t *testing.T) {
|
||||
testCases := []struct {
|
||||
host Host
|
||||
compHost Host
|
||||
expectedResult bool
|
||||
}{
|
||||
{Host{"", 0, false}, Host{"", 0, true}, false},
|
||||
{Host{"play", 9000, true}, Host{"play", 9000, false}, false},
|
||||
{Host{"", 0, true}, Host{"", 0, true}, true},
|
||||
{Host{"play", 9000, false}, Host{"play", 9000, false}, true},
|
||||
{Host{"play", 9000, true}, Host{"play", 9000, true}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.host.Equal(testCase.compHost)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("test %v: string: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
host Host
|
||||
expectedData []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{Host{}, []byte(`""`), false},
|
||||
{Host{"play", 0, false}, []byte(`"play"`), false},
|
||||
{Host{"play", 0, true}, []byte(`"play:0"`), false},
|
||||
{Host{"play", 9000, true}, []byte(`"play:9000"`), false},
|
||||
{Host{"play.minio.io", 0, false}, []byte(`"play.minio.io"`), false},
|
||||
{Host{"play.minio.io", 9000, true}, []byte(`"play.minio.io:9000"`), false},
|
||||
{Host{"147.75.201.93", 0, false}, []byte(`"147.75.201.93"`), false},
|
||||
{Host{"147.75.201.93", 9000, true}, []byte(`"147.75.201.93:9000"`), false},
|
||||
{Host{"play12", 0, false}, []byte(`"play12"`), false},
|
||||
{Host{"12play", 0, false}, []byte(`"12play"`), false},
|
||||
{Host{"play-minio-io", 0, false}, []byte(`"play-minio-io"`), false},
|
||||
{Host{"play--minio.io", 0, false}, []byte(`"play--minio.io"`), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
data, err := testCase.host.MarshalJSON()
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(data, testCase.expectedData) {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedHost *Host
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`""`), &Host{}, false},
|
||||
{[]byte(`"play"`), &Host{"play", 0, false}, false},
|
||||
{[]byte(`"play:0"`), &Host{"play", 0, true}, false},
|
||||
{[]byte(`"play:9000"`), &Host{"play", 9000, true}, false},
|
||||
{[]byte(`"play.minio.io"`), &Host{"play.minio.io", 0, false}, false},
|
||||
{[]byte(`"play.minio.io:9000"`), &Host{"play.minio.io", 9000, true}, false},
|
||||
{[]byte(`"147.75.201.93"`), &Host{"147.75.201.93", 0, false}, false},
|
||||
{[]byte(`"147.75.201.93:9000"`), &Host{"147.75.201.93", 9000, true}, false},
|
||||
{[]byte(`"play12"`), &Host{"play12", 0, false}, false},
|
||||
{[]byte(`"12play"`), &Host{"12play", 0, false}, false},
|
||||
{[]byte(`"play-minio-io"`), &Host{"play-minio-io", 0, false}, false},
|
||||
{[]byte(`"play--minio.io"`), &Host{"play--minio.io", 0, false}, false},
|
||||
{[]byte(`":9000"`), nil, true},
|
||||
{[]byte(`"play:"`), nil, true},
|
||||
{[]byte(`"play::"`), nil, true},
|
||||
{[]byte(`"play:90000"`), nil, true},
|
||||
{[]byte(`"play:-10"`), nil, true},
|
||||
{[]byte(`"play-"`), nil, true},
|
||||
{[]byte(`"play.minio..io"`), nil, true},
|
||||
{[]byte(`":"`), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var host Host
|
||||
err := host.UnmarshalJSON(testCase.data)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(&host, testCase.expectedHost) {
|
||||
t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedHost, host)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseHost(t *testing.T) {
|
||||
testCases := []struct {
|
||||
s string
|
||||
expectedHost *Host
|
||||
expectErr bool
|
||||
}{
|
||||
{"play", &Host{"play", 0, false}, false},
|
||||
{"play:0", &Host{"play", 0, true}, false},
|
||||
{"play:9000", &Host{"play", 9000, true}, false},
|
||||
{"play.minio.io", &Host{"play.minio.io", 0, false}, false},
|
||||
{"play.minio.io:9000", &Host{"play.minio.io", 9000, true}, false},
|
||||
{"147.75.201.93", &Host{"147.75.201.93", 0, false}, false},
|
||||
{"147.75.201.93:9000", &Host{"147.75.201.93", 9000, true}, false},
|
||||
{"play12", &Host{"play12", 0, false}, false},
|
||||
{"12play", &Host{"12play", 0, false}, false},
|
||||
{"play-minio-io", &Host{"play-minio-io", 0, false}, false},
|
||||
{"play--minio.io", &Host{"play--minio.io", 0, false}, false},
|
||||
{":9000", nil, true},
|
||||
{"play:", nil, true},
|
||||
{"play::", nil, true},
|
||||
{"play:90000", nil, true},
|
||||
{"play:-10", nil, true},
|
||||
{"play-", nil, true},
|
||||
{"play.minio..io", nil, true},
|
||||
{":", nil, true},
|
||||
{"", nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
host, err := ParseHost(testCase.s)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(host, testCase.expectedHost) {
|
||||
t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedHost, host)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMustParseHost(t *testing.T) {
|
||||
testCases := []struct {
|
||||
s string
|
||||
expectedHost *Host
|
||||
}{
|
||||
{"play", &Host{"play", 0, false}},
|
||||
{"play:0", &Host{"play", 0, true}},
|
||||
{"play:9000", &Host{"play", 9000, true}},
|
||||
{"play.minio.io", &Host{"play.minio.io", 0, false}},
|
||||
{"play.minio.io:9000", &Host{"play.minio.io", 9000, true}},
|
||||
{"147.75.201.93", &Host{"147.75.201.93", 0, false}},
|
||||
{"147.75.201.93:9000", &Host{"147.75.201.93", 9000, true}},
|
||||
{"play12", &Host{"play12", 0, false}},
|
||||
{"12play", &Host{"12play", 0, false}},
|
||||
{"play-minio-io", &Host{"play-minio-io", 0, false}},
|
||||
{"play--minio.io", &Host{"play--minio.io", 0, false}},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
host := MustParseHost(testCase.s)
|
||||
|
||||
if !reflect.DeepEqual(host, testCase.expectedHost) {
|
||||
t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedHost, host)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package net
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Port - network port number in the range 0-65535.
type Port uint16

// String - returns the decimal string representation of the port.
func (p Port) String() string {
	return strconv.Itoa(int(p))
}

// ParsePort - parses the given string as a decimal port number.
// An error is returned when the string is not an integer or the
// value falls outside the valid port range 0-65535.
func ParsePort(s string) (p Port, err error) {
	var i int
	if i, err = strconv.Atoi(s); err != nil {
		// Return a uniform error instead of leaking strconv details.
		return p, errors.New("invalid port number")
	}

	if i < 0 || i > 65535 {
		// Message grammar fixed from the original "between 0 to 65535".
		return p, errors.New("port must be between 0 and 65535")
	}

	return Port(i), nil
}

// MustParsePort - parses string into Port, else panics.
// Intended for inputs known to be valid (e.g. compile-time constants).
func MustParsePort(s string) Port {
	p, err := ParsePort(s)
	if err != nil {
		panic(err)
	}

	return p
}
|
|
@ -0,0 +1,92 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package net
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPortString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
port Port
|
||||
expectedStr string
|
||||
}{
|
||||
{Port(0), "0"},
|
||||
{Port(9000), "9000"},
|
||||
{Port(65535), "65535"},
|
||||
{Port(1024), "1024"},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
str := testCase.port.String()
|
||||
|
||||
if str != testCase.expectedStr {
|
||||
t.Fatalf("test %v: error: port: %v, got: %v", i+1, testCase.expectedStr, str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePort(t *testing.T) {
|
||||
testCases := []struct {
|
||||
s string
|
||||
expectedPort Port
|
||||
expectErr bool
|
||||
}{
|
||||
{"0", Port(0), false},
|
||||
{"9000", Port(9000), false},
|
||||
{"65535", Port(65535), false},
|
||||
{"90000", Port(0), true},
|
||||
{"-10", Port(0), true},
|
||||
{"", Port(0), true},
|
||||
{"http", Port(0), true},
|
||||
{" 1024", Port(0), true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
port, err := ParsePort(testCase.s)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if port != testCase.expectedPort {
|
||||
t.Fatalf("test %v: error: port: %v, got: %v", i+1, testCase.expectedPort, port)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMustParsePort(t *testing.T) {
|
||||
testCases := []struct {
|
||||
s string
|
||||
expectedPort Port
|
||||
}{
|
||||
{"0", Port(0)},
|
||||
{"9000", Port(9000)},
|
||||
{"65535", Port(65535)},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
port := MustParsePort(testCase.s)
|
||||
|
||||
if port != testCase.expectedPort {
|
||||
t.Fatalf("test %v: error: port: %v, got: %v", i+1, testCase.expectedPort, port)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,103 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package net
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/url"
|
||||
"path"
|
||||
)
|
||||
|
||||
// URL - improved JSON friendly url.URL.
|
||||
type URL url.URL
|
||||
|
||||
// IsEmpty - checks URL is empty or not.
|
||||
func (u URL) IsEmpty() bool {
|
||||
return u.String() == ""
|
||||
}
|
||||
|
||||
// String - returns string representation of URL.
|
||||
func (u URL) String() string {
|
||||
// if port number 80 and 443, remove for http and https scheme respectively
|
||||
if u.Host != "" {
|
||||
host := MustParseHost(u.Host)
|
||||
switch {
|
||||
case u.Scheme == "http" && host.Port == 80:
|
||||
fallthrough
|
||||
case u.Scheme == "https" && host.Port == 443:
|
||||
u.Host = host.Name
|
||||
}
|
||||
}
|
||||
|
||||
uu := url.URL(u)
|
||||
return uu.String()
|
||||
}
|
||||
|
||||
// MarshalJSON - converts to JSON string data.
|
||||
func (u URL) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(u.String())
|
||||
}
|
||||
|
||||
// UnmarshalJSON - parses given data into URL.
|
||||
func (u *URL) UnmarshalJSON(data []byte) (err error) {
|
||||
var s string
|
||||
if err = json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Allow empty string
|
||||
if s == "" {
|
||||
*u = URL{}
|
||||
return nil
|
||||
}
|
||||
|
||||
var ru *URL
|
||||
if ru, err = ParseURL(s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*u = *ru
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseURL - parses string into URL.
|
||||
func ParseURL(s string) (u *URL, err error) {
|
||||
var uu *url.URL
|
||||
if uu, err = url.Parse(s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if uu.Host == "" {
|
||||
if uu.Scheme != "" {
|
||||
return nil, errors.New("scheme appears with empty host")
|
||||
}
|
||||
} else if _, err = ParseHost(uu.Host); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Clean path in the URL.
|
||||
// Note: path.Clean() is used on purpose because in MS Windows filepath.Clean() converts
|
||||
// `/` into `\` ie `/foo` becomes `\foo`
|
||||
if uu.Path != "" {
|
||||
uu.Path = path.Clean(uu.Path)
|
||||
}
|
||||
|
||||
v := URL(*uu)
|
||||
u = &v
|
||||
return u, nil
|
||||
}
|
|
@ -0,0 +1,167 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package net
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestURLIsEmpty(t *testing.T) {
|
||||
testCases := []struct {
|
||||
url URL
|
||||
expectedResult bool
|
||||
}{
|
||||
{URL{}, true},
|
||||
{URL{Scheme: "http", Host: "play"}, false},
|
||||
{URL{Path: "path/to/play"}, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.url.IsEmpty()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestURLString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
url URL
|
||||
expectedStr string
|
||||
}{
|
||||
{URL{}, ""},
|
||||
{URL{Scheme: "http", Host: "play"}, "http://play"},
|
||||
{URL{Scheme: "https", Host: "play:443"}, "https://play"},
|
||||
{URL{Scheme: "https", Host: "play.minio.io:80"}, "https://play.minio.io:80"},
|
||||
{URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, "https://147.75.201.93:9000/"},
|
||||
{URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, "https://s3.amazonaws.com/?location"},
|
||||
{URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, "http://myminio:10000/mybucket/myobject"},
|
||||
{URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, "ftp://myftp.server:10000/myuser"},
|
||||
{URL{Path: "path/to/play"}, "path/to/play"},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
str := testCase.url.String()
|
||||
|
||||
if str != testCase.expectedStr {
|
||||
t.Fatalf("test %v: string: expected: %v, got: %v", i+1, testCase.expectedStr, str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestURLMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
url URL
|
||||
expectedData []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{URL{}, []byte(`""`), false},
|
||||
{URL{Scheme: "http", Host: "play"}, []byte(`"http://play"`), false},
|
||||
{URL{Scheme: "https", Host: "play.minio.io:0"}, []byte(`"https://play.minio.io:0"`), false},
|
||||
{URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, []byte(`"https://147.75.201.93:9000/"`), false},
|
||||
{URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, []byte(`"https://s3.amazonaws.com/?location"`), false},
|
||||
{URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, []byte(`"http://myminio:10000/mybucket/myobject"`), false},
|
||||
{URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, []byte(`"ftp://myftp.server:10000/myuser"`), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
data, err := testCase.url.MarshalJSON()
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(data, testCase.expectedData) {
|
||||
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestURLUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedURL *URL
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`""`), &URL{}, false},
|
||||
{[]byte(`"http://play"`), &URL{Scheme: "http", Host: "play"}, false},
|
||||
{[]byte(`"https://play.minio.io:0"`), &URL{Scheme: "https", Host: "play.minio.io:0"}, false},
|
||||
{[]byte(`"https://147.75.201.93:9000/"`), &URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, false},
|
||||
{[]byte(`"https://s3.amazonaws.com/?location"`), &URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, false},
|
||||
{[]byte(`"http://myminio:10000/mybucket//myobject/"`), &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, false},
|
||||
{[]byte(`"ftp://myftp.server:10000/myuser"`), &URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, false},
|
||||
{[]byte(`"myserver:1000"`), nil, true},
|
||||
{[]byte(`"http://:1000/mybucket"`), nil, true},
|
||||
{[]byte(`"https://147.75.201.93:90000/"`), nil, true},
|
||||
{[]byte(`"http:/play"`), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var url URL
|
||||
err := url.UnmarshalJSON(testCase.data)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(&url, testCase.expectedURL) {
|
||||
t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedURL, url)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseURL(t *testing.T) {
|
||||
testCases := []struct {
|
||||
s string
|
||||
expectedURL *URL
|
||||
expectErr bool
|
||||
}{
|
||||
{"http://play", &URL{Scheme: "http", Host: "play"}, false},
|
||||
{"https://play.minio.io:0", &URL{Scheme: "https", Host: "play.minio.io:0"}, false},
|
||||
{"https://147.75.201.93:9000/", &URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, false},
|
||||
{"https://s3.amazonaws.com/?location", &URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, false},
|
||||
{"http://myminio:10000/mybucket//myobject/", &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, false},
|
||||
{"ftp://myftp.server:10000/myuser", &URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, false},
|
||||
{"myserver:1000", nil, true},
|
||||
{"http://:1000/mybucket", nil, true},
|
||||
{"https://147.75.201.93:90000/", nil, true},
|
||||
{"http:/play", nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
url, err := ParseURL(testCase.s)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(url, testCase.expectedURL) {
|
||||
t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedURL, url)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,66 +0,0 @@
|
|||
# 0.10.0
|
||||
|
||||
* feature: Add a test hook (#180)
|
||||
* feature: `ParseLevel` is now case-insensitive (#326)
|
||||
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
|
||||
* performance: avoid re-allocations on `WithFields` (#335)
|
||||
|
||||
# 0.9.0
|
||||
|
||||
* logrus/text_formatter: don't emit empty msg
|
||||
* logrus/hooks/airbrake: move out of main repository
|
||||
* logrus/hooks/sentry: move out of main repository
|
||||
* logrus/hooks/papertrail: move out of main repository
|
||||
* logrus/hooks/bugsnag: move out of main repository
|
||||
* logrus/core: run tests with `-race`
|
||||
* logrus/core: detect TTY based on `stderr`
|
||||
* logrus/core: support `WithError` on logger
|
||||
* logrus/core: Solaris support
|
||||
|
||||
# 0.8.7
|
||||
|
||||
* logrus/core: fix possible race (#216)
|
||||
* logrus/doc: small typo fixes and doc improvements
|
||||
|
||||
|
||||
# 0.8.6
|
||||
|
||||
* hooks/raven: allow passing an initialized client
|
||||
|
||||
# 0.8.5
|
||||
|
||||
* logrus/core: revert #208
|
||||
|
||||
# 0.8.4
|
||||
|
||||
* formatter/text: fix data race (#218)
|
||||
|
||||
# 0.8.3
|
||||
|
||||
* logrus/core: fix entry log level (#208)
|
||||
* logrus/core: improve performance of text formatter by 40%
|
||||
* logrus/core: expose `LevelHooks` type
|
||||
* logrus/core: add support for DragonflyBSD and NetBSD
|
||||
* formatter/text: print structs more verbosely
|
||||
|
||||
# 0.8.2
|
||||
|
||||
* logrus: fix more Fatal family functions
|
||||
|
||||
# 0.8.1
|
||||
|
||||
* logrus: fix not exiting on `Fatalf` and `Fatalln`
|
||||
|
||||
# 0.8.0
|
||||
|
||||
* logrus: defaults to stderr instead of stdout
|
||||
* hooks/sentry: add special field for `*http.Request`
|
||||
* formatter/text: ignore Windows for colors
|
||||
|
||||
# 0.7.3
|
||||
|
||||
* formatter/\*: allow configuration of timestamp layout
|
||||
|
||||
# 0.7.2
|
||||
|
||||
* formatter/text: Add configuration option for time format (#158)
|
|
@ -1,21 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Simon Eskildsen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue