minio/cmd/s3-peer-rpc-handlers_test.go

/*
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
	"encoding/json"
	"path"
	"testing"
	"time"
)

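// TestRPCS3PeerSuite encapsulates the test server, the RPC auth
// configuration its clients use, and the backing disks, so the suite
// can be torn down cleanly once the tests have run.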
type TestRPCS3PeerSuite struct {
	testServer   TestServer
	testAuthConf authConfig
	disks        []string
}

// Set up the suite and start the test server.
func (s *TestRPCS3PeerSuite) SetUpSuite(t *testing.T) {
	s.testServer, s.disks = StartTestS3PeerRPCServer(t)
	s.testAuthConf = authConfig{
		serverAddr:      s.testServer.Server.Listener.Addr().String(),
		accessKey:       s.testServer.AccessKey,
		secretKey:       s.testServer.SecretKey,
		serviceEndpoint: path.Join(minioReservedBucketPath, s3Path),
		serviceName:     "S3",
	}
}

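// Tear down the suite: stop the test server and remove the test disks
// and the server's root directory.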
func (s *TestRPCS3PeerSuite) TearDownSuite(t *testing.T) {
	s.testServer.Stop()
	removeRoots(s.disks)
	removeAll(s.testServer.Root)
}

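// TestS3PeerRPC drives the suite: set up the RPC test server, run the
// handler checks, then tear everything down.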
func TestS3PeerRPC(t *testing.T) {
	// setup
	s := &TestRPCS3PeerSuite{}
	s.SetUpSuite(t)

	// run test
	s.testS3PeerRPC(t)

	// teardown
	s.TearDownSuite(t)
}

// Test S3 RPC handlers
func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) {
	// Validate that a request carrying an invalid auth token is rejected.
	args := AuthRPCArgs{AuthToken: "garbage", RequestTime: time.Now().UTC()}
	rclient := newRPCClient(s.testAuthConf.serverAddr, s.testAuthConf.serviceEndpoint, false)
	defer rclient.Close()
	err := rclient.Call("S3.SetBucketNotificationPeer", &args, &AuthRPCReply{})
	if err == nil {
		t.Fatal("expected an invalid token error, got none")
	} else if err.Error() != errInvalidToken.Error() {
		t.Fatal(err)
	}

	// Check bucket notification call works.
	BNPArgs := SetBucketNotificationPeerArgs{Bucket: "bucket", NCfg: &notificationConfig{}}
	client := newAuthRPCClient(s.testAuthConf)
	defer client.Close()
	err = client.Call("S3.SetBucketNotificationPeer", &BNPArgs, &AuthRPCReply{})
	if err != nil {
		t.Fatal(err)
	}

	// Check bucket listener update call works.
	BLPArgs := SetBucketListenerPeerArgs{Bucket: "bucket", LCfg: nil}
	err = client.Call("S3.SetBucketListenerPeer", &BLPArgs, &AuthRPCReply{})
	if err != nil {
		t.Fatal(err)
	}

	// Check bucket policy update call works.
	pCh := policyChange{IsRemove: true}
	pChBytes, err := json.Marshal(pCh)
	if err != nil {
		t.Fatal(err)
	}
	BPPArgs := SetBucketPolicyPeerArgs{Bucket: "bucket", PChBytes: pChBytes}
	err = client.Call("S3.SetBucketPolicyPeer", &BPPArgs, &AuthRPCReply{})
	if err != nil {
		t.Fatal(err)
	}

	// Check that sending an event notification works.
	evArgs := EventArgs{Event: nil, Arn: "localhost:9000"}
	err = client.Call("S3.Event", &evArgs, &AuthRPCReply{})
	if err != nil {
		t.Fatal(err)
	}
}