/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
2016-08-18 19:23:42 -04:00
package cmd
2016-05-06 14:57:04 -04:00
import (
2016-06-21 15:10:18 -04:00
"bytes"
2016-09-30 17:32:13 -04:00
"crypto/hmac"
"crypto/sha1"
2016-06-21 15:10:18 -04:00
"encoding/base64"
"encoding/hex"
2016-08-15 19:13:03 -04:00
"encoding/json"
"errors"
2016-06-21 15:10:18 -04:00
"fmt"
"io"
2016-05-06 14:57:04 -04:00
"io/ioutil"
2016-06-25 22:07:44 -04:00
"math/rand"
2016-06-21 15:10:18 -04:00
"net/http"
"net/http/httptest"
2016-06-25 22:07:44 -04:00
"net/url"
2016-05-06 14:57:04 -04:00
"os"
2016-09-16 16:06:49 -04:00
"reflect"
2016-06-21 15:10:18 -04:00
"sort"
2016-06-29 06:13:44 -04:00
"strconv"
2016-06-21 15:10:18 -04:00
"strings"
2016-06-29 06:13:44 -04:00
"sync"
2016-05-06 14:57:04 -04:00
"testing"
2016-06-21 15:10:18 -04:00
"time"
2016-07-02 22:05:16 -04:00
2016-10-05 15:48:07 -04:00
"github.com/fatih/color"
2016-07-02 22:05:16 -04:00
router "github.com/gorilla/mux"
2016-05-06 14:57:04 -04:00
)
2016-07-07 22:50:44 -04:00
// Tests should initNSLock only once.
func init ( ) {
// Initialize name space lock.
2016-08-10 16:08:11 -04:00
isDist := false
initNSLock ( isDist )
2016-10-05 15:48:07 -04:00
// Disable printing console messages during tests.
color . Output = ioutil . Discard
2016-07-07 22:50:44 -04:00
}
2016-08-30 22:22:27 -04:00
func prepareFS ( ) ( ObjectLayer , string , error ) {
fsDirs , err := getRandomDisks ( 1 )
if err != nil {
return nil , "" , err
}
2016-10-05 15:48:07 -04:00
obj , _ , err := initObjectLayer ( fsDirs , nil )
2016-08-30 22:22:27 -04:00
if err != nil {
removeRoots ( fsDirs )
return nil , "" , err
}
return obj , fsDirs [ 0 ] , nil
}
func prepareXL ( ) ( ObjectLayer , [ ] string , error ) {
nDisks := 16
fsDirs , err := getRandomDisks ( nDisks )
if err != nil {
return nil , nil , err
}
2016-10-05 15:48:07 -04:00
obj , _ , err := initObjectLayer ( fsDirs , nil )
2016-08-30 22:22:27 -04:00
if err != nil {
removeRoots ( fsDirs )
return nil , nil , err
}
return obj , fsDirs , nil
}
2016-07-25 23:39:14 -04:00
// TestErrHandler - Golang Testing.T and Testing.B, and gocheck.C satisfy this interface.
// This makes it easy to run the TestServer from any of the tests.
// Using this interface, functionalities to be used in tests can be made
// generalized, and can be integrated in benchmarks/unit tests/go check suite tests.
type TestErrHandler interface {
	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Failed() bool
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
}
2016-05-06 14:57:04 -04:00
const (
	// singleNodeTestStr denotes the single node (FS) ObjectLayer in the unit tests.
	singleNodeTestStr string = "FS"
	// xLTestStr denotes the erasure-coded (XL) ObjectLayer in the unit tests.
	xLTestStr string = "XL"
)
2016-06-25 22:07:44 -04:00
// Alphabet used when generating random bucket/object names.
// NOTE(review): the digit run is "01234569" — '7' and '8' are absent in the
// original table; preserved as-is since generated names are arbitrary.
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"

const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)
2016-06-29 06:13:44 -04:00
// Random number state used to generate unique temporary file name suffixes,
// so there's a good chance the file doesn't exist yet. Guarded by randmu.
var randN uint32
var randmu sync.Mutex

// reseed - returns a new seed every time the function is called.
func reseed() uint32 {
	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}

// nextSuffix - provides a new unique suffix every time the function is called.
func nextSuffix() string {
	randmu.Lock()
	seq := randN
	if seq == 0 {
		// First use: seed from the clock and the pid.
		seq = reseed()
	}
	// Linear congruential step; constants from Numerical Recipes.
	seq = seq*1664525 + 1013904223
	randN = seq
	randmu.Unlock()
	// Nine zero-padded decimal digits.
	return strconv.Itoa(int(1e9 + seq%1e9))[1:]
}
2016-09-16 16:06:49 -04:00
// isSameType reports whether both values have the identical dynamic type,
// compared via reflect.TypeOf.
func isSameType(obj1, obj2 interface{}) bool {
	t1 := reflect.TypeOf(obj1)
	t2 := reflect.TypeOf(obj2)
	return t1 == t2
}
2016-06-21 15:10:18 -04:00
// TestServer encapsulates an instantiation of a Minio instance with a temporary backend.
// Example usage:
// s := StartTestServer(t,"XL")
// defer s.Stop()
type TestServer struct {
Root string
Disks [ ] string
AccessKey string
SecretKey string
Server * httptest . Server
2016-08-30 22:22:27 -04:00
Obj ObjectLayer
2016-06-21 15:10:18 -04:00
}
// Starts the test server and returns the TestServer instance.
func StartTestServer ( t TestErrHandler , instanceType string ) TestServer {
// create an instance of TestServer.
testServer := TestServer { }
// create temporary backend for the test server.
2016-08-30 22:22:27 -04:00
nDisks := 16
disks , err := getRandomDisks ( nDisks )
2016-06-21 15:10:18 -04:00
if err != nil {
2016-08-30 22:22:27 -04:00
t . Fatal ( "Failed to create disks for the backend" )
2016-06-21 15:10:18 -04:00
}
2016-07-02 22:05:16 -04:00
2016-07-26 03:01:35 -04:00
root , err := newTestConfig ( "us-east-1" )
2016-06-21 15:10:18 -04:00
if err != nil {
2016-07-02 22:05:16 -04:00
t . Fatalf ( "%s" , err )
2016-06-21 15:10:18 -04:00
}
2016-07-26 03:01:35 -04:00
2016-08-30 22:22:27 -04:00
// Test Server needs to start before formatting of disks.
2016-07-26 03:01:35 -04:00
// Get credential.
credentials := serverConfig . GetCredential ( )
2016-06-21 15:10:18 -04:00
testServer . Root = root
2016-08-30 22:22:27 -04:00
testServer . Disks = disks
2016-07-02 22:05:16 -04:00
testServer . AccessKey = credentials . AccessKeyID
testServer . SecretKey = credentials . SecretAccessKey
2016-10-05 15:48:07 -04:00
objLayer , storageDisks , err := initObjectLayer ( disks , nil )
2016-08-30 22:22:27 -04:00
if err != nil {
t . Fatalf ( "Failed obtaining Temp Backend: <ERROR> %s" , err )
}
2016-10-05 15:48:07 -04:00
// Run TestServer.
testServer . Server = httptest . NewServer ( configureServerHandler (
serverCmdConfig {
disks : disks ,
storageDisks : storageDisks ,
} ,
) )
2016-08-30 22:22:27 -04:00
testServer . Obj = objLayer
objLayerMutex . Lock ( )
globalObjectAPI = objLayer
objLayerMutex . Unlock ( )
2016-07-02 22:05:16 -04:00
return testServer
}
2016-10-06 05:30:54 -04:00
// Initializes storage RPC endpoints.
// The object Layer will be a temp back used for testing purpose.
func initTestStorageRPCEndPoint(srvCmdConfig serverCmdConfig) http.Handler {
	// Register storage RPC routers on a fresh mux.
	mux := router.NewRouter()
	registerStorageRPCRouters(mux, srvCmdConfig)
	return mux
}
// StartTestStorageRPCServer - Creates a temp XL/FS backend and initializes storage RPC end points,
// then starts a test server with those storage RPC end points registered.
func StartTestStorageRPCServer(t TestErrHandler, instanceType string, diskN int) TestServer {
	// Create temporary backend disks for the test server.
	disks, err := getRandomDisks(diskN)
	if err != nil {
		t.Fatal("Failed to create disks for the backend")
	}
	root, err := newTestConfig("us-east-1")
	if err != nil {
		t.Fatalf("%s", err)
	}

	// Fetch credentials of the instance under test.
	credentials := serverConfig.GetCredential()

	testRPCServer := TestServer{
		Root:      root,
		Disks:     disks,
		AccessKey: credentials.AccessKeyID,
		SecretKey: credentials.SecretAccessKey,
	}

	// Run TestServer.
	testRPCServer.Server = httptest.NewServer(initTestStorageRPCEndPoint(serverCmdConfig{
		disks: disks,
	}))
	return testRPCServer
}
// Initializes control RPC endpoints.
2016-08-31 14:39:08 -04:00
// The object Layer will be a temp back used for testing purpose.
2016-10-05 15:48:07 -04:00
func initTestControlRPCEndPoint ( srvCmdConfig serverCmdConfig ) http . Handler {
2016-08-31 14:39:08 -04:00
// Initialize router.
muxRouter := router . NewRouter ( )
2016-10-05 15:48:07 -04:00
registerControllerRPCRouter ( muxRouter , srvCmdConfig )
2016-08-31 14:39:08 -04:00
return muxRouter
}
2016-10-06 05:30:54 -04:00
// StartTestControlRPCServer - Creates a temp XL/FS backend and initializes control RPC end points,
2016-08-31 14:39:08 -04:00
// then starts a test server with those control RPC end points registered.
2016-10-06 05:30:54 -04:00
func StartTestControlRPCServer ( t TestErrHandler , instanceType string ) TestServer {
2016-08-31 14:39:08 -04:00
// create temporary backend for the test server.
nDisks := 16
disks , err := getRandomDisks ( nDisks )
if err != nil {
t . Fatal ( "Failed to create disks for the backend" )
}
root , err := newTestConfig ( "us-east-1" )
if err != nil {
t . Fatalf ( "%s" , err )
}
2016-10-05 15:48:07 -04:00
// create an instance of TestServer.
testRPCServer := TestServer { }
2016-08-31 14:39:08 -04:00
// Get credential.
credentials := serverConfig . GetCredential ( )
testRPCServer . Root = root
testRPCServer . Disks = disks
testRPCServer . AccessKey = credentials . AccessKeyID
testRPCServer . SecretKey = credentials . SecretAccessKey
2016-10-05 15:48:07 -04:00
// create temporary backend for the test server.
objLayer , storageDisks , err := initObjectLayer ( disks , nil )
if err != nil {
t . Fatalf ( "Failed obtaining Temp Backend: <ERROR> %s" , err )
}
objLayerMutex . Lock ( )
globalObjectAPI = objLayer
objLayerMutex . Unlock ( )
2016-08-31 14:39:08 -04:00
// Run TestServer.
2016-10-05 15:48:07 -04:00
testRPCServer . Server = httptest . NewServer ( initTestControlRPCEndPoint ( serverCmdConfig {
storageDisks : storageDisks ,
} ) )
2016-08-31 14:39:08 -04:00
return testRPCServer
}
2016-07-02 22:05:16 -04:00
// Configure the server for the test run.
2016-07-26 03:01:35 -04:00
func newTestConfig ( bucketLocation string ) ( rootPath string , err error ) {
// Get test root.
rootPath , err = getTestRoot ( )
2016-07-02 22:05:16 -04:00
if err != nil {
2016-07-26 03:01:35 -04:00
return "" , err
2016-07-02 22:05:16 -04:00
}
2016-07-26 03:01:35 -04:00
2016-06-21 15:10:18 -04:00
// Do this only once here.
2016-07-02 22:05:16 -04:00
setGlobalConfigPath ( rootPath )
2016-06-21 15:10:18 -04:00
2016-07-26 03:01:35 -04:00
// Initialize server config.
if err = initConfig ( ) ; err != nil {
return "" , err
2016-06-21 15:10:18 -04:00
}
2016-07-26 03:01:35 -04:00
// Set a default region.
serverConfig . SetRegion ( bucketLocation )
// Save config.
if err = serverConfig . Save ( ) ; err != nil {
return "" , err
}
// Return root path.
return rootPath , nil
2016-06-21 15:10:18 -04:00
}
// Stop deletes the temporary backend and stops the test server.
func (testServer TestServer) Stop() {
	removeAll(testServer.Root)
	for _, diskDir := range testServer.Disks {
		removeAll(diskDir)
	}
	testServer.Server.Close()
}
2016-09-04 16:37:14 -04:00
// Sign given request using Signature V4, streaming variant. Returns the
// seed signature that subsequent chunk signatures chain from.
func signStreamingRequest(req *http.Request, accessKey, secretKey string) (string, error) {
	// The seed payload hash must already be present on the request.
	hashedPayload := req.Header.Get("x-amz-content-sha256")
	if hashedPayload == "" {
		return "", fmt.Errorf("Invalid hashed payload.")
	}

	currTime := time.Now().UTC()

	// Set x-amz-date.
	req.Header.Set("x-amz-date", currTime.Format(iso8601Format))

	// Collect signable headers (lower-cased), skipping ignored ones.
	headerMap := make(map[string][]string)
	for key, values := range req.Header {
		if _, ok := ignoredStreamingHeaders[http.CanonicalHeaderKey(key)]; !ok {
			headerMap[strings.ToLower(key)] = values
		}
	}

	// Sorted list of signed header names; "host" is always included.
	headers := []string{"host"}
	for key := range headerMap {
		headers = append(headers, key)
	}
	sort.Strings(headers)

	// Build canonical headers.
	var buf bytes.Buffer
	for _, key := range headers {
		buf.WriteString(key)
		buf.WriteByte(':')
		switch {
		case key == "host":
			buf.WriteString(req.URL.Host)
			fallthrough
		default:
			for idx, value := range headerMap[key] {
				if idx > 0 {
					buf.WriteByte(',')
				}
				buf.WriteString(value)
			}
			buf.WriteByte('\n')
		}
	}
	canonicalHeaders := buf.String()

	// Get signed headers.
	signedHeaders := strings.Join(headers, ";")

	// Canonical query string uses %20 for spaces, not '+'.
	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)

	// Get canonical URI.
	canonicalURI := getURLEncodedName(req.URL.Path)

	// canonicalRequest =
	//  <HTTPMethod>\n<CanonicalURI>\n<CanonicalQueryString>\n
	//  <CanonicalHeaders>\n<SignedHeaders>\n<HashedPayload>
	canonicalRequest := strings.Join([]string{
		req.Method,
		canonicalURI,
		req.URL.RawQuery,
		canonicalHeaders,
		signedHeaders,
		hashedPayload,
	}, "\n")

	// Scope ties the signature to date, region and service.
	scope := strings.Join([]string{
		currTime.Format(yyyymmdd),
		"us-east-1",
		"s3",
		"aws4_request",
	}, "/")

	stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" +
		scope + "\n" +
		hex.EncodeToString(sum256([]byte(canonicalRequest)))

	// Derive the signing key and compute the seed signature.
	date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
	region := sumHMAC(date, []byte("us-east-1"))
	service := sumHMAC(region, []byte("s3"))
	signingKey := sumHMAC(service, []byte("aws4_request"))
	signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))

	// Final Authorization header.
	auth := strings.Join([]string{
		"AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope,
		"SignedHeaders=" + signedHeaders,
		"Signature=" + signature,
	}, ", ")
	req.Header.Set("Authorization", auth)

	return signature, nil
}
// Returns new HTTP request object.
func newTestStreamingRequest ( method , urlStr string , dataLength , chunkSize int64 , body io . ReadSeeker ) ( * http . Request , error ) {
if method == "" {
method = "POST"
}
req , err := http . NewRequest ( method , urlStr , nil )
if err != nil {
return nil , err
}
if body == nil {
// this is added to avoid panic during ioutil.ReadAll(req.Body).
// th stack trace can be found here https://github.com/minio/minio/pull/2074 .
// This is very similar to https://github.com/golang/go/issues/7527.
req . Body = ioutil . NopCloser ( bytes . NewReader ( [ ] byte ( "" ) ) )
}
contentLength := calculateStreamContentLength ( dataLength , chunkSize )
req . Header . Set ( "x-amz-content-sha256" , "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" )
req . Header . Set ( "content-encoding" , "aws-chunked" )
req . Header . Set ( "x-amz-decoded-content-length" , strconv . FormatInt ( dataLength , 10 ) )
req . Header . Set ( "content-length" , strconv . FormatInt ( contentLength , 10 ) )
// Seek back to beginning.
body . Seek ( 0 , 0 )
2016-09-16 05:45:42 -04:00
2016-09-04 16:37:14 -04:00
// Add body
req . Body = ioutil . NopCloser ( body )
req . ContentLength = contentLength
return req , nil
}
// Returns new HTTP request object signed with streaming signature v4.
func newTestStreamingSignedRequest ( method , urlStr string , contentLength , chunkSize int64 , body io . ReadSeeker , accessKey , secretKey string ) ( * http . Request , error ) {
req , err := newTestStreamingRequest ( method , urlStr , contentLength , chunkSize , body )
if err != nil {
return nil , err
}
signature , err := signStreamingRequest ( req , accessKey , secretKey )
if err != nil {
return nil , err
}
2016-09-16 05:45:42 -04:00
regionStr := serverConfig . GetRegion ( )
2016-09-04 16:37:14 -04:00
var stream [ ] byte
var buffer [ ] byte
body . Seek ( 0 , 0 )
for {
buffer = make ( [ ] byte , chunkSize )
n , err := body . Read ( buffer )
if err != nil && err != io . EOF {
return nil , err
}
currTime := time . Now ( ) . UTC ( )
// Get scope.
scope := strings . Join ( [ ] string {
currTime . Format ( yyyymmdd ) ,
2016-09-16 05:45:42 -04:00
regionStr ,
2016-09-04 16:37:14 -04:00
"s3" ,
"aws4_request" ,
} , "/" )
stringToSign := "AWS4-HMAC-SHA256-PAYLOAD" + "\n"
stringToSign = stringToSign + currTime . Format ( iso8601Format ) + "\n"
stringToSign = stringToSign + scope + "\n"
stringToSign = stringToSign + signature + "\n"
stringToSign = stringToSign + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + "\n" // hex(sum256(""))
stringToSign = stringToSign + hex . EncodeToString ( sum256 ( buffer [ : n ] ) )
date := sumHMAC ( [ ] byte ( "AWS4" + secretKey ) , [ ] byte ( currTime . Format ( yyyymmdd ) ) )
2016-09-16 05:45:42 -04:00
region := sumHMAC ( date , [ ] byte ( regionStr ) )
2016-09-04 16:37:14 -04:00
service := sumHMAC ( region , [ ] byte ( "s3" ) )
signingKey := sumHMAC ( service , [ ] byte ( "aws4_request" ) )
signature = hex . EncodeToString ( sumHMAC ( signingKey , [ ] byte ( stringToSign ) ) )
stream = append ( stream , [ ] byte ( fmt . Sprintf ( "%x" , n ) + ";chunk-signature=" + signature + "\r\n" ) ... )
stream = append ( stream , buffer [ : n ] ... )
stream = append ( stream , [ ] byte ( "\r\n" ) ... )
if n <= 0 {
break
}
}
req . Body = ioutil . NopCloser ( bytes . NewReader ( stream ) )
return req , nil
}
2016-10-05 15:48:07 -04:00
// percentEncodeSlash replaces every '/' in s with its percent-encoded
// representation "%2F".
func percentEncodeSlash(s string) string {
	return strings.Replace(s, "/", "%2F", -1)
}
// queryEncode - encodes query values in their URL encoded form. In
// addition to the percent encoding performed by getURLEncodedName() used
// here, it also percent encodes '/' (forward slash)
func queryEncode(v url.Values) string {
	if v == nil {
		return ""
	}

	// Deterministic output: encode keys in sorted order.
	keys := make([]string, 0, len(v))
	for key := range v {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	var encoded bytes.Buffer
	for _, key := range keys {
		keyPrefix := percentEncodeSlash(getURLEncodedName(key)) + "="
		for _, value := range v[key] {
			if encoded.Len() > 0 {
				encoded.WriteByte('&')
			}
			encoded.WriteString(keyPrefix)
			encoded.WriteString(percentEncodeSlash(getURLEncodedName(value)))
		}
	}
	return encoded.String()
}
2016-09-30 17:32:13 -04:00
// preSignV2 - presign the request in following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
func preSignV2 ( req * http . Request , accessKeyID , secretAccessKey string , expires int64 ) error {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return errors . New ( "Presign cannot be generated without access and secret keys" )
}
d := time . Now ( ) . UTC ( )
// Find epoch expires when the request will expire.
epochExpires := d . Unix ( ) + expires
// Add expires header if not present.
2016-10-05 12:18:53 -04:00
expiresStr := req . Header . Get ( "Expires" )
if expiresStr == "" {
expiresStr = strconv . FormatInt ( epochExpires , 10 )
req . Header . Set ( "Expires" , expiresStr )
}
// url.RawPath will be valid if path has any encoded characters, if not it will
// be empty - in which case we need to consider url.Path (bug in net/http?)
encodedResource := req . URL . RawPath
encodedQuery := req . URL . RawQuery
if encodedResource == "" {
splits := strings . Split ( req . URL . Path , "?" )
if len ( splits ) > 0 {
encodedResource = splits [ 0 ]
}
2016-09-30 17:32:13 -04:00
}
// Get presigned string to sign.
2016-10-05 12:18:53 -04:00
stringToSign := presignV2STS ( req . Method , encodedResource , encodedQuery , req . Header , expiresStr )
2016-09-30 17:32:13 -04:00
hm := hmac . New ( sha1 . New , [ ] byte ( secretAccessKey ) )
hm . Write ( [ ] byte ( stringToSign ) )
// Calculate signature.
signature := base64 . StdEncoding . EncodeToString ( hm . Sum ( nil ) )
query := req . URL . Query ( )
// Handle specially for Google Cloud Storage.
query . Set ( "AWSAccessKeyId" , accessKeyID )
// Fill in Expires for presigned query.
query . Set ( "Expires" , strconv . FormatInt ( epochExpires , 10 ) )
// Encode query and save.
req . URL . RawQuery = queryEncode ( query )
// Save signature finally.
req . URL . RawQuery += "&Signature=" + getURLEncodedName ( signature )
// Success.
return nil
}
// Sign given request using Signature V2.
func signRequestV2 ( req * http . Request , accessKey , secretKey string ) error {
// Initial time.
d := time . Now ( ) . UTC ( )
// Add date if not present.
if date := req . Header . Get ( "Date" ) ; date == "" {
req . Header . Set ( "Date" , d . Format ( http . TimeFormat ) )
}
2016-10-05 12:18:53 -04:00
// url.RawPath will be valid if path has any encoded characters, if not it will
// be empty - in which case we need to consider url.Path (bug in net/http?)
encodedResource := req . URL . RawPath
encodedQuery := req . URL . RawQuery
if encodedResource == "" {
splits := strings . Split ( req . URL . Path , "?" )
if len ( splits ) > 0 {
encodedResource = splits [ 0 ]
}
}
2016-09-30 17:32:13 -04:00
// Calculate HMAC for secretAccessKey.
2016-10-05 12:18:53 -04:00
stringToSign := signV2STS ( req . Method , encodedResource , encodedQuery , req . Header )
2016-09-30 17:32:13 -04:00
hm := hmac . New ( sha1 . New , [ ] byte ( secretKey ) )
hm . Write ( [ ] byte ( stringToSign ) )
// Prepare auth header.
authHeader := new ( bytes . Buffer )
authHeader . WriteString ( fmt . Sprintf ( "%s %s:" , signV2Algorithm , accessKey ) )
encoder := base64 . NewEncoder ( base64 . StdEncoding , authHeader )
encoder . Write ( hm . Sum ( nil ) )
encoder . Close ( )
// Set Authorization header.
req . Header . Set ( "Authorization" , authHeader . String ( ) )
return nil
}
2016-07-10 14:10:59 -04:00
// Sign given request using Signature V4.
2016-09-30 17:32:13 -04:00
func signRequestV4 ( req * http . Request , accessKey , secretKey string ) error {
2016-07-10 14:10:59 -04:00
// Get hashed payload.
hashedPayload := req . Header . Get ( "x-amz-content-sha256" )
if hashedPayload == "" {
return fmt . Errorf ( "Invalid hashed payload." )
2016-06-21 15:10:18 -04:00
}
2016-07-10 14:10:59 -04:00
currTime := time . Now ( ) . UTC ( )
2016-06-21 15:10:18 -04:00
2016-07-10 14:10:59 -04:00
// Set x-amz-date.
req . Header . Set ( "x-amz-date" , currTime . Format ( iso8601Format ) )
2016-06-21 15:10:18 -04:00
2016-07-10 14:10:59 -04:00
// Get header map.
headerMap := make ( map [ string ] [ ] string )
for k , vv := range req . Header {
// If request header key is not in ignored headers, then add it.
if _ , ok := ignoredHeaders [ http . CanonicalHeaderKey ( k ) ] ; ! ok {
headerMap [ strings . ToLower ( k ) ] = vv
2016-06-21 15:10:18 -04:00
}
}
2016-07-10 14:10:59 -04:00
// Get header keys.
headers := [ ] string { "host" }
for k := range headerMap {
headers = append ( headers , k )
2016-06-21 15:10:18 -04:00
}
sort . Strings ( headers )
2016-09-15 02:53:42 -04:00
region := serverConfig . GetRegion ( )
2016-07-10 14:10:59 -04:00
// Get canonical headers.
var buf bytes . Buffer
2016-06-21 15:10:18 -04:00
for _ , k := range headers {
2016-07-10 14:10:59 -04:00
buf . WriteString ( k )
buf . WriteByte ( ':' )
2016-06-21 15:10:18 -04:00
switch {
case k == "host" :
2016-07-10 14:10:59 -04:00
buf . WriteString ( req . URL . Host )
2016-06-21 15:10:18 -04:00
fallthrough
default :
2016-07-10 14:10:59 -04:00
for idx , v := range headerMap [ k ] {
2016-06-21 15:10:18 -04:00
if idx > 0 {
2016-07-10 14:10:59 -04:00
buf . WriteByte ( ',' )
2016-06-21 15:10:18 -04:00
}
2016-07-10 14:10:59 -04:00
buf . WriteString ( v )
2016-06-21 15:10:18 -04:00
}
2016-07-10 14:10:59 -04:00
buf . WriteByte ( '\n' )
2016-06-21 15:10:18 -04:00
}
}
2016-07-10 14:10:59 -04:00
canonicalHeaders := buf . String ( )
2016-06-21 15:10:18 -04:00
2016-07-10 14:10:59 -04:00
// Get signed headers.
2016-06-21 15:10:18 -04:00
signedHeaders := strings . Join ( headers , ";" )
2016-07-10 14:10:59 -04:00
// Get canonical query string.
2016-06-21 15:10:18 -04:00
req . URL . RawQuery = strings . Replace ( req . URL . Query ( ) . Encode ( ) , "+" , "%20" , - 1 )
2016-07-10 14:10:59 -04:00
// Get canonical URI.
canonicalURI := getURLEncodedName ( req . URL . Path )
// Get canonical request.
2016-06-21 15:10:18 -04:00
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
canonicalRequest := strings . Join ( [ ] string {
req . Method ,
2016-07-10 14:10:59 -04:00
canonicalURI ,
2016-06-21 15:10:18 -04:00
req . URL . RawQuery ,
2016-07-10 14:10:59 -04:00
canonicalHeaders ,
2016-06-21 15:10:18 -04:00
signedHeaders ,
hashedPayload ,
} , "\n" )
2016-07-10 14:10:59 -04:00
// Get scope.
2016-06-21 15:10:18 -04:00
scope := strings . Join ( [ ] string {
2016-07-10 14:10:59 -04:00
currTime . Format ( yyyymmdd ) ,
2016-09-15 02:53:42 -04:00
region ,
2016-06-21 15:10:18 -04:00
"s3" ,
"aws4_request" ,
} , "/" )
2016-07-10 14:10:59 -04:00
stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime . Format ( iso8601Format ) + "\n"
2016-06-21 15:10:18 -04:00
stringToSign = stringToSign + scope + "\n"
stringToSign = stringToSign + hex . EncodeToString ( sum256 ( [ ] byte ( canonicalRequest ) ) )
2016-07-10 14:10:59 -04:00
date := sumHMAC ( [ ] byte ( "AWS4" + secretKey ) , [ ] byte ( currTime . Format ( yyyymmdd ) ) )
2016-09-15 02:53:42 -04:00
regionHMAC := sumHMAC ( date , [ ] byte ( region ) )
service := sumHMAC ( regionHMAC , [ ] byte ( "s3" ) )
2016-06-21 15:10:18 -04:00
signingKey := sumHMAC ( service , [ ] byte ( "aws4_request" ) )
signature := hex . EncodeToString ( sumHMAC ( signingKey , [ ] byte ( stringToSign ) ) )
// final Authorization header
parts := [ ] string {
"AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope ,
"SignedHeaders=" + signedHeaders ,
"Signature=" + signature ,
}
auth := strings . Join ( parts , ", " )
req . Header . Set ( "Authorization" , auth )
2016-07-10 14:10:59 -04:00
return nil
}
2016-09-13 22:00:01 -04:00
// getCredential builds the "<accessKey>/<scope>" credential string used in
// V4 Authorization headers.
func getCredential(accessKeyID, location string, t time.Time) string {
	scope := getScope(t, location)
	return accessKeyID + "/" + scope
}
2016-07-10 14:10:59 -04:00
// Returns new HTTP request object.
func newTestRequest ( method , urlStr string , contentLength int64 , body io . ReadSeeker ) ( * http . Request , error ) {
if method == "" {
method = "POST"
}
req , err := http . NewRequest ( method , urlStr , nil )
if err != nil {
return nil , err
}
// Add Content-Length
req . ContentLength = contentLength
// Save for subsequent use
var hashedPayload string
switch {
case body == nil :
hashedPayload = hex . EncodeToString ( sum256 ( [ ] byte { } ) )
default :
2016-09-30 17:32:13 -04:00
payloadBytes , err := ioutil . ReadAll ( body )
if err != nil {
return nil , err
2016-07-10 14:10:59 -04:00
}
hashedPayload = hex . EncodeToString ( sum256 ( payloadBytes ) )
md5Base64 := base64 . StdEncoding . EncodeToString ( sumMD5 ( payloadBytes ) )
req . Header . Set ( "Content-Md5" , md5Base64 )
}
req . Header . Set ( "x-amz-content-sha256" , hashedPayload )
// Seek back to beginning.
if body != nil {
body . Seek ( 0 , 0 )
// Add body
req . Body = ioutil . NopCloser ( body )
} else {
// this is added to avoid panic during ioutil.ReadAll(req.Body).
// th stack trace can be found here https://github.com/minio/minio/pull/2074 .
// This is very similar to https://github.com/golang/go/issues/7527.
req . Body = ioutil . NopCloser ( bytes . NewReader ( [ ] byte ( "" ) ) )
}
return req , nil
}
2016-09-30 17:32:13 -04:00
// newTestSignedRequestV2ContentType returns a V2-signed request carrying the
// given Content-Type; the request stays anonymous when credentials are empty.
func newTestSignedRequestV2ContentType(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey, contentType string) (*http.Request, error) {
	req, err := newTestRequest(method, urlStr, contentLength, body)
	if err != nil {
		return nil, err
	}

	// V2 signing does not use the payload hash header.
	req.Header.Del("x-amz-content-sha256")
	req.Header.Set("Content-Type", contentType)

	// Anonymous request return quickly.
	if accessKey == "" || secretKey == "" {
		return req, nil
	}

	if err = signRequestV2(req, accessKey, secretKey); err != nil {
		return nil, err
	}
	return req, nil
}
// Returns new HTTP request object signed with signature v2; the request
// stays anonymous when credentials are empty.
func newTestSignedRequestV2(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string) (*http.Request, error) {
	req, err := newTestRequest(method, urlStr, contentLength, body)
	if err != nil {
		return nil, err
	}

	// V2 signing does not use the payload hash header.
	req.Header.Del("x-amz-content-sha256")

	// Anonymous request return quickly.
	if accessKey == "" || secretKey == "" {
		return req, nil
	}

	if err = signRequestV2(req, accessKey, secretKey); err != nil {
		return nil, err
	}
	return req, nil
}
2016-07-10 14:10:59 -04:00
// Returns new HTTP request object signed with signature v4.
2016-09-30 17:32:13 -04:00
func newTestSignedRequestV4 ( method , urlStr string , contentLength int64 , body io . ReadSeeker , accessKey , secretKey string ) ( * http . Request , error ) {
2016-07-10 14:10:59 -04:00
req , err := newTestRequest ( method , urlStr , contentLength , body )
if err != nil {
return nil , err
}
2016-09-09 13:18:38 -04:00
// Anonymous request return quickly.
if accessKey == "" || secretKey == "" {
return req , nil
}
2016-09-30 17:32:13 -04:00
err = signRequestV4 ( req , accessKey , secretKey )
2016-07-10 14:10:59 -04:00
if err != nil {
return nil , err
}
2016-06-21 15:10:18 -04:00
return req , nil
}
2016-08-15 19:13:03 -04:00
// Return new WebRPC request object.
func newWebRPCRequest ( methodRPC , authorization string , body io . ReadSeeker ) ( * http . Request , error ) {
req , err := http . NewRequest ( "POST" , "/minio/webrpc" , nil )
if err != nil {
return nil , err
}
req . Header . Set ( "Content-Type" , "application/json" )
if authorization != "" {
req . Header . Set ( "Authorization" , "Bearer " + authorization )
}
// Seek back to beginning.
if body != nil {
body . Seek ( 0 , 0 )
// Add body
req . Body = ioutil . NopCloser ( body )
} else {
// this is added to avoid panic during ioutil.ReadAll(req.Body).
// th stack trace can be found here https://github.com/minio/minio/pull/2074 .
// This is very similar to https://github.com/golang/go/issues/7527.
req . Body = ioutil . NopCloser ( bytes . NewReader ( [ ] byte ( "" ) ) )
}
return req , nil
}
// Marshal request and return a new HTTP request object to call the webrpc
func newTestWebRPCRequest ( rpcMethod string , authorization string , data interface { } ) ( * http . Request , error ) {
type genericJSON struct {
JSONRPC string ` json:"jsonrpc" `
ID string ` json:"id" `
Method string ` json:"method" `
Params interface { } ` json:"params" `
}
encapsulatedData := genericJSON { JSONRPC : "2.0" , ID : "1" , Method : rpcMethod , Params : data }
jsonData , err := json . Marshal ( encapsulatedData )
2016-09-02 02:10:50 -04:00
if err != nil {
return nil , err
}
2016-08-15 19:13:03 -04:00
req , err := newWebRPCRequest ( rpcMethod , authorization , bytes . NewReader ( jsonData ) )
if err != nil {
return nil , err
}
return req , nil
}
type ErrWebRPC struct {
Code int ` json:"code" `
Message string ` json:"message" `
Data interface { } ` json:"data" `
}
// Unmarshal response and return the webrpc response
func getTestWebRPCResponse(resp *httptest.ResponseRecorder, data interface{}) error {
	// JSON-RPC 2.0 reply envelope.
	type rpcReply struct {
		ID      string      `json:"id"`
		JSONRPC string      `json:"jsonrpc"`
		Result  interface{} `json:"result"`
		Error   *ErrWebRPC  `json:"error"`
	}

	reply := &rpcReply{Result: &data}
	if err := json.NewDecoder(resp.Body).Decode(reply); err != nil {
		return err
	}

	// For the moment, web handlers errors code are not meaningful
	// Return only the error message
	if reply.Error != nil {
		return errors.New(reply.Error.Message)
	}
	return nil
}
2016-06-25 22:07:44 -04:00
// Seeded random source used by randString below.
var src = rand.NewSource(time.Now().UTC().UnixNano())

// Function to generate random string for bucket/object names.
// Uses the letterIdxBits/letterIdxMask/letterIdxMax masking technique so one
// rand.Int63() call yields several letter indices.
// NOTE: letterBytes and the letterIdx* constants are declared elsewhere in
// this file.
func randString(n int) string {
	b := make([]byte, n)
	// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			// Random bits exhausted; draw a fresh 63-bit value.
			cache, remain = src.Int63(), letterIdxMax
		}
		// Only keep indices that fall inside the letter alphabet; out-of-range
		// indices are discarded to keep the distribution uniform.
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		// Shift out the consumed bits for the next iteration.
		cache >>= letterIdxBits
		remain--
	}
	return string(b)
}
2016-07-08 17:28:06 -04:00
// generate random object name.
func getRandomObjectName ( ) string {
return randString ( 16 )
}
2016-06-25 22:07:44 -04:00
// generate random bucket name.
func getRandomBucketName ( ) string {
return randString ( 60 )
}
2016-07-30 16:36:43 -04:00
// TruncateWriter - Writes `n` bytes, then returns with number of bytes written.
// differs from iotest.TruncateWriter, the difference is commented in the Write method.
func TruncateWriter(w io.Writer, n int64) io.Writer {
	return &truncateWriter{w, n}
}

// truncateWriter forwards at most `n` bytes to the wrapped writer and then
// silently swallows everything that follows.
type truncateWriter struct {
	w io.Writer
	n int64
}

// Write forwards up to the remaining budget to the underlying writer and
// returns the count actually written there. Once the budget is spent it
// pretends the whole buffer was written.
func (t *truncateWriter) Write(p []byte) (n int, err error) {
	if t.n <= 0 {
		return len(p), nil
	}
	// Clamp the chunk to the remaining budget before the real write.
	limit := len(p)
	if int64(limit) > t.n {
		limit = int(t.n)
	}
	n, err = t.w.Write(p[:limit])
	t.n -= int64(n)
	// Removed from iotest.TruncateWriter.
	// Need the Write method to return truncated number of bytes written, not the size of the buffer requested to be written.
	// if err == nil {
	// 	n = len(p)
	// }
	return n, err
}
2016-07-08 21:26:04 -04:00
// NewEOFWriter returns a Writer that writes to w,
// but returns EOF error after writing n bytes.
func NewEOFWriter(w io.Writer, n int64) io.Writer {
	return &EOFWriter{w, n}
}

// EOFWriter - io.Writer implementation designed to error out with io.EOF
// after writing `n` bytes.
type EOFWriter struct {
	w io.Writer
	n int64
}

// Write forwards at most the remaining budget to the wrapped writer; once the
// budget is exhausted every call fails with io.EOF.
func (t *EOFWriter) Write(p []byte) (n int, err error) {
	if t.n <= 0 {
		return -1, io.EOF
	}
	// Clamp the chunk to the remaining budget.
	chunk := len(p)
	if int64(chunk) > t.n {
		chunk = int(t.n)
	}
	n, err = t.w.Write(p[:chunk])
	t.n -= int64(n)
	if err == nil {
		// Report the full buffer as written, mirroring a successful write.
		n = len(p)
	}
	return n, err
}
2016-06-25 22:07:44 -04:00
// construct URL for http requests for bucket operations.
func makeTestTargetURL ( endPoint , bucketName , objectName string , queryValues url . Values ) string {
urlStr := endPoint + "/"
if bucketName != "" {
urlStr = urlStr + bucketName + "/"
}
if objectName != "" {
2016-09-30 17:32:13 -04:00
urlStr = urlStr + getURLEncodedName ( objectName )
2016-06-25 22:07:44 -04:00
}
if len ( queryValues ) > 0 {
urlStr = urlStr + "?" + queryEncode ( queryValues )
}
return urlStr
}
// return URL for uploading object into the bucket.
func getPutObjectURL ( endPoint , bucketName , objectName string ) string {
return makeTestTargetURL ( endPoint , bucketName , objectName , url . Values { } )
}
2016-10-01 11:23:26 -04:00
func getPutObjectPartURL ( endPoint , bucketName , objectName , uploadID , partNumber string ) string {
queryValues := url . Values { }
queryValues . Set ( "uploadId" , uploadID )
queryValues . Set ( "partNumber" , partNumber )
return makeTestTargetURL ( endPoint , bucketName , objectName , queryValues )
}
2016-06-25 22:07:44 -04:00
// return URL for fetching object from the bucket.
func getGetObjectURL ( endPoint , bucketName , objectName string ) string {
return makeTestTargetURL ( endPoint , bucketName , objectName , url . Values { } )
}
// return URL for deleting the object from the bucket.
func getDeleteObjectURL ( endPoint , bucketName , objectName string ) string {
return makeTestTargetURL ( endPoint , bucketName , objectName , url . Values { } )
}
2016-09-02 04:59:08 -04:00
// return URL for deleting multiple objects from a bucket.
func getMultiDeleteObjectURL ( endPoint , bucketName string ) string {
queryValue := url . Values { }
queryValue . Set ( "delete" , "" )
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
2016-09-21 20:41:34 -04:00
2016-09-02 04:59:08 -04:00
}
2016-08-16 22:24:23 -04:00
// return URL for HEAD on the object.
2016-06-25 22:07:44 -04:00
func getHeadObjectURL ( endPoint , bucketName , objectName string ) string {
return makeTestTargetURL ( endPoint , bucketName , objectName , url . Values { } )
}
2016-08-16 22:24:23 -04:00
// return url to be used while copying the object.
func getCopyObjectURL ( endPoint , bucketName , objectName string ) string {
return makeTestTargetURL ( endPoint , bucketName , objectName , url . Values { } )
}
2016-08-05 01:01:58 -04:00
// return URL for inserting bucket notification.
func getPutNotificationURL ( endPoint , bucketName string ) string {
queryValue := url . Values { }
queryValue . Set ( "notification" , "" )
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
}
// return URL for fetching bucket notification.
func getGetNotificationURL ( endPoint , bucketName string ) string {
queryValue := url . Values { }
queryValue . Set ( "notification" , "" )
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
}
2016-06-25 22:07:44 -04:00
// return URL for inserting bucket policy.
func getPutPolicyURL ( endPoint , bucketName string ) string {
queryValue := url . Values { }
queryValue . Set ( "policy" , "" )
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
}
// return URL for fetching bucket policy.
func getGetPolicyURL ( endPoint , bucketName string ) string {
queryValue := url . Values { }
queryValue . Set ( "policy" , "" )
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
}
// return URL for deleting bucket policy.
func getDeletePolicyURL ( endPoint , bucketName string ) string {
2016-07-04 01:35:30 -04:00
queryValue := url . Values { }
queryValue . Set ( "policy" , "" )
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
2016-06-25 22:07:44 -04:00
}
// return URL for creating the bucket.
func getMakeBucketURL ( endPoint , bucketName string ) string {
return makeTestTargetURL ( endPoint , bucketName , "" , url . Values { } )
}
// return URL for listing buckets.
func getListBucketURL ( endPoint string ) string {
return makeTestTargetURL ( endPoint , "" , "" , url . Values { } )
}
// return URL for HEAD on the bucket.
func getHEADBucketURL ( endPoint , bucketName string ) string {
return makeTestTargetURL ( endPoint , bucketName , "" , url . Values { } )
}
// return URL for deleting the bucket.
func getDeleteBucketURL ( endPoint , bucketName string ) string {
return makeTestTargetURL ( endPoint , bucketName , "" , url . Values { } )
2016-09-09 13:18:38 -04:00
}
2016-06-25 22:07:44 -04:00
2016-09-09 13:18:38 -04:00
// return URL For fetching location of the bucket.
func getBucketLocationURL ( endPoint , bucketName string ) string {
queryValue := url . Values { }
queryValue . Set ( "location" , "" )
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
2016-06-25 22:07:44 -04:00
}
2016-07-17 15:32:05 -04:00
// return URL for listing objects in the bucket with V1 legacy API.
func getListObjectsV1URL ( endPoint , bucketName string , maxKeys string ) string {
2016-06-28 02:54:56 -04:00
queryValue := url . Values { }
if maxKeys != "" {
queryValue . Set ( "max-keys" , maxKeys )
}
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
}
2016-07-17 15:32:05 -04:00
// return URL for listing objects in the bucket with V2 API.
2016-09-10 13:44:38 -04:00
func getListObjectsV2URL ( endPoint , bucketName string , maxKeys string , fetchOwner string ) string {
2016-07-17 15:32:05 -04:00
queryValue := url . Values { }
queryValue . Set ( "list-type" , "2" ) // Enables list objects V2 URL.
if maxKeys != "" {
queryValue . Set ( "max-keys" , maxKeys )
}
2016-09-10 13:44:38 -04:00
if fetchOwner != "" {
queryValue . Set ( "fetch-owner" , fetchOwner )
}
2016-07-17 15:32:05 -04:00
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
}
2016-06-28 02:54:56 -04:00
// return URL for a new multipart upload.
func getNewMultipartURL ( endPoint , bucketName , objectName string ) string {
queryValue := url . Values { }
queryValue . Set ( "uploads" , "" )
return makeTestTargetURL ( endPoint , bucketName , objectName , queryValue )
}
// return URL for a new multipart upload.
func getPartUploadURL ( endPoint , bucketName , objectName , uploadID , partNumber string ) string {
queryValues := url . Values { }
queryValues . Set ( "uploadId" , uploadID )
queryValues . Set ( "partNumber" , partNumber )
return makeTestTargetURL ( endPoint , bucketName , objectName , queryValues )
}
// return URL for aborting multipart upload.
func getAbortMultipartUploadURL ( endPoint , bucketName , objectName , uploadID string ) string {
queryValue := url . Values { }
queryValue . Set ( "uploadId" , uploadID )
return makeTestTargetURL ( endPoint , bucketName , objectName , queryValue )
}
2016-09-13 22:00:01 -04:00
// return URL for a listing pending multipart uploads.
2016-06-28 02:54:56 -04:00
func getListMultipartURL ( endPoint , bucketName string ) string {
queryValue := url . Values { }
queryValue . Set ( "uploads" , "" )
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
}
2016-09-13 22:00:01 -04:00
// return URL for listing pending multipart uploads with parameters.
func getListMultipartUploadsURLWithParams ( endPoint , bucketName , prefix , keyMarker , uploadIDMarker , delimiter , maxUploads string ) string {
queryValue := url . Values { }
queryValue . Set ( "uploads" , "" )
queryValue . Set ( "prefix" , prefix )
queryValue . Set ( "delimiter" , delimiter )
queryValue . Set ( "key-marker" , keyMarker )
queryValue . Set ( "upload-id-marker" , uploadIDMarker )
queryValue . Set ( "max-uploads" , maxUploads )
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
}
// return URL for a listing parts on a given upload id.
2016-10-03 11:54:57 -04:00
func getListMultipartURLWithParams ( endPoint , bucketName , objectName , uploadID , maxParts , partNumberMarker , encoding string ) string {
2016-06-28 02:54:56 -04:00
queryValues := url . Values { }
queryValues . Set ( "uploadId" , uploadID )
queryValues . Set ( "max-parts" , maxParts )
2016-10-03 11:54:57 -04:00
if partNumberMarker != "" {
queryValues . Set ( "part-number-marker" , partNumberMarker )
}
2016-06-28 02:54:56 -04:00
return makeTestTargetURL ( endPoint , bucketName , objectName , queryValues )
}
// return URL for completing multipart upload.
// complete multipart upload request is sent after all parts are uploaded.
func getCompleteMultipartUploadURL ( endPoint , bucketName , objectName , uploadID string ) string {
queryValue := url . Values { }
queryValue . Set ( "uploadId" , uploadID )
return makeTestTargetURL ( endPoint , bucketName , objectName , queryValue )
}
2016-09-21 20:41:34 -04:00
// return URL for put bucket notification.
func getPutBucketNotificationURL ( endPoint , bucketName string ) string {
return getGetBucketNotificationURL ( endPoint , bucketName )
}
// return URL for get bucket notification.
func getGetBucketNotificationURL ( endPoint , bucketName string ) string {
queryValue := url . Values { }
queryValue . Set ( "notification" , "" )
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
}
2016-09-28 04:08:03 -04:00
// return URL for listen bucket notification.
func getListenBucketNotificationURL ( endPoint , bucketName , prefix , suffix string , events [ ] string ) string {
queryValue := url . Values { }
queryValue . Set ( "prefix" , prefix )
queryValue . Set ( "suffix" , suffix )
queryValue [ "events" ] = events
return makeTestTargetURL ( endPoint , bucketName , "" , queryValue )
}
2016-06-21 15:10:18 -04:00
// returns temp root directory.
func getTestRoot() (string, error) {
	// Fresh scratch directory under the system temp dir, prefixed "api-".
	dir, err := ioutil.TempDir(os.TempDir(), "api-")
	return dir, err
}
2016-08-30 22:22:27 -04:00
// getRandomDisks - Creates a slice of N random disks, each of the form - minio-XXX
func getRandomDisks ( N int ) ( [ ] string , error ) {
2016-06-07 21:15:04 -04:00
var erasureDisks [ ] string
2016-08-30 22:22:27 -04:00
for i := 0 ; i < N ; i ++ {
2016-06-07 21:15:04 -04:00
path , err := ioutil . TempDir ( os . TempDir ( ) , "minio-" )
2016-05-06 14:57:04 -04:00
if err != nil {
2016-08-30 22:22:27 -04:00
// Remove directories created so far.
removeRoots ( erasureDisks )
return nil , err
2016-05-06 14:57:04 -04:00
}
2016-06-07 21:15:04 -04:00
erasureDisks = append ( erasureDisks , path )
2016-05-06 14:57:04 -04:00
}
2016-08-30 22:22:27 -04:00
return erasureDisks , nil
}
2016-05-06 14:57:04 -04:00
2016-10-05 15:48:07 -04:00
// initObjectLayer - Instantiates object layer and returns it.
func initObjectLayer ( disks [ ] string , ignoredDisks [ ] string ) ( ObjectLayer , [ ] StorageAPI , error ) {
storageDisks , err := initStorageDisks ( disks , ignoredDisks )
2016-08-30 22:22:27 -04:00
if err != nil {
2016-10-05 15:48:07 -04:00
return nil , nil , err
2016-08-30 22:22:27 -04:00
}
2016-10-05 15:48:07 -04:00
err = waitForFormatDisks ( true , "" , storageDisks )
2016-06-07 21:15:04 -04:00
if err != nil {
2016-10-05 15:48:07 -04:00
return nil , nil , err
}
objLayer , err := newObjectLayer ( storageDisks )
if err != nil {
return nil , nil , err
2016-06-07 21:15:04 -04:00
}
2016-10-05 15:48:07 -04:00
2016-08-16 22:24:23 -04:00
// Disabling the cache for integration tests.
// Should use the object layer tests for validating cache.
if xl , ok := objLayer . ( xlObjects ) ; ok {
xl . objCacheEnabled = false
}
2016-10-05 15:48:07 -04:00
// Success.
return objLayer , storageDisks , nil
2016-06-07 21:15:04 -04:00
}
// removeRoots - Cleans up initialized directories during tests.
func removeRoots ( roots [ ] string ) {
for _ , root := range roots {
2016-06-13 05:53:09 -04:00
removeAll ( root )
2016-05-06 14:57:04 -04:00
}
2016-06-07 21:15:04 -04:00
}
2016-05-06 14:57:04 -04:00
2016-06-29 01:32:00 -04:00
//removeDiskN - removes N disks from supplied disk slice.
func removeDiskN ( disks [ ] string , n int ) {
if n > len ( disks ) {
n = len ( disks )
}
for _ , disk := range disks [ : n ] {
removeAll ( disk )
2016-06-07 21:15:04 -04:00
}
}
2016-10-05 15:48:07 -04:00
// deepCopyStorageDisks - makes an entirely new copy of a StorageAPI slice.
// Note: only the slice header is duplicated; the StorageAPI values themselves
// are shared with the input (it is a shallow element copy).
func deepCopyStorageDisks(storageDisks []StorageAPI) []StorageAPI {
	newStorageDisks := make([]StorageAPI, len(storageDisks))
	// Built-in copy replaces the manual element-by-element loop.
	copy(newStorageDisks, storageDisks)
	return newStorageDisks
}
// Initializes storage disks with 'N' errored disks, N disks return 'err' for each disk access.
func prepareNErroredDisks(storageDisks []StorageAPI, offline int, err error, t *testing.T) []StorageAPI {
	if offline > len(storageDisks) {
		t.Fatal("Requested more offline disks than supplied storageDisks slice", offline, len(storageDisks))
	}
	// Wrap the first `offline` disks in a naughtyDisk that fails every access
	// with the supplied error.
	for i := 0; i < offline; i++ {
		posixDisk := storageDisks[i].(*posix)
		storageDisks[i] = &naughtyDisk{disk: posixDisk, defaultErr: err}
	}
	return storageDisks
}
// Initializes storage disks with 'N' offline disks, N disks returns 'errDiskNotFound' for each disk access.
func prepareNOfflineDisks(storageDisks []StorageAPI, offline int, t *testing.T) []StorageAPI {
	// Offline is modeled as every disk access failing with errDiskNotFound.
	return prepareNErroredDisks(storageDisks, offline, errDiskNotFound, t)
}
// Initializes backend storage disks.
func prepareXLStorageDisks(t *testing.T) ([]StorageAPI, []string) {
	const nDisks = 16
	fsDirs, err := getRandomDisks(nDisks)
	if err != nil {
		t.Fatal("Unexpected error: ", err)
	}
	_, storageDisks, err := initObjectLayer(fsDirs, nil)
	if err != nil {
		// Clean up the temp dirs created above before failing the test.
		removeRoots(fsDirs)
		t.Fatal("Unable to initialize storage disks", err)
	}
	return storageDisks, fsDirs
}
2016-09-10 17:47:27 -04:00
// creates a bucket for the tests and returns the bucket name.
// initializes the specified API endpoints for the tests.
// initializes the root and returns its path.
// return credentials.
func initAPIHandlerTest(obj ObjectLayer, endPoints []string) (bucketName, rootPath string, apiRouter http.Handler, err error) {
	// Pick a fresh random bucket and create it on the object layer.
	bucketName = getRandomBucketName()
	if err = obj.MakeBucket(bucketName); err != nil {
		// failed to create newbucket, return err.
		return "", "", nil, err
	}
	// Register the requested API end points with the XL/FS object layer.
	apiRouter = initTestAPIEndPoints(obj, endPoints)
	// Initialize the server config to obtain the credentials and root;
	// credentials are necessary to sign the HTTP request.
	rootPath, err = newTestConfig("us-east-1")
	if err != nil {
		return "", "", nil, err
	}
	return bucketName, rootPath, apiRouter, nil
}
2016-10-07 14:16:11 -04:00
// ExecObjectLayerAPIAnonTest - Helper function to validate object Layer API handler response for anonymous/unsigned HTTP request.
// Here is the brief description of some of the arguments to the function below.
// apiRouter - http.Handler with the relevant API endPoint (API endPoint under test) registered.
// anonReq - unsigned *http.Request to invoke the handler's response for anonymous requests.
// policyFunc - function to return bucketPolicy statement which would permit the anonymous request to be served.
// The test works in 2 steps, here is the description of the steps.
// STEP 1: Call the handler with the unsigned HTTP request (anonReq), assert for the `ErrAccessDenied` error response.
// STEP 2: Set the policy to allow the unsigned request, use the policyFunc to obtain the relevant statement and call the handler again to verify its success.
func ExecObjectLayerAPIAnonTest ( t * testing . T , testName , bucketName , objectName , instanceType string , apiRouter http . Handler ,
anonReq * http . Request , policyFunc func ( string , string ) policyStatement ) {
// simple function which ends the test by printing the common message which gives the context of the test
// and then followed by the the actual error message.
failTest := func ( failMsg string ) {
t . Fatalf ( "Minio %s: Anonymous HTTP request test Fail for \"%s\": \n<Error> %s." , instanceType , testName , failMsg )
}
// httptest Recorder to capture all the response by the http handler.
rec := httptest . NewRecorder ( )
// reading the body to preserve it so that it can be used again for second attempt of sending unsigned HTTP request.
// If the body is read in the handler the same request cannot be made use of.
buf , err := ioutil . ReadAll ( anonReq . Body )
if err != nil {
failTest ( err . Error ( ) )
}
// creating 2 read closer (to set as request body) from the body content.
readerOne := ioutil . NopCloser ( bytes . NewBuffer ( buf ) )
readerTwo := ioutil . NopCloser ( bytes . NewBuffer ( buf ) )
anonReq . Body = readerOne
// call the HTTP handler.
apiRouter . ServeHTTP ( rec , anonReq )
// expected error response when the unsigned HTTP request is not permitted.
accesDeniedHTTPStatus := getAPIError ( ErrAccessDenied ) . HTTPStatusCode
if rec . Code != accesDeniedHTTPStatus {
failTest ( fmt . Sprintf ( "Object API Nil Test expected to fail with %d, but failed with %d." , accesDeniedHTTPStatus , rec . Code ) )
}
// expected error response in bytes when objectLayer is not initialized, or set to `nil`.
expectedErrResponse := encodeResponse ( getAPIErrorResponse ( getAPIError ( ErrAccessDenied ) , getGetObjectURL ( "" , bucketName , objectName ) ) )
// read the response body.
actualContent , err := ioutil . ReadAll ( rec . Body )
if err != nil {
2016-10-08 02:28:50 -04:00
failTest ( fmt . Sprintf ( "Failed parsing response body: <ERROR> %v" , err ) )
2016-10-07 14:16:11 -04:00
}
// verify whether actual error response (from the response body), matches the expected error response.
if ! bytes . Equal ( expectedErrResponse , actualContent ) {
failTest ( "Object content differs from expected value." )
}
// Set write only policy on bucket to allow anonymous HTTP request for the operation under test.
// request to go through.
policy := bucketPolicy {
Version : "1.0" ,
Statements : [ ] policyStatement { policyFunc ( bucketName , "" ) } ,
}
2016-10-08 04:04:26 -04:00
2016-10-07 14:16:11 -04:00
globalBucketPolicies . SetBucketPolicy ( bucketName , & policy )
// now call the handler again with the unsigned/anonymous request, it should be accepted.
rec = httptest . NewRecorder ( )
anonReq . Body = readerTwo
apiRouter . ServeHTTP ( rec , anonReq )
2016-10-08 02:28:50 -04:00
var expectedHTTPStatus int
// expectedHTTPStatus returns 204 (http.StatusNoContent) on success.
if testName == "TestAPIDeleteObjectHandler" {
expectedHTTPStatus = http . StatusNoContent
2016-10-08 04:04:26 -04:00
} else if strings . Contains ( testName , "BucketPolicyHandler" ) {
// BucketPolicyHandler's doesn't support anonymous request, policy changes should allow unsigned requests.
expectedHTTPStatus = http . StatusForbidden
2016-10-08 02:28:50 -04:00
} else {
// other API handlers return 200OK on success.
expectedHTTPStatus = http . StatusOK
}
2016-10-08 04:04:26 -04:00
2016-10-08 02:28:50 -04:00
// compare the HTTP response status code with the expected one.
if rec . Code != expectedHTTPStatus {
2016-10-07 14:16:11 -04:00
failTest ( fmt . Sprintf ( "Expected the anonymous HTTP request to be served after the policy changes\n,Expected response HTTP status code to be %d, got %d." ,
2016-10-08 02:28:50 -04:00
expectedHTTPStatus , rec . Code ) )
2016-10-07 14:16:11 -04:00
}
}
2016-10-06 16:34:33 -04:00
// ExecObjectLayerAPINilTest - Sets the object layer to `nil`, and calls the registered object layer API endpoint, and asserts the error response.
// The purpose is to validate the API handlers response when the object layer is uninitialized.
// Usage hint: Should be used at the end of the API end points tests (ex: check the last few lines of `testAPIListObjectPartsHandler`), need a sample HTTP request
// to be sent as argument so that the relevant handler is called,
// the handler registration is expected to be done since its called from within the API handler tests,
// the reference to the registered HTTP handler has to be sent as an argument.
func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanceType string, apiRouter http.Handler, req *http.Request) {
	// The API handler gets the reference to the object layer via the global
	// object Layer; set it to `nil` to exercise the uninitialized path.
	objLayerMutex.Lock()
	globalObjectAPI = nil
	objLayerMutex.Unlock()

	// Record the handler's response.
	rec := httptest.NewRecorder()
	apiRouter.ServeHTTP(rec, req)

	// The handler must reply with ErrServerNotInitialized when objectLayer is `nil`.
	serverNotInitializedErr := getAPIError(ErrServerNotInitialized).HTTPStatusCode
	if rec.Code != serverNotInitializedErr {
		t.Errorf("Object API Nil Test expected to fail with %d, but failed with %d.", serverNotInitializedErr, rec.Code)
	}
	// expected error response in bytes when objectLayer is not initialized, or set to `nil`.
	expectedErrResponse := encodeResponse(getAPIErrorResponse(getAPIError(ErrServerNotInitialized), getGetObjectURL("", bucketName, objectName)))
	// read the response body.
	actualContent, err := ioutil.ReadAll(rec.Body)
	if err != nil {
		t.Fatalf("Minio %s: Failed parsing response body: <ERROR> %v.", instanceType, err)
	}
	// verify whether actual error response (from the response body), matches the expected error response.
	if !bytes.Equal(expectedErrResponse, actualContent) {
		t.Errorf("Minio %s: Object content differs from expected value.", instanceType)
	}
}
2016-09-10 17:47:27 -04:00
// ExecObjectLayerAPITest - executes object layer API tests.
// Creates single node and XL ObjectLayer instance, registers the specified API end points and runs test for both the layers.
2016-10-07 11:02:37 -04:00
func ExecObjectLayerAPITest ( t * testing . T , objAPITest objAPITestType , endPoints [ ] string ) {
2016-09-10 17:47:27 -04:00
objLayer , fsDir , err := prepareFS ( )
if err != nil {
t . Fatalf ( "Initialization of object layer failed for single node setup: %s" , err )
}
bucketFS , fsRoot , fsAPIRouter , err := initAPIHandlerTest ( objLayer , endPoints )
if err != nil {
t . Fatalf ( "Initialzation of API handler tests failed: <ERROR> %s" , err )
}
credentials := serverConfig . GetCredential ( )
// Executing the object layer tests for single node setup.
objAPITest ( objLayer , singleNodeTestStr , bucketFS , fsAPIRouter , credentials , t )
objLayer , xlDisks , err := prepareXL ( )
if err != nil {
t . Fatalf ( "Initialization of object layer failed for XL setup: %s" , err )
}
bucketXL , xlRoot , xlAPIRouter , err := initAPIHandlerTest ( objLayer , endPoints )
if err != nil {
t . Fatalf ( "Initialzation of API handler tests failed: <ERROR> %s" , err )
}
credentials = serverConfig . GetCredential ( )
// Executing the object layer tests for XL.
objAPITest ( objLayer , xLTestStr , bucketXL , xlAPIRouter , credentials , t )
2016-10-06 16:34:33 -04:00
// clean up the temporary test backend.
removeRoots ( append ( xlDisks , fsDir , fsRoot , xlRoot ) )
2016-09-10 17:47:27 -04:00
}
// objAPITestType - function signature to be implemented by object layer API
// handler tests and passed to ExecObjectLayerAPITest.
type objAPITestType func(obj ObjectLayer, instanceType string, bucketName string,
	apiRouter http.Handler, credentials credential, t *testing.T)
2016-09-10 17:47:27 -04:00
2016-06-07 21:15:04 -04:00
// Regular object test type: receives the object layer under test and the
// instance type label ("FS"/"XL") for error messages.
type objTestType func(obj ObjectLayer, instanceType string, t TestErrHandler)

// Special object test type for disk not found situations; additionally
// receives the backing dirs so individual disks can be removed mid-test.
type objTestDiskNotFoundType func(obj ObjectLayer, instanceType string, dirs []string, t *testing.T)
// ExecObjectLayerTest - executes object layer tests.
// Creates single node and XL ObjectLayer instance and runs test for both the layers.
2016-07-07 18:05:51 -04:00
func ExecObjectLayerTest ( t TestErrHandler , objTest objTestType ) {
2016-08-30 22:22:27 -04:00
objLayer , fsDir , err := prepareFS ( )
2016-05-06 14:57:04 -04:00
if err != nil {
2016-07-02 22:05:16 -04:00
t . Fatalf ( "Initialization of object layer failed for single node setup: %s" , err )
2016-05-06 14:57:04 -04:00
}
2016-05-26 17:43:17 -04:00
// Executing the object layer tests for single node setup.
objTest ( objLayer , singleNodeTestStr , t )
2016-05-20 23:48:47 -04:00
2016-08-30 22:22:27 -04:00
objLayer , fsDirs , err := prepareXL ( )
2016-05-06 14:57:04 -04:00
if err != nil {
2016-07-02 22:05:16 -04:00
t . Fatalf ( "Initialization of object layer failed for XL setup: %s" , err )
2016-05-06 14:57:04 -04:00
}
// Executing the object layer tests for XL.
objTest ( objLayer , xLTestStr , t )
defer removeRoots ( append ( fsDirs , fsDir ) )
}
2016-06-07 21:15:04 -04:00
// ExecObjectLayerDiskNotFoundTest - executes object layer tests while deleting
// disks in between tests. Creates XL ObjectLayer instance and runs test for XL layer.
func ExecObjectLayerDiskNotFoundTest ( t * testing . T , objTest objTestDiskNotFoundType ) {
2016-08-30 22:22:27 -04:00
objLayer , fsDirs , err := prepareXL ( )
2016-06-07 21:15:04 -04:00
if err != nil {
2016-07-02 22:05:16 -04:00
t . Fatalf ( "Initialization of object layer failed for XL setup: %s" , err )
2016-06-07 21:15:04 -04:00
}
// Executing the object layer tests for XL.
objTest ( objLayer , xLTestStr , fsDirs , t )
defer removeRoots ( fsDirs )
}
2016-06-29 05:28:46 -04:00
// Special object test type for stale files situations: tests that leave stale
// files/directories behind receive the backing dirs for inspection/cleanup.
type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []string, t *testing.T)
// ExecObjectLayerStaleFilesTest - executes object layer tests those leaves stale
// files/directories under .minio/tmp. Creates XL ObjectLayer instance and runs test for XL layer.
func ExecObjectLayerStaleFilesTest ( t * testing . T , objTest objTestStaleFilesType ) {
2016-08-30 22:22:27 -04:00
nDisks := 16
erasureDisks , err := getRandomDisks ( nDisks )
if err != nil {
t . Fatalf ( "Initialization of disks for XL setup: %s" , err )
}
2016-10-05 15:48:07 -04:00
objLayer , _ , err := initObjectLayer ( erasureDisks , nil )
2016-06-29 05:28:46 -04:00
if err != nil {
2016-07-02 22:05:16 -04:00
t . Fatalf ( "Initialization of object layer failed for XL setup: %s" , err )
2016-06-29 05:28:46 -04:00
}
// Executing the object layer tests for XL.
2016-08-30 22:22:27 -04:00
objTest ( objLayer , xLTestStr , erasureDisks , t )
defer removeRoots ( erasureDisks )
2016-06-29 05:28:46 -04:00
}
2016-07-02 22:05:16 -04:00
2016-10-05 15:48:07 -04:00
// registerBucketLevelFunc - registers only the requested bucket/object level
// API handlers (identified by name) on the supplied bucket router, so tests
// can exercise individual endpoints in isolation. Unknown names are ignored.
func registerBucketLevelFunc(bucket *router.Router, api objectAPIHandlers, apiFunctions ...string) {
	for _, apiFunction := range apiFunctions {
		switch apiFunction {
		case "PostPolicy":
			// Register PostPolicy handler.
			bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(api.PostPolicyBucketHandler)
		// Register GetObject handler.
		case "GetObject":
			bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(api.GetObjectHandler)
		// Register PutObject handler.
		case "PutObject":
			bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectHandler)
		// Register Delete Object handler.
		case "DeleteObject":
			bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(api.DeleteObjectHandler)
		// Register Copy Object handler.
		case "CopyObject":
			bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectHandler)
		// Register PutBucket Policy handler.
		case "PutBucketPolicy":
			bucket.Methods("PUT").HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "")
		// Register Delete bucket HTTP policy handler.
		case "DeleteBucketPolicy":
			bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketPolicyHandler).Queries("policy", "")
		// Register Get Bucket policy HTTP Handler.
		case "GetBucketPolicy":
			bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "")
		// Register GetBucketLocation handler.
		case "GetBucketLocation":
			bucket.Methods("GET").HandlerFunc(api.GetBucketLocationHandler).Queries("location", "")
		// Register HeadBucket handler.
		case "HeadBucket":
			bucket.Methods("HEAD").HandlerFunc(api.HeadBucketHandler)
		// Register New Multipart upload handler.
		case "NewMultipart":
			bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(api.NewMultipartUploadHandler).Queries("uploads", "")
		// Register PutObjectPart handler.
		case "PutObjectPart":
			bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
		// Register ListObjectParts handler.
		case "ListObjectParts":
			bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(api.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}")
		// Register ListMultipartUploads handler.
		case "ListMultipartUploads":
			bucket.Methods("GET").HandlerFunc(api.ListMultipartUploadsHandler).Queries("uploads", "")
		// Register Complete Multipart Upload handler.
		case "CompleteMultipart":
			bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(api.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
		// Register GetBucketNotification Handler.
		case "GetBucketNotification":
			bucket.Methods("GET").HandlerFunc(api.GetBucketNotificationHandler).Queries("notification", "")
		// Register PutBucketNotification Handler.
		case "PutBucketNotification":
			bucket.Methods("PUT").HandlerFunc(api.PutBucketNotificationHandler).Queries("notification", "")
		// Register ListenBucketNotification Handler.
		case "ListenBucketNotification":
			bucket.Methods("GET").HandlerFunc(api.ListenBucketNotificationHandler).Queries("events", "{events:.*}")
		}
	}
}
2016-10-05 15:48:07 -04:00
// registerAPIFunctions helper function to add API functions identified by name to the routers.
func registerAPIFunctions ( muxRouter * router . Router , objLayer ObjectLayer , apiFunctions ... string ) {
if len ( apiFunctions ) == 0 {
// Register all api endpoints by default.
registerAPIRouter ( muxRouter )
return
2016-10-01 11:23:26 -04:00
}
// API Router.
apiRouter := muxRouter . NewRoute ( ) . PathPrefix ( "/" ) . Subrouter ( )
// Bucket router.
2016-10-05 15:48:07 -04:00
bucketRouter := apiRouter . PathPrefix ( "/{bucket}" ) . Subrouter ( )
2016-10-01 11:23:26 -04:00
2016-07-02 22:05:16 -04:00
// All object storage operations are registered as HTTP handlers on `objectAPIHandlers`.
// When the handlers get a HTTP request they use the underlyting ObjectLayer to perform operations.
2016-09-23 02:47:48 -04:00
objLayerMutex . Lock ( )
globalObjectAPI = objLayer
objLayerMutex . Unlock ( )
2016-07-02 22:05:16 -04:00
api := objectAPIHandlers {
2016-09-23 02:47:48 -04:00
ObjectAPI : newObjectLayerFn ,
2016-07-02 22:05:16 -04:00
}
2016-09-23 02:47:48 -04:00
2016-10-05 15:48:07 -04:00
// Register ListBuckets handler.
apiRouter . Methods ( "GET" ) . HandlerFunc ( api . ListBucketsHandler )
// Register all bucket level handlers.
registerBucketLevelFunc ( bucketRouter , api , apiFunctions ... )
}
// Takes in XL/FS object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler.
// Need isolated registration of API end points while writing unit tests for end points.
// All the API end points are registered only for the default case.
func initTestAPIEndPoints ( objLayer ObjectLayer , apiFunctions [ ] string ) http . Handler {
// initialize a new mux router.
// goriilla/mux is the library used to register all the routes and handle them.
muxRouter := router . NewRouter ( )
if len ( apiFunctions ) > 0 {
// Iterate the list of API functions requested for and register them in mux HTTP handler.
registerAPIFunctions ( muxRouter , objLayer , apiFunctions ... )
return muxRouter
2016-07-02 22:05:16 -04:00
}
2016-10-05 15:48:07 -04:00
registerAPIRouter ( muxRouter )
2016-07-02 22:05:16 -04:00
return muxRouter
}
2016-08-15 19:13:03 -04:00
2016-08-28 22:31:59 -04:00
// Initialize Web RPC Handlers for testing
2016-08-15 19:13:03 -04:00
func initTestWebRPCEndPoint ( objLayer ObjectLayer ) http . Handler {
2016-10-05 15:48:07 -04:00
objLayerMutex . Lock ( )
globalObjectAPI = objLayer
objLayerMutex . Unlock ( )
2016-08-15 19:13:03 -04:00
// Initialize router.
muxRouter := router . NewRouter ( )
2016-10-05 15:48:07 -04:00
registerWebRouter ( muxRouter )
2016-08-15 19:13:03 -04:00
return muxRouter
}