2016-05-06 14:57:04 -04:00
|
|
|
/*
|
|
|
|
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
2016-06-21 15:10:18 -04:00
|
|
|
"bytes"
|
|
|
|
"encoding/base64"
|
|
|
|
"encoding/hex"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
2016-05-06 14:57:04 -04:00
|
|
|
"io/ioutil"
|
2016-06-25 22:07:44 -04:00
|
|
|
"math/rand"
|
2016-06-21 15:10:18 -04:00
|
|
|
"net/http"
|
|
|
|
"net/http/httptest"
|
2016-06-25 22:07:44 -04:00
|
|
|
"net/url"
|
2016-05-06 14:57:04 -04:00
|
|
|
"os"
|
2016-06-25 22:07:44 -04:00
|
|
|
"regexp"
|
2016-06-21 15:10:18 -04:00
|
|
|
"sort"
|
2016-06-29 06:13:44 -04:00
|
|
|
"strconv"
|
2016-06-21 15:10:18 -04:00
|
|
|
"strings"
|
2016-06-29 06:13:44 -04:00
|
|
|
"sync"
|
2016-05-06 14:57:04 -04:00
|
|
|
"testing"
|
2016-06-21 15:10:18 -04:00
|
|
|
"time"
|
2016-06-25 22:07:44 -04:00
|
|
|
"unicode/utf8"
|
2016-07-02 22:05:16 -04:00
|
|
|
|
|
|
|
router "github.com/gorilla/mux"
|
2016-05-06 14:57:04 -04:00
|
|
|
)
|
|
|
|
|
2016-07-07 22:50:44 -04:00
|
|
|
// Tests should initNSLock only once.
func init() {
	// Initialize name space lock once for the whole test binary; repeated
	// initialization would reset lock state mid-run.
	initNSLock()
}
|
|
|
|
|
2016-06-21 15:10:18 -04:00
|
|
|
// The Argument to TestServer should satisfy the interface.
// Golang Testing.T and Testing.B, and gocheck.C satisfy the interface.
// This makes it easy to run the TestServer from any of the tests.
type TestErrHandler interface {
	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Failed() bool
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
}
|
|
|
|
|
2016-05-06 14:57:04 -04:00
|
|
|
// Notation strings passed to object-layer tests to identify which backend
// (single-node FS or erasure-coded XL) is being exercised.
const (
	// singleNodeTestStr is the string which is used as notation for Single node ObjectLayer in the unit tests.
	singleNodeTestStr string = "FS"
	// xLTestStr is the string which is used as notation for XL ObjectLayer in the unit tests.
	xLTestStr string = "XL"
)
|
|
|
|
|
2016-06-25 22:07:44 -04:00
|
|
|
// letterBytes is the alphabet used by randString for bucket/object names.
// NOTE(review): "01234569" skips '7' and '8' — presumably a typo for
// "0123456789". Harmless for generating valid names, but confirm before
// relying on full digit coverage.
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"

const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)
|
|
|
|
|
2016-06-29 06:13:44 -04:00
|
|
|
// Random number state.
// We generate random temporary file names so that there's a good
// chance the file doesn't exist yet.
var randN uint32
var randmu sync.Mutex // guards randN across concurrent nextSuffix calls.
|
|
|
|
|
2016-07-02 04:59:28 -04:00
|
|
|
// reseed - returns a new seed every time the function is called.
// Mixes the current wall-clock nanoseconds with the process ID so
// concurrent test binaries start from different seeds.
func reseed() uint32 {
	seed := time.Now().UnixNano() + int64(os.Getpid())
	return uint32(seed)
}
|
|
|
|
|
2016-07-02 04:59:28 -04:00
|
|
|
// nextSuffix - provides a new unique suffix every time the function is called.
// Uses a package-level linear congruential generator guarded by randmu.
func nextSuffix() string {
	randmu.Lock()
	r := randN
	// Initial seed required, generate one.
	if r == 0 {
		r = reseed()
	}
	// constants from Numerical Recipes
	r = r*1664525 + 1013904223
	randN = r
	randmu.Unlock()
	// Adding 1e9 and dropping the leading "1" yields a fixed-width,
	// nine-digit, zero-padded decimal suffix.
	return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
|
|
|
|
|
2016-06-21 15:10:18 -04:00
|
|
|
// TestServer encapsulates an instantiation of a Minio instance with a temporary backend.
// Example usage:
//   s := StartTestServer(t,"XL")
//   defer s.Stop()
type TestServer struct {
	Root      string           // root directory holding the server config; removed by Stop.
	Disks     []string         // backend disk directories (one for FS, sixteen for XL); removed by Stop.
	AccessKey string           // access key used to sign test requests.
	SecretKey string           // secret key used to sign test requests.
	Server    *httptest.Server // the in-process HTTP server under test.
}
|
|
|
|
|
|
|
|
// Starts the test server and returns the TestServer instance.
// instanceType is "FS" or "XL" (see makeTestBackend); any backend or
// config initialization failure fails the test via t.Fatalf.
func StartTestServer(t TestErrHandler, instanceType string) TestServer {
	// create an instance of TestServer.
	testServer := TestServer{}
	// create temporary backend for the test server.
	_, erasureDisks, err := makeTestBackend(instanceType)

	if err != nil {
		t.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
	}

	// Initialize server config with fresh credentials and a default region.
	credentials, root, err := initTestConfig("us-east-1")
	if err != nil {
		t.Fatalf("%s", err)
	}
	testServer.Root = root
	testServer.Disks = erasureDisks
	testServer.AccessKey = credentials.AccessKeyID
	testServer.SecretKey = credentials.SecretAccessKey
	// Run TestServer.
	testServer.Server = httptest.NewServer(configureServerHandler(serverCmdConfig{disks: erasureDisks}))

	return testServer
}
|
|
|
|
|
|
|
|
// Configure the server for the test run. Initializes the server config,
// sets the given region, writes the config under a fresh temp root, and
// returns the generated credentials plus that root path. The caller is
// responsible for removing the returned root.
func initTestConfig(bucketLocation string) (credential, string, error) {
	// Initialize server config.
	initConfig()
	// Get credential.
	credentials := serverConfig.GetCredential()
	// Set a default region.
	serverConfig.SetRegion(bucketLocation)
	rootPath, err := getTestRoot()
	if err != nil {
		return credential{}, "", err
	}
	// Do this only once here.
	setGlobalConfigPath(rootPath)

	// Persist the config so the server under test can read it back.
	err = serverConfig.Save()
	if err != nil {
		return credential{}, "", err
	}
	return credentials, rootPath, nil
}
|
|
|
|
|
|
|
|
// Deleting the temporary backend and stopping the server.
|
|
|
|
func (testServer TestServer) Stop() {
|
|
|
|
removeAll(testServer.Root)
|
|
|
|
for _, disk := range testServer.Disks {
|
|
|
|
removeAll(disk)
|
|
|
|
}
|
|
|
|
testServer.Server.Close()
|
|
|
|
}
|
|
|
|
|
2016-07-10 14:10:59 -04:00
|
|
|
// Sign given request using Signature V4 (AWS4-HMAC-SHA256) with the supplied
// access/secret keys. The request must already carry the
// "x-amz-content-sha256" header (set by newTestRequest); the region is
// hard-coded to "us-east-1" and the service to "s3". Sets "x-amz-date" and
// "Authorization" on the request in place.
func signRequest(req *http.Request, accessKey, secretKey string) error {
	// Get hashed payload.
	hashedPayload := req.Header.Get("x-amz-content-sha256")
	if hashedPayload == "" {
		return fmt.Errorf("Invalid hashed payload.")
	}

	currTime := time.Now().UTC()

	// Set x-amz-date.
	req.Header.Set("x-amz-date", currTime.Format(iso8601Format))

	// Get header map.
	headerMap := make(map[string][]string)
	for k, vv := range req.Header {
		// If request header key is not in ignored headers, then add it.
		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok {
			headerMap[strings.ToLower(k)] = vv
		}
	}

	// Get header keys. "host" is always signed even though it lives on
	// req.URL rather than req.Header.
	headers := []string{"host"}
	for k := range headerMap {
		headers = append(headers, k)
	}
	sort.Strings(headers)

	// Get canonical headers: "key:value\n" per signed header, sorted.
	var buf bytes.Buffer
	for _, k := range headers {
		buf.WriteString(k)
		buf.WriteByte(':')
		switch {
		case k == "host":
			buf.WriteString(req.URL.Host)
			// NOTE(review): fallthrough also appends any values stored under
			// a literal "host" key in headerMap (normally none) before the
			// trailing newline.
			fallthrough
		default:
			for idx, v := range headerMap[k] {
				if idx > 0 {
					buf.WriteByte(',')
				}
				buf.WriteString(v)
			}
			buf.WriteByte('\n')
		}
	}
	canonicalHeaders := buf.String()

	// Get signed headers.
	signedHeaders := strings.Join(headers, ";")

	// Get canonical query string. SigV4 requires spaces as %20, not '+'.
	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)

	// Get canonical URI.
	canonicalURI := getURLEncodedName(req.URL.Path)

	// Get canonical request.
	// canonicalRequest =
	//  <HTTPMethod>\n
	//  <CanonicalURI>\n
	//  <CanonicalQueryString>\n
	//  <CanonicalHeaders>\n
	//  <SignedHeaders>\n
	//  <HashedPayload>
	//
	canonicalRequest := strings.Join([]string{
		req.Method,
		canonicalURI,
		req.URL.RawQuery,
		canonicalHeaders,
		signedHeaders,
		hashedPayload,
	}, "\n")

	// Get scope: date/region/service/terminator.
	scope := strings.Join([]string{
		currTime.Format(yyyymmdd),
		"us-east-1",
		"s3",
		"aws4_request",
	}, "/")

	// String to sign: algorithm, timestamp, scope, hashed canonical request.
	stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
	stringToSign = stringToSign + scope + "\n"
	stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))

	// Derive the signing key through the SigV4 HMAC chain.
	date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
	region := sumHMAC(date, []byte("us-east-1"))
	service := sumHMAC(region, []byte("s3"))
	signingKey := sumHMAC(service, []byte("aws4_request"))

	signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))

	// final Authorization header
	parts := []string{
		"AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope,
		"SignedHeaders=" + signedHeaders,
		"Signature=" + signature,
	}
	auth := strings.Join(parts, ", ")
	req.Header.Set("Authorization", auth)

	return nil
}
|
|
|
|
|
|
|
|
// Returns new HTTP request object. The body (may be nil) is fully read to
// compute its SHA-256 ("x-amz-content-sha256") and MD5 ("Content-Md5")
// headers, then rewound and attached as the request body. The method
// defaults to "POST" when empty.
func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
	if method == "" {
		method = "POST"
	}

	req, err := http.NewRequest(method, urlStr, nil)
	if err != nil {
		return nil, err
	}

	// Add Content-Length
	req.ContentLength = contentLength

	// Save for subsequent use
	var hashedPayload string
	switch {
	case body == nil:
		// Empty body: SHA-256 of the empty byte string.
		hashedPayload = hex.EncodeToString(sum256([]byte{}))
	default:
		payloadBytes, e := ioutil.ReadAll(body)
		if e != nil {
			return nil, e
		}
		hashedPayload = hex.EncodeToString(sum256(payloadBytes))
		md5Base64 := base64.StdEncoding.EncodeToString(sumMD5(payloadBytes))
		req.Header.Set("Content-Md5", md5Base64)
	}
	req.Header.Set("x-amz-content-sha256", hashedPayload)

	// Seek back to beginning.
	if body != nil {
		body.Seek(0, 0)
		// Add body
		req.Body = ioutil.NopCloser(body)
	} else {
		// this is added to avoid panic during ioutil.ReadAll(req.Body).
		// the stack trace can be found here https://github.com/minio/minio/pull/2074 .
		// This is very similar to https://github.com/golang/go/issues/7527.
		req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
	}

	return req, nil
}
|
|
|
|
|
|
|
|
// Returns new HTTP request object signed with signature v4.
|
|
|
|
func newTestSignedRequest(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string) (*http.Request, error) {
|
|
|
|
req, err := newTestRequest(method, urlStr, contentLength, body)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
err = signRequest(req, accessKey, secretKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-06-21 15:10:18 -04:00
|
|
|
return req, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// creates the temp backend setup.
// if the option is
// FS: Returns a temp single disk setup and initializes FS Backend.
// XL: Returns a 16 temp disk setup and initializes XL Backend.
// The returned disk paths must be removed by the caller (see TestServer.Stop).
func makeTestBackend(instanceType string) (ObjectLayer, []string, error) {
	switch instanceType {
	case "FS":
		objLayer, fsroot, err := getSingleNodeObjectLayer()
		if err != nil {
			return nil, []string{}, err
		}
		// Single root wrapped in a slice so callers handle FS and XL uniformly.
		return objLayer, []string{fsroot}, err

	case "XL":
		objectLayer, erasureDisks, err := getXLObjectLayer()
		if err != nil {
			return nil, []string{}, err
		}
		return objectLayer, erasureDisks, err
	default:
		errMsg := "Invalid instance type, Only FS and XL are valid options"
		return nil, []string{}, fmt.Errorf("Failed obtaining Temp XL layer: <ERROR> %s", errMsg)
	}
}
|
|
|
|
|
2016-06-25 22:07:44 -04:00
|
|
|
// src is the random source used by randString; seeded once per process.
var src = rand.NewSource(time.Now().UTC().UnixNano())
|
|
|
|
|
|
|
|
// Function to generate random string for bucket/object names.
// Draws letterIdxBits-sized chunks from each 63-bit random word so one
// src.Int63() call yields up to letterIdxMax characters.
func randString(n int) string {
	b := make([]byte, n)
	// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			// All chunks of the current word consumed; fetch fresh bits.
			cache, remain = src.Int63(), letterIdxMax
		}
		// Use the low bits as an alphabet index; indices past the end of
		// letterBytes are discarded (rejection sampling keeps it uniform).
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return string(b)
}
|
|
|
|
|
2016-07-08 17:28:06 -04:00
|
|
|
// generate random object name.
|
|
|
|
func getRandomObjectName() string {
|
|
|
|
return randString(16)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2016-06-25 22:07:44 -04:00
|
|
|
// generate random bucket name.
|
|
|
|
func getRandomBucketName() string {
|
|
|
|
return randString(60)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2016-07-08 21:26:04 -04:00
|
|
|
// NewEOFWriter returns a Writer that writes to w,
|
|
|
|
// but returns EOF error after writing n bytes.
|
|
|
|
func NewEOFWriter(w io.Writer, n int64) io.Writer {
|
|
|
|
return &EOFWriter{w, n}
|
|
|
|
}
|
|
|
|
|
|
|
|
type EOFWriter struct {
|
|
|
|
w io.Writer
|
|
|
|
n int64
|
|
|
|
}
|
|
|
|
|
2016-07-09 20:11:08 -04:00
|
|
|
// io.Writer implementation designed to error out with io.EOF after reading `n` bytes.
|
2016-07-08 21:26:04 -04:00
|
|
|
func (t *EOFWriter) Write(p []byte) (n int, err error) {
|
|
|
|
if t.n <= 0 {
|
|
|
|
return -1, io.EOF
|
|
|
|
}
|
|
|
|
// real write
|
|
|
|
n = len(p)
|
|
|
|
if int64(n) > t.n {
|
|
|
|
n = int(t.n)
|
|
|
|
}
|
|
|
|
n, err = t.w.Write(p[0:n])
|
|
|
|
t.n -= int64(n)
|
|
|
|
if err == nil {
|
|
|
|
n = len(p)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-06-25 22:07:44 -04:00
|
|
|
// queryEncode - encodes query values in their URL encoded form.
|
|
|
|
func queryEncode(v url.Values) string {
|
|
|
|
if v == nil {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
var buf bytes.Buffer
|
|
|
|
keys := make([]string, 0, len(v))
|
|
|
|
for k := range v {
|
|
|
|
keys = append(keys, k)
|
|
|
|
}
|
|
|
|
sort.Strings(keys)
|
|
|
|
for _, k := range keys {
|
|
|
|
vs := v[k]
|
|
|
|
prefix := urlEncodePath(k) + "="
|
|
|
|
for _, v := range vs {
|
|
|
|
if buf.Len() > 0 {
|
|
|
|
buf.WriteByte('&')
|
|
|
|
}
|
|
|
|
buf.WriteString(prefix)
|
|
|
|
buf.WriteString(urlEncodePath(v))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return buf.String()
|
|
|
|
}
|
|
|
|
|
|
|
|
// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
// non english characters cannot be parsed due to the nature in which url.Encode() is written
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
//
// Fixes over the previous version: no shadowing of the builtin `len`, and
// output is accumulated in a bytes.Buffer instead of quadratic string
// concatenation.
func urlEncodePath(pathName string) string {
	// if object matches reserved string, no need to encode them
	reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
	if reservedNames.MatchString(pathName) {
		return pathName
	}
	var encoded bytes.Buffer
	for _, s := range pathName {
		if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
			encoded.WriteRune(s)
			continue
		}
		switch s {
		case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
			encoded.WriteRune(s)
		default:
			runeLen := utf8.RuneLen(s)
			if runeLen < 0 {
				// if utf8 cannot convert return the same string as is
				return pathName
			}
			u := make([]byte, runeLen)
			utf8.EncodeRune(u, s)
			// Percent-encode each UTF-8 byte of the rune in upper case.
			for _, r := range u {
				encoded.WriteByte('%')
				encoded.WriteString(strings.ToUpper(hex.EncodeToString([]byte{r})))
			}
		}
	}
	return encoded.String()
}
|
|
|
|
|
|
|
|
// construct URL for http requests for bucket operations.
|
|
|
|
func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string {
|
|
|
|
urlStr := endPoint + "/"
|
|
|
|
if bucketName != "" {
|
|
|
|
urlStr = urlStr + bucketName + "/"
|
|
|
|
}
|
|
|
|
if objectName != "" {
|
|
|
|
urlStr = urlStr + urlEncodePath(objectName)
|
|
|
|
}
|
|
|
|
if len(queryValues) > 0 {
|
|
|
|
urlStr = urlStr + "?" + queryEncode(queryValues)
|
|
|
|
}
|
|
|
|
return urlStr
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for uploading object into the bucket.
|
|
|
|
func getPutObjectURL(endPoint, bucketName, objectName string) string {
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, objectName, url.Values{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for fetching object from the bucket.
|
|
|
|
func getGetObjectURL(endPoint, bucketName, objectName string) string {
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, objectName, url.Values{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for deleting the object from the bucket.
|
|
|
|
func getDeleteObjectURL(endPoint, bucketName, objectName string) string {
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, objectName, url.Values{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for HEAD o nthe object.
|
|
|
|
func getHeadObjectURL(endPoint, bucketName, objectName string) string {
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, objectName, url.Values{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for inserting bucket policy.
|
|
|
|
func getPutPolicyURL(endPoint, bucketName string) string {
|
|
|
|
queryValue := url.Values{}
|
|
|
|
queryValue.Set("policy", "")
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for fetching bucket policy.
|
|
|
|
func getGetPolicyURL(endPoint, bucketName string) string {
|
|
|
|
queryValue := url.Values{}
|
|
|
|
queryValue.Set("policy", "")
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for deleting bucket policy.
|
|
|
|
func getDeletePolicyURL(endPoint, bucketName string) string {
|
2016-07-04 01:35:30 -04:00
|
|
|
queryValue := url.Values{}
|
|
|
|
queryValue.Set("policy", "")
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
|
2016-06-25 22:07:44 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for creating the bucket.
|
|
|
|
func getMakeBucketURL(endPoint, bucketName string) string {
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, "", url.Values{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for listing buckets.
|
|
|
|
func getListBucketURL(endPoint string) string {
|
|
|
|
return makeTestTargetURL(endPoint, "", "", url.Values{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for HEAD on the bucket.
|
|
|
|
func getHEADBucketURL(endPoint, bucketName string) string {
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, "", url.Values{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for deleting the bucket.
|
|
|
|
func getDeleteBucketURL(endPoint, bucketName string) string {
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, "", url.Values{})
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2016-07-17 15:32:05 -04:00
|
|
|
// return URL for listing objects in the bucket with V1 legacy API.
|
|
|
|
func getListObjectsV1URL(endPoint, bucketName string, maxKeys string) string {
|
2016-06-28 02:54:56 -04:00
|
|
|
queryValue := url.Values{}
|
|
|
|
if maxKeys != "" {
|
|
|
|
queryValue.Set("max-keys", maxKeys)
|
|
|
|
}
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
|
|
|
|
}
|
|
|
|
|
2016-07-17 15:32:05 -04:00
|
|
|
// return URL for listing objects in the bucket with V2 API.
|
|
|
|
func getListObjectsV2URL(endPoint, bucketName string, maxKeys string) string {
|
|
|
|
queryValue := url.Values{}
|
|
|
|
queryValue.Set("list-type", "2") // Enables list objects V2 URL.
|
|
|
|
if maxKeys != "" {
|
|
|
|
queryValue.Set("max-keys", maxKeys)
|
|
|
|
}
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
|
|
|
|
}
|
|
|
|
|
2016-06-28 02:54:56 -04:00
|
|
|
// return URL for a new multipart upload.
|
|
|
|
func getNewMultipartURL(endPoint, bucketName, objectName string) string {
|
|
|
|
queryValue := url.Values{}
|
|
|
|
queryValue.Set("uploads", "")
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, objectName, queryValue)
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for a new multipart upload.
|
|
|
|
func getPartUploadURL(endPoint, bucketName, objectName, uploadID, partNumber string) string {
|
|
|
|
queryValues := url.Values{}
|
|
|
|
queryValues.Set("uploadId", uploadID)
|
|
|
|
queryValues.Set("partNumber", partNumber)
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, objectName, queryValues)
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for aborting multipart upload.
|
|
|
|
func getAbortMultipartUploadURL(endPoint, bucketName, objectName, uploadID string) string {
|
|
|
|
queryValue := url.Values{}
|
|
|
|
queryValue.Set("uploadId", uploadID)
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, objectName, queryValue)
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for a new multipart upload.
|
|
|
|
func getListMultipartURL(endPoint, bucketName string) string {
|
|
|
|
queryValue := url.Values{}
|
|
|
|
queryValue.Set("uploads", "")
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for a new multipart upload.
|
|
|
|
func getListMultipartURLWithParams(endPoint, bucketName, objectName, uploadID, maxParts string) string {
|
|
|
|
queryValues := url.Values{}
|
|
|
|
queryValues.Set("uploadId", uploadID)
|
|
|
|
queryValues.Set("max-parts", maxParts)
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, objectName, queryValues)
|
|
|
|
}
|
|
|
|
|
|
|
|
// return URL for completing multipart upload.
|
|
|
|
// complete multipart upload request is sent after all parts are uploaded.
|
|
|
|
func getCompleteMultipartUploadURL(endPoint, bucketName, objectName, uploadID string) string {
|
|
|
|
queryValue := url.Values{}
|
|
|
|
queryValue.Set("uploadId", uploadID)
|
|
|
|
return makeTestTargetURL(endPoint, bucketName, objectName, queryValue)
|
|
|
|
}
|
|
|
|
|
2016-06-21 15:10:18 -04:00
|
|
|
// returns temp root directory. `
|
|
|
|
func getTestRoot() (string, error) {
|
|
|
|
return ioutil.TempDir(os.TempDir(), "api-")
|
|
|
|
}
|
|
|
|
|
2016-06-07 21:15:04 -04:00
|
|
|
// getXLObjectLayer - Instantiates XL object layer and returns it.
|
|
|
|
func getXLObjectLayer() (ObjectLayer, []string, error) {
|
|
|
|
var nDisks = 16 // Maximum disks.
|
|
|
|
var erasureDisks []string
|
|
|
|
for i := 0; i < nDisks; i++ {
|
|
|
|
path, err := ioutil.TempDir(os.TempDir(), "minio-")
|
2016-05-06 14:57:04 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
2016-06-07 21:15:04 -04:00
|
|
|
erasureDisks = append(erasureDisks, path)
|
2016-05-06 14:57:04 -04:00
|
|
|
}
|
|
|
|
|
2016-07-10 17:38:15 -04:00
|
|
|
objLayer, err := newXLObjects(erasureDisks, nil)
|
2016-06-07 21:15:04 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
return objLayer, erasureDisks, nil
|
|
|
|
}
|
2016-05-20 23:48:47 -04:00
|
|
|
|
2016-06-07 21:15:04 -04:00
|
|
|
// getSingleNodeObjectLayer - Instantiates single node object layer and returns it.
|
|
|
|
func getSingleNodeObjectLayer() (ObjectLayer, string, error) {
|
|
|
|
// Make a temporary directory to use as the obj.
|
|
|
|
fsDir, err := ioutil.TempDir("", "minio-")
|
|
|
|
if err != nil {
|
|
|
|
return nil, "", err
|
2016-05-06 14:57:04 -04:00
|
|
|
}
|
|
|
|
|
2016-06-07 21:15:04 -04:00
|
|
|
// Create the obj.
|
|
|
|
objLayer, err := newFSObjects(fsDir)
|
|
|
|
if err != nil {
|
|
|
|
return nil, "", err
|
|
|
|
}
|
|
|
|
return objLayer, fsDir, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// removeRoots - Cleans up initialized directories during tests.
|
|
|
|
func removeRoots(roots []string) {
|
|
|
|
for _, root := range roots {
|
2016-06-13 05:53:09 -04:00
|
|
|
removeAll(root)
|
2016-05-06 14:57:04 -04:00
|
|
|
}
|
2016-06-07 21:15:04 -04:00
|
|
|
}
|
2016-05-06 14:57:04 -04:00
|
|
|
|
2016-06-29 01:32:00 -04:00
|
|
|
//removeDiskN - removes N disks from supplied disk slice.
|
|
|
|
func removeDiskN(disks []string, n int) {
|
|
|
|
if n > len(disks) {
|
|
|
|
n = len(disks)
|
|
|
|
}
|
|
|
|
for _, disk := range disks[:n] {
|
|
|
|
removeAll(disk)
|
2016-06-07 21:15:04 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Regular object test type: a test body run against a given ObjectLayer,
// with instanceType identifying FS vs XL (see singleNodeTestStr/xLTestStr).
type objTestType func(obj ObjectLayer, instanceType string, t TestErrHandler)
|
2016-06-07 21:15:04 -04:00
|
|
|
|
|
|
|
// Special object test type for disk not found situations; receives the
// backing disk directories so the test can delete disks mid-run.
type objTestDiskNotFoundType func(obj ObjectLayer, instanceType string, dirs []string, t *testing.T)
|
|
|
|
|
|
|
|
// ExecObjectLayerTest - executes object layer tests.
|
|
|
|
// Creates single node and XL ObjectLayer instance and runs test for both the layers.
|
2016-07-07 18:05:51 -04:00
|
|
|
func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) {
|
2016-05-06 14:57:04 -04:00
|
|
|
objLayer, fsDir, err := getSingleNodeObjectLayer()
|
|
|
|
if err != nil {
|
2016-07-02 22:05:16 -04:00
|
|
|
t.Fatalf("Initialization of object layer failed for single node setup: %s", err)
|
2016-05-06 14:57:04 -04:00
|
|
|
}
|
2016-05-26 17:43:17 -04:00
|
|
|
// Executing the object layer tests for single node setup.
|
|
|
|
objTest(objLayer, singleNodeTestStr, t)
|
2016-05-20 23:48:47 -04:00
|
|
|
|
2016-05-06 14:57:04 -04:00
|
|
|
objLayer, fsDirs, err := getXLObjectLayer()
|
|
|
|
if err != nil {
|
2016-07-02 22:05:16 -04:00
|
|
|
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
|
2016-05-06 14:57:04 -04:00
|
|
|
}
|
|
|
|
// Executing the object layer tests for XL.
|
|
|
|
objTest(objLayer, xLTestStr, t)
|
|
|
|
defer removeRoots(append(fsDirs, fsDir))
|
|
|
|
}
|
2016-06-07 21:15:04 -04:00
|
|
|
|
|
|
|
// ExecObjectLayerDiskNotFoundTest - executes object layer tests while deleting
|
|
|
|
// disks in between tests. Creates XL ObjectLayer instance and runs test for XL layer.
|
|
|
|
func ExecObjectLayerDiskNotFoundTest(t *testing.T, objTest objTestDiskNotFoundType) {
|
|
|
|
objLayer, fsDirs, err := getXLObjectLayer()
|
|
|
|
if err != nil {
|
2016-07-02 22:05:16 -04:00
|
|
|
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
|
2016-06-07 21:15:04 -04:00
|
|
|
}
|
|
|
|
// Executing the object layer tests for XL.
|
|
|
|
objTest(objLayer, xLTestStr, fsDirs, t)
|
|
|
|
defer removeRoots(fsDirs)
|
|
|
|
}
|
2016-06-29 05:28:46 -04:00
|
|
|
|
|
|
|
// Special object test type for stale files situations.
|
|
|
|
type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []string, t *testing.T)
|
|
|
|
|
|
|
|
// ExecObjectLayerStaleFilesTest - executes object layer tests those leaves stale
|
|
|
|
// files/directories under .minio/tmp. Creates XL ObjectLayer instance and runs test for XL layer.
|
|
|
|
func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) {
|
|
|
|
objLayer, fsDirs, err := getXLObjectLayer()
|
|
|
|
if err != nil {
|
2016-07-02 22:05:16 -04:00
|
|
|
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
|
2016-06-29 05:28:46 -04:00
|
|
|
}
|
|
|
|
// Executing the object layer tests for XL.
|
|
|
|
objTest(objLayer, xLTestStr, fsDirs, t)
|
|
|
|
defer removeRoots(fsDirs)
|
|
|
|
}
|
2016-07-02 22:05:16 -04:00
|
|
|
|
|
|
|
// Takes in XL/FS object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler.
|
|
|
|
// Need isolated registration of API end points while writing unit tests for end points.
|
|
|
|
// All the API end points are registered only for the default case.
|
|
|
|
func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Handler {
|
|
|
|
// initialize a new mux router.
|
|
|
|
// goriilla/mux is the library used to register all the routes and handle them.
|
|
|
|
muxRouter := router.NewRouter()
|
|
|
|
// All object storage operations are registered as HTTP handlers on `objectAPIHandlers`.
|
|
|
|
// When the handlers get a HTTP request they use the underlyting ObjectLayer to perform operations.
|
|
|
|
api := objectAPIHandlers{
|
|
|
|
ObjectAPI: objLayer,
|
|
|
|
}
|
|
|
|
// API Router.
|
|
|
|
apiRouter := muxRouter.NewRoute().PathPrefix("/").Subrouter()
|
|
|
|
// Bucket router.
|
|
|
|
bucket := apiRouter.PathPrefix("/{bucket}").Subrouter()
|
|
|
|
// Iterate the list of API functions requested for and register them in mux HTTP handler.
|
|
|
|
for _, apiFunction := range apiFunctions {
|
|
|
|
switch apiFunction {
|
|
|
|
// Register PutBucket Policy handler.
|
|
|
|
case "PutBucketPolicy":
|
|
|
|
bucket.Methods("PUT").HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "")
|
|
|
|
|
|
|
|
// Register Delete bucket HTTP policy handler.
|
|
|
|
case "DeleteBucketPolicy":
|
|
|
|
bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketPolicyHandler).Queries("policy", "")
|
|
|
|
|
|
|
|
// Register Get Bucket policy HTTP Handler.
|
|
|
|
case "GetBucketPolicy":
|
|
|
|
bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "")
|
|
|
|
|
|
|
|
// Register Post Bucket policy function.
|
|
|
|
case "PostBucketPolicy":
|
|
|
|
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(api.PostPolicyBucketHandler)
|
|
|
|
|
|
|
|
// Register all api endpoints by default.
|
|
|
|
default:
|
|
|
|
registerAPIRouter(muxRouter, api)
|
|
|
|
// No need to register any more end points, all the end points are registered.
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return muxRouter
|
|
|
|
}
|