2015-07-12 15:40:38 -04:00
|
|
|
/*
|
config/main: Re-write config files - add to new config v3
- New config format.
```
{
"version": "3",
"address": ":9000",
"backend": {
"type": "fs",
"disk": "/path"
},
"credential": {
"accessKey": "WLGDGYAQYIGI833EV05A",
"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
},
"region": "us-east-1",
"logger": {
"file": {
"enable": false,
"fileName": "",
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "debug"
},
"console": {
"enable": true,
"level": "fatal"
}
}
}
```
New command lines in lieu of supporting XL.
Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~
Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~
For 'fs' backend it starts the server.
~~~
$ minio server
~~~
For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~
Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
2016-02-12 18:27:10 -05:00
|
|
|
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
|
2015-07-12 15:40:38 -04:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2015-09-19 03:52:01 -04:00
|
|
|
package main
|
2015-07-12 15:40:38 -04:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2016-02-05 23:05:56 -05:00
|
|
|
"crypto/md5"
|
2015-07-12 15:40:38 -04:00
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
2016-02-16 21:50:36 -05:00
|
|
|
"net"
|
2015-07-12 15:40:38 -04:00
|
|
|
"os"
|
2015-09-20 15:44:44 -04:00
|
|
|
"sort"
|
2016-02-16 21:50:36 -05:00
|
|
|
"strconv"
|
2015-07-12 15:40:38 -04:00
|
|
|
"strings"
|
2015-09-20 15:44:44 -04:00
|
|
|
"time"
|
2015-07-12 15:40:38 -04:00
|
|
|
|
2016-02-05 23:05:56 -05:00
|
|
|
"encoding/base64"
|
2015-09-20 15:44:44 -04:00
|
|
|
"encoding/hex"
|
2015-07-12 15:40:38 -04:00
|
|
|
"encoding/xml"
|
|
|
|
"net/http"
|
|
|
|
"net/http/httptest"
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
"github.com/minio/minio/pkg/fs"
|
2015-08-22 21:34:00 -04:00
|
|
|
. "gopkg.in/check.v1"
|
2015-07-12 15:40:38 -04:00
|
|
|
)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
// MyAPIFSCacheSuite is the gocheck test suite exercising the
// filesystem-backed API server over HTTP.
type MyAPIFSCacheSuite struct {
	// root is the temporary config directory; removed in TearDownSuite.
	root string
	// req and body are scratch fields for request construction.
	req  *http.Request
	body io.ReadSeeker
	// credential holds the access/secret keys read from the server
	// config in SetUpSuite, used for AWS Signature V4 signing.
	credential credential
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
// Register the suite with gocheck so `go test` runs it.
var _ = Suite(&MyAPIFSCacheSuite{})

// testAPIFSCacheServer is the in-process httptest server shared by all
// tests in this suite; started in SetUpSuite, stopped in TearDownSuite.
var testAPIFSCacheServer *httptest.Server
|
2015-07-12 15:40:38 -04:00
|
|
|
|
2016-02-16 21:50:36 -05:00
|
|
|
// getFreePort asks the kernel for a currently unused TCP port by
// binding to "localhost:0" and reading back the port assigned to the
// listener. Any failure aborts the test binary via panic, since no
// test can proceed without a listening address.
func getFreePort() int {
	laddr, err := net.ResolveTCPAddr("tcp", "localhost:0")
	if err != nil {
		panic(err)
	}

	listener, err := net.ListenTCP("tcp", laddr)
	if err != nil {
		panic(err)
	}
	// Release the port immediately; the caller re-binds it shortly after.
	defer listener.Close()

	return listener.Addr().(*net.TCPAddr).Port
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) SetUpSuite(c *C) {
|
2016-02-04 15:52:25 -05:00
|
|
|
root, e := ioutil.TempDir(os.TempDir(), "api-")
|
|
|
|
c.Assert(e, IsNil)
|
2015-07-12 15:40:38 -04:00
|
|
|
s.root = root
|
|
|
|
|
2016-02-04 15:52:25 -05:00
|
|
|
fsroot, e := ioutil.TempDir(os.TempDir(), "api-")
|
|
|
|
c.Assert(e, IsNil)
|
2015-10-16 14:26:01 -04:00
|
|
|
|
config/main: Re-write config files - add to new config v3
- New config format.
```
{
"version": "3",
"address": ":9000",
"backend": {
"type": "fs",
"disk": "/path"
},
"credential": {
"accessKey": "WLGDGYAQYIGI833EV05A",
"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
},
"region": "us-east-1",
"logger": {
"file": {
"enable": false,
"fileName": "",
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "debug"
},
"console": {
"enable": true,
"level": "fatal"
}
}
}
```
New command lines in lieu of supporting XL.
Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~
Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~
For 'fs' backend it starts the server.
~~~
$ minio server
~~~
For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~
Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
2016-02-12 18:27:10 -05:00
|
|
|
// Initialize server config.
|
|
|
|
initConfig()
|
|
|
|
|
|
|
|
// Get credential.
|
|
|
|
s.credential = serverConfig.GetCredential()
|
|
|
|
|
|
|
|
// Set a default region.
|
|
|
|
serverConfig.SetRegion("us-east-1")
|
2015-07-12 15:40:38 -04:00
|
|
|
|
config/main: Re-write config files - add to new config v3
- New config format.
```
{
"version": "3",
"address": ":9000",
"backend": {
"type": "fs",
"disk": "/path"
},
"credential": {
"accessKey": "WLGDGYAQYIGI833EV05A",
"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
},
"region": "us-east-1",
"logger": {
"file": {
"enable": false,
"fileName": "",
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "debug"
},
"console": {
"enable": true,
"level": "fatal"
}
}
}
```
New command lines in lieu of supporting XL.
Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~
Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~
For 'fs' backend it starts the server.
~~~
$ minio server
~~~
For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~
Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
2016-02-12 18:27:10 -05:00
|
|
|
// Set a new address.
|
|
|
|
serverConfig.SetAddr(":" + strconv.Itoa(getFreePort()))
|
2015-07-12 15:40:38 -04:00
|
|
|
|
config/main: Re-write config files - add to new config v3
- New config format.
```
{
"version": "3",
"address": ":9000",
"backend": {
"type": "fs",
"disk": "/path"
},
"credential": {
"accessKey": "WLGDGYAQYIGI833EV05A",
"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
},
"region": "us-east-1",
"logger": {
"file": {
"enable": false,
"fileName": "",
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "debug"
},
"console": {
"enable": true,
"level": "fatal"
}
}
}
```
New command lines in lieu of supporting XL.
Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~
Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~
For 'fs' backend it starts the server.
~~~
$ minio server
~~~
For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~
Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
2016-02-12 18:27:10 -05:00
|
|
|
// Do this only once here
|
2015-12-06 17:31:20 -05:00
|
|
|
setGlobalConfigPath(root)
|
2015-10-16 14:26:01 -04:00
|
|
|
|
config/main: Re-write config files - add to new config v3
- New config format.
```
{
"version": "3",
"address": ":9000",
"backend": {
"type": "fs",
"disk": "/path"
},
"credential": {
"accessKey": "WLGDGYAQYIGI833EV05A",
"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
},
"region": "us-east-1",
"logger": {
"file": {
"enable": false,
"fileName": "",
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "debug"
},
"console": {
"enable": true,
"level": "fatal"
}
}
}
```
New command lines in lieu of supporting XL.
Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~
Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~
For 'fs' backend it starts the server.
~~~
$ minio server
~~~
For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~
Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
2016-02-12 18:27:10 -05:00
|
|
|
// Save config.
|
|
|
|
c.Assert(serverConfig.Save(), IsNil)
|
2015-07-12 15:40:38 -04:00
|
|
|
|
config/main: Re-write config files - add to new config v3
- New config format.
```
{
"version": "3",
"address": ":9000",
"backend": {
"type": "fs",
"disk": "/path"
},
"credential": {
"accessKey": "WLGDGYAQYIGI833EV05A",
"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
},
"region": "us-east-1",
"logger": {
"file": {
"enable": false,
"fileName": "",
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "debug"
},
"console": {
"enable": true,
"level": "fatal"
}
}
}
```
New command lines in lieu of supporting XL.
Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~
Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~
For 'fs' backend it starts the server.
~~~
$ minio server
~~~
For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~
Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
2016-02-12 18:27:10 -05:00
|
|
|
fs, err := fs.New(fsroot)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
httpHandler := configureServerHandler(fs)
|
2015-10-16 14:26:01 -04:00
|
|
|
testAPIFSCacheServer = httptest.NewServer(httpHandler)
|
2015-07-12 15:40:38 -04:00
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
// TearDownSuite removes the temporary config directory and shuts down
// the in-process test server once all tests in the suite have run.
func (s *MyAPIFSCacheSuite) TearDownSuite(c *C) {
	os.RemoveAll(s.root)
	testAPIFSCacheServer.Close()
}
|
|
|
|
|
|
|
|
///
/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
///
/// User-Agent:
///
/// This is ignored from signing because signing this causes problems with generating pre-signed URLs
/// (that are executed by other agents) or when customers pass requests through proxies, which may
/// modify the user-agent.
///
/// Content-Length:
///
/// This is ignored from signing because generating a pre-signed URL should not provide a content-length
/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when
/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which
/// implicitly validates the payload length (since changing the number of bytes would change the checksum)
/// and therefore this header is not valuable in the signature.
///
/// Content-Type:
///
/// Signing this header causes quite a number of problems in browser environments, where browsers
/// like to modify and normalize the content-type header in different ways. There is more information
/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic
/// and reduces the possibility of future bugs
///
/// Authorization:
///
/// Is skipped for obvious reasons
///
// ignoredHeaders lists request headers excluded from AWS Signature V4
// canonicalization, per the rationale quoted above.
var ignoredHeaders = map[string]bool{
	"Authorization":  true,
	"Content-Type":   true,
	"Content-Length": true,
	"User-Agent":     true,
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
// newRequest builds an *http.Request for the given method/URL and signs
// it with AWS Signature V4 using the suite's credential. The body, when
// non-nil, is fully read to compute its SHA-256 and MD5 digests, then
// rewound and attached to the request. The region is hard-coded to
// "us-east-1" to match the config written in SetUpSuite.
func (s *MyAPIFSCacheSuite) newRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
	// Default to POST when no method is given.
	if method == "" {
		method = "POST"
	}
	t := time.Now().UTC()

	req, err := http.NewRequest(method, urlStr, nil)
	if err != nil {
		return nil, err
	}

	req.Header.Set("x-amz-date", t.Format(iso8601Format))

	// Add Content-Length
	req.ContentLength = contentLength

	// Save for subsequent use
	var hashedPayload string
	switch {
	case body == nil:
		// SHA-256 of the empty payload.
		hashedPayload = hex.EncodeToString(sum256([]byte{}))
	default:
		// Read the whole payload to hash it; it is rewound below.
		payloadBytes, e := ioutil.ReadAll(body)
		if e != nil {
			return nil, e
		}
		hashedPayload = hex.EncodeToString(sum256(payloadBytes))
		md5base64 := base64.StdEncoding.EncodeToString(sumMD5(payloadBytes))
		req.Header.Set("Content-Md5", md5base64)
	}
	req.Header.Set("x-amz-content-sha256", hashedPayload)

	// Seek back to beginning.
	if body != nil {
		body.Seek(0, 0)
		// Add body
		req.Body = ioutil.NopCloser(body)
	}

	// Collect the headers that take part in signing (lower-cased),
	// skipping those in ignoredHeaders.
	var headers []string
	vals := make(map[string][]string)
	for k, vv := range req.Header {
		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
			continue // ignored header
		}
		headers = append(headers, strings.ToLower(k))
		vals[strings.ToLower(k)] = vv
	}
	// Host is always signed even though it is not in req.Header.
	headers = append(headers, "host")
	sort.Strings(headers)

	// Build the canonical headers section: "name:value\n" per header,
	// multiple values comma-joined.
	var canonicalHeaders bytes.Buffer
	for _, k := range headers {
		canonicalHeaders.WriteString(k)
		canonicalHeaders.WriteByte(':')
		switch {
		case k == "host":
			canonicalHeaders.WriteString(req.URL.Host)
			fallthrough
		default:
			for idx, v := range vals[k] {
				if idx > 0 {
					canonicalHeaders.WriteByte(',')
				}
				canonicalHeaders.WriteString(v)
			}
			canonicalHeaders.WriteByte('\n')
		}
	}

	signedHeaders := strings.Join(headers, ";")

	// Re-encode the query string with %20 instead of '+' as SigV4 requires.
	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
	encodedPath := getURLEncodedName(req.URL.Path)
	// convert any space strings back to "+"
	encodedPath = strings.Replace(encodedPath, "+", "%20", -1)

	//
	// canonicalRequest =
	//  <HTTPMethod>\n
	//  <CanonicalURI>\n
	//  <CanonicalQueryString>\n
	//  <CanonicalHeaders>\n
	//  <SignedHeaders>\n
	//  <HashedPayload>
	//
	canonicalRequest := strings.Join([]string{
		req.Method,
		encodedPath,
		req.URL.RawQuery,
		canonicalHeaders.String(),
		signedHeaders,
		hashedPayload,
	}, "\n")

	// Credential scope: date/region/service/terminator.
	scope := strings.Join([]string{
		t.Format(yyyymmdd),
		"us-east-1",
		"s3",
		"aws4_request",
	}, "/")

	stringToSign := "AWS4-HMAC-SHA256" + "\n" + t.Format(iso8601Format) + "\n"
	stringToSign = stringToSign + scope + "\n"
	stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))

	// Derive the signing key by chained HMACs over the scope components.
	date := sumHMAC([]byte("AWS4"+s.credential.SecretAccessKey), []byte(t.Format(yyyymmdd)))
	region := sumHMAC(date, []byte("us-east-1"))
	service := sumHMAC(region, []byte("s3"))
	signingKey := sumHMAC(service, []byte("aws4_request"))

	signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))

	// final Authorization header
	parts := []string{
		"AWS4-HMAC-SHA256" + " Credential=" + s.credential.AccessKeyID + "/" + scope,
		"SignedHeaders=" + signedHeaders,
		"Signature=" + signature,
	}
	auth := strings.Join(parts, ", ")
	req.Header.Set("Authorization", auth)

	return req, nil
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestAuth(c *C) {
|
config/main: Re-write config files - add to new config v3
- New config format.
```
{
"version": "3",
"address": ":9000",
"backend": {
"type": "fs",
"disk": "/path"
},
"credential": {
"accessKey": "WLGDGYAQYIGI833EV05A",
"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
},
"region": "us-east-1",
"logger": {
"file": {
"enable": false,
"fileName": "",
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "debug"
},
"console": {
"enable": true,
"level": "fatal"
}
}
}
```
New command lines in lieu of supporting XL.
Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~
Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~
For 'fs' backend it starts the server.
~~~
$ minio server
~~~
For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~
Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
2016-02-12 18:27:10 -05:00
|
|
|
secretID, err := genSecretAccessKey()
|
2015-10-16 14:26:01 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
config/main: Re-write config files - add to new config v3
- New config format.
```
{
"version": "3",
"address": ":9000",
"backend": {
"type": "fs",
"disk": "/path"
},
"credential": {
"accessKey": "WLGDGYAQYIGI833EV05A",
"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
},
"region": "us-east-1",
"logger": {
"file": {
"enable": false,
"fileName": "",
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "debug"
},
"console": {
"enable": true,
"level": "fatal"
}
}
}
```
New command lines in lieu of supporting XL.
Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~
Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~
For 'fs' backend it starts the server.
~~~
$ minio server
~~~
For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~
Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
2016-02-12 18:27:10 -05:00
|
|
|
accessID, err := genAccessKeyID()
|
2015-10-16 14:26:01 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
c.Assert(len(secretID), Equals, minioSecretID)
|
|
|
|
c.Assert(len(accessID), Equals, minioAccessID)
|
|
|
|
}
|
|
|
|
|
bucketpolicy: Improve bucket policy validation, avoid nested rules.
Bucket policy validation is more stricter now, to avoid nested
rules. The reason to do this is keep the rules simpler and more
meaningful avoiding conflicts.
This patch implements stricter checks.
Example policy to be generally avoided.
```
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetObject",
"s3:DeleteObject"
],
"Effect": "Allow",
"Principal": {
"AWS": [
"*"
]
},
"Resource": [
"arn:aws:s3:::jarjarbing/*"
]
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject"
],
"Effect": "Deny",
"Principal": {
"AWS": [
"*"
]
},
"Resource": [
"arn:aws:s3:::jarjarbing/restic/key/*"
]
}
]
}
```
2016-03-15 13:38:04 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestBucketPolicy(c *C) {
|
|
|
|
// Sample bucket policy.
|
|
|
|
bucketPolicyBuf := `{
|
|
|
|
"Version": "2012-10-17",
|
|
|
|
"Statement": [
|
|
|
|
{
|
|
|
|
"Action": [
|
|
|
|
"s3:GetBucketLocation",
|
|
|
|
"s3:ListBucket"
|
|
|
|
],
|
|
|
|
"Effect": "Allow",
|
|
|
|
"Principal": {
|
|
|
|
"AWS": [
|
|
|
|
"*"
|
|
|
|
]
|
|
|
|
},
|
|
|
|
"Resource": [
|
|
|
|
"arn:aws:s3:::policybucket"
|
|
|
|
]
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"Action": [
|
|
|
|
"s3:GetObject"
|
|
|
|
],
|
|
|
|
"Effect": "Allow",
|
|
|
|
"Principal": {
|
|
|
|
"AWS": [
|
|
|
|
"*"
|
|
|
|
]
|
|
|
|
},
|
|
|
|
"Resource": [
|
|
|
|
"arn:aws:s3:::policybucket/this*"
|
|
|
|
]
|
|
|
|
}
|
|
|
|
]
|
|
|
|
}`
|
|
|
|
|
|
|
|
// Put a new bucket policy.
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/policybucket?policy", int64(len(bucketPolicyBuf)), bytes.NewReader([]byte(bucketPolicyBuf)))
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusNoContent)
|
|
|
|
|
|
|
|
// Fetch the uploaded policy.
|
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/policybucket?policy", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
bucketPolicyReadBuf, err := ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
// Verify if downloaded policy matches with previousy uploaded.
|
|
|
|
c.Assert(bytes.Equal([]byte(bucketPolicyBuf), bucketPolicyReadBuf), Equals, true)
|
|
|
|
|
|
|
|
// Delete policy.
|
|
|
|
request, err = s.newRequest("DELETE", testAPIFSCacheServer.URL+"/policybucket?policy", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusNoContent)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestDeleteBucket(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/deletebucket", 0, nil)
|
2015-07-16 17:08:33 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
client := http.Client{}
|
2015-07-16 17:08:33 -04:00
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
2015-10-16 14:26:01 -04:00
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
request, err = s.newRequest("DELETE", testAPIFSCacheServer.URL+"/deletebucket", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
2015-10-16 23:02:37 -04:00
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusNoContent)
|
2015-07-16 17:08:33 -04:00
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestDeleteObject(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/deletebucketobject", 0, nil)
|
2015-07-16 17:08:33 -04:00
|
|
|
c.Assert(err, IsNil)
|
2015-10-16 14:26:01 -04:00
|
|
|
|
|
|
|
client := http.Client{}
|
2015-07-16 17:08:33 -04:00
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
2015-10-16 14:26:01 -04:00
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/deletebucketobject/myobject", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
request, err = s.newRequest("DELETE", testAPIFSCacheServer.URL+"/deletebucketobject/myobject", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
2015-10-16 23:02:37 -04:00
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusNoContent)
|
2015-07-16 17:08:33 -04:00
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestNonExistantBucket(c *C) {
|
|
|
|
request, err := s.newRequest("HEAD", testAPIFSCacheServer.URL+"/nonexistantbucket", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusNotFound)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestEmptyObject(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/emptyobject", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/emptyobject/object", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/emptyobject/object", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
var buffer bytes.Buffer
|
|
|
|
responseBody, err := ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(true, Equals, bytes.Equal(responseBody, buffer.Bytes()))
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestBucket(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/bucket", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("HEAD", testAPIFSCacheServer.URL+"/bucket", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestObject(c *C) {
|
2015-07-12 15:40:38 -04:00
|
|
|
buffer := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/testobject", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/testobject/object", int64(buffer.Len()), buffer)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/testobject/object", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
responseBody, err := ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(responseBody, DeepEquals, []byte("hello world"))
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestMultipleObjects(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/multipleobjects", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/multipleobjects/object", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "NoSuchKey", "The specified key does not exist.", http.StatusNotFound)
|
|
|
|
|
|
|
|
//// test object 1
|
|
|
|
|
|
|
|
// get object
|
|
|
|
buffer1 := bytes.NewReader([]byte("hello one"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/multipleobjects/object1", int64(buffer1.Len()), buffer1)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/multipleobjects/object1", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
// verify response data
|
|
|
|
responseBody, err := ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(true, Equals, bytes.Equal(responseBody, []byte("hello one")))
|
|
|
|
|
|
|
|
buffer2 := bytes.NewReader([]byte("hello two"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/multipleobjects/object2", int64(buffer2.Len()), buffer2)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/multipleobjects/object2", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
// verify response data
|
|
|
|
responseBody, err = ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(true, Equals, bytes.Equal(responseBody, []byte("hello two")))
|
|
|
|
|
|
|
|
buffer3 := bytes.NewReader([]byte("hello three"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/multipleobjects/object3", int64(buffer3.Len()), buffer3)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/multipleobjects/object3", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
// verify object
|
|
|
|
responseBody, err = ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(true, Equals, bytes.Equal(responseBody, []byte("hello three")))
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestNotImplemented(c *C) {
|
|
|
|
request, err := s.newRequest("GET", testAPIFSCacheServer.URL+"/bucket/object?policy", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusNotImplemented)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestHeader(c *C) {
|
|
|
|
request, err := s.newRequest("GET", testAPIFSCacheServer.URL+"/bucket/object", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
verifyError(c, response, "NoSuchKey", "The specified key does not exist.", http.StatusNotFound)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestPutBucket(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/put-bucket", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
2015-10-25 11:00:39 -04:00
|
|
|
|
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/put-bucket-slash/", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
2015-07-12 15:40:38 -04:00
|
|
|
}
|
|
|
|
|
2016-02-27 06:04:52 -05:00
|
|
|
func (s *MyAPIFSCacheSuite) TestCopyObject(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/put-object-copy", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
request.Header.Add("x-amz-acl", "private")
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
buffer1 := bytes.NewReader([]byte("hello world"))
|
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/put-object-copy/object", int64(buffer1.Len()), buffer1)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/put-object-copy/object1", 0, nil)
|
|
|
|
request.Header.Set("X-Amz-Copy-Source", "/put-object-copy/object")
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/put-object-copy/object1", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
object, err := ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
c.Assert(string(object), Equals, "hello world")
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestPutObject(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/put-object", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
buffer1 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/put-object/object", int64(buffer1.Len()), buffer1)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestListBuckets(c *C) {
|
|
|
|
request, err := s.newRequest("GET", testAPIFSCacheServer.URL+"/", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-09-19 03:52:01 -04:00
|
|
|
var results ListBucketsResponse
|
2015-07-12 15:40:38 -04:00
|
|
|
decoder := xml.NewDecoder(response.Body)
|
|
|
|
err = decoder.Decode(&results)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestNotBeAbleToCreateObjectInNonexistantBucket(c *C) {
|
2015-07-12 15:40:38 -04:00
|
|
|
buffer1 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/innonexistantbucket/object", int64(buffer1.Len()), buffer1)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "NoSuchBucket", "The specified bucket does not exist.", http.StatusNotFound)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestHeadOnObject(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/headonobject", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
buffer1 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/headonobject/object1", int64(buffer1.Len()), buffer1)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("HEAD", testAPIFSCacheServer.URL+"/headonobject/object1", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
2016-02-28 21:10:37 -05:00
|
|
|
|
|
|
|
lastModified := response.Header.Get("Last-Modified")
|
|
|
|
t, err := time.Parse(http.TimeFormat, lastModified)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
request, err = s.newRequest("HEAD", testAPIFSCacheServer.URL+"/headonobject/object1", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
request.Header.Set("If-Modified-Since", t.Add(1*time.Minute).UTC().Format(http.TimeFormat))
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusNotModified)
|
|
|
|
|
|
|
|
request, err = s.newRequest("HEAD", testAPIFSCacheServer.URL+"/headonobject/object1", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
request.Header.Set("If-Unmodified-Since", t.Add(-1*time.Minute).UTC().Format(http.TimeFormat))
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusPreconditionFailed)
|
2015-07-12 15:40:38 -04:00
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestHeadOnBucket(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/headonbucket", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("HEAD", testAPIFSCacheServer.URL+"/headonbucket", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestXMLNameNotInBucketListJson(c *C) {
|
|
|
|
request, err := s.newRequest("GET", testAPIFSCacheServer.URL+"/", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
request.Header.Add("Accept", "application/json")
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
byteResults, err := ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(strings.Contains(string(byteResults), "XML"), Equals, false)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestXMLNameNotInObjectListJson(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/xmlnamenotinobjectlistjson", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
request.Header.Add("Accept", "application/json")
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/xmlnamenotinobjectlistjson", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
request.Header.Add("Accept", "application/json")
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
byteResults, err := ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(strings.Contains(string(byteResults), "XML"), Equals, false)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestContentTypePersists(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/contenttype-persists", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
buffer1 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/contenttype-persists/one", int64(buffer1.Len()), buffer1)
|
2015-07-12 15:40:38 -04:00
|
|
|
delete(request.Header, "Content-Type")
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("HEAD", testAPIFSCacheServer.URL+"/contenttype-persists/one", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream")
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/contenttype-persists/one", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream")
|
|
|
|
|
|
|
|
buffer2 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/contenttype-persists/two", int64(buffer2.Len()), buffer2)
|
2015-07-12 15:40:38 -04:00
|
|
|
delete(request.Header, "Content-Type")
|
|
|
|
request.Header.Add("Content-Type", "application/json")
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("HEAD", testAPIFSCacheServer.URL+"/contenttype-persists/two", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream")
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/contenttype-persists/two", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.Header.Get("Content-Type"), Equals, "application/octet-stream")
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestPartialContent(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/partial-content", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
buffer1 := bytes.NewReader([]byte("Hello World"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/partial-content/bar", int64(buffer1.Len()), buffer1)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
// prepare request
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/partial-content/bar", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
request.Header.Add("Range", "bytes=6-7")
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusPartialContent)
|
|
|
|
partialObject, err := ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
c.Assert(string(partialObject), Equals, "Wo")
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestListObjectsHandlerErrors(c *C) {
|
|
|
|
request, err := s.newRequest("GET", testAPIFSCacheServer.URL+"/objecthandlererrors-.", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "InvalidBucketName", "The specified bucket is not valid.", http.StatusBadRequest)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/objecthandlererrors", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "NoSuchBucket", "The specified bucket does not exist.", http.StatusNotFound)
|
2015-07-16 20:32:33 -04:00
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/objecthandlererrors", 0, nil)
|
2015-07-16 20:32:33 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/objecthandlererrors?max-keys=-2", 0, nil)
|
2015-07-16 20:32:33 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
2015-09-18 17:48:01 -04:00
|
|
|
verifyError(c, response, "InvalidArgument", "Argument maxKeys must be an integer between 0 and 2147483647.", http.StatusBadRequest)
|
2015-07-12 15:40:38 -04:00
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestPutBucketErrors(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/putbucket-.", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "InvalidBucketName", "The specified bucket is not valid.", http.StatusBadRequest)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/putbucket", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/putbucket", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "BucketAlreadyExists", "The requested bucket name is not available.", http.StatusConflict)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/putbucket?acl", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "NotImplemented", "A header you provided implies functionality that is not implemented.", http.StatusNotImplemented)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestGetObjectErrors(c *C) {
|
|
|
|
request, err := s.newRequest("GET", testAPIFSCacheServer.URL+"/getobjecterrors", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "NoSuchBucket", "The specified bucket does not exist.", http.StatusNotFound)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/getobjecterrors", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/getobjecterrors/bar", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "NoSuchKey", "The specified key does not exist.", http.StatusNotFound)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/getobjecterrors-./bar", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "InvalidBucketName", "The specified bucket is not valid.", http.StatusBadRequest)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestGetObjectRangeErrors(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/getobjectrangeerrors", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
buffer1 := bytes.NewReader([]byte("Hello World"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/getobjectrangeerrors/bar", int64(buffer1.Len()), buffer1)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/getobjectrangeerrors/bar", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
request.Header.Add("Range", "bytes=7-6")
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
verifyError(c, response, "InvalidRange", "The requested range cannot be satisfied.", http.StatusRequestedRangeNotSatisfiable)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestObjectMultipartAbort(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/objectmultipartabort", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, 200)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("POST", testAPIFSCacheServer.URL+"/objectmultipartabort/object?uploads", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
decoder := xml.NewDecoder(response.Body)
|
2015-09-19 03:52:01 -04:00
|
|
|
newResponse := &InitiateMultipartUploadResponse{}
|
2015-07-12 15:40:38 -04:00
|
|
|
|
|
|
|
err = decoder.Decode(newResponse)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(len(newResponse.UploadID) > 0, Equals, true)
|
|
|
|
uploadID := newResponse.UploadID
|
|
|
|
|
|
|
|
buffer1 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/objectmultipartabort/object?uploadId="+uploadID+"&partNumber=1", int64(buffer1.Len()), buffer1)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response1, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response1.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
buffer2 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/objectmultipartabort/object?uploadId="+uploadID+"&partNumber=2", int64(buffer2.Len()), buffer2)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response2, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response2.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("DELETE", testAPIFSCacheServer.URL+"/objectmultipartabort/object?uploadId="+uploadID, 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response3, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response3.StatusCode, Equals, http.StatusNoContent)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestBucketMultipartList(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/bucketmultipartlist", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, 200)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("POST", testAPIFSCacheServer.URL+"/bucketmultipartlist/object?uploads", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
decoder := xml.NewDecoder(response.Body)
|
2015-09-19 03:52:01 -04:00
|
|
|
newResponse := &InitiateMultipartUploadResponse{}
|
2015-07-12 15:40:38 -04:00
|
|
|
|
|
|
|
err = decoder.Decode(newResponse)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(len(newResponse.UploadID) > 0, Equals, true)
|
|
|
|
uploadID := newResponse.UploadID
|
|
|
|
|
|
|
|
buffer1 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/bucketmultipartlist/object?uploadId="+uploadID+"&partNumber=1", int64(buffer1.Len()), buffer1)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response1, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response1.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
buffer2 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/bucketmultipartlist/object?uploadId="+uploadID+"&partNumber=2", int64(buffer2.Len()), buffer2)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response2, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response2.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/bucketmultipartlist?uploads", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response3, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response3.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2016-03-21 04:06:07 -04:00
|
|
|
// The reason to duplicate this structure here is to verify if the
|
|
|
|
// unmarshalling works from a client perspective, specifically
|
|
|
|
// while unmarshalling time.Time type for 'Initiated' field.
|
|
|
|
// time.Time does not honor xml marshaler, it means that we need
|
|
|
|
// to encode/format it before giving it to xml marshalling.
|
|
|
|
|
|
|
|
// This below check adds client side verification to see if its
|
|
|
|
// truly parseable.
|
|
|
|
|
2016-03-20 02:44:43 -04:00
|
|
|
// listMultipartUploadsResponse - format for list multipart uploads response.
|
|
|
|
type listMultipartUploadsResponse struct {
|
|
|
|
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"`
|
|
|
|
|
|
|
|
Bucket string
|
|
|
|
KeyMarker string
|
|
|
|
UploadIDMarker string `xml:"UploadIdMarker"`
|
|
|
|
NextKeyMarker string
|
|
|
|
NextUploadIDMarker string `xml:"NextUploadIdMarker"`
|
|
|
|
EncodingType string
|
|
|
|
MaxUploads int
|
|
|
|
IsTruncated bool
|
|
|
|
// All the in progress multipart uploads.
|
|
|
|
Uploads []struct {
|
|
|
|
Key string
|
|
|
|
UploadID string `xml:"UploadId"`
|
|
|
|
Initiator Initiator
|
|
|
|
Owner Owner
|
|
|
|
StorageClass string
|
|
|
|
Initiated time.Time // Keep this native to be able to parse properly.
|
|
|
|
}
|
|
|
|
Prefix string
|
|
|
|
Delimiter string
|
|
|
|
CommonPrefixes []CommonPrefix
|
|
|
|
}
|
|
|
|
|
2015-07-12 15:40:38 -04:00
|
|
|
decoder = xml.NewDecoder(response3.Body)
|
2016-03-20 02:44:43 -04:00
|
|
|
newResponse3 := &listMultipartUploadsResponse{}
|
2015-07-12 15:40:38 -04:00
|
|
|
err = decoder.Decode(newResponse3)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(newResponse3.Bucket, Equals, "bucketmultipartlist")
|
|
|
|
}
|
|
|
|
|
2016-01-26 17:57:46 -05:00
|
|
|
func (s *MyAPIFSCacheSuite) TestValidateObjectMultipartUploadID(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/objectmultipartlist-uploadid", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, 200)
|
|
|
|
|
|
|
|
request, err = s.newRequest("POST", testAPIFSCacheServer.URL+"/objectmultipartlist-uploadid/directory1/directory2/object?uploads", 0, nil)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
decoder := xml.NewDecoder(response.Body)
|
|
|
|
newResponse := &InitiateMultipartUploadResponse{}
|
|
|
|
|
|
|
|
err = decoder.Decode(newResponse)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(len(newResponse.UploadID) > 0, Equals, true)
|
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestObjectMultipartList(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/objectmultipartlist", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, 200)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("POST", testAPIFSCacheServer.URL+"/objectmultipartlist/object?uploads", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
decoder := xml.NewDecoder(response.Body)
|
2015-09-19 03:52:01 -04:00
|
|
|
newResponse := &InitiateMultipartUploadResponse{}
|
2015-07-12 15:40:38 -04:00
|
|
|
|
|
|
|
err = decoder.Decode(newResponse)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(len(newResponse.UploadID) > 0, Equals, true)
|
|
|
|
uploadID := newResponse.UploadID
|
|
|
|
|
|
|
|
buffer1 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/objectmultipartlist/object?uploadId="+uploadID+"&partNumber=1", int64(buffer1.Len()), buffer1)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response1, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response1.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
buffer2 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/objectmultipartlist/object?uploadId="+uploadID+"&partNumber=2", int64(buffer2.Len()), buffer2)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response2, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response2.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/objectmultipartlist/object?uploadId="+uploadID, 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response3, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response3.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/objectmultipartlist/object?max-parts=-2&uploadId="+uploadID, 0, nil)
|
2015-07-16 20:32:33 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response4, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
2015-09-18 17:48:01 -04:00
|
|
|
verifyError(c, response4, "InvalidArgument", "Argument maxParts must be an integer between 1 and 10000.", http.StatusBadRequest)
|
2015-07-12 15:40:38 -04:00
|
|
|
}
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
func (s *MyAPIFSCacheSuite) TestObjectMultipart(c *C) {
|
|
|
|
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/objectmultiparts", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client := http.Client{}
|
|
|
|
response, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, 200)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("POST", testAPIFSCacheServer.URL+"/objectmultiparts/object?uploads", 0, nil)
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
decoder := xml.NewDecoder(response.Body)
|
2015-09-19 03:52:01 -04:00
|
|
|
newResponse := &InitiateMultipartUploadResponse{}
|
2015-07-12 15:40:38 -04:00
|
|
|
|
|
|
|
err = decoder.Decode(newResponse)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(len(newResponse.UploadID) > 0, Equals, true)
|
|
|
|
uploadID := newResponse.UploadID
|
|
|
|
|
2016-02-05 23:05:56 -05:00
|
|
|
hasher := md5.New()
|
|
|
|
hasher.Write([]byte("hello world"))
|
|
|
|
md5Sum := hasher.Sum(nil)
|
|
|
|
|
2015-07-12 15:40:38 -04:00
|
|
|
buffer1 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/objectmultiparts/object?uploadId="+uploadID+"&partNumber=1", int64(buffer1.Len()), buffer1)
|
2016-03-05 19:43:48 -05:00
|
|
|
request.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(md5Sum))
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response1, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response1.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
|
|
|
buffer2 := bytes.NewReader([]byte("hello world"))
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/objectmultiparts/object?uploadId="+uploadID+"&partNumber=2", int64(buffer2.Len()), buffer2)
|
2016-03-05 19:43:48 -05:00
|
|
|
request.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(md5Sum))
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
client = http.Client{}
|
|
|
|
response2, err := client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response2.StatusCode, Equals, http.StatusOK)
|
|
|
|
|
2016-02-05 23:05:56 -05:00
|
|
|
// Complete multipart upload
|
2015-10-16 14:26:01 -04:00
|
|
|
completeUploads := &fs.CompleteMultipartUpload{
|
|
|
|
Part: []fs.CompletePart{
|
2015-07-12 15:40:38 -04:00
|
|
|
{
|
|
|
|
PartNumber: 1,
|
|
|
|
ETag: response1.Header.Get("ETag"),
|
|
|
|
},
|
|
|
|
{
|
|
|
|
PartNumber: 2,
|
|
|
|
ETag: response2.Header.Get("ETag"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
completeBytes, err := xml.Marshal(completeUploads)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
2015-10-16 14:26:01 -04:00
|
|
|
request, err = s.newRequest("POST", testAPIFSCacheServer.URL+"/objectmultiparts/object?uploadId="+uploadID, int64(len(completeBytes)), bytes.NewReader(completeBytes))
|
2015-07-12 15:40:38 -04:00
|
|
|
c.Assert(err, IsNil)
|
|
|
|
|
|
|
|
response, err = client.Do(request)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(response.StatusCode, Equals, http.StatusOK)
|
|
|
|
}
|
2015-10-07 02:32:20 -04:00
|
|
|
|
|
|
|
func verifyError(c *C, response *http.Response, code, description string, statusCode int) {
|
|
|
|
data, err := ioutil.ReadAll(response.Body)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
errorResponse := APIErrorResponse{}
|
|
|
|
err = xml.Unmarshal(data, &errorResponse)
|
|
|
|
c.Assert(err, IsNil)
|
|
|
|
c.Assert(errorResponse.Code, Equals, code)
|
|
|
|
c.Assert(errorResponse.Message, Equals, description)
|
|
|
|
c.Assert(response.StatusCode, Equals, statusCode)
|
|
|
|
}
|