Add mint tests into MinIO repo (#7886)
new file: mint/run/core/aws-sdk-go/quick-tests.go (328 lines)
@@ -0,0 +1,328 @@
/*
 *
 * Mint, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import (
	"bytes"
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"math/rand"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	log "github.com/sirupsen/logrus"
)

const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)
const (
	PASS = "PASS" // Indicate that a test passed
	FAIL = "FAIL" // Indicate that a test failed
	NA   = "NA"   // Indicate that a test is not applicable
)

type ErrorResponse struct {
	XMLName    xml.Name `xml:"Error" json:"-"`
	Code       string
	Message    string
	BucketName string
	Key        string
	RequestID  string `xml:"RequestId"`
	HostID     string `xml:"HostId"`

	// Region where the bucket is located. This header is returned
	// only in HEAD bucket and ListObjects response.
	Region string

	// Headers of the returned S3 XML error
	Headers http.Header `xml:"-" json:"-"`
}

type mintJSONFormatter struct {
}

func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
	data := make(log.Fields, len(entry.Data))
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}

	serialized, err := json.Marshal(data)
	if err != nil {
		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
	}
	return append(serialized, '\n'), nil
}

// log successful test runs
func successLogger(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
	// calculate the test case duration
	duration := time.Since(startTime)
	// log with the fields as per mint
	fields := log.Fields{"name": "aws-sdk-go", "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": PASS}
	return log.WithFields(fields)
}

// log failed test runs
func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
	// calculate the test case duration
	duration := time.Since(startTime)
	var fields log.Fields
	// log with the fields as per mint
	if err != nil {
		fields = log.Fields{"name": "aws-sdk-go", "function": function, "args": args,
			"duration": duration.Nanoseconds() / 1000000, "status": FAIL, "alert": alert, "message": message, "error": err}
	} else {
		fields = log.Fields{"name": "aws-sdk-go", "function": function, "args": args,
			"duration": duration.Nanoseconds() / 1000000, "status": FAIL, "alert": alert, "message": message}
	}
	return log.WithFields(fields)
}

func randString(n int, src rand.Source, prefix string) string {
	b := make([]byte, n)
	// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return prefix + string(b[0:30-len(prefix)])
}

func cleanup(s3Client *s3.S3, bucket string, object string, function string,
	args map[string]interface{}, startTime time.Time, deleteBucket bool) {

	// Deleting the object, just in case it was created. Will not check for errors.
	s3Client.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	})

	if deleteBucket {
		_, err := s3Client.DeleteBucket(&s3.DeleteBucketInput{
			Bucket: aws.String(bucket),
		})
		if err != nil {
			failureLog(function, args, startTime, "", "AWS SDK Go DeleteBucket Failed", err).Fatal()
			return
		}
	}
}

func testPresignedPutInvalidHash(s3Client *s3.S3) {
	startTime := time.Now()
	function := "PresignedPut"
	bucket := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	object := "presignedTest"
	expiry := 1 * time.Minute
	args := map[string]interface{}{
		"bucketName": bucket,
		"objectName": object,
		"expiry":     expiry,
	}

	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", err).Fatal()
		return
	}
	defer cleanup(s3Client, bucket, object, function, args, startTime, true)

	req, _ := s3Client.PutObjectRequest(&s3.PutObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(object),
		ContentType: aws.String("application/octet-stream"),
	})

	req.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "invalid-sha256")
	url, err := req.Presign(expiry)
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go presigned Put request creation failed", err).Fatal()
		return
	}

	rreq, err := http.NewRequest("PUT", url, bytes.NewReader([]byte("")))
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go creating new HTTP request failed", err).Fatal()
		return
	}
	rreq.Header.Add("X-Amz-Content-Sha256", "invalid-sha256")
	rreq.Header.Add("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(rreq)
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go presigned put request failed", err).Fatal()
		return
	}
	defer resp.Body.Close()

	dec := xml.NewDecoder(resp.Body)
	errResp := ErrorResponse{}
	err = dec.Decode(&errResp)
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go unmarshalling xml failed", err).Fatal()
		return
	}

	if errResp.Code != "XAmzContentSHA256Mismatch" {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go presigned PUT expected to fail with XAmzContentSHA256Mismatch but got %v", errResp.Code), errors.New("AWS S3 error code mismatch")).Fatal()
		return
	}

	successLogger(function, args, startTime).Info()
}

func testListObjects(s3Client *s3.S3) {
	startTime := time.Now()
	function := "testListObjects"
	bucket := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	object1 := "testObject1"
	object2 := "testObject2"
	expiry := 1 * time.Minute
	args := map[string]interface{}{
		"bucketName":  bucket,
		"objectName1": object1,
		"objectName2": object2,
		"expiry":      expiry,
	}

	getKeys := func(objects []*s3.Object) []string {
		var rv []string
		for _, obj := range objects {
			rv = append(rv, *obj.Key)
		}
		return rv
	}
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", err).Fatal()
		return
	}
	defer cleanup(s3Client, bucket, object1, function, args, startTime, true)
	defer cleanup(s3Client, bucket, object2, function, args, startTime, false)

	listInput := &s3.ListObjectsV2Input{
		Bucket:  aws.String(bucket),
		MaxKeys: aws.Int64(1000),
		Prefix:  aws.String(""),
	}
	result, err := s3Client.ListObjectsV2(listInput)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go listobjects expected to succeed but got %v", err), err).Fatal()
		return
	}
	if *result.KeyCount != 0 {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go listobjects with prefix '' expected 0 keys but got %v, %v", *result.KeyCount, getKeys(result.Contents)), errors.New("AWS S3 key count mismatch")).Fatal()
		return
	}
	putInput1 := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("filetoupload")),
		Bucket: aws.String(bucket),
		Key:    aws.String(object1),
	}
	_, err = s3Client.PutObject(putInput1)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to succeed but got %v", err), err).Fatal()
		return
	}
	putInput2 := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("filetoupload")),
		Bucket: aws.String(bucket),
		Key:    aws.String(object2),
	}
	_, err = s3Client.PutObject(putInput2)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to succeed but got %v", err), err).Fatal()
		return
	}
	result, err = s3Client.ListObjectsV2(listInput)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go listobjects expected to succeed but got %v", err), err).Fatal()
		return
	}
	if *result.KeyCount != 2 {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go listobjects with prefix '' expected 2 keys but got %v, %v", *result.KeyCount, getKeys(result.Contents)), errors.New("AWS S3 key count mismatch")).Fatal()
		return
	}

	successLogger(function, args, startTime).Info()
}

func main() {
	endpoint := os.Getenv("SERVER_ENDPOINT")
	accessKey := os.Getenv("ACCESS_KEY")
	secretKey := os.Getenv("SECRET_KEY")
	secure := os.Getenv("ENABLE_HTTPS")
	sdkEndpoint := "http://" + endpoint
	if secure == "1" {
		sdkEndpoint = "https://" + endpoint
	}

	creds := credentials.NewStaticCredentials(accessKey, secretKey, "")
	newSession := session.New()
	s3Config := &aws.Config{
		Credentials:      creds,
		Endpoint:         aws.String(sdkEndpoint),
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	}

	// Create an S3 service object in the default region.
	s3Client := s3.New(newSession, s3Config)

	// Output to stdout instead of the default stderr
	log.SetOutput(os.Stdout)
	// create custom formatter
	mintFormatter := mintJSONFormatter{}
	// set custom formatter
	log.SetFormatter(&mintFormatter)
	// log Info or above -- success cases are Info level, failures are Fatal level
	log.SetLevel(log.InfoLevel)
	// execute tests
	testPresignedPutInvalidHash(s3Client)
	testListObjects(s3Client)
}
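For reference, a minimal sketch of building and running this test binary outside of Mint, assuming the Go toolchain and the aws-sdk-go dependencies are available; the output path matches what `run.sh` below expects, and the credential values are the play.minio.io examples from the READMEs in this change:

```bash
# Hypothetical manual build; mint normally builds this during image creation.
cd mint/run/core/aws-sdk-go
go build -o /mint/run/core/aws-sdk-go/aws-sdk-go quick-tests.go

# The binary reads its configuration from the environment (see main() above).
export SERVER_ENDPOINT="play.minio.io:9000"   # host:port, no scheme
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
export ENABLE_HTTPS=1
/mint/run/core/aws-sdk-go/aws-sdk-go
```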
new executable file: mint/run/core/aws-sdk-go/run.sh (28 lines)
@@ -0,0 +1,28 @@
#!/bin/bash
#
# Mint (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# handle command line arguments
if [ $# -ne 2 ]; then
    echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
    exit -1
fi

output_log_file="$1"
error_log_file="$2"

# run tests
/mint/run/core/aws-sdk-go/aws-sdk-go 1>>"$output_log_file" 2>"$error_log_file"
new executable file: mint/run/core/aws-sdk-java/run.sh (30 lines)
@@ -0,0 +1,30 @@
#!/bin/bash
#
# Mint (C) 2018 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# handle command line arguments
if [ $# -ne 2 ]; then
    echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
    exit -1
fi

output_log_file="$1"
error_log_file="$2"

# run tests
cd /mint/run/core/aws-sdk-java/ || exit -1

java -jar FunctionalTests.jar 1>>"$output_log_file" 2>"$error_log_file"
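Unlike the Go runner, this script expects a prebuilt `FunctionalTests.jar` to already be present in `/mint/run/core/aws-sdk-java/`; the Java sources are not part of this diff. A hedged sketch of invoking it by hand, assuming the jar is in place and that it reads the same environment variables as the other SDK runners (an assumption, since its configuration is not shown here):

```bash
cd /mint/run/core/aws-sdk-java/          # assumption: jar built/copied here by the mint image build
export SERVER_ENDPOINT="play.minio.io:9000"
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
export ENABLE_HTTPS=1
java -jar FunctionalTests.jar 1>>/tmp/output.log 2>/tmp/error.log
```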
new file: mint/run/core/aws-sdk-php/README.md (19 lines)
@@ -0,0 +1,19 @@
## `aws-sdk-php` tests
This directory serves as the location for Mint tests using `aws-sdk-php`. The top-level `mint.sh` calls `run.sh` to execute the tests.

## Adding new tests
New tests are added to `quick-tests.php` as new functions.

## Running tests manually
- Set the environment variables `MINT_DATA_DIR`, `MINT_MODE`, `SERVER_ENDPOINT`, `ACCESS_KEY`, `SECRET_KEY`, `SERVER_REGION` and `ENABLE_HTTPS`
- Call `run.sh` with an output log file and an error log file. For example:
```bash
export MINT_DATA_DIR=~/my-mint-dir
export MINT_MODE=core
export SERVER_ENDPOINT="play.minio.io:9000"
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
export ENABLE_HTTPS=1
export SERVER_REGION=us-east-1
./run.sh /tmp/output.log /tmp/error.log
```
new file: mint/run/core/aws-sdk-php/composer.json (6 lines)
@@ -0,0 +1,6 @@
{
    "require": {
        "aws/aws-sdk-php": "^3.30",
        "GuzzleHttp/Psr7": "^1.4"
    }
}
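The composer.json above pins the two PHP dependencies the tests use. A minimal sketch of preparing and running the PHP tests by hand, assuming PHP and Composer are installed and the environment variables from the README above are exported; `run.sh` below only performs the final `php` invocation:

```bash
cd mint/run/core/aws-sdk-php
composer install                 # pulls aws/aws-sdk-php and the Psr7 dependency
php ./quick-tests.php 1>>/tmp/output.log 2>/tmp/error.log
```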
new file: mint/run/core/aws-sdk-php/quick-tests.php (1101 lines; diff suppressed because it is too large)
new executable file: mint/run/core/aws-sdk-php/run.sh (28 lines)
@@ -0,0 +1,28 @@
#!/bin/bash
#
# Mint, (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# handle command line arguments
if [ $# -ne 2 ]; then
    echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
    exit -1
fi

output_log_file="$1"
error_log_file="$2"

# run tests
php ./quick-tests.php 1>>"$output_log_file" 2>"$error_log_file"
new file: mint/run/core/aws-sdk-ruby/README.md (19 lines)
@@ -0,0 +1,19 @@
## `aws-sdk-ruby` tests
This directory serves as the location for Mint tests using `aws-sdk-ruby`. The top-level `mint.sh` calls `run.sh` to execute the tests.

## Adding new tests
New tests are added to `aws-stub-tests.rb` as new functions.

## Running tests manually
- Set the environment variables `MINT_DATA_DIR`, `MINT_MODE`, `SERVER_ENDPOINT`, `ACCESS_KEY`, `SECRET_KEY`, `SERVER_REGION` and `ENABLE_HTTPS`
- Call `run.sh` with an output log file and an error log file. For example:
```bash
export MINT_DATA_DIR=~/my-mint-dir
export MINT_MODE=core
export SERVER_ENDPOINT="play.minio.io:9000"
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
export ENABLE_HTTPS=1
export SERVER_REGION=us-east-1
./run.sh /tmp/output.log /tmp/error.log
```
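The Ruby test file below requires the `aws-sdk` and `multipart_body` gems (see its `require` lines). A minimal sketch of running it outside of Mint, assuming Ruby and those gems are installed and the environment variables from the README above are exported; gem installation by hand is an assumption, since the mint image normally provides them:

```bash
cd mint/run/core/aws-sdk-ruby
gem install aws-sdk multipart_body     # assumption: gems not already provided by the image
ruby aws-stub-tests.rb 1>>/tmp/output.log 2>/tmp/error.log
```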
new executable file: mint/run/core/aws-sdk-ruby/aws-stub-tests.rb (855 lines)
@@ -0,0 +1,855 @@
|
||||
#!/usr/bin/env ruby
|
||||
#
|
||||
# Mint (C) 2017 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
require 'aws-sdk'
|
||||
require 'securerandom'
|
||||
require 'net/http'
|
||||
require 'multipart_body'
|
||||
|
||||
class AwsSdkRubyTest
|
||||
# Set variables necessary to create an s3 client instance.
|
||||
# Get them from the environment variables
|
||||
|
||||
# Region information, eg. "us-east-1"
|
||||
region = ENV['SERVER_REGION'] ||= 'SERVER_REGION is not set'
|
||||
# Minio server, eg. "play.minio.io:9000"
|
||||
endpoint = ENV['SERVER_ENDPOINT'] ||= 'SERVER_ENDPOINT is not set'
|
||||
access_key_id = ENV['ACCESS_KEY'] ||= 'ACCESS_KEY is not set'
|
||||
secret_access_key = ENV['SECRET_KEY'] ||= 'SECRET_KEY is not set'
|
||||
enable_https = ENV['ENABLE_HTTPS']
|
||||
endpoint = enable_https == '1' ? 'https://' + endpoint : 'http://' + endpoint
|
||||
# Create s3 client instances, "s3Resource" and "s3Client"
|
||||
@@s3 = Aws::S3::Resource.new(region: region,
|
||||
endpoint: endpoint,
|
||||
access_key_id: access_key_id,
|
||||
secret_access_key: secret_access_key,
|
||||
force_path_style: true)
|
||||
|
||||
def initialize_log_output(meth, alert = nil)
|
||||
# Initialize and return log content in log_output hash table
|
||||
|
||||
# Collect args in args_arr
|
||||
args_arr = method(meth).parameters.flatten.map(&:to_s)
|
||||
.reject { |x| x == 'req' || x == 'opt' }
|
||||
# Create and return log output content
|
||||
{ name: 'aws-sdk-ruby',
|
||||
function: "#{meth}(#{args_arr.join(',')})", # method name and arguments
|
||||
args: args_arr, # array of arg names. This will be replaced with
                # arg/value pairs inside the caller method
|
||||
duration: 0, # test runtime duration in seconds
|
||||
alert: alert,
|
||||
message: nil,
|
||||
error: nil }
|
||||
end
|
||||
|
||||
def get_random_bucket_name()
|
||||
bucket_name = "aws-sdk-ruby-bucket-"+SecureRandom.hex(6)
|
||||
return bucket_name
|
||||
end
|
||||
|
||||
def calculate_duration(t2, t1)
|
||||
# Durations are in milliseconds, with precision of 2 decimal places
|
||||
((t2 - t1) * 1000).round(2)
|
||||
end
|
||||
|
||||
def print_log(log_output, start_time)
|
||||
# Calculate duration in milliseconds
|
||||
log_output[:duration] = calculate_duration(Time.now, start_time)
|
||||
# Get rid of the log_output fields if nil
|
||||
puts log_output.delete_if{|k, value| value == nil}.to_json
|
||||
# Exit at the first failure
|
||||
exit 1 if log_output[:status] == 'FAIL'
|
||||
end
|
||||
|
||||
def cleanUp(buckets, log_output)
|
||||
# Removes objects and bucket if bucket exists
|
||||
bucket_name = ''
|
||||
buckets.each do |b|
|
||||
bucket_name = b
|
||||
if bucketExistsWrapper(b, log_output)
|
||||
removeObjectsWrapper(b, log_output)
|
||||
removeBucketWrapper(b, log_output)
|
||||
end
|
||||
end
|
||||
rescue => e
|
||||
raise "Failed to clean-up bucket '#{bucket_name}', #{e}"
|
||||
end
|
||||
|
||||
#
|
||||
# API commands/methods
|
||||
#
|
||||
def makeBucket(bucket_name)
|
||||
# Creates a bucket, "bucket_name"
|
||||
# on S3 client , "s3".
|
||||
# Returns bucket_name if already exists
|
||||
@@s3.bucket(bucket_name).exists? ? @@s3.bucket(bucket_name) : @@s3.create_bucket(bucket: bucket_name)
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def makeBucketWrapper(bucket_name, log_output)
|
||||
makeBucket(bucket_name)
|
||||
rescue => e
|
||||
log_output[:function] = "makeBucket(bucket_name)"
|
||||
log_output[:args] = {'bucket_name': bucket_name}
|
||||
raise e
|
||||
end
|
||||
|
||||
def removeBucket(bucket_name)
|
||||
# Deletes/removes bucket, "bucket_name" on S3 client, "s3"
|
||||
@@s3.bucket(bucket_name).delete
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def removeBucketWrapper(bucket_name, log_output)
|
||||
removeBucket(bucket_name)
|
||||
rescue => e
|
||||
log_output[:function] = "removeBucket(bucket_name)"
|
||||
log_output[:args] = {'bucket_name': bucket_name}
|
||||
raise e
|
||||
end
|
||||
|
||||
def putObject(bucket_name, file)
|
||||
# Creates "file" (full path) in bucket, "bucket_name",
|
||||
# on S3 client, "s3"
|
||||
file_name = File.basename(file)
|
||||
@@s3.bucket(bucket_name).object(file_name).upload_file(file)
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def putObjectWrapper(bucket_name, file, log_output)
|
||||
putObject(bucket_name, file)
|
||||
rescue => e
|
||||
log_output[:function] = "putObject(bucket_name, file)"
|
||||
log_output[:args] = {'bucket_name': bucket_name,
|
||||
'file': file}
|
||||
raise e
|
||||
end
|
||||
|
||||
def getObject(bucket_name, file, destination)
|
||||
# Gets/Downloads file, "file",
|
||||
# from bucket, "bucket_name", of S3 client, "s3"
|
||||
file_name = File.basename(file)
|
||||
dest = File.join(destination, file_name)
|
||||
@@s3.bucket(bucket_name).object(file_name).get(response_target: dest)
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def getObjectWrapper(bucket_name, file, destination, log_output)
|
||||
getObject(bucket_name, file, destination)
|
||||
rescue => e
|
||||
log_output[:function] = "getObject(bucket_name, file)"
|
||||
log_output[:args] = {'bucket_name': bucket_name,
|
||||
'file': file,
|
||||
'destination': destination}
|
||||
raise e
|
||||
end
|
||||
|
||||
def copyObject(source_bucket_name, target_bucket_name, source_file_name, target_file_name = '')
|
||||
# Copies file, "file_name", from source bucket,
|
||||
# "source_bucket_name", to target bucket,
|
||||
# "target_bucket_name", on S3 client, "s3"
|
||||
target_file_name = source_file_name if target_file_name.empty?
|
||||
source = @@s3.bucket(source_bucket_name)
|
||||
target = @@s3.bucket(target_bucket_name)
|
||||
source_obj = source.object(source_file_name)
|
||||
target_obj = target.object(target_file_name)
|
||||
source_obj.copy_to(target_obj)
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def copyObjectWrapper(source_bucket_name, target_bucket_name, source_file_name, target_file_name = '', log_output)
|
||||
copyObject(source_bucket_name, target_bucket_name, source_file_name, target_file_name)
|
||||
rescue => e
|
||||
log_output[:function] = 'copyObject(source_bucket_name, target_bucket_name, source_file_name, target_file_name = '')'
|
||||
log_output[:args] = {'source_bucket_name': source_bucket_name,
|
||||
'target_bucket_name': target_bucket_name,
|
||||
'source_file_name': source_file_name,
|
||||
'target_file_name': target_file_name}
|
||||
raise e
|
||||
end
|
||||
|
||||
def removeObject(bucket_name, file)
|
||||
# Deletes file in bucket,
|
||||
# "bucket_name", on S3 client, "s3".
|
||||
# If file, "file_name" does not exist,
|
||||
# it quietly returns without any error message
|
||||
@@s3.bucket(bucket_name).object(file).delete
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def removeObjectWrapper(bucket_name, file_name, log_output)
|
||||
removeObject(bucket_name, file_name)
|
||||
rescue => e
|
||||
log_output[:function] = "removeObject(bucket_name, file_name)"
|
||||
log_output[:args] = {'bucket_name': bucket_name,
|
||||
'file_name': file_name}
|
||||
raise e
|
||||
end
|
||||
|
||||
def removeObjects(bucket_name)
|
||||
# Deletes all files in bucket, "bucket_name"
|
||||
# on S3 client, "s3"
|
||||
file_name = ''
|
||||
@@s3.bucket(bucket_name).objects.each do |obj|
|
||||
file_name = obj.key
|
||||
obj.delete
|
||||
end
|
||||
rescue => e
|
||||
raise "File name: '#{file_name}', #{e}"
|
||||
end
|
||||
|
||||
def removeObjectsWrapper(bucket_name, log_output)
|
||||
removeObjects(bucket_name)
|
||||
rescue => e
|
||||
log_output[:function] = 'removeObjects(bucket_name)'
|
||||
log_output[:args] = {'bucket_name': bucket_name}
|
||||
raise e
|
||||
end
|
||||
|
||||
def listBuckets
|
||||
# Returns an array of bucket names on S3 client, "s3"
|
||||
bucket_name_list = []
|
||||
@@s3.buckets.each do |b|
|
||||
bucket_name_list.push(b.name)
|
||||
end
|
||||
return bucket_name_list
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def listBucketsWrapper(log_output)
|
||||
listBuckets
|
||||
rescue => e
|
||||
log_output[:function] = 'listBuckets'
|
||||
log_output[:args] = {}
|
||||
raise e
|
||||
end
|
||||
|
||||
def listObjects(bucket_name)
|
||||
# Returns an array of object/file names
|
||||
# in bucket, "bucket_name", on S3 client, "s3"
|
||||
object_list = []
|
||||
@@s3.bucket(bucket_name).objects.each do |obj|
|
||||
object_list.push(obj.key)
|
||||
end
|
||||
return object_list
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def listObjectsWrapper(bucket_name, log_output)
|
||||
listObjects(bucket_name)
|
||||
rescue => e
|
||||
log_output[:function] = 'listObjects(bucket_name)'
|
||||
log_output[:args] = {'bucket_name': bucket_name}
|
||||
raise e
|
||||
end
|
||||
|
||||
def statObject(bucket_name, file_name)
|
||||
return @@s3.bucket(bucket_name).object(file_name).exists?
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def statObjectWrapper(bucket_name, file_name, log_output)
|
||||
statObject(bucket_name, file_name)
|
||||
rescue => e
|
||||
log_output[:function] = 'statObject(bucket_name, file_name)'
|
||||
log_output[:args] = {'bucket_name': bucket_name,
|
||||
'file_name': file_name}
|
||||
raise e
|
||||
end
|
||||
|
||||
def bucketExists?(bucket_name)
|
||||
# Returns true if bucket, "bucket_name", exists,
|
||||
# false otherwise
|
||||
return @@s3.bucket(bucket_name).exists?
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def bucketExistsWrapper(bucket_name, log_output)
|
||||
bucketExists?(bucket_name)
|
||||
rescue => e
|
||||
log_output[:function] = 'bucketExists?(bucket_name)'
|
||||
log_output[:args] = {'bucket_name': bucket_name}
|
||||
raise e
|
||||
end
|
||||
|
||||
def presignedGet(bucket_name, file_name)
|
||||
# Returns download/get url
|
||||
obj = @@s3.bucket(bucket_name).object(file_name)
|
||||
return obj.presigned_url(:get, expires_in: 600)
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def presignedGetWrapper(bucket_name, file_name, log_output)
|
||||
presignedGet(bucket_name, file_name)
|
||||
rescue => e
|
||||
log_output[:function] = 'presignedGet(bucket_name, file_name)'
|
||||
log_output[:args] = {'bucket_name': bucket_name,
|
||||
'file_name': file_name}
|
||||
raise e
|
||||
end
|
||||
|
||||
def presignedPut(bucket_name, file_name)
|
||||
# Returns put url
|
||||
obj = @@s3.bucket(bucket_name).object(file_name)
|
||||
return obj.presigned_url(:put, expires_in: 600)
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def presignedPutWrapper(bucket_name, file_name, log_output)
|
||||
presignedPut(bucket_name, file_name)
|
||||
rescue => e
|
||||
log_output[:function] = 'presignedPut(bucket_name, file_name)'
|
||||
log_output[:args] = {'bucket_name': bucket_name,
|
||||
'file_name': file_name}
|
||||
raise e
|
||||
end
|
||||
|
||||
def presignedPost(bucket_name, file_name, expires_in_sec, max_byte_size)
|
||||
# Returns upload/post url
|
||||
obj = @@s3.bucket(bucket_name).object(file_name)
|
||||
return obj.presigned_post(expires: Time.now + expires_in_sec,
|
||||
content_length_range: 1..max_byte_size)
|
||||
rescue => e
|
||||
raise e
|
||||
end
|
||||
|
||||
def presignedPostWrapper(bucket_name, file_name, expires_in_sec, max_byte_size, log_output)
|
||||
presignedPost(bucket_name, file_name, expires_in_sec, max_byte_size)
|
||||
rescue => e
|
||||
log_output[:function] = 'presignedPost(bucket_name, file_name, expires_in_sec, max_byte_size)'
|
||||
log_output[:args] = {'bucket_name': bucket_name,
|
||||
'file_name': file_name,
|
||||
'expires_in_sec': expires_in_sec,
|
||||
'max_byte_size': max_byte_size}
|
||||
raise e
|
||||
end
|
||||
|
||||
# To be addressed. S3 API 'get_bucket_policy' does not work!
|
||||
# def getBucketPolicy(bucket_name)
|
||||
# # Returns bucket policy
|
||||
# return @@s3.bucket(bucket_name).get_bucket_policy
|
||||
# rescue => e
|
||||
# raise e
|
||||
# end
|
||||
|
||||
#
|
||||
# Test case methods
|
||||
#
|
||||
def listBucketsTest()
|
||||
# Tests listBuckets api command by creating
|
||||
# new buckets from bucket_name_list
|
||||
|
||||
# get random bucket names and create list
|
||||
bucket_name1 = get_random_bucket_name()
|
||||
bucket_name2 = get_random_bucket_name()
|
||||
bucket_name_list = [bucket_name1, bucket_name2]
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('listBuckets')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
prev_total_buckets = listBucketsWrapper(log_output).length
|
||||
new_buckets = bucket_name_list.length
|
||||
bucket_name_list.each do |b|
|
||||
makeBucketWrapper(b, log_output)
|
||||
end
|
||||
new_total_buckets = prev_total_buckets + new_buckets
|
||||
if new_total_buckets >= prev_total_buckets + new_buckets
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = 'Could not find expected number of buckets'
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp(bucket_name_list, log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def makeBucketTest()
|
||||
# Tests makeBucket api command.
|
||||
|
||||
# get random bucket name
|
||||
bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('makeBucket')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
makeBucketWrapper(bucket_name, log_output)
|
||||
|
||||
if bucketExistsWrapper(bucket_name, log_output)
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = 'Bucket expected to be created does not exist'
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def bucketExistsNegativeTest()
|
||||
# Tests bucketExists api command.
|
||||
|
||||
# get random bucket name
|
||||
bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('bucketExists?')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
if !bucketExistsWrapper(bucket_name, log_output)
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = "Failed to return 'false' for a non-existing bucket"
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def removeBucketTest()
|
||||
# Tests removeBucket api command.
|
||||
|
||||
# get a random bucket name
|
||||
bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('removeBucket')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
makeBucketWrapper(bucket_name, log_output)
|
||||
removeBucketWrapper(bucket_name, log_output)
|
||||
if !bucketExistsWrapper(bucket_name, log_output)
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = 'Bucket expected to be removed still exists'
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def putObjectTest(file)
|
||||
# Tests putObject api command by uploading a file
|
||||
|
||||
# get random bucket name
|
||||
bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('putObject')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
makeBucketWrapper(bucket_name, log_output)
|
||||
putObjectWrapper(bucket_name, file, log_output)
|
||||
if statObjectWrapper(bucket_name, File.basename(file), log_output)
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = "Status for the created object returned 'false'"
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def removeObjectTest(file)
|
||||
# Tests removeObject api command by uploading and removing a file
|
||||
|
||||
# get random bucket name
|
||||
bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('removeObject')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
makeBucketWrapper(bucket_name, log_output)
|
||||
putObjectWrapper(bucket_name, file, log_output)
|
||||
removeObjectWrapper(bucket_name, File.basename(file), log_output)
|
||||
if !statObjectWrapper(bucket_name, File.basename(file), log_output)
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = "Status for the removed object returned 'true'"
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def getObjectTest(file, destination)
|
||||
# Tests getObject api command
|
||||
|
||||
# get random bucket name
|
||||
bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('getObject')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
makeBucketWrapper(bucket_name, log_output)
|
||||
putObjectWrapper(bucket_name, file, log_output)
|
||||
getObjectWrapper(bucket_name, file, destination, log_output)
|
||||
if system("ls -l #{destination} > /dev/null")
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = "Downloaded object does not exist at #{destination}"
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def listObjectsTest(file_list)
|
||||
# Tests listObjects api command
|
||||
|
||||
# get random bucket name
|
||||
bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('listObjects')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
makeBucketWrapper(bucket_name, log_output)
|
||||
# Put all objects into the bucket
|
||||
file_list.each do |f|
|
||||
putObjectWrapper(bucket_name, f, log_output)
|
||||
end
|
||||
# Total number of files uploaded
|
||||
expected_no = file_list.length
|
||||
# Actual number is what api returns
|
||||
actual_no = listObjectsWrapper(bucket_name, log_output).length
|
||||
# Compare expected and actual values
|
||||
if expected_no == actual_no
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = 'Expected and actual number of listed files/objects do not match!'
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def copyObjectTest(data_dir, source_file_name, target_file_name = '')
|
||||
# Tests copyObject api command
|
||||
|
||||
# get random bucket names
|
||||
source_bucket_name = get_random_bucket_name()
|
||||
target_bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('copyObject')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
target_file_name = source_file_name if target_file_name.empty?
|
||||
makeBucketWrapper(source_bucket_name, log_output)
|
||||
makeBucketWrapper(target_bucket_name, log_output)
|
||||
putObjectWrapper(source_bucket_name,
|
||||
File.join(data_dir, source_file_name), log_output)
|
||||
copyObjectWrapper(source_bucket_name, target_bucket_name,
|
||||
source_file_name, target_file_name, log_output)
|
||||
# Check if copy worked fine
|
||||
if statObjectWrapper(target_bucket_name, target_file_name, log_output)
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = 'Copied file could not be found in the expected location'
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([source_bucket_name, target_bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def presignedGetObjectTest(data_dir, file_name)
|
||||
# Tests presignedGetObject api command
|
||||
|
||||
# get random bucket name
|
||||
bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('presignedGet')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
makeBucketWrapper(bucket_name, log_output)
|
||||
file = File.join(data_dir, file_name)
|
||||
# Get check sum value without the file name
|
||||
cksum_orig = `cksum #{file}`.split[0..1]
|
||||
putObjectWrapper(bucket_name, file, log_output)
|
||||
get_url = presignedGetWrapper(bucket_name, file_name, log_output)
|
||||
# Download the file using the URL
|
||||
# generated by presignedGet api command
|
||||
`wget -O /tmp/#{file_name} '#{get_url}' > /dev/null 2>&1`
|
||||
# Get check sum value for the downloaded file
|
||||
# Split to get rid of the file name
|
||||
cksum_new = `cksum /tmp/#{file_name}`.split[0..1]
|
||||
|
||||
# Check if check sum values for the orig file
|
||||
# and the downloaded file match
|
||||
if cksum_orig == cksum_new
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = 'Check sum values do NOT match'
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def presignedPutObjectTest(data_dir, file_name)
|
||||
# Tests presignedPutObject api command
|
||||
|
||||
# get random bucket name
|
||||
bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('presignedPut')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
makeBucketWrapper(bucket_name, log_output)
|
||||
file = File.join(data_dir, file_name)
|
||||
|
||||
# Get check sum value and
|
||||
# split to get rid of the file name
|
||||
cksum_orig = `cksum #{file}`.split[0..1]
|
||||
|
||||
# Generate presigned Put URL and parse it
|
||||
uri = URI.parse(presignedPutWrapper(bucket_name, file_name, log_output))
|
||||
request = Net::HTTP::Put.new(uri.request_uri, 'x-amz-acl' => 'public-read')
|
||||
request.body = IO.read(File.join(data_dir, file_name))
|
||||
|
||||
http = Net::HTTP.new(uri.host, uri.port)
|
||||
http.use_ssl = true if ENV['ENABLE_HTTPS'] == '1'
|
||||
|
||||
http.request(request)
|
||||
|
||||
if statObjectWrapper(bucket_name, file_name, log_output)
|
||||
getObjectWrapper(bucket_name, file_name, '/tmp', log_output)
|
||||
cksum_new = `cksum /tmp/#{file_name}`.split[0..1]
|
||||
# Check if check sum values of the orig file
|
||||
# and the downloaded file match
|
||||
if cksum_orig == cksum_new
|
||||
log_output[:status] = 'PASS'
|
||||
else
|
||||
log_output[:error] = 'Check sum values do NOT match'
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
else
|
||||
log_output[:error] = 'Expected to be created object does NOT exist'
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
|
||||
def presignedPostObjectTest(data_dir, file_name,
|
||||
expires_in_sec, max_byte_size)
|
||||
# Tests presignedPostObject api command
|
||||
|
||||
# get random bucket name
|
||||
bucket_name = get_random_bucket_name()
|
||||
# Initialize hash table, 'log_output'
|
||||
log_output = initialize_log_output('presignedPost')
|
||||
# Prepare arg/value hash table and set it in log_output
|
||||
arg_value_hash = {}
|
||||
log_output[:args].each { |x| arg_value_hash[:"#{x}"] = eval x.to_s }
|
||||
log_output[:args] = arg_value_hash
|
||||
|
||||
begin
|
||||
start_time = Time.now
|
||||
makeBucketWrapper(bucket_name, log_output)
|
||||
file = File.join(data_dir, file_name)
|
||||
|
||||
# Get check sum value and split it
|
||||
# into parts to get rid of the file name
|
||||
cksum_orig = `cksum #{file}`.split[0..1]
|
||||
# Create the presigned POST url
|
||||
post = presignedPostWrapper(bucket_name, file_name,
|
||||
expires_in_sec, max_byte_size, log_output)
|
||||
|
||||
# Prepare multi parts array for POST command request
|
||||
file_part = Part.new name: 'file',
|
||||
body: IO.read(File.join(data_dir, file_name)),
|
||||
filename: file_name,
|
||||
content_type: 'application/octet-stream'
|
||||
parts = [file_part]
|
||||
# Add POST fields into parts array
|
||||
post.fields.each do |field, value|
|
||||
parts.push(Part.new(field, value))
|
||||
end
|
||||
boundary = "---------------------------#{rand(10_000_000_000_000_000)}"
|
||||
body_parts = MultipartBody.new parts, boundary
|
||||
|
||||
# Parse presigned Post URL
|
||||
uri = URI.parse(post.url)
|
||||
|
||||
# Create the HTTP objects
|
||||
http = Net::HTTP.new(uri.host, uri.port)
|
||||
http.use_ssl = true if ENV['ENABLE_HTTPS'] == '1'
|
||||
request = Net::HTTP::Post.new(uri.request_uri)
|
||||
request.body = body_parts.to_s
|
||||
request.content_type = "multipart/form-data; boundary=#{boundary}"
|
||||
# Send the request
|
||||
log_output[:error] = http.request(request)
|
||||
|
||||
if statObjectWrapper(bucket_name, file_name, log_output)
|
||||
getObjectWrapper(bucket_name, file_name, '/tmp', log_output)
|
||||
cksum_new = `cksum /tmp/#{file_name}`.split[0..1]
|
||||
# Check if check sum values of the orig file
|
||||
# and the downloaded file match
|
||||
if cksum_orig == cksum_new
|
||||
log_output[:status] = 'PASS'
|
||||
# FIXME: HTTP No Content error, status code=204 is returned as error
|
||||
log_output[:error] = nil
|
||||
else
|
||||
log_output[:error] = 'Check sum values do NOT match'
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
else
|
||||
log_output[:error] = 'Expected to be created object does NOT exist'
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
cleanUp([bucket_name], log_output)
|
||||
rescue => log_output[:error]
|
||||
log_output[:status] = 'FAIL'
|
||||
end
|
||||
|
||||
print_log(log_output, start_time)
|
||||
end
|
||||
end
|
||||
|
||||
# MAIN CODE
|
||||
|
||||
# Create test Class instance and call the tests
|
||||
aws = AwsSdkRubyTest.new
|
||||
file_name1 = 'datafile-1-kB'
|
||||
file_new_name = 'datafile-1-kB-copy'
|
||||
file_name_list = ['datafile-1-kB', 'datafile-1-b', 'datafile-6-MB']
|
||||
# Add data_dir in front of each file name in file_name_list
|
||||
# The location where the bucket and file
|
||||
# objects are going to be created.
|
||||
data_dir = ENV['MINT_DATA_DIR'] ||= 'MINT_DATA_DIR is not set'
|
||||
file_list = file_name_list.map { |f| File.join(data_dir, f) }
|
||||
destination = '/tmp'
|
||||
|
||||
aws.listBucketsTest()
|
||||
aws.listObjectsTest(file_list)
|
||||
aws.makeBucketTest()
|
||||
aws.bucketExistsNegativeTest()
|
||||
aws.removeBucketTest()
|
||||
aws.putObjectTest(File.join(data_dir, file_name1))
|
||||
aws.removeObjectTest(File.join(data_dir, file_name1))
|
||||
aws.getObjectTest(File.join(data_dir, file_name1), destination)
|
||||
aws.copyObjectTest(data_dir, file_name1)
|
||||
aws.copyObjectTest(data_dir, file_name1, file_new_name)
|
||||
aws.presignedGetObjectTest(data_dir, file_name1)
|
||||
aws.presignedPutObjectTest(data_dir, file_name1)
|
||||
aws.presignedPostObjectTest(data_dir, file_name1, 60, 3*1024*1024)
|
||||
new executable file: mint/run/core/aws-sdk-ruby/run.sh (29 lines)
@@ -0,0 +1,29 @@
#!/bin/bash
#
# Mint (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# handle command line arguments
if [ $# -ne 2 ]; then
    echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
    exit -1
fi

output_log_file="$1"
error_log_file="$2"

# run tests
chmod a+x aws-stub-tests.rb
ruby aws-stub-tests.rb 1>>"$output_log_file" 2>"$error_log_file"
new file: mint/run/core/awscli/README.md (19 lines)
@@ -0,0 +1,19 @@
## `awscli` tests
This directory serves as the location for Mint tests using `awscli`. The top-level `mint.sh` calls `run.sh` to execute the tests.

## Adding new tests
New tests are added to `test.sh` as new functions.

## Running tests manually
- Set the environment variables `MINT_DATA_DIR`, `MINT_MODE`, `SERVER_ENDPOINT`, `ACCESS_KEY`, `SECRET_KEY`, `SERVER_REGION` and `ENABLE_HTTPS`
- Call `run.sh` with an output log file and an error log file. For example:
```bash
export MINT_DATA_DIR=~/my-mint-dir
export MINT_MODE=core
export SERVER_ENDPOINT="play.minio.io:9000"
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
export ENABLE_HTTPS=1
export SERVER_REGION=us-east-1
./run.sh /tmp/output.log /tmp/error.log
```
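As context for the suppressed `test.sh`, a hedged sketch of the kind of awscli check these tests perform against the endpoint that `run.sh` below passes in; the bucket name is a placeholder, and the exact assertions in `test.sh` are not shown in this diff:

```bash
endpoint="https://play.minio.io:9000"        # value run.sh derives from SERVER_ENDPOINT/ENABLE_HTTPS
bucket="awscli-mint-test-bucket-$RANDOM"     # placeholder bucket name

aws --endpoint-url "$endpoint" s3api create-bucket --bucket "$bucket"
aws --endpoint-url "$endpoint" s3api list-buckets
aws --endpoint-url "$endpoint" s3api delete-bucket --bucket "$bucket"
```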
new executable file: mint/run/core/awscli/run.sh (51 lines)
@@ -0,0 +1,51 @@
#!/bin/bash
#
# Mint (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# handle command line arguments
if [ $# -ne 2 ]; then
    echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
    exit -1
fi

output_log_file="$1"
error_log_file="$2"

# configure awscli
aws configure set aws_access_key_id "$ACCESS_KEY"
aws configure set aws_secret_access_key "$SECRET_KEY"
aws configure set default.region "$SERVER_REGION"

# run tests for virtual style if provided
if [ "$ENABLE_VIRTUAL_STYLE" -eq 1 ]; then
    # Setup endpoint scheme
    endpoint="http://$DOMAIN:$SERVER_PORT"
    if [ "$ENABLE_HTTPS" -eq 1 ]; then
        endpoint="https://$DOMAIN:$SERVER_PORT"
    fi
    dnsmasq --address="/$DOMAIN/$SERVER_IP" --user=root
    echo -e "nameserver 127.0.0.1\n$(cat /etc/resolv.conf)" > /etc/resolv.conf
    aws configure set default.s3.addressing_style virtual
    ./test.sh "$endpoint" 1>>"$output_log_file" 2>"$error_log_file"
    aws configure set default.s3.addressing_style path
fi

endpoint="http://$SERVER_ENDPOINT"
if [ "$ENABLE_HTTPS" -eq 1 ]; then
    endpoint="https://$SERVER_ENDPOINT"
fi
# run path style tests
./test.sh "$endpoint" 1>>"$output_log_file" 2>"$error_log_file"
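The virtual-host-style branch in the script above only runs when extra environment variables describe a resolvable bucket domain; it uses dnsmasq to point that domain at the server before switching awscli to virtual addressing. A sketch of the variables it reads, with placeholder values, assuming dnsmasq is available inside the container:

```bash
export ENABLE_VIRTUAL_STYLE=1
export DOMAIN="mydomain.com"      # placeholder: domain MinIO serves virtual-host-style requests on
export SERVER_IP="192.168.1.10"   # placeholder: IP address the domain should resolve to
export SERVER_PORT="9000"
./run.sh /tmp/output.log /tmp/error.log
```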
new executable file: mint/run/core/awscli/test.sh (1411 lines; diff suppressed because it is too large)
new file: mint/run/core/healthcheck/healthcheck.go (195 lines)
@@ -0,0 +1,195 @@
/*
 *
 * Mint, (C) 2019 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"time"

	log "github.com/sirupsen/logrus"
)

const (
	pass           = "PASS" // Indicate that a test passed
	fail           = "FAIL" // Indicate that a test failed
	livenessPath   = "/minio/health/live"
	readinessPath  = "/minio/health/ready"
	prometheusPath = "/minio/prometheus/metrics"
	timeout        = time.Duration(30 * time.Second)
)

type mintJSONFormatter struct {
}

func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
	data := make(log.Fields, len(entry.Data))
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}

	serialized, err := json.Marshal(data)
	if err != nil {
		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
	}
	return append(serialized, '\n'), nil
}

// log successful test runs
func successLogger(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
	// calculate the test case duration
	duration := time.Since(startTime)
	// log with the fields as per mint
	fields := log.Fields{"name": "healthcheck", "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": pass}
	return log.WithFields(fields)
}

// log failed test runs
func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
	// calculate the test case duration
	duration := time.Since(startTime)
	var fields log.Fields
	// log with the fields as per mint
	if err != nil {
		fields = log.Fields{"name": "healthcheck", "function": function, "args": args,
			"duration": duration.Nanoseconds() / 1000000, "status": fail, "alert": alert, "message": message, "error": err}
	} else {
		fields = log.Fields{"name": "healthcheck", "function": function, "args": args,
			"duration": duration.Nanoseconds() / 1000000, "status": fail, "alert": alert, "message": message}
	}
	return log.WithFields(fields)
}

func testLivenessEndpoint(endpoint string) {
	startTime := time.Now()
	function := "testLivenessEndpoint"

	u, err := url.Parse(fmt.Sprintf("%s%s", endpoint, livenessPath))
	if err != nil {
		// Could not parse URL successfully
		failureLog(function, nil, startTime, "", "URL Parsing for Healthcheck Liveness handler failed", err).Fatal()
	}

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr, Timeout: timeout}
	resp, err := client.Get(u.String())
	if err != nil {
		// GET request errored
		failureLog(function, nil, startTime, "", "GET request failed", err).Fatal()
	}
	if resp.StatusCode != http.StatusOK {
		// Status not 200 OK
		failureLog(function, nil, startTime, "", "GET /minio/health/live returned non OK status", err).Fatal()
	}

	defer resp.Body.Close()
	defer successLogger(function, nil, startTime).Info()
}

func testReadinessEndpoint(endpoint string) {
	startTime := time.Now()
	function := "testReadinessEndpoint"

	u, err := url.Parse(fmt.Sprintf("%s%s", endpoint, readinessPath))
	if err != nil {
		// Could not parse URL successfully
		failureLog(function, nil, startTime, "", "URL Parsing for Healthcheck Readiness handler failed", err).Fatal()
	}

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr, Timeout: timeout}
	resp, err := client.Get(u.String())
	if err != nil {
		// GET request errored
		failureLog(function, nil, startTime, "", "GET request to Readiness endpoint failed", err).Fatal()
	}
	if resp.StatusCode != http.StatusOK {
		// Status not 200 OK
		failureLog(function, nil, startTime, "", "GET /minio/health/ready returned non OK status", err).Fatal()
	}

	defer resp.Body.Close()
	defer successLogger(function, nil, startTime).Info()
}

func testPrometheusEndpoint(endpoint string) {
	startTime := time.Now()
	function := "testPrometheusEndpoint"

	u, err := url.Parse(fmt.Sprintf("%s%s", endpoint, prometheusPath))
	if err != nil {
		// Could not parse URL successfully
		failureLog(function, nil, startTime, "", "URL Parsing for Healthcheck Prometheus handler failed", err).Fatal()
	}

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr, Timeout: timeout}
	resp, err := client.Get(u.String())
	if err != nil {
		// GET request errored
		failureLog(function, nil, startTime, "", "GET request to Prometheus endpoint failed", err).Fatal()
	}
	if resp.StatusCode != http.StatusOK {
		// Status not 200 OK
		failureLog(function, nil, startTime, "", "GET /minio/prometheus/metrics returned non OK status", err).Fatal()
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
defer successLogger(function, nil, startTime).Info()
|
||||
}
|
||||
|
||||
func main() {
|
||||
endpoint := os.Getenv("SERVER_ENDPOINT")
|
||||
secure := os.Getenv("ENABLE_HTTPS")
|
||||
endpoint = "http://" + endpoint
|
||||
if secure == "1" {
|
||||
endpoint = "https://" + endpoint
|
||||
}
|
||||
|
||||
// Output to stdout instead of the default stderr
|
||||
log.SetOutput(os.Stdout)
|
||||
// create custom formatter
|
||||
mintFormatter := mintJSONFormatter{}
|
||||
// set custom formatter
|
||||
log.SetFormatter(&mintFormatter)
|
||||
// log Info or above -- success cases are Info level, failures are Fatal level
|
||||
log.SetLevel(log.InfoLevel)
|
||||
// execute tests
|
||||
testLivenessEndpoint(endpoint)
|
||||
testReadinessEndpoint(endpoint)
|
||||
testPrometheusEndpoint(endpoint)
|
||||
}
|
||||
28
mint/run/core/healthcheck/run.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Mint (C) 2019 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# handle command line arguments
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
output_log_file="$1"
|
||||
error_log_file="$2"
|
||||
|
||||
# run tests
|
||||
/mint/run/core/healthcheck/healthcheck 1>>"$output_log_file" 2>"$error_log_file"
|
||||
19
mint/run/core/mc/README.md
Normal file
@@ -0,0 +1,19 @@
|
||||
## `mc` tests
|
||||
This directory serves as the location for Mint tests using `mc`. Top level `mint.sh` calls `run.sh` to execute tests.
|
||||
|
||||
## Adding new tests
|
||||
New tests are added to `test.sh` as new functions.
|
||||
|
||||
## Running tests manually
|
||||
- Set environment variables `MINT_DATA_DIR`, `MINT_MODE`, `SERVER_ENDPOINT`, `ACCESS_KEY`, `SECRET_KEY`, `SERVER_REGION` and `ENABLE_HTTPS`
|
||||
- Call `run.sh` with an output log file and an error log file. For example:
|
||||
```bash
|
||||
export MINT_DATA_DIR=~/my-mint-dir
|
||||
export MINT_MODE=core
|
||||
export SERVER_ENDPOINT="play.minio.io:9000"
|
||||
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
|
||||
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
export ENABLE_HTTPS=1
|
||||
export SERVER_REGION=us-east-1
|
||||
./run.sh /tmp/output.log /tmp/error.log
|
||||
```
|
||||
27
mint/run/core/mc/run.sh
Executable file
@@ -0,0 +1,27 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# handle command line arguments
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
output_log_file="$1"
|
||||
error_log_file="$2"
|
||||
|
||||
./functional-tests.sh 1>>"$output_log_file" 2>"$error_log_file"
|
||||
26
mint/run/core/minio-dotnet/run.sh
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Mint (C) 2017 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# handle command line arguments
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
output_log_file="$1"
|
||||
error_log_file="$2"
|
||||
/mint/run/core/minio-dotnet/out/Minio.Functional.Tests 1>>"$output_log_file" 2>"$error_log_file"
|
||||
19
mint/run/core/minio-go/README.md
Normal file
@@ -0,0 +1,19 @@
|
||||
## `minio-go` tests
|
||||
This directory serves as the location for Mint tests using `minio-go`. Top level `mint.sh` calls `run.sh` to execute tests.
|
||||
|
||||
## Adding new tests
|
||||
New tests are added to the functional tests of minio-go. Please check https://github.com/minio/minio-go
|
||||
|
||||
## Running tests manually
|
||||
- Set environment variables `MINT_DATA_DIR`, `MINT_MODE`, `SERVER_ENDPOINT`, `ACCESS_KEY`, `SECRET_KEY`, `SERVER_REGION` and `ENABLE_HTTPS`
|
||||
- Call `run.sh` with an output log file and an error log file. For example:
|
||||
```bash
|
||||
export MINT_DATA_DIR=~/my-mint-dir
|
||||
export MINT_MODE=core
|
||||
export SERVER_ENDPOINT="play.minio.io:9000"
|
||||
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
|
||||
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
export ENABLE_HTTPS=1
|
||||
export SERVER_REGION=us-east-1
|
||||
./run.sh /tmp/output.log /tmp/error.log
|
||||
```
|
||||
28
mint/run/core/minio-go/run.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Mint (C) 2017 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# handle command line arguments
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
output_log_file="$1"
|
||||
error_log_file="$2"
|
||||
|
||||
# run tests
|
||||
/mint/run/core/minio-go/minio-go 1>>"$output_log_file" 2>"$error_log_file"
|
||||
19
mint/run/core/minio-java/README.md
Normal file
@@ -0,0 +1,19 @@
|
||||
## `minio-java` tests
|
||||
This directory serves as the location for Mint tests using `minio-java`. Top level `mint.sh` calls `run.sh` to execute tests.
|
||||
|
||||
## Adding new tests
|
||||
New tests are added to the functional tests of minio-java. Please check https://github.com/minio/minio-java
|
||||
|
||||
## Running tests manually
|
||||
- Set environment variables `MINT_DATA_DIR`, `MINT_MODE`, `SERVER_ENDPOINT`, `ACCESS_KEY`, `SECRET_KEY`, `SERVER_REGION` and `ENABLE_HTTPS`
|
||||
- Call `run.sh` with an output log file and an error log file. For example:
|
||||
```bash
|
||||
export MINT_DATA_DIR=~/my-mint-dir
|
||||
export MINT_MODE=core
|
||||
export SERVER_ENDPOINT="play.minio.io:9000"
|
||||
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
|
||||
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
export ENABLE_HTTPS=1
|
||||
export SERVER_REGION=us-east-1
|
||||
./run.sh /tmp/output.log /tmp/error.log
|
||||
```
|
||||
34
mint/run/core/minio-java/run.sh
Executable file
@@ -0,0 +1,34 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Mint (C) 2017 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# handle command line arguments
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
output_log_file="$1"
|
||||
error_log_file="$2"
|
||||
|
||||
# run tests
|
||||
endpoint="http://$SERVER_ENDPOINT"
|
||||
if [ "$ENABLE_HTTPS" -eq 1 ]; then
|
||||
endpoint="https://$SERVER_ENDPOINT"
|
||||
fi
|
||||
|
||||
java -Xmx4096m -Xms256m -cp "/mint/run/core/minio-java/*:." FunctionalTest \
|
||||
"$endpoint" "$ACCESS_KEY" "$SECRET_KEY" "$SERVER_REGION" 1>>"$output_log_file" 2>"$error_log_file"
|
||||
19
mint/run/core/minio-js/README.md
Normal file
@@ -0,0 +1,19 @@
|
||||
## `minio-js` tests
|
||||
This directory serves as the location for Mint tests using `minio-js`. Top level `mint.sh` calls `run.sh` to execute tests.
|
||||
|
||||
## Adding new tests
|
||||
New tests are added to the functional tests of minio-js. Please check https://github.com/minio/minio-js
|
||||
|
||||
## Running tests manually
|
||||
- Set environment variables `MINT_DATA_DIR`, `MINT_MODE`, `SERVER_ENDPOINT`, `ACCESS_KEY`, `SECRET_KEY`, `SERVER_REGION` and `ENABLE_HTTPS`
|
||||
- Call `run.sh` with an output log file and an error log file. For example:
|
||||
```bash
|
||||
export MINT_DATA_DIR=~/my-mint-dir
|
||||
export MINT_MODE=core
|
||||
export SERVER_ENDPOINT="play.minio.io:9000"
|
||||
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
|
||||
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
export ENABLE_HTTPS=1
|
||||
export SERVER_REGION=us-east-1
|
||||
./run.sh /tmp/output.log /tmp/error.log
|
||||
```
|
||||
69
mint/run/core/minio-js/minioreporter.js
Normal file
@@ -0,0 +1,69 @@
|
||||
/*
|
||||
* Minio Reporter for JSON formatted logging, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
var mocha = require('mocha');
|
||||
module.exports = minioreporter;
|
||||
|
||||
function minioreporter(runner) {
|
||||
mocha.reporters.Base.call(this, runner);
|
||||
var self = this;
|
||||
|
||||
runner.on('pass', function (test) {
|
||||
GenerateJsonEntry(test)
|
||||
});
|
||||
|
||||
runner.on('fail', function (test, err) {
|
||||
GenerateJsonEntry(test, err)
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert test result into a JSON object and print on the console.
|
||||
*
|
||||
* @api private
|
||||
* @param test, err
|
||||
*/
|
||||
|
||||
function GenerateJsonEntry (test, err) {
|
||||
var res = test.title.split("_")
|
||||
var jsonEntry = {};
|
||||
|
||||
jsonEntry.name = "minio-js"
|
||||
|
||||
if (res.length > 0 && res[0].length) {
|
||||
jsonEntry.function = res[0]
|
||||
}
|
||||
|
||||
if (res.length > 1 && res[1].length) {
|
||||
jsonEntry.args = res[1]
|
||||
}
|
||||
|
||||
jsonEntry.duration = test.duration
|
||||
|
||||
if (res.length > 2 && res[2].length) {
|
||||
jsonEntry.alert = res[2]
|
||||
}
|
||||
|
||||
if (err != null ) {
|
||||
jsonEntry.status = "FAIL"
|
||||
jsonEntry.error = err.stack.replace(/\n/g, " ").replace(/ +(?= )/g,'')
|
||||
} else {
|
||||
jsonEntry.status = "PASS"
|
||||
}
|
||||
|
||||
process.stdout.write(JSON.stringify(jsonEntry) + "\n")
|
||||
}
|
||||
49
mint/run/core/minio-js/package.json
Normal file
@@ -0,0 +1,49 @@
|
||||
{
|
||||
"name": "bin",
|
||||
"version": "1.0.0",
|
||||
"main": "functional_test.js",
|
||||
"scripts": {
|
||||
"test": "echo \"Error: no test specified\" && exit 1"
|
||||
},
|
||||
"keywords": [],
|
||||
"author": "",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"dependencies": {
|
||||
"app-module-path": "*",
|
||||
"async": "*",
|
||||
"block-stream2": "*",
|
||||
"concat-stream": "*",
|
||||
"es6-error": "*",
|
||||
"json-stream": "*",
|
||||
"lodash": "*",
|
||||
"mime-types": "*",
|
||||
"mkdirp": "*",
|
||||
"moment": "*",
|
||||
"source-map-support": "*",
|
||||
"through2": "*",
|
||||
"xml": "*",
|
||||
"xml2js": "*"
|
||||
},
|
||||
"devDependencies": {
|
||||
"browserify": "*",
|
||||
"chai": "*",
|
||||
"gulp": "*",
|
||||
"gulp-babel": "*",
|
||||
"gulp-jscs": "*",
|
||||
"jshint":"2.*",
|
||||
"gulp-jshint": "*",
|
||||
"gulp-mocha": "*",
|
||||
"gulp-notify": "*",
|
||||
"gulp-sourcemaps": "*",
|
||||
"jshint-stylish": "*",
|
||||
"mocha": "*",
|
||||
"mocha-steps": "*",
|
||||
"nock": "*",
|
||||
"rewire": "*",
|
||||
"superagent": "*"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "mocha"
|
||||
}
|
||||
}
|
||||
28
mint/run/core/minio-js/run.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# handle command line arguments
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
output_log_file="$1"
|
||||
error_log_file="$2"
|
||||
|
||||
# run tests
|
||||
./node_modules/mocha/bin/mocha -R minioreporter -b --exit 1>>"$output_log_file" 2>"$error_log_file"
|
||||
19
mint/run/core/minio-py/README.md
Normal file
@@ -0,0 +1,19 @@
|
||||
## `minio-py` tests
|
||||
This directory serves as the location for Mint tests using `minio-py`. Top level `mint.sh` calls `run.sh` to execute tests.
|
||||
|
||||
## Adding new tests
|
||||
New tests are added to the functional tests of minio-py. Please check https://github.com/minio/minio-py
|
||||
|
||||
## Running tests manually
|
||||
- Set environment variables `MINT_DATA_DIR`, `MINT_MODE`, `SERVER_ENDPOINT`, `ACCESS_KEY`, `SECRET_KEY`, `SERVER_REGION` and `ENABLE_HTTPS`
|
||||
- Call `run.sh` with an output log file and an error log file. For example:
|
||||
```bash
|
||||
export MINT_DATA_DIR=~/my-mint-dir
|
||||
export MINT_MODE=core
|
||||
export SERVER_ENDPOINT="play.minio.io:9000"
|
||||
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
|
||||
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
export ENABLE_HTTPS=1
|
||||
export SERVER_REGION=us-east-1
|
||||
./run.sh /tmp/output.log /tmp/error.log
|
||||
```
|
||||
28
mint/run/core/minio-py/run.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Mint (C) 2017 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# handle command line arguments
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
output_log_file="$1"
|
||||
error_log_file="$2"
|
||||
|
||||
# run tests
|
||||
python "/mint/run/core/minio-py/tests.py" 1>>"$output_log_file" 2>"$error_log_file"
|
||||
2
mint/run/core/s3cmd/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*~
|
||||
*.log
|
||||
19
mint/run/core/s3cmd/README.md
Normal file
@@ -0,0 +1,19 @@
|
||||
## `s3cmd` tests
|
||||
This directory serves as the location for Mint tests using `s3cmd`. Top level `mint.sh` calls `run.sh` to execute tests.
|
||||
|
||||
## Adding new tests
|
||||
New tests are added to `test.sh` as new functions.
|
||||
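For instance, a new check could follow the pattern already used by the existing functions in `test.sh` (a sketch; the `test_stat_object` name and the use of `info` here are illustrative):

```bash
function test_stat_object()
{
    show "${FUNCNAME[0]}"

    start_time=$(get_time)
    object_name="s3cmd-test-object-$RANDOM"
    # upload the object, stat it, then clean up, using the shared assert helpers
    assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd put "${FILE_1_MB}" "s3://${BUCKET_NAME}/${object_name}"
    assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd info "s3://${BUCKET_NAME}/${object_name}"
    assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rm "s3://${BUCKET_NAME}/${object_name}"

    log_success "$start_time" "${FUNCNAME[0]}"
}
```

Remember to call the new function from `run_test` so that it executes between `setup` and `teardown`.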
|
||||
## Running tests manually
|
||||
- Set environment variables `MINT_DATA_DIR`, `MINT_MODE`, `SERVER_ENDPOINT`, `ACCESS_KEY`, `SECRET_KEY`, `SERVER_REGION` and `ENABLE_HTTPS`
|
||||
- Call `run.sh` with an output log file and an error log file. For example:
|
||||
```bash
|
||||
export MINT_DATA_DIR=~/my-mint-dir
|
||||
export MINT_MODE=core
|
||||
export SERVER_ENDPOINT="play.minio.io:9000"
|
||||
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
|
||||
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
export ENABLE_HTTPS=1
|
||||
export SERVER_REGION=us-east-1
|
||||
./run.sh /tmp/output.log /tmp/error.log
|
||||
```
|
||||
28
mint/run/core/s3cmd/run.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Mint (C) 2017 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# handle command line arguments
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
output_log_file="$1"
|
||||
error_log_file="$2"
|
||||
|
||||
# run tests
|
||||
./test.sh 1>>"$output_log_file" 2>"$error_log_file"
|
||||
422
mint/run/core/s3cmd/test.sh
Executable file
@@ -0,0 +1,422 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Mint (C) 2017 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
|
||||
if [ -n "$MINT_MODE" ]; then
|
||||
if [ -z "${MINT_DATA_DIR+x}" ]; then
|
||||
echo "MINT_DATA_DIR not defined"
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "${SERVER_ENDPOINT+x}" ]; then
|
||||
echo "SERVER_ENDPOINT not defined"
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "${ACCESS_KEY+x}" ]; then
|
||||
echo "ACCESS_KEY not defined"
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "${SECRET_KEY+x}" ]; then
|
||||
echo "SECRET_KEY not defined"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "${SERVER_ENDPOINT+x}" ]; then
|
||||
SERVER_ENDPOINT="play.minio.io:9000"
|
||||
ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
|
||||
SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
ENABLE_HTTPS=1
|
||||
SERVER_REGION="us-east-1"
|
||||
fi
|
||||
|
||||
WORK_DIR="$PWD"
|
||||
DATA_DIR="$MINT_DATA_DIR"
|
||||
if [ -z "$MINT_MODE" ]; then
|
||||
WORK_DIR="$PWD/.run-$RANDOM"
|
||||
DATA_DIR="$WORK_DIR/data"
|
||||
fi
|
||||
|
||||
FILE_1_MB="$DATA_DIR/datafile-1-MB"
|
||||
FILE_65_MB="$DATA_DIR/datafile-65-MB"
|
||||
declare FILE_1_MB_MD5SUM
|
||||
declare FILE_65_MB_MD5SUM
|
||||
|
||||
BUCKET_NAME="s3cmd-test-bucket-$RANDOM"
|
||||
S3CMD=$(which s3cmd)
|
||||
declare -a S3CMD_CMD
|
||||
|
||||
function get_md5sum()
|
||||
{
|
||||
filename="$FILE_1_MB"
|
||||
out=$(md5sum "$filename" 2>/dev/null)
|
||||
rv=$?
|
||||
if [ "$rv" -eq 0 ]; then
|
||||
awk '{ print $1 }' <<< "$out"
|
||||
fi
|
||||
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function get_time()
|
||||
{
|
||||
date +%s%N
|
||||
}
|
||||
|
||||
function get_duration()
|
||||
{
|
||||
start_time=$1
|
||||
end_time=$(get_time)
|
||||
|
||||
echo $(( (end_time - start_time) / 1000000 ))
|
||||
}
|
||||
|
||||
function log_success()
|
||||
{
|
||||
if [ -n "$MINT_MODE" ]; then
|
||||
printf '{"name": "s3cmd", "duration": "%d", "function": "%s", "status": "PASS"}\n' "$(get_duration "$1")" "$2"
|
||||
fi
|
||||
}
|
||||
|
||||
function show()
|
||||
{
|
||||
if [ -z "$MINT_MODE" ]; then
|
||||
func_name="$1"
|
||||
echo "Running $func_name()"
|
||||
fi
|
||||
}
|
||||
|
||||
function fail()
|
||||
{
|
||||
rv="$1"
|
||||
shift
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
echo "$@"
|
||||
fi
|
||||
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function assert()
|
||||
{
|
||||
expected_rv="$1"
|
||||
shift
|
||||
start_time="$1"
|
||||
shift
|
||||
func_name="$1"
|
||||
shift
|
||||
|
||||
err=$("$@" 2>&1)
|
||||
rv=$?
|
||||
if [ "$rv" -ne 0 ] && [ "$expected_rv" -eq 0 ]; then
|
||||
if [ -n "$MINT_MODE" ]; then
|
||||
err=$(printf '%s' "$err" | python -c 'import sys,json; print(json.dumps(sys.stdin.read()))')
|
||||
## err is already JSON string, no need to double quote
|
||||
printf '{"name": "s3cmd", "duration": "%d", "function": "%s", "status": "FAIL", "error": %s}\n' "$(get_duration "$start_time")" "$func_name" "$err"
|
||||
else
|
||||
echo "s3cmd: $func_name: $err"
|
||||
fi
|
||||
|
||||
exit "$rv"
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
function assert_success() {
|
||||
assert 0 "$@"
|
||||
}
|
||||
|
||||
function assert_failure() {
|
||||
assert 1 "$@"
|
||||
}
|
||||
|
||||
function s3cmd_cmd()
|
||||
{
|
||||
cmd=( "${S3CMD_CMD[@]}" "$@" )
|
||||
"${cmd[@]}"
|
||||
rv=$?
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function check_md5sum()
|
||||
{
|
||||
expected_checksum="$1"
|
||||
shift
|
||||
filename="$*"
|
||||
|
||||
checksum="$(get_md5sum "$filename")"
|
||||
rv=$?
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
echo "unable to get md5sum for $filename"
|
||||
return "$rv"
|
||||
fi
|
||||
|
||||
if [ "$checksum" != "$expected_checksum" ]; then
|
||||
echo "$filename: md5sum mismatch"
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
function test_make_bucket()
|
||||
{
|
||||
show "${FUNCNAME[0]}"
|
||||
|
||||
start_time=$(get_time)
|
||||
bucket_name="s3cmd-test-bucket-$RANDOM"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd mb "s3://${bucket_name}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rb "s3://${bucket_name}"
|
||||
|
||||
log_success "$start_time" "${FUNCNAME[0]}"
|
||||
}
|
||||
|
||||
function test_make_bucket_error() {
|
||||
show "${FUNCNAME[0]}"
|
||||
|
||||
start_time=$(get_time)
|
||||
bucket_name="S3CMD-test%bucket%$RANDOM"
|
||||
assert_failure "$start_time" "${FUNCNAME[0]}" s3cmd_cmd mb "s3://${bucket_name}"
|
||||
|
||||
log_success "$start_time" "${FUNCNAME[0]}"
|
||||
}
|
||||
|
||||
function setup()
|
||||
{
|
||||
start_time=$(get_time)
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd mb "s3://${BUCKET_NAME}"
|
||||
}
|
||||
|
||||
function teardown()
|
||||
{
|
||||
start_time=$(get_time)
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rm --force --recursive "s3://${BUCKET_NAME}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rb --force "s3://${BUCKET_NAME}"
|
||||
}
|
||||
|
||||
function test_put_object()
|
||||
{
|
||||
show "${FUNCNAME[0]}"
|
||||
|
||||
start_time=$(get_time)
|
||||
object_name="s3cmd-test-object-$RANDOM"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd put "${FILE_1_MB}" "s3://${BUCKET_NAME}/${object_name}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rm "s3://${BUCKET_NAME}/${object_name}"
|
||||
|
||||
log_success "$start_time" "${FUNCNAME[0]}"
|
||||
}
|
||||
|
||||
function test_put_object_error()
|
||||
{
|
||||
show "${FUNCNAME[0]}"
|
||||
start_time=$(get_time)
|
||||
|
||||
object_long_name=$(printf "s3cmd-test-object-%01100d" 1)
|
||||
assert_failure "$start_time" "${FUNCNAME[0]}" s3cmd_cmd put "${FILE_1_MB}" "s3://${BUCKET_NAME}/${object_long_name}"
|
||||
|
||||
log_success "$start_time" "${FUNCNAME[0]}"
|
||||
}
|
||||
|
||||
function test_put_object_multipart()
|
||||
{
|
||||
show "${FUNCNAME[0]}"
|
||||
|
||||
start_time=$(get_time)
|
||||
object_name="s3cmd-test-object-$RANDOM"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd put "${FILE_65_MB}" "s3://${BUCKET_NAME}/${object_name}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rm "s3://${BUCKET_NAME}/${object_name}"
|
||||
|
||||
log_success "$start_time" "${FUNCNAME[0]}"
|
||||
}
|
||||
|
||||
function test_get_object()
|
||||
{
|
||||
show "${FUNCNAME[0]}"
|
||||
|
||||
start_time=$(get_time)
|
||||
object_name="s3cmd-test-object-$RANDOM"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd put "${FILE_1_MB}" "s3://${BUCKET_NAME}/${object_name}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd get "s3://${BUCKET_NAME}/${object_name}" "${object_name}.downloaded"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" check_md5sum "$FILE_1_MB_MD5SUM" "${object_name}.downloaded"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rm "s3://${BUCKET_NAME}/${object_name}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" rm -f "${object_name}.downloaded"
|
||||
|
||||
log_success "$start_time" "${FUNCNAME[0]}"
|
||||
}
|
||||
|
||||
function test_get_object_error()
|
||||
{
|
||||
show "${FUNCNAME[0]}"
|
||||
|
||||
start_time=$(get_time)
|
||||
object_name="s3cmd-test-object-$RANDOM"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd put "${FILE_1_MB}" "s3://${BUCKET_NAME}/${object_name}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rm "s3://${BUCKET_NAME}/${object_name}"
|
||||
assert_failure "$start_time" "${FUNCNAME[0]}" s3cmd_cmd get "s3://${BUCKET_NAME}/${object_name}" "${object_name}.downloaded"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" rm -f "${object_name}.downloaded"
|
||||
|
||||
log_success "$start_time" "${FUNCNAME[0]}"
|
||||
}
|
||||
|
||||
function test_get_object_multipart()
|
||||
{
|
||||
show "${FUNCNAME[0]}"
|
||||
|
||||
start_time=$(get_time)
|
||||
object_name="s3cmd-test-object-$RANDOM"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd put "${FILE_65_MB}" "s3://${BUCKET_NAME}/${object_name}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd get "s3://${BUCKET_NAME}/${object_name}" "${object_name}.downloaded"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" check_md5sum "$FILE_65_MB_MD5SUM" "${object_name}.downloaded"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rm "s3://${BUCKET_NAME}/${object_name}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" rm -f "${object_name}.downloaded"
|
||||
|
||||
log_success "$start_time" "${FUNCNAME[0]}"
|
||||
}
|
||||
|
||||
function test_copy_object()
|
||||
{
|
||||
show "${FUNCNAME[0]}"
|
||||
|
||||
start_time=$(get_time)
|
||||
object_name1="s3cmd-test-object-$RANDOM"
|
||||
object_name2="s3cmd-test-object-$RANDOM"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd put "${FILE_1_MB}" "s3://${BUCKET_NAME}/${object_name1}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd cp "s3://${BUCKET_NAME}/${object_name1}" "s3://${BUCKET_NAME}/${object_name2}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd info "s3://${BUCKET_NAME}/${object_name2}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rm "s3://${BUCKET_NAME}/${object_name1}" "s3://${BUCKET_NAME}/${object_name2}"
|
||||
|
||||
log_success "$start_time" "${FUNCNAME[0]}"
|
||||
}
|
||||
|
||||
function test_sync_list_objects()
|
||||
{
|
||||
show "${FUNCNAME[0]}"
|
||||
|
||||
start_time=$(get_time)
|
||||
bucket_name="s3cmd-test-bucket-$RANDOM"
|
||||
object_name="s3cmd-test-object-$RANDOM"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd mb "s3://${bucket_name}"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd sync "$DATA_DIR/" "s3://${bucket_name}"
|
||||
|
||||
diff -bB <(ls "$DATA_DIR") <("${S3CMD_CMD[@]}" ls "s3://${bucket_name}" | awk '{print $4}' | sed "s/s3:*..${bucket_name}.//g") >/dev/null 2>&1
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" fail $? "sync and list differs"
|
||||
assert_success "$start_time" "${FUNCNAME[0]}" s3cmd_cmd rb --force --recursive "s3://${bucket_name}"
|
||||
|
||||
log_success "$start_time" "${FUNCNAME[0]}"
|
||||
}
|
||||
|
||||
function run_test()
|
||||
{
|
||||
test_make_bucket
|
||||
test_make_bucket_error
|
||||
|
||||
setup
|
||||
|
||||
test_put_object
|
||||
test_put_object_error
|
||||
test_put_object_multipart
|
||||
test_get_object
|
||||
test_get_object_multipart
|
||||
test_copy_object
|
||||
test_sync_list_objects
|
||||
|
||||
teardown
|
||||
}
|
||||
|
||||
function __init__()
|
||||
{
|
||||
set -e
|
||||
|
||||
S3CMD_CONFIG_DIR="/tmp/.s3cmd-$RANDOM"
|
||||
mkdir -p $S3CMD_CONFIG_DIR
|
||||
S3CMD_CONFIG_FILE="$S3CMD_CONFIG_DIR/s3cfg"
|
||||
|
||||
# configure s3cmd
|
||||
cat > $S3CMD_CONFIG_FILE <<EOF
|
||||
signature_v2 = False
|
||||
host_base = $SERVER_ENDPOINT
|
||||
host_bucket = $SERVER_ENDPOINT
|
||||
bucket_location = $SERVER_REGION
|
||||
use_https = $ENABLE_HTTPS
|
||||
access_key = $ACCESS_KEY
|
||||
secret_key = $SECRET_KEY
|
||||
EOF
|
||||
|
||||
# For Mint, setup is already done. For others, setup the environment
|
||||
if [ -z "$MINT_MODE" ]; then
|
||||
mkdir -p "$WORK_DIR"
|
||||
mkdir -p "$DATA_DIR"
|
||||
|
||||
# If s3cmd executable binary is not available in current directory, use it in the path.
|
||||
if [ ! -x "$S3CMD" ]; then
|
||||
echo "'s3cmd' executable binary not found in current directory and in path"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -x "$S3CMD" ]; then
|
||||
echo "$S3CMD executable binary not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
S3CMD_CMD=( "${S3CMD}" --config "$S3CMD_CONFIG_FILE" )
|
||||
|
||||
if [ ! -e "$FILE_1_MB" ]; then
|
||||
shred -n 1 -s 1MB - >"$FILE_1_MB"
|
||||
fi
|
||||
|
||||
if [ ! -e "$FILE_65_MB" ]; then
|
||||
shred -n 1 -s 65MB - >"$FILE_65_MB"
|
||||
fi
|
||||
|
||||
set -E
|
||||
set -o pipefail
|
||||
|
||||
FILE_1_MB_MD5SUM="$(get_md5sum "$FILE_1_MB")"
|
||||
rv=$?
|
||||
if [ $rv -ne 0 ]; then
|
||||
echo "unable to get md5sum of $FILE_1_MB"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
FILE_65_MB_MD5SUM="$(get_md5sum "$FILE_65_MB")"
|
||||
rv=$?
|
||||
if [ $rv -ne 0 ]; then
|
||||
echo "unable to get md5sum of $FILE_65_MB"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
set +e
|
||||
}
|
||||
|
||||
function main()
|
||||
{
|
||||
( run_test )
|
||||
rv=$?
|
||||
|
||||
rm -fr "$S3CMD_CONFIG_FILE"
|
||||
if [ -z "$MINT_MODE" ]; then
|
||||
rm -fr "$WORK_DIR" "$DATA_DIR"
|
||||
fi
|
||||
|
||||
exit "$rv"
|
||||
}
|
||||
|
||||
__init__ "$@"
|
||||
main "$@"
|
||||
28
mint/run/core/security/run.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Mint (C) 2018 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# handle command line arguments
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
output_log_file="$1"
|
||||
error_log_file="$2"
|
||||
|
||||
# run tests
|
||||
/mint/run/core/security/tls-tests 1>>"$output_log_file" 2>"$error_log_file"
|
||||
272
mint/run/core/security/tls-tests.go
Normal file
@@ -0,0 +1,272 @@
|
||||
// Mint, (C) 2018 Minio, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const testName = "TLS-tests"
|
||||
|
||||
const (
|
||||
// PASS indicate that a test passed
|
||||
PASS = "PASS"
|
||||
// FAIL indicate that a test failed
|
||||
FAIL = "FAIL"
|
||||
// NA indicates that a test is not applicable
|
||||
NA = "NA"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFormatter(&mintJSONFormatter{})
|
||||
log.SetLevel(log.InfoLevel)
|
||||
|
||||
endpoint := os.Getenv("SERVER_ENDPOINT")
|
||||
secure := os.Getenv("ENABLE_HTTPS")
|
||||
if secure != "1" {
|
||||
log.WithFields(log.Fields{"name:": testName, "status": NA, "message": "TLS is not enabled"}).Info()
|
||||
return
|
||||
}
|
||||
|
||||
testTLSVersions(endpoint)
|
||||
testTLSCiphers(endpoint)
|
||||
testTLSEllipticCurves(endpoint)
|
||||
}
|
||||
|
||||
// Tests whether the endpoint accepts SSL3.0, TLS1.0 or TLS1.1 connections - fail if so.
|
||||
// Tests whether the endpoint accepts TLS1.2 connections - fail if not.
|
||||
func testTLSVersions(endpoint string) {
|
||||
const function = "TLSVersions"
|
||||
startTime := time.Now()
|
||||
|
||||
// Tests whether the endpoint accepts SSL3.0, TLS1.0 or TLS1.1 connections
|
||||
args := map[string]interface{}{
|
||||
"MinVersion": "tls.VersionSSL30",
|
||||
"MaxVersion": "tls.VersionTLS11",
|
||||
}
|
||||
_, err := tls.Dial("tcp", endpoint, &tls.Config{
|
||||
MinVersion: tls.VersionSSL30,
|
||||
MaxVersion: tls.VersionTLS11,
|
||||
})
|
||||
if err == nil {
|
||||
failureLog(function, args, startTime, "", "Endpoint accepts insecure connection", err).Error()
|
||||
return
|
||||
}
|
||||
|
||||
// Tests whether the endpoint accepts TLS1.2 connections
|
||||
args = map[string]interface{}{
|
||||
"MinVersion": "tls.VersionTLS12",
|
||||
}
|
||||
_, err = tls.Dial("tcp", endpoint, &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
})
|
||||
if err != nil {
|
||||
failureLog(function, args, startTime, "", "Endpoint rejects secure connection", err).Error()
|
||||
return
|
||||
}
|
||||
successLog(function, args, startTime)
|
||||
}
|
||||
|
||||
// Tests whether the endpoint accepts known-insecure cipher suites - fail if so.
// Tests whether the endpoint accepts at least one of the secure cipher suites - fail if not.
|
||||
func testTLSCiphers(endpoint string) {
|
||||
const function = "TLSCiphers"
|
||||
startTime := time.Now()
|
||||
|
||||
// Tests whether the endpoint accepts insecure ciphers
|
||||
args := map[string]interface{}{
|
||||
"MinVersion": "tls.VersionTLS12",
|
||||
"CipherSuites": unsupportedCipherSuites,
|
||||
}
|
||||
_, err := tls.Dial("tcp", endpoint, &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: unsupportedCipherSuites,
|
||||
})
|
||||
if err == nil {
|
||||
failureLog(function, args, startTime, "", "Endpoint accepts insecure cipher suites", err).Error()
|
||||
return
|
||||
}
|
||||
|
||||
// Tests whether the endpoint accepts at least one secure cipher
|
||||
args = map[string]interface{}{
|
||||
"MinVersion": "tls.VersionTLS12",
|
||||
"CipherSuites": supportedCipherSuites,
|
||||
}
|
||||
_, err = tls.Dial("tcp", endpoint, &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: supportedCipherSuites,
|
||||
})
|
||||
if err != nil {
|
||||
failureLog(function, args, startTime, "", "Endpoint rejects all secure cipher suites", err).Error()
|
||||
return
|
||||
}
|
||||
|
||||
// Tests whether the endpoint accepts at least one default cipher
|
||||
args = map[string]interface{}{
|
||||
"MinVersion": "tls.VersionTLS12",
|
||||
"CipherSuites": nil,
|
||||
}
|
||||
_, err = tls.Dial("tcp", endpoint, &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: nil, // default value
|
||||
})
|
||||
if err != nil {
|
||||
failureLog(function, args, startTime, "", "Endpoint rejects default cipher suites", err).Error()
|
||||
return
|
||||
}
|
||||
successLog(function, args, startTime)
|
||||
}
|
||||
|
||||
// Tests whether the endpoint accepts the P-384 or P-521 elliptic curve - fail if so.
|
||||
// Tests whether the endpoint accepts Curve25519 or P-256 - fail if not.
|
||||
func testTLSEllipticCurves(endpoint string) {
|
||||
const function = "TLSEllipticCurves"
|
||||
startTime := time.Now()
|
||||
|
||||
// Tests whether the endpoint accepts curves using non-constant time implementations.
|
||||
args := map[string]interface{}{
|
||||
"CurvePreferences": unsupportedCurves,
|
||||
}
|
||||
_, err := tls.Dial("tcp", endpoint, &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CurvePreferences: unsupportedCurves,
|
||||
CipherSuites: supportedCipherSuites,
|
||||
})
|
||||
if err == nil {
|
||||
failureLog(function, args, startTime, "", "Endpoint accepts insecure elliptic curves", err).Error()
|
||||
return
|
||||
}
|
||||
|
||||
// Tests whether the endpoint accepts curves using constant time implementations.
|
||||
args = map[string]interface{}{
|
||||
"CurvePreferences": unsupportedCurves,
|
||||
}
|
||||
_, err = tls.Dial("tcp", endpoint, &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CurvePreferences: supportedCurves,
|
||||
CipherSuites: supportedCipherSuites,
|
||||
})
|
||||
if err != nil {
|
||||
failureLog(function, args, startTime, "", "Endpoint does not accept secure elliptic curves", err).Error()
|
||||
return
|
||||
}
|
||||
successLog(function, args, startTime)
|
||||
}
|
||||
|
||||
func successLog(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
|
||||
duration := time.Since(startTime).Nanoseconds() / 1000000
|
||||
return log.WithFields(log.Fields{
|
||||
"name": testName,
|
||||
"function": function,
|
||||
"args": args,
|
||||
"duration": duration,
|
||||
"status": PASS,
|
||||
})
|
||||
}
|
||||
|
||||
func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
|
||||
duration := time.Since(startTime).Nanoseconds() / 1000000
|
||||
fields := log.Fields{
|
||||
"name": testName,
|
||||
"function": function,
|
||||
"args": args,
|
||||
"duration": duration,
|
||||
"status": FAIL,
|
||||
"alert": alert,
|
||||
"message": message,
|
||||
}
|
||||
if err != nil {
|
||||
fields["error"] = err
|
||||
}
|
||||
return log.WithFields(fields)
|
||||
}
|
||||
|
||||
type mintJSONFormatter struct {
|
||||
}
|
||||
|
||||
func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
|
||||
data := make(log.Fields, len(entry.Data))
|
||||
for k, v := range entry.Data {
|
||||
switch v := v.(type) {
|
||||
case error:
|
||||
// Otherwise errors are ignored by `encoding/json`
|
||||
// https://github.com/sirupsen/logrus/issues/137
|
||||
data[k] = v.Error()
|
||||
default:
|
||||
data[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
serialized, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
|
||||
// Secure Go implementations of modern TLS ciphers
|
||||
// The following ciphers are excluded because:
|
||||
// - RC4 ciphers: RC4 is broken
|
||||
// - 3DES ciphers: Because of the 64 bit blocksize of DES (Sweet32)
|
||||
// - CBC-SHA256 ciphers: No countermeasures against Lucky13 timing attack
|
||||
// - CBC-SHA ciphers: Legacy ciphers (SHA-1) and non-constant time
|
||||
// implementation of CBC.
|
||||
// (CBC-SHA ciphers can be enabled again if required)
|
||||
// - RSA key exchange ciphers: Disabled because of dangerous PKCS1-v1.5 RSA
|
||||
// padding scheme. See Bleichenbacher attacks.
|
||||
var supportedCipherSuites = []uint16{
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
}
|
||||
|
||||
// Supported elliptic curves: Implementations are constant-time.
|
||||
var supportedCurves = []tls.CurveID{tls.X25519, tls.CurveP256}
|
||||
|
||||
var unsupportedCipherSuites = []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // Go stack contains (some) countermeasures against timing attacks (Lucky13)
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // No countermeasures against timing attacks
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // Go stack contains (some) countermeasures against timing attacks (Lucky13)
|
||||
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, // Broken cipher
|
||||
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, // Sweet32
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // Go stack contains (some) countermeasures against timing attacks (Lucky13)
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // No countermeasures against timing attacks
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // Go stack contains (some) countermeasures against timing attacks (Lucky13)
|
||||
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, // Broken cipher
|
||||
|
||||
// all RSA-PKCS1-v1.5 ciphers are disabled - danger of Bleichenbacher attack variants
|
||||
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // Sweet32
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA, // Go stack contains (some) countermeasures against timing attacks (Lucky13)
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA256, // No countermeasures against timing attacks
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA, // Go stack contains (some) countermeasures against timing attacks (Lucky13)
|
||||
tls.TLS_RSA_WITH_RC4_128_SHA, // Broken cipher
|
||||
|
||||
tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // Disabled because of RSA-PKCS1-v1.5 - AES-GCM is considered secure.
|
||||
tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // Disabled because of RSA-PKCS1-v1.5 - AES-GCM is considered secure.
|
||||
}
|
||||
|
||||
// Unsupported elliptic curves: Implementations are not constant-time.
|
||||
var unsupportedCurves = []tls.CurveID{tls.CurveP384, tls.CurveP521}
|
||||
409
mint/run/core/worm/quick-worm-tests.go
Normal file
@@ -0,0 +1,409 @@
|
||||
/*
|
||||
* Mint, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const charset = "abcdefghijklmnopqrstuvwxyz0123456789"
|
||||
|
||||
var randSource *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
const (
|
||||
letterIdxBits = 6 // 6 bits to represent a letter index
|
||||
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
|
||||
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
|
||||
)
|
||||
const (
|
||||
PASS = "PASS" // Indicate that a test passed
|
||||
FAIL = "FAIL" // Indicate that a test failed
|
||||
NA = "NA" // Indicate that a test is not applicable
|
||||
maxPartSize = int64(512 * 1000 * 1024)
|
||||
maxRetries = 1
|
||||
)
|
||||
|
||||
type mintJSONFormatter struct {
|
||||
}
|
||||
|
||||
func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
|
||||
data := make(log.Fields, len(entry.Data))
|
||||
for k, v := range entry.Data {
|
||||
switch v := v.(type) {
|
||||
case error:
|
||||
// Otherwise errors are ignored by `encoding/json`
|
||||
// https://github.com/sirupsen/logrus/issues/137
|
||||
data[k] = v.Error()
|
||||
default:
|
||||
data[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
serialized, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
|
||||
// log successful test runs
|
||||
func successLogger(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
|
||||
// calculate the test case duration
|
||||
duration := time.Since(startTime)
|
||||
// log with the fields as per mint
|
||||
fields := log.Fields{"name": "test worm mode", "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": PASS}
|
||||
return log.WithFields(fields)
|
||||
}
|
||||
|
||||
// log failed test runs
|
||||
func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
|
||||
// calculate the test case duration
|
||||
duration := time.Since(startTime)
|
||||
var fields log.Fields
|
||||
// log with the fields as per mint
|
||||
if err != nil {
|
||||
fields = log.Fields{"name": "test worm mode", "function": function, "args": args,
|
||||
"duration": duration.Nanoseconds() / 1000000, "status": FAIL, "alert": alert, "message": message, "error": err}
|
||||
} else {
|
||||
fields = log.Fields{"name": "test worm mode", "function": function, "args": args,
|
||||
"duration": duration.Nanoseconds() / 1000000, "status": FAIL, "alert": alert, "message": message}
|
||||
}
|
||||
return log.WithFields(fields)
|
||||
}
|
||||
|
||||
func randBucketName() string {
|
||||
b := make([]byte, 55)
|
||||
for i := range b {
|
||||
b[i] = charset[randSource.Intn(len(charset))]
|
||||
}
|
||||
return "bucket-" + string(b)
|
||||
}
|
||||
|
||||
func testPutDeletObject(s3Client *s3.S3) {
|
||||
startTime := time.Now()
|
||||
object := "testObject"
|
||||
function := "PutAndDelete"
|
||||
bucket := randBucketName()
|
||||
expiry := 1 * time.Minute
|
||||
args := map[string]interface{}{
|
||||
"bucketName": bucket,
|
||||
"objectName": object,
|
||||
"expiry": expiry,
|
||||
}
|
||||
// First time bucket creation will be successful
|
||||
_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
|
||||
Bucket: aws.String(bucket),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
failureLog(function, args, startTime, "", "WORM_MODE ON - CreateBucket Failed", err).Fatal()
|
||||
return
|
||||
}
|
||||
// First time put object will be successful
|
||||
putInput1 := &s3.PutObjectInput{
|
||||
Body: aws.ReadSeekCloser(strings.NewReader("fileToUpload")),
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(object),
|
||||
}
|
||||
_, err = s3Client.PutObject(putInput1)
|
||||
if err != nil {
|
||||
failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON - expected to pass but got %v", err), err).Fatal()
|
||||
return
|
||||
}
|
||||
// Put Object
|
||||
putInput2 := &s3.PutObjectInput{
|
||||
Body: aws.ReadSeekCloser(strings.NewReader("filetouploadSecondTime")),
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(object),
|
||||
}
|
||||
_, err = s3Client.PutObject(putInput2)
|
||||
if err == nil {
|
||||
failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON Put is expected to fail, but it passed %v", nil), nil).Fatal()
|
||||
return
|
||||
}
|
||||
|
||||
// Deleting the Object
|
||||
delObject := &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(object),
|
||||
}
|
||||
_, err = s3Client.DeleteObject(delObject)
|
||||
if err == nil {
|
||||
failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON Delete is expected to fail, but it passed %v", nil), nil).Fatal()
|
||||
return
|
||||
}
|
||||
successLogger(function, args, startTime).Info()
|
||||
|
||||
}
|
||||
|
||||
func testCopyObject(s3Client *s3.S3) {
|
||||
startTime := time.Now()
|
||||
function := "CopyObject"
|
||||
object := "DestinationObject"
|
||||
object1 := "SourceObject"
|
||||
destinationBucket := randBucketName()
|
||||
sourceBucket := randBucketName()
|
||||
expiry := 1 * time.Minute
|
||||
args := map[string]interface{}{
|
||||
"bucketName": destinationBucket,
|
||||
"objectName": object,
|
||||
"expiry": expiry,
|
||||
}
|
||||
// Create Destination bucket
|
||||
_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
|
||||
Bucket: aws.String(destinationBucket),
|
||||
})
|
||||
if err != nil {
|
||||
failureLog(function, args, startTime, "", "WORM_MODE ON Destination Bucket Creation Failed", err).Fatal()
|
||||
return
|
||||
}
|
||||
|
||||
// Put object on Destination bucket
|
||||
putInput1 := &s3.PutObjectInput{
|
||||
Body: aws.ReadSeekCloser(strings.NewReader("file to Upload In Destination")),
|
||||
Bucket: aws.String(destinationBucket),
|
||||
Key: aws.String(object),
|
||||
}
|
||||
_, err = s3Client.PutObject(putInput1)
|
||||
if err != nil {
|
||||
failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON PUT expected to pass but got %v", err), err).Fatal()
|
||||
return
|
||||
}
|
||||
|
||||
// Create Source bucket
|
||||
_, err1 := s3Client.CreateBucket(&s3.CreateBucketInput{
|
||||
Bucket: aws.String(sourceBucket),
|
||||
})
|
||||
if err1 != nil {
|
||||
failureLog(function, args, startTime, "", "WORM_MODE ON Source Bucket Creation Failed", err).Fatal()
|
||||
return
|
||||
}
|
||||
|
||||
// Put object on Destination bucket
|
||||
putInput2 := &s3.PutObjectInput{
|
||||
Body: aws.ReadSeekCloser(strings.NewReader("file content to copy ")),
|
||||
Bucket: aws.String(sourceBucket),
|
||||
Key: aws.String(object1),
|
||||
}
|
||||
_, err = s3Client.PutObject(putInput2)
|
||||
if err != nil {
|
||||
failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON PUT expected to pass but got %v", err), err).Fatal()
|
||||
return
|
||||
}
|
||||
|
||||
// Test for Copy Object
|
||||
copyInput := &s3.CopyObjectInput{
|
||||
Bucket: aws.String(destinationBucket),
|
||||
CopySource: aws.String(sourceBucket + "/" + object1),
|
||||
Key: aws.String(object),
|
||||
}
|
||||
|
||||
_, err = s3Client.CopyObject(copyInput)
|
||||
if err == nil {
|
||||
failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON Copy Object should fail, but it passed %v", nil), nil).Fatal()
|
||||
return
|
||||
}
|
||||
successLogger(function, args, startTime).Info()
|
||||
|
||||
}
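
// testPutMultipart verifies that a complete multipart upload of the 5 MB data
// file succeeds on a fresh bucket, and that starting a second multipart
// upload for the same key is rejected while WORM mode is enabled.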
func testPutMultipart(s3Client *s3.S3) {
	bucket := randBucketName()
	startTime := time.Now()
	object := "testObject"
	expiry := 1 * time.Minute
	args := map[string]interface{}{
		"bucketName": bucket,
		"objectName": object,
		"expiry":     expiry,
	}
	function := "PutMultiPart"
	file, err := os.Open("/mint/data/datafile-5-MB")
	if err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON err opening file", err).Fatal()
		return
	}
	defer file.Close()
	fileInfo, _ := file.Stat()
	size := fileInfo.Size()
	buffer := make([]byte, size)
	if _, err = file.Read(buffer); err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON err reading file", err).Fatal()
		return
	}
	fileType := http.DetectContentType(buffer)

	path := file.Name()
	input := &s3.CreateMultipartUploadInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(path),
		ContentType: aws.String(fileType),
	}
	_, err = s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON Bucket Creation Failed", err).Fatal()
		return
	}
	// Upload for the first time
	resp, err := s3Client.CreateMultipartUpload(input)
	if err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON CreateMultipartUpload Failed", err).Fatal()
		return
	}
	var curr, partLength int64
	var remaining = size
	var completedParts []*s3.CompletedPart
	partNumber := 1
	for curr = 0; remaining != 0; curr += partLength {
		if remaining < maxPartSize {
			partLength = remaining
		} else {
			partLength = maxPartSize
		}
		completedPart, err := uploadPart(s3Client, resp, buffer[curr:curr+partLength], partNumber)
		if err != nil {
			failureLog(function, args, startTime, "", "WORM_MODE ON uploadPart Failed", err).Fatal()
			err := abortMultipartUpload(s3Client, resp)
			if err != nil {
				failureLog(function, args, startTime, "", "WORM_MODE ON abortMultipartUpload Failed", err).Fatal()
			}
			return
		}
		remaining -= partLength
		partNumber++
		completedParts = append(completedParts, completedPart)
	}
	_, err = completeMultipartUpload(s3Client, resp, completedParts)
	if err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON completeMultipartUpload Failed", err).Fatal()
		return
	}
	// A second multipart upload of the same object must fail under WORM
	_, err = s3Client.CreateMultipartUpload(input)
	if err == nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON CreateMultipartUpload must fail, but it passed", nil).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}
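
// completeMultipartUpload finishes the multipart upload identified by resp
// using the given list of completed parts.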
func completeMultipartUpload(svc *s3.S3, resp *s3.CreateMultipartUploadOutput, completedParts []*s3.CompletedPart) (*s3.CompleteMultipartUploadOutput, error) {
	completeInput := &s3.CompleteMultipartUploadInput{
		Bucket:   resp.Bucket,
		Key:      resp.Key,
		UploadId: resp.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: completedParts,
		},
	}
	return svc.CompleteMultipartUpload(completeInput)
}
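
// uploadPart uploads a single part, retrying up to maxRetries times before
// giving up and returning the last error.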
func uploadPart(svc *s3.S3, resp *s3.CreateMultipartUploadOutput, fileBytes []byte, partNumber int) (*s3.CompletedPart, error) {
	tryNum := 1
	partInput := &s3.UploadPartInput{
		Body:          bytes.NewReader(fileBytes),
		Bucket:        resp.Bucket,
		Key:           resp.Key,
		PartNumber:    aws.Int64(int64(partNumber)),
		UploadId:      resp.UploadId,
		ContentLength: aws.Int64(int64(len(fileBytes))),
	}

	for tryNum <= maxRetries {
		uploadResult, err := svc.UploadPart(partInput)
		if err != nil {
			if tryNum == maxRetries {
				if aerr, ok := err.(awserr.Error); ok {
					return nil, aerr
				}
				return nil, err
			}
			tryNum++
		} else {
			return &s3.CompletedPart{
				ETag:       uploadResult.ETag,
				PartNumber: aws.Int64(int64(partNumber)),
			}, nil
		}
	}
	return nil, nil
}
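
// abortMultipartUpload aborts the multipart upload identified by resp so that
// any parts uploaded so far are discarded by the server.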
func abortMultipartUpload(svc *s3.S3, resp *s3.CreateMultipartUploadOutput) error {
	abortInput := &s3.AbortMultipartUploadInput{
		Bucket:   resp.Bucket,
		Key:      resp.Key,
		UploadId: resp.UploadId,
	}
	_, err := svc.AbortMultipartUpload(abortInput)
	return err
}
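
// main reads the endpoint and credentials from the environment, builds an S3
// client for the server under test, configures mint-style JSON logging, and
// runs the WORM test cases.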
func main() {
	endpoint := os.Getenv("SERVER_ENDPOINT")
	accessKey := os.Getenv("ACCESS_KEY")
	secretKey := os.Getenv("SECRET_KEY")
	secure := os.Getenv("ENABLE_HTTPS")
	sdkEndpoint := "http://" + endpoint
	if secure == "1" {
		sdkEndpoint = "https://" + endpoint
	}
	creds := credentials.NewStaticCredentials(accessKey, secretKey, "")
	newSession := session.New()
	s3Config := &aws.Config{
		Credentials:      creds,
		Endpoint:         aws.String(sdkEndpoint),
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	}

	// Create an S3 service object with the configured endpoint and credentials.
	s3Client := s3.New(newSession, s3Config)

	// Output to stdout instead of the default stderr
	log.SetOutput(os.Stdout)
	// create custom formatter
	mintFormatter := mintJSONFormatter{}
	// set custom formatter
	log.SetFormatter(&mintFormatter)
	// log Info or above -- success cases are Info level, failures are Fatal level
	log.SetLevel(log.InfoLevel)

	// execute tests
	// Test Put and Delete Object
	testPutDeleteObject(s3Client)
	// Test Copy Object
	testCopyObject(s3Client)
	// Test Multipart Upload
	testPutMultipart(s3Client)
}
28
mint/run/core/worm/run.sh
Executable file
28
mint/run/core/worm/run.sh
Executable file
@@ -0,0 +1,28 @@
#!/bin/bash
#
# Mint (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# handle command line arguments
if [ $# -ne 2 ]; then
    echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
    exit -1
fi

output_log_file="$1"
error_log_file="$2"

# run tests
/mint/run/core/worm/worm 1>>"$output_log_file" 2>"$error_log_file"