Use humanize constants for KiB, MiB and GiB units. (#3322)

This commit is contained in:
Bala FA 2016-11-22 18:18:22 -08:00 committed by Harshavardhana
parent c1ebcbcda2
commit 825000bc34
23 changed files with 170 additions and 135 deletions

View File

@ -24,6 +24,8 @@ import (
"strconv" "strconv"
"testing" "testing"
"time" "time"
humanize "github.com/dustin/go-humanize"
) )
// Prepare benchmark backend // Prepare benchmark backend
@ -107,7 +109,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
b.Fatal(err) b.Fatal(err)
} }
objSize := 128 * 1024 * 1024 objSize := 128 * humanize.MiByte
// PutObjectPart returns md5Sum of the object inserted. // PutObjectPart returns md5Sum of the object inserted.
// md5Sum variable is assigned with that value. // md5Sum variable is assigned with that value.

View File

@ -23,13 +23,14 @@ import (
"io/ioutil" "io/ioutil"
"net/http" "net/http"
humanize "github.com/dustin/go-humanize"
mux "github.com/gorilla/mux" mux "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/set" "github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/wildcard" "github.com/minio/minio/pkg/wildcard"
) )
// maximum supported access policy size. // maximum supported access policy size.
const maxAccessPolicySize = 20 * 1024 // 20KiB. const maxAccessPolicySize = 20 * humanize.KiByte
// Verify if a given action is valid for the url path based on the // Verify if a given action is valid for the url path based on the
// existing bucket access policy. // existing bucket access policy.

View File

@ -21,6 +21,7 @@ import (
"crypto/rand" "crypto/rand"
"testing" "testing"
humanize "github.com/dustin/go-humanize"
"github.com/klauspost/reedsolomon" "github.com/klauspost/reedsolomon"
) )
@ -48,8 +49,8 @@ func TestErasureCreateFile(t *testing.T) {
disks := setup.disks disks := setup.disks
// Prepare a slice of 1MB with random data. // Prepare a slice of 1MiB with random data.
data := make([]byte, 1*1024*1024) data := make([]byte, 1*humanize.MiByte)
_, err = rand.Read(data) _, err = rand.Read(data)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

View File

@ -22,6 +22,8 @@ import (
"os" "os"
"path" "path"
"testing" "testing"
humanize "github.com/dustin/go-humanize"
) )
// Test erasureHealFile() // Test erasureHealFile()
@ -39,8 +41,8 @@ func TestErasureHealFile(t *testing.T) {
disks := setup.disks disks := setup.disks
// Prepare a slice of 1MB with random data. // Prepare a slice of 1MiB with random data.
data := make([]byte, 1*1024*1024) data := make([]byte, 1*humanize.MiByte)
_, err = rand.Read(data) _, err = rand.Read(data)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -67,7 +69,7 @@ func TestErasureHealFile(t *testing.T) {
latest[0] = nil latest[0] = nil
outDated[0] = disks[0] outDated[0] = disks[0]
healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo) healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*humanize.MiByte, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -90,7 +92,7 @@ func TestErasureHealFile(t *testing.T) {
outDated[index] = disks[index] outDated[index] = disks[index]
} }
healCheckSums, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo) healCheckSums, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*humanize.MiByte, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -120,7 +122,7 @@ func TestErasureHealFile(t *testing.T) {
latest[index] = nil latest[index] = nil
outDated[index] = disks[index] outDated[index] = disks[index]
} }
_, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo) _, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*humanize.MiByte, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
if err == nil { if err == nil {
t.Error("Expected erasureHealFile() to fail when the number of available disks <= parityBlocks") t.Error("Expected erasureHealFile() to fail when the number of available disks <= parityBlocks")
} }

View File

@ -22,9 +22,11 @@ import (
"testing" "testing"
"time" "time"
"reflect"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/bpool" "github.com/minio/minio/pkg/bpool"
) )
import "reflect"
// Tests getReadDisks which returns readable disks slice from which we can // Tests getReadDisks which returns readable disks slice from which we can
// read parallelly. // read parallelly.
@ -260,8 +262,8 @@ func TestErasureReadFileDiskFail(t *testing.T) {
disks := setup.disks disks := setup.disks
// Prepare a slice of 1MB with random data. // Prepare a slice of 1humanize.MiByte with random data.
data := make([]byte, 1*1024*1024) data := make([]byte, 1*humanize.MiByte)
length := int64(len(data)) length := int64(len(data))
_, err = rand.Read(data) _, err = rand.Read(data)
if err != nil { if err != nil {
@ -333,7 +335,7 @@ func TestErasureReadFileOffsetLength(t *testing.T) {
// Initialize environment needed for the test. // Initialize environment needed for the test.
dataBlocks := 7 dataBlocks := 7
parityBlocks := 7 parityBlocks := 7
blockSize := int64(1 * 1024 * 1024) blockSize := int64(1 * humanize.MiByte)
setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize) setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
@ -343,8 +345,8 @@ func TestErasureReadFileOffsetLength(t *testing.T) {
disks := setup.disks disks := setup.disks
// Prepare a slice of 5MB with random data. // Prepare a slice of 5humanize.MiByte with random data.
data := make([]byte, 5*1024*1024) data := make([]byte, 5*humanize.MiByte)
length := int64(len(data)) length := int64(len(data))
_, err = rand.Read(data) _, err = rand.Read(data)
if err != nil { if err != nil {
@ -409,7 +411,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
// Initialize environment needed for the test. // Initialize environment needed for the test.
dataBlocks := 7 dataBlocks := 7
parityBlocks := 7 parityBlocks := 7
blockSize := int64(1 * 1024 * 1024) blockSize := int64(1 * humanize.MiByte)
setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize) setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
@ -419,8 +421,8 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
disks := setup.disks disks := setup.disks
// Prepare a slice of 5MB with random data. // Prepare a slice of 5MiB with random data.
data := make([]byte, 5*1024*1024) data := make([]byte, 5*humanize.MiByte)
length := int64(len(data)) length := int64(len(data))
_, err = rand.Read(data) _, err = rand.Read(data)
if err != nil { if err != nil {

View File

@ -23,6 +23,7 @@ import (
"strings" "strings"
"time" "time"
humanize "github.com/dustin/go-humanize"
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
"github.com/rs/cors" "github.com/rs/cors"
) )
@ -43,7 +44,7 @@ func registerHandlers(mux *router.Router, handlerFns ...HandlerFunc) http.Handle
// Set the body size limit to 6 Gb = Maximum object size + other possible data // Set the body size limit to 6 Gb = Maximum object size + other possible data
// in the same request // in the same request
const requestMaxBodySize = 1024 * 1024 * 1024 * (5 + 1) const requestMaxBodySize = (5 + 1) * humanize.GiByte
type requestSizeLimitHandler struct { type requestSizeLimitHandler struct {
handler http.Handler handler http.Handler

View File

@ -20,6 +20,7 @@ import (
"crypto/x509" "crypto/x509"
"time" "time"
humanize "github.com/dustin/go-humanize"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/minio/minio/pkg/objcache" "github.com/minio/minio/pkg/objcache"
) )
@ -70,7 +71,7 @@ var (
var ( var (
// Limit fields size (except file) to 1Mib since Policy document // Limit fields size (except file) to 1Mib since Policy document
// can reach that size according to https://aws.amazon.com/articles/1434 // can reach that size according to https://aws.amazon.com/articles/1434
maxFormFieldSize = int64(1024 * 1024) maxFormFieldSize = int64(1 * humanize.MiByte)
) )
var ( var (

View File

@ -24,6 +24,8 @@ import (
"runtime" "runtime"
"strings" "strings"
"testing" "testing"
humanize "github.com/dustin/go-humanize"
) )
// Wrapper for calling GetObject tests for both XL multiple disks and single node setup. // Wrapper for calling GetObject tests for both XL multiple disks and single node setup.
@ -50,7 +52,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
bytesData := []struct { bytesData := []struct {
byteData []byte byteData []byte
}{ }{
{generateBytesData(6 * 1024 * 1024)}, {generateBytesData(6 * humanize.MiByte)},
} }
// set of inputs for uploading the objects before tests for downloading is done. // set of inputs for uploading the objects before tests for downloading is done.
putObjectInputs := []struct { putObjectInputs := []struct {
@ -199,7 +201,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
bytesData := []struct { bytesData := []struct {
byteData []byte byteData []byte
}{ }{
{generateBytesData(6 * 1024 * 1024)}, {generateBytesData(6 * humanize.MiByte)},
} }
// set of inputs for uploading the objects before tests for downloading is done. // set of inputs for uploading the objects before tests for downloading is done.
putObjectInputs := []struct { putObjectInputs := []struct {
@ -314,7 +316,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
bytesData := []struct { bytesData := []struct {
byteData []byte byteData []byte
}{ }{
{generateBytesData(6 * 1024 * 1024)}, {generateBytesData(6 * humanize.MiByte)},
} }
// set of inputs for uploading the objects before tests for downloading is done. // set of inputs for uploading the objects before tests for downloading is done.
putObjectInputs := []struct { putObjectInputs := []struct {
@ -463,73 +465,73 @@ func BenchmarkGetObjectVerySmallXL(b *testing.B) {
// BenchmarkGetObject10KbFS - Benchmark FS.GetObject() for object size of 10KB. // BenchmarkGetObject10KbFS - Benchmark FS.GetObject() for object size of 10KB.
func BenchmarkGetObject10KbFS(b *testing.B) { func BenchmarkGetObject10KbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 10*1024) benchmarkGetObject(b, "FS", 10*humanize.KiByte)
} }
// BenchmarkGetObject10KbXL - Benchmark XL.GetObject() for object size of 10KB. // BenchmarkGetObject10KbXL - Benchmark XL.GetObject() for object size of 10KB.
func BenchmarkGetObject10KbXL(b *testing.B) { func BenchmarkGetObject10KbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 10*1024) benchmarkGetObject(b, "XL", 10*humanize.KiByte)
} }
// BenchmarkGetObject100KbFS - Benchmark FS.GetObject() for object size of 100KB. // BenchmarkGetObject100KbFS - Benchmark FS.GetObject() for object size of 100KB.
func BenchmarkGetObject100KbFS(b *testing.B) { func BenchmarkGetObject100KbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 100*1024) benchmarkGetObject(b, "FS", 100*humanize.KiByte)
} }
// BenchmarkGetObject100KbXL - Benchmark XL.GetObject() for object size of 100KB. // BenchmarkGetObject100KbXL - Benchmark XL.GetObject() for object size of 100KB.
func BenchmarkGetObject100KbXL(b *testing.B) { func BenchmarkGetObject100KbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 100*1024) benchmarkGetObject(b, "XL", 100*humanize.KiByte)
} }
// BenchmarkGetObject1MbFS - Benchmark FS.GetObject() for object size of 1MB. // BenchmarkGetObject1MbFS - Benchmark FS.GetObject() for object size of 1MB.
func BenchmarkGetObject1MbFS(b *testing.B) { func BenchmarkGetObject1MbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 1024*1024) benchmarkGetObject(b, "FS", 1*humanize.MiByte)
} }
// BenchmarkGetObject1MbXL - Benchmark XL.GetObject() for object size of 1MB. // BenchmarkGetObject1MbXL - Benchmark XL.GetObject() for object size of 1MB.
func BenchmarkGetObject1MbXL(b *testing.B) { func BenchmarkGetObject1MbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 1024*1024) benchmarkGetObject(b, "XL", 1*humanize.MiByte)
} }
// BenchmarkGetObject5MbFS - Benchmark FS.GetObject() for object size of 5MB. // BenchmarkGetObject5MbFS - Benchmark FS.GetObject() for object size of 5MB.
func BenchmarkGetObject5MbFS(b *testing.B) { func BenchmarkGetObject5MbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 5*1024*1024) benchmarkGetObject(b, "FS", 5*humanize.MiByte)
} }
// BenchmarkGetObject5MbXL - Benchmark XL.GetObject() for object size of 5MB. // BenchmarkGetObject5MbXL - Benchmark XL.GetObject() for object size of 5MB.
func BenchmarkGetObject5MbXL(b *testing.B) { func BenchmarkGetObject5MbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 5*1024*1024) benchmarkGetObject(b, "XL", 5*humanize.MiByte)
} }
// BenchmarkGetObject10MbFS - Benchmark FS.GetObject() for object size of 10MB. // BenchmarkGetObject10MbFS - Benchmark FS.GetObject() for object size of 10MB.
func BenchmarkGetObject10MbFS(b *testing.B) { func BenchmarkGetObject10MbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 10*1024*1024) benchmarkGetObject(b, "FS", 10*humanize.MiByte)
} }
// BenchmarkGetObject10MbXL - Benchmark XL.GetObject() for object size of 10MB. // BenchmarkGetObject10MbXL - Benchmark XL.GetObject() for object size of 10MB.
func BenchmarkGetObject10MbXL(b *testing.B) { func BenchmarkGetObject10MbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 10*1024*1024) benchmarkGetObject(b, "XL", 10*humanize.MiByte)
} }
// BenchmarkGetObject25MbFS - Benchmark FS.GetObject() for object size of 25MB. // BenchmarkGetObject25MbFS - Benchmark FS.GetObject() for object size of 25MB.
func BenchmarkGetObject25MbFS(b *testing.B) { func BenchmarkGetObject25MbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 25*1024*1024) benchmarkGetObject(b, "FS", 25*humanize.MiByte)
} }
// BenchmarkGetObject25MbXL - Benchmark XL.GetObject() for object size of 25MB. // BenchmarkGetObject25MbXL - Benchmark XL.GetObject() for object size of 25MB.
func BenchmarkGetObject25MbXL(b *testing.B) { func BenchmarkGetObject25MbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 25*1024*1024) benchmarkGetObject(b, "XL", 25*humanize.MiByte)
} }
// BenchmarkGetObject50MbFS - Benchmark FS.GetObject() for object size of 50MB. // BenchmarkGetObject50MbFS - Benchmark FS.GetObject() for object size of 50MB.
func BenchmarkGetObject50MbFS(b *testing.B) { func BenchmarkGetObject50MbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 50*1024*1024) benchmarkGetObject(b, "FS", 50*humanize.MiByte)
} }
// BenchmarkGetObject50MbXL - Benchmark XL.GetObject() for object size of 50MB. // BenchmarkGetObject50MbXL - Benchmark XL.GetObject() for object size of 50MB.
func BenchmarkGetObject50MbXL(b *testing.B) { func BenchmarkGetObject50MbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 50*1024*1024) benchmarkGetObject(b, "XL", 50*humanize.MiByte)
} }
// parallel benchmarks for ObjectLayer.GetObject() . // parallel benchmarks for ObjectLayer.GetObject() .
@ -546,71 +548,71 @@ func BenchmarkGetObjectParallelVerySmallXL(b *testing.B) {
// BenchmarkGetObjectParallel10KbFS - Benchmark FS.GetObject() for object size of 10KB. // BenchmarkGetObjectParallel10KbFS - Benchmark FS.GetObject() for object size of 10KB.
func BenchmarkGetObjectParallel10KbFS(b *testing.B) { func BenchmarkGetObjectParallel10KbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 10*1024) benchmarkGetObjectParallel(b, "FS", 10*humanize.KiByte)
} }
// BenchmarkGetObjectParallel10KbXL - Benchmark XL.GetObject() for object size of 10KB. // BenchmarkGetObjectParallel10KbXL - Benchmark XL.GetObject() for object size of 10KB.
func BenchmarkGetObjectParallel10KbXL(b *testing.B) { func BenchmarkGetObjectParallel10KbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 10*1024) benchmarkGetObjectParallel(b, "XL", 10*humanize.KiByte)
} }
// BenchmarkGetObjectParallel100KbFS - Benchmark FS.GetObject() for object size of 100KB. // BenchmarkGetObjectParallel100KbFS - Benchmark FS.GetObject() for object size of 100KB.
func BenchmarkGetObjectParallel100KbFS(b *testing.B) { func BenchmarkGetObjectParallel100KbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 100*1024) benchmarkGetObjectParallel(b, "FS", 100*humanize.KiByte)
} }
// BenchmarkGetObjectParallel100KbXL - Benchmark XL.GetObject() for object size of 100KB. // BenchmarkGetObjectParallel100KbXL - Benchmark XL.GetObject() for object size of 100KB.
func BenchmarkGetObjectParallel100KbXL(b *testing.B) { func BenchmarkGetObjectParallel100KbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 100*1024) benchmarkGetObjectParallel(b, "XL", 100*humanize.KiByte)
} }
// BenchmarkGetObjectParallel1MbFS - Benchmark FS.GetObject() for object size of 1MB. // BenchmarkGetObjectParallel1MbFS - Benchmark FS.GetObject() for object size of 1MB.
func BenchmarkGetObjectParallel1MbFS(b *testing.B) { func BenchmarkGetObjectParallel1MbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 1024*1024) benchmarkGetObjectParallel(b, "FS", 1*humanize.MiByte)
} }
// BenchmarkGetObjectParallel1MbXL - Benchmark XL.GetObject() for object size of 1MB. // BenchmarkGetObjectParallel1MbXL - Benchmark XL.GetObject() for object size of 1MB.
func BenchmarkGetObjectParallel1MbXL(b *testing.B) { func BenchmarkGetObjectParallel1MbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 1024*1024) benchmarkGetObjectParallel(b, "XL", 1*humanize.MiByte)
} }
// BenchmarkGetObjectParallel5MbFS - Benchmark FS.GetObject() for object size of 5MB. // BenchmarkGetObjectParallel5MbFS - Benchmark FS.GetObject() for object size of 5MB.
func BenchmarkGetObjectParallel5MbFS(b *testing.B) { func BenchmarkGetObjectParallel5MbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 5*1024*1024) benchmarkGetObjectParallel(b, "FS", 5*humanize.MiByte)
} }
// BenchmarkGetObjectParallel5MbXL - Benchmark XL.GetObject() for object size of 5MB. // BenchmarkGetObjectParallel5MbXL - Benchmark XL.GetObject() for object size of 5MB.
func BenchmarkGetObjectParallel5MbXL(b *testing.B) { func BenchmarkGetObjectParallel5MbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 5*1024*1024) benchmarkGetObjectParallel(b, "XL", 5*humanize.MiByte)
} }
// BenchmarkGetObjectParallel10MbFS - Benchmark FS.GetObject() for object size of 10MB. // BenchmarkGetObjectParallel10MbFS - Benchmark FS.GetObject() for object size of 10MB.
func BenchmarkGetObjectParallel10MbFS(b *testing.B) { func BenchmarkGetObjectParallel10MbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 10*1024*1024) benchmarkGetObjectParallel(b, "FS", 10*humanize.MiByte)
} }
// BenchmarkGetObjectParallel10MbXL - Benchmark XL.GetObject() for object size of 10MB. // BenchmarkGetObjectParallel10MbXL - Benchmark XL.GetObject() for object size of 10MB.
func BenchmarkGetObjectParallel10MbXL(b *testing.B) { func BenchmarkGetObjectParallel10MbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 10*1024*1024) benchmarkGetObjectParallel(b, "XL", 10*humanize.MiByte)
} }
// BenchmarkGetObjectParallel25MbFS - Benchmark FS.GetObject() for object size of 25MB. // BenchmarkGetObjectParallel25MbFS - Benchmark FS.GetObject() for object size of 25MB.
func BenchmarkGetObjectParallel25MbFS(b *testing.B) { func BenchmarkGetObjectParallel25MbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 25*1024*1024) benchmarkGetObjectParallel(b, "FS", 25*humanize.MiByte)
} }
// BenchmarkGetObjectParallel25MbXL - Benchmark XL.GetObject() for object size of 25MB. // BenchmarkGetObjectParallel25MbXL - Benchmark XL.GetObject() for object size of 25MB.
func BenchmarkGetObjectParallel25MbXL(b *testing.B) { func BenchmarkGetObjectParallel25MbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 25*1024*1024) benchmarkGetObjectParallel(b, "XL", 25*humanize.MiByte)
} }
// BenchmarkGetObjectParallel50MbFS - Benchmark FS.GetObject() for object size of 50MB. // BenchmarkGetObjectParallel50MbFS - Benchmark FS.GetObject() for object size of 50MB.
func BenchmarkGetObjectParallel50MbFS(b *testing.B) { func BenchmarkGetObjectParallel50MbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 50*1024*1024) benchmarkGetObjectParallel(b, "FS", 50*humanize.MiByte)
} }
// BenchmarkGetObjectParallel50MbXL - Benchmark XL.GetObject() for object size of 50MB. // BenchmarkGetObjectParallel50MbXL - Benchmark XL.GetObject() for object size of 50MB.
func BenchmarkGetObjectParallel50MbXL(b *testing.B) { func BenchmarkGetObjectParallel50MbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 50*1024*1024) benchmarkGetObjectParallel(b, "XL", 50*humanize.MiByte)
} }

View File

@ -21,6 +21,8 @@ import (
"fmt" "fmt"
"strings" "strings"
"testing" "testing"
humanize "github.com/dustin/go-humanize"
) )
// Wrapper for calling NewMultipartUpload tests for both XL multiple disks and single node setup. // Wrapper for calling NewMultipartUpload tests for both XL multiple disks and single node setup.
@ -1782,7 +1784,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
uploadIDs = append(uploadIDs, uploadID) uploadIDs = append(uploadIDs, uploadID)
// Parts with size greater than 5 MB. // Parts with size greater than 5 MB.
// Generating a 6MB byte array. // Generating a 6MB byte array.
validPart := bytes.Repeat([]byte("abcdef"), 1024*1024) validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)
validPartMD5 := getMD5Hash(validPart) validPartMD5 := getMD5Hash(validPart)
// Create multipart parts. // Create multipart parts.
// Need parts to be uploaded before CompleteMultiPartUpload can be called tested. // Need parts to be uploaded before CompleteMultiPartUpload can be called tested.
@ -1941,41 +1943,41 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
// BenchmarkPutObjectPart5MbFS - Benchmark FS.PutObjectPart() for object size of 5MB. // BenchmarkPutObjectPart5MbFS - Benchmark FS.PutObjectPart() for object size of 5MB.
func BenchmarkPutObjectPart5MbFS(b *testing.B) { func BenchmarkPutObjectPart5MbFS(b *testing.B) {
benchmarkPutObjectPart(b, "FS", 5*1024*1024) benchmarkPutObjectPart(b, "FS", 5*humanize.MiByte)
} }
// BenchmarkPutObjectPart5MbXL - Benchmark XL.PutObjectPart() for object size of 5MB. // BenchmarkPutObjectPart5MbXL - Benchmark XL.PutObjectPart() for object size of 5MB.
func BenchmarkPutObjectPart5MbXL(b *testing.B) { func BenchmarkPutObjectPart5MbXL(b *testing.B) {
benchmarkPutObjectPart(b, "XL", 5*1024*1024) benchmarkPutObjectPart(b, "XL", 5*humanize.MiByte)
} }
// BenchmarkPutObjectPart10MbFS - Benchmark FS.PutObjectPart() for object size of 10MB. // BenchmarkPutObjectPart10MbFS - Benchmark FS.PutObjectPart() for object size of 10MB.
func BenchmarkPutObjectPart10MbFS(b *testing.B) { func BenchmarkPutObjectPart10MbFS(b *testing.B) {
benchmarkPutObjectPart(b, "FS", 10*1024*1024) benchmarkPutObjectPart(b, "FS", 10*humanize.MiByte)
} }
// BenchmarkPutObjectPart10MbXL - Benchmark XL.PutObjectPart() for object size of 10MB. // BenchmarkPutObjectPart10MbXL - Benchmark XL.PutObjectPart() for object size of 10MB.
func BenchmarkPutObjectPart10MbXL(b *testing.B) { func BenchmarkPutObjectPart10MbXL(b *testing.B) {
benchmarkPutObjectPart(b, "XL", 10*1024*1024) benchmarkPutObjectPart(b, "XL", 10*humanize.MiByte)
} }
// BenchmarkPutObjectPart25MbFS - Benchmark FS.PutObjectPart() for object size of 25MB. // BenchmarkPutObjectPart25MbFS - Benchmark FS.PutObjectPart() for object size of 25MB.
func BenchmarkPutObjectPart25MbFS(b *testing.B) { func BenchmarkPutObjectPart25MbFS(b *testing.B) {
benchmarkPutObjectPart(b, "FS", 25*1024*1024) benchmarkPutObjectPart(b, "FS", 25*humanize.MiByte)
} }
// BenchmarkPutObjectPart25MbXL - Benchmark XL.PutObjectPart() for object size of 25MB. // BenchmarkPutObjectPart25MbXL - Benchmark XL.PutObjectPart() for object size of 25MB.
func BenchmarkPutObjectPart25MbXL(b *testing.B) { func BenchmarkPutObjectPart25MbXL(b *testing.B) {
benchmarkPutObjectPart(b, "XL", 25*1024*1024) benchmarkPutObjectPart(b, "XL", 25*humanize.MiByte)
} }
// BenchmarkPutObjectPart50MbFS - Benchmark FS.PutObjectPart() for object size of 50MB. // BenchmarkPutObjectPart50MbFS - Benchmark FS.PutObjectPart() for object size of 50MB.
func BenchmarkPutObjectPart50MbFS(b *testing.B) { func BenchmarkPutObjectPart50MbFS(b *testing.B) {
benchmarkPutObjectPart(b, "FS", 50*1024*1024) benchmarkPutObjectPart(b, "FS", 50*humanize.MiByte)
} }
// BenchmarkPutObjectPart50MbXL - Benchmark XL.PutObjectPart() for object size of 50MB. // BenchmarkPutObjectPart50MbXL - Benchmark XL.PutObjectPart() for object size of 50MB.
func BenchmarkPutObjectPart50MbXL(b *testing.B) { func BenchmarkPutObjectPart50MbXL(b *testing.B) {
benchmarkPutObjectPart(b, "XL", 50*1024*1024) benchmarkPutObjectPart(b, "XL", 50*humanize.MiByte)
} }

View File

@ -25,6 +25,8 @@ import (
"path" "path"
"runtime" "runtime"
"testing" "testing"
humanize "github.com/dustin/go-humanize"
) )
func md5Header(data []byte) map[string]string { func md5Header(data []byte) map[string]string {
@ -59,7 +61,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
var ( var (
nilBytes []byte nilBytes []byte
data = []byte("hello") data = []byte("hello")
fiveMBBytes = bytes.Repeat([]byte("a"), 5*1024*124) fiveMBBytes = bytes.Repeat([]byte("a"), 5*humanize.MiByte)
) )
invalidMD5 := getMD5Hash([]byte("meh")) invalidMD5 := getMD5Hash([]byte("meh"))
invalidMD5Header := md5Header([]byte("meh")) invalidMD5Header := md5Header([]byte("meh"))
@ -354,7 +356,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
} }
// Upload part1. // Upload part1.
fiveMBBytes := bytes.Repeat([]byte("a"), 5*1024*1024) fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
md5Writer := md5.New() md5Writer := md5.New()
md5Writer.Write(fiveMBBytes) md5Writer.Write(fiveMBBytes)
etag1 := hex.EncodeToString(md5Writer.Sum(nil)) etag1 := hex.EncodeToString(md5Writer.Sum(nil))
@ -422,73 +424,73 @@ func BenchmarkPutObjectVerySmallXL(b *testing.B) {
// BenchmarkPutObject10KbFS - Benchmark FS.PutObject() for object size of 10KB. // BenchmarkPutObject10KbFS - Benchmark FS.PutObject() for object size of 10KB.
func BenchmarkPutObject10KbFS(b *testing.B) { func BenchmarkPutObject10KbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 10*1024) benchmarkPutObject(b, "FS", 10*humanize.KiByte)
} }
// BenchmarkPutObject10KbXL - Benchmark XL.PutObject() for object size of 10KB. // BenchmarkPutObject10KbXL - Benchmark XL.PutObject() for object size of 10KB.
func BenchmarkPutObject10KbXL(b *testing.B) { func BenchmarkPutObject10KbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 10*1024) benchmarkPutObject(b, "XL", 10*humanize.KiByte)
} }
// BenchmarkPutObject100KbFS - Benchmark FS.PutObject() for object size of 100KB. // BenchmarkPutObject100KbFS - Benchmark FS.PutObject() for object size of 100KB.
func BenchmarkPutObject100KbFS(b *testing.B) { func BenchmarkPutObject100KbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 100*1024) benchmarkPutObject(b, "FS", 100*humanize.KiByte)
} }
// BenchmarkPutObject100KbXL - Benchmark XL.PutObject() for object size of 100KB. // BenchmarkPutObject100KbXL - Benchmark XL.PutObject() for object size of 100KB.
func BenchmarkPutObject100KbXL(b *testing.B) { func BenchmarkPutObject100KbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 100*1024) benchmarkPutObject(b, "XL", 100*humanize.KiByte)
} }
// BenchmarkPutObject1MbFS - Benchmark FS.PutObject() for object size of 1MB. // BenchmarkPutObject1MbFS - Benchmark FS.PutObject() for object size of 1MB.
func BenchmarkPutObject1MbFS(b *testing.B) { func BenchmarkPutObject1MbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 1024*1024) benchmarkPutObject(b, "FS", 1*humanize.MiByte)
} }
// BenchmarkPutObject1MbXL - Benchmark XL.PutObject() for object size of 1MB. // BenchmarkPutObject1MbXL - Benchmark XL.PutObject() for object size of 1MB.
func BenchmarkPutObject1MbXL(b *testing.B) { func BenchmarkPutObject1MbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 1024*1024) benchmarkPutObject(b, "XL", 1*humanize.MiByte)
} }
// BenchmarkPutObject5MbFS - Benchmark FS.PutObject() for object size of 5MB. // BenchmarkPutObject5MbFS - Benchmark FS.PutObject() for object size of 5MB.
func BenchmarkPutObject5MbFS(b *testing.B) { func BenchmarkPutObject5MbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 5*1024*1024) benchmarkPutObject(b, "FS", 5*humanize.MiByte)
} }
// BenchmarkPutObject5MbXL - Benchmark XL.PutObject() for object size of 5MB. // BenchmarkPutObject5MbXL - Benchmark XL.PutObject() for object size of 5MB.
func BenchmarkPutObject5MbXL(b *testing.B) { func BenchmarkPutObject5MbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 5*1024*1024) benchmarkPutObject(b, "XL", 5*humanize.MiByte)
} }
// BenchmarkPutObject10MbFS - Benchmark FS.PutObject() for object size of 10MB. // BenchmarkPutObject10MbFS - Benchmark FS.PutObject() for object size of 10MB.
func BenchmarkPutObject10MbFS(b *testing.B) { func BenchmarkPutObject10MbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 10*1024*1024) benchmarkPutObject(b, "FS", 10*humanize.MiByte)
} }
// BenchmarkPutObject10MbXL - Benchmark XL.PutObject() for object size of 10MB. // BenchmarkPutObject10MbXL - Benchmark XL.PutObject() for object size of 10MB.
func BenchmarkPutObject10MbXL(b *testing.B) { func BenchmarkPutObject10MbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 10*1024*1024) benchmarkPutObject(b, "XL", 10*humanize.MiByte)
} }
// BenchmarkPutObject25MbFS - Benchmark FS.PutObject() for object size of 25MB. // BenchmarkPutObject25MbFS - Benchmark FS.PutObject() for object size of 25MB.
func BenchmarkPutObject25MbFS(b *testing.B) { func BenchmarkPutObject25MbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 25*1024*1024) benchmarkPutObject(b, "FS", 25*humanize.MiByte)
} }
// BenchmarkPutObject25MbXL - Benchmark XL.PutObject() for object size of 25MB. // BenchmarkPutObject25MbXL - Benchmark XL.PutObject() for object size of 25MB.
func BenchmarkPutObject25MbXL(b *testing.B) { func BenchmarkPutObject25MbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 25*1024*1024) benchmarkPutObject(b, "XL", 25*humanize.MiByte)
} }
// BenchmarkPutObject50MbFS - Benchmark FS.PutObject() for object size of 50MB. // BenchmarkPutObject50MbFS - Benchmark FS.PutObject() for object size of 50MB.
func BenchmarkPutObject50MbFS(b *testing.B) { func BenchmarkPutObject50MbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 50*1024*1024) benchmarkPutObject(b, "FS", 50*humanize.MiByte)
} }
// BenchmarkPutObject50MbXL - Benchmark XL.PutObject() for object size of 50MB. // BenchmarkPutObject50MbXL - Benchmark XL.PutObject() for object size of 50MB.
func BenchmarkPutObject50MbXL(b *testing.B) { func BenchmarkPutObject50MbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 50*1024*1024) benchmarkPutObject(b, "XL", 50*humanize.MiByte)
} }
// parallel benchmarks for ObjectLayer.PutObject() . // parallel benchmarks for ObjectLayer.PutObject() .
@ -505,61 +507,61 @@ func BenchmarkParallelPutObjectVerySmallXL(b *testing.B) {
// BenchmarkParallelPutObject10KbFS - BenchmarkParallel FS.PutObject() for object size of 10KB. // BenchmarkParallelPutObject10KbFS - BenchmarkParallel FS.PutObject() for object size of 10KB.
func BenchmarkParallelPutObject10KbFS(b *testing.B) { func BenchmarkParallelPutObject10KbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 10*1024) benchmarkPutObjectParallel(b, "FS", 10*humanize.KiByte)
} }
// BenchmarkParallelPutObject10KbXL - BenchmarkParallel XL.PutObject() for object size of 10KB. // BenchmarkParallelPutObject10KbXL - BenchmarkParallel XL.PutObject() for object size of 10KB.
func BenchmarkParallelPutObject10KbXL(b *testing.B) { func BenchmarkParallelPutObject10KbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 10*1024) benchmarkPutObjectParallel(b, "XL", 10*humanize.KiByte)
} }
// BenchmarkParallelPutObject100KbFS - BenchmarkParallel FS.PutObject() for object size of 100KB. // BenchmarkParallelPutObject100KbFS - BenchmarkParallel FS.PutObject() for object size of 100KB.
func BenchmarkParallelPutObject100KbFS(b *testing.B) { func BenchmarkParallelPutObject100KbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 100*1024) benchmarkPutObjectParallel(b, "FS", 100*humanize.KiByte)
} }
// BenchmarkParallelPutObject100KbXL - BenchmarkParallel XL.PutObject() for object size of 100KB. // BenchmarkParallelPutObject100KbXL - BenchmarkParallel XL.PutObject() for object size of 100KB.
func BenchmarkParallelPutObject100KbXL(b *testing.B) { func BenchmarkParallelPutObject100KbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 100*1024) benchmarkPutObjectParallel(b, "XL", 100*humanize.KiByte)
} }
// BenchmarkParallelPutObject1MbFS - BenchmarkParallel FS.PutObject() for object size of 1MB. // BenchmarkParallelPutObject1MbFS - BenchmarkParallel FS.PutObject() for object size of 1MB.
func BenchmarkParallelPutObject1MbFS(b *testing.B) { func BenchmarkParallelPutObject1MbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 1024*1024) benchmarkPutObjectParallel(b, "FS", 1*humanize.MiByte)
} }
// BenchmarkParallelPutObject1MbXL - BenchmarkParallel XL.PutObject() for object size of 1MB. // BenchmarkParallelPutObject1MbXL - BenchmarkParallel XL.PutObject() for object size of 1MB.
func BenchmarkParallelPutObject1MbXL(b *testing.B) { func BenchmarkParallelPutObject1MbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 1024*1024) benchmarkPutObjectParallel(b, "XL", 1*humanize.MiByte)
} }
// BenchmarkParallelPutObject5MbFS - BenchmarkParallel FS.PutObject() for object size of 5MB. // BenchmarkParallelPutObject5MbFS - BenchmarkParallel FS.PutObject() for object size of 5MB.
func BenchmarkParallelPutObject5MbFS(b *testing.B) { func BenchmarkParallelPutObject5MbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 5*1024*1024) benchmarkPutObjectParallel(b, "FS", 5*humanize.MiByte)
} }
// BenchmarkParallelPutObject5MbXL - BenchmarkParallel XL.PutObject() for object size of 5MB. // BenchmarkParallelPutObject5MbXL - BenchmarkParallel XL.PutObject() for object size of 5MB.
func BenchmarkParallelPutObject5MbXL(b *testing.B) { func BenchmarkParallelPutObject5MbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 5*1024*1024) benchmarkPutObjectParallel(b, "XL", 5*humanize.MiByte)
} }
// BenchmarkParallelPutObject10MbFS - BenchmarkParallel FS.PutObject() for object size of 10MB. // BenchmarkParallelPutObject10MbFS - BenchmarkParallel FS.PutObject() for object size of 10MB.
func BenchmarkParallelPutObject10MbFS(b *testing.B) { func BenchmarkParallelPutObject10MbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 10*1024*1024) benchmarkPutObjectParallel(b, "FS", 10*humanize.MiByte)
} }
// BenchmarkParallelPutObject10MbXL - BenchmarkParallel XL.PutObject() for object size of 10MB. // BenchmarkParallelPutObject10MbXL - BenchmarkParallel XL.PutObject() for object size of 10MB.
func BenchmarkParallelPutObject10MbXL(b *testing.B) { func BenchmarkParallelPutObject10MbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 10*1024*1024) benchmarkPutObjectParallel(b, "XL", 10*humanize.MiByte)
} }
// BenchmarkParallelPutObject25MbFS - BenchmarkParallel FS.PutObject() for object size of 25MB. // BenchmarkParallelPutObject25MbFS - BenchmarkParallel FS.PutObject() for object size of 25MB.
func BenchmarkParallelPutObject25MbFS(b *testing.B) { func BenchmarkParallelPutObject25MbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 25*1024*1024) benchmarkPutObjectParallel(b, "FS", 25*humanize.MiByte)
} }
// BenchmarkParallelPutObject25MbXL - BenchmarkParallel XL.PutObject() for object size of 25MB. // BenchmarkParallelPutObject25MbXL - BenchmarkParallel XL.PutObject() for object size of 25MB.
func BenchmarkParallelPutObject25MbXL(b *testing.B) { func BenchmarkParallelPutObject25MbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 25*1024*1024) benchmarkPutObjectParallel(b, "XL", 25*humanize.MiByte)
} }

View File

@ -22,14 +22,16 @@ import (
"runtime" "runtime"
"strings" "strings"
"sync" "sync"
humanize "github.com/dustin/go-humanize"
) )
const ( const (
// Block size used for all internal operations version 1. // Block size used for all internal operations version 1.
blockSizeV1 = 10 * 1024 * 1024 // 10MiB. blockSizeV1 = 10 * humanize.MiByte
// Staging buffer read size for all internal operations version 1. // Staging buffer read size for all internal operations version 1.
readSizeV1 = 1 * 1024 * 1024 // 1MiB. readSizeV1 = 1 * humanize.MiByte
// Buckets meta prefix. // Buckets meta prefix.
bucketMetaPrefix = "buckets" bucketMetaPrefix = "buckets"

View File

@ -28,6 +28,8 @@ import (
"strconv" "strconv"
"sync" "sync"
"testing" "testing"
humanize "github.com/dustin/go-humanize"
) )
// Type to capture different modifications to API request to simulate failure cases. // Type to capture different modifications to API request to simulate failure cases.
@ -58,7 +60,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
bytesData := []struct { bytesData := []struct {
byteData []byte byteData []byte
}{ }{
{generateBytesData(6 * 1024 * 1024)}, {generateBytesData(6 * humanize.MiByte)},
} }
// set of inputs for uploading the objects before tests for downloading is done. // set of inputs for uploading the objects before tests for downloading is done.
putObjectInputs := []struct { putObjectInputs := []struct {
@ -205,7 +207,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
bytesData := []struct { bytesData := []struct {
byteData []byte byteData []byte
}{ }{
{generateBytesData(6 * 1024 * 1024)}, {generateBytesData(6 * humanize.MiByte)},
} }
// set of inputs for uploading the objects before tests for downloading is done. // set of inputs for uploading the objects before tests for downloading is done.
putObjectInputs := []struct { putObjectInputs := []struct {
@ -421,9 +423,9 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
credentials credential, t *testing.T) { credentials credential, t *testing.T) {
objectName := "test-object" objectName := "test-object"
bytesDataLen := 65 * 1024 bytesDataLen := 65 * humanize.KiByte
bytesData := bytes.Repeat([]byte{'a'}, bytesDataLen) bytesData := bytes.Repeat([]byte{'a'}, bytesDataLen)
oneKData := bytes.Repeat([]byte("a"), 1024) oneKData := bytes.Repeat([]byte("a"), 1*humanize.KiByte)
err := initEventNotifier(obj) err := initEventNotifier(obj)
if err != nil { if err != nil {
@ -465,7 +467,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName: objectName, objectName: objectName,
data: bytesData, data: bytesData,
dataLen: len(bytesData), dataLen: len(bytesData),
chunkSize: 64 * 1024, // 64k chunkSize: 64 * humanize.KiByte,
expectedContent: []byte{}, expectedContent: []byte{},
expectedRespStatus: http.StatusOK, expectedRespStatus: http.StatusOK,
accessKey: credentials.AccessKeyID, accessKey: credentials.AccessKeyID,
@ -479,7 +481,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName: objectName, objectName: objectName,
data: bytesData, data: bytesData,
dataLen: len(bytesData), dataLen: len(bytesData),
chunkSize: 1 * 1024, // 1k chunkSize: 1 * humanize.KiByte,
expectedContent: []byte{}, expectedContent: []byte{},
expectedRespStatus: http.StatusOK, expectedRespStatus: http.StatusOK,
accessKey: credentials.AccessKeyID, accessKey: credentials.AccessKeyID,
@ -493,7 +495,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName: objectName, objectName: objectName,
data: bytesData, data: bytesData,
dataLen: len(bytesData), dataLen: len(bytesData),
chunkSize: 64 * 1024, // 64k chunkSize: 64 * humanize.KiByte,
expectedContent: []byte{}, expectedContent: []byte{},
expectedRespStatus: http.StatusForbidden, expectedRespStatus: http.StatusForbidden,
accessKey: "", accessKey: "",
@ -507,7 +509,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName: objectName, objectName: objectName,
data: bytesData, data: bytesData,
dataLen: len(bytesData), dataLen: len(bytesData),
chunkSize: 64 * 1024, // 64k chunkSize: 64 * humanize.KiByte,
expectedContent: []byte{}, expectedContent: []byte{},
expectedRespStatus: http.StatusBadRequest, expectedRespStatus: http.StatusBadRequest,
accessKey: credentials.AccessKeyID, accessKey: credentials.AccessKeyID,
@ -522,7 +524,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName: objectName, objectName: objectName,
data: bytesData, data: bytesData,
dataLen: len(bytesData), dataLen: len(bytesData),
chunkSize: 100 * 1024, // 100k chunkSize: 100 * humanize.KiByte,
expectedContent: []byte{}, expectedContent: []byte{},
expectedRespStatus: http.StatusOK, expectedRespStatus: http.StatusOK,
accessKey: credentials.AccessKeyID, accessKey: credentials.AccessKeyID,
@ -696,7 +698,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
} }
objectName := "test-object" objectName := "test-object"
// byte data for PutObject. // byte data for PutObject.
bytesData := generateBytesData(6 * 1024 * 1024) bytesData := generateBytesData(6 * humanize.MiByte)
copySourceHeader := http.Header{} copySourceHeader := http.Header{}
copySourceHeader.Set("X-Amz-Copy-Source", "somewhere") copySourceHeader.Set("X-Amz-Copy-Source", "somewhere")
@ -940,7 +942,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
bytesData := []struct { bytesData := []struct {
byteData []byte byteData []byte
}{ }{
{generateBytesData(6 * 1024 * 1024)}, {generateBytesData(6 * humanize.MiByte)},
} }
buffers := []*bytes.Buffer{ buffers := []*bytes.Buffer{
@ -1404,7 +1406,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
// Parts with size greater than 5 MB. // Parts with size greater than 5 MB.
// Generating a 6MB byte array. // Generating a 6MB byte array.
validPart := bytes.Repeat([]byte("abcdef"), 1024*1024) validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)
validPartMD5 := getMD5Hash(validPart) validPartMD5 := getMD5Hash(validPart)
// Create multipart parts. // Create multipart parts.
// Need parts to be uploaded before CompleteMultiPartUpload can be called tested. // Need parts to be uploaded before CompleteMultiPartUpload can be called tested.
@ -1759,7 +1761,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
// Parts with size greater than 5 MB. // Parts with size greater than 5 MB.
// Generating a 6MB byte array. // Generating a 6MB byte array.
validPart := bytes.Repeat([]byte("abcdef"), 1024*1024) validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)
validPartMD5 := getMD5Hash(validPart) validPartMD5 := getMD5Hash(validPart)
// Create multipart parts. // Create multipart parts.
// Need parts to be uploaded before AbortMultiPartUpload can be called tested. // Need parts to be uploaded before AbortMultiPartUpload can be called tested.
@ -1914,7 +1916,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
bytesData := []struct { bytesData := []struct {
byteData []byte byteData []byte
}{ }{
{generateBytesData(6 * 1024 * 1024)}, {generateBytesData(6 * humanize.MiByte)},
} }
// set of inputs for uploading the objects before tests for deleting them is done. // set of inputs for uploading the objects before tests for deleting them is done.

View File

@ -22,6 +22,8 @@ import (
"math/rand" "math/rand"
"strconv" "strconv"
humanize "github.com/dustin/go-humanize"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -98,8 +100,8 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, c TestErr
if err != nil { if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err) c.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
// Create a byte array of 5MB. // Create a byte array of 5MiB.
data := bytes.Repeat([]byte("0123456789abcdef"), 5*1024*1024/16) data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16)
completedParts := completeMultipartUpload{} completedParts := completeMultipartUpload{}
for i := 1; i <= 10; i++ { for i := 1; i <= 10; i++ {
expectedMD5Sumhex := getMD5Hash(data) expectedMD5Sumhex := getMD5Hash(data)

View File

@ -29,12 +29,13 @@ import (
"sync/atomic" "sync/atomic"
"syscall" "syscall"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
) )
const ( const (
fsMinFreeSpace = 1024 * 1024 * 1024 // Min 1GiB free space. fsMinFreeSpace = 1 * humanize.GiByte // Min 1GiB free space.
fsMinFreeInodes = 10000 // Min 10000. fsMinFreeInodes = 10000 // Min 10000.
maxAllowedIOError = 5 maxAllowedIOError = 5
) )

View File

@ -26,6 +26,8 @@ import (
"net/http/httptest" "net/http/httptest"
"testing" "testing"
"time" "time"
humanize "github.com/dustin/go-humanize"
) )
const ( const (
@ -264,7 +266,7 @@ func testPostPolicyHandler(obj ObjectLayer, instanceType string, t TestErrHandle
// Failed with entity too large. // Failed with entity too large.
{ {
objectName: "test", objectName: "test",
data: bytes.Repeat([]byte("a"), 1024*1024+1), data: bytes.Repeat([]byte("a"), (1*humanize.MiByte)+1),
expectedRespStatus: http.StatusBadRequest, expectedRespStatus: http.StatusBadRequest,
accessKey: credentials.AccessKeyID, accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey, secretKey: credentials.SecretAccessKey,

View File

@ -23,13 +23,15 @@ import (
"strings" "strings"
"testing" "testing"
"time" "time"
humanize "github.com/dustin/go-humanize"
) )
// Tests if we generate storage info. // Tests if we generate storage info.
func TestStorageInfoMsg(t *testing.T) { func TestStorageInfoMsg(t *testing.T) {
infoStorage := StorageInfo{ infoStorage := StorageInfo{
Total: 1024 * 1024 * 1024 * 10, Total: 10 * humanize.GiByte,
Free: 1024 * 1024 * 1024 * 2, Free: 2 * humanize.GiByte,
Backend: struct { Backend: struct {
Type BackendType Type BackendType
OnlineDisks int OnlineDisks int

View File

@ -31,6 +31,8 @@ import (
"sync" "sync"
"time" "time"
humanize "github.com/dustin/go-humanize"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -2367,7 +2369,7 @@ func (s *TestSuiteCommon) TestObjectValidMD5(c *C) {
// Create a byte array of 5MB. // Create a byte array of 5MB.
// content for the object to be uploaded. // content for the object to be uploaded.
data := bytes.Repeat([]byte("0123456789abcdef"), 5*1024*1024/16) data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16)
// calculate md5Sum of the data. // calculate md5Sum of the data.
md5SumBase64 := getMD5HashBase64(data) md5SumBase64 := getMD5HashBase64(data)
@ -2440,7 +2442,7 @@ func (s *TestSuiteCommon) TestObjectMultipart(c *C) {
// content for the part to be uploaded. // content for the part to be uploaded.
// Create a byte array of 5MB. // Create a byte array of 5MB.
data := bytes.Repeat([]byte("0123456789abcdef"), 5*1024*1024/16) data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16)
// calculate md5Sum of the data. // calculate md5Sum of the data.
md5SumBase64 := getMD5HashBase64(data) md5SumBase64 := getMD5HashBase64(data)

View File

@ -28,6 +28,7 @@ import (
"net/http" "net/http"
"time" "time"
humanize "github.com/dustin/go-humanize"
"github.com/minio/sha256-simd" "github.com/minio/sha256-simd"
) )
@ -151,7 +152,7 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time,
return newSignature, date, ErrNone return newSignature, date, ErrNone
} }
const maxLineLength = 4096 // assumed <= bufio.defaultBufSize 4KiB. const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
// lineTooLong is generated as chunk header is bigger than 4KiB. // lineTooLong is generated as chunk header is bigger than 4KiB.
var errLineTooLong = errors.New("header line too long") var errLineTooLong = errors.New("header line too long")

View File

@ -27,6 +27,7 @@ import (
"encoding/json" "encoding/json"
humanize "github.com/dustin/go-humanize"
"github.com/pkg/profile" "github.com/pkg/profile"
) )
@ -112,9 +113,9 @@ func checkValidMD5(md5 string) ([]byte, error) {
/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html /// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
const ( const (
// maximum object size per PUT request is 5GiB // maximum object size per PUT request is 5GiB
maxObjectSize = 1024 * 1024 * 1024 * 5 maxObjectSize = 5 * humanize.GiByte
// minimum Part size for multipart upload is 5MB // minimum Part size for multipart upload is 5MiB
minPartSize = 1024 * 1024 * 5 minPartSize = 5 * humanize.MiByte
// maximum Part ID for multipart upload is 10000 (Acceptable values range from 1 to 10000 inclusive) // maximum Part ID for multipart upload is 10000 (Acceptable values range from 1 to 10000 inclusive)
maxPartID = 10000 maxPartID = 10000
) )

View File

@ -27,6 +27,7 @@ import (
"strings" "strings"
"testing" "testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/set" "github.com/minio/minio-go/pkg/set"
) )
@ -403,7 +404,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
objectName := "object" objectName := "object"
objectSize := 1024 objectSize := 1 * humanize.KiByte
// Create bucket. // Create bucket.
err = obj.MakeBucket(bucketName) err = obj.MakeBucket(bucketName)
@ -474,7 +475,7 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
objectName := "object" objectName := "object"
objectSize := 1024 objectSize := 1 * humanize.KiByte
// Create bucket. // Create bucket.
err = obj.MakeBucket(bucketName) err = obj.MakeBucket(bucketName)
@ -823,7 +824,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
objectName := "object" objectName := "object"
objectSize := 1024 objectSize := 1 * humanize.KiByte
// Create bucket. // Create bucket.
err = obj.MakeBucket(bucketName) err = obj.MakeBucket(bucketName)

View File

@ -21,9 +21,9 @@ import (
"strconv" "strconv"
"testing" "testing"
"time" "time"
)
const MiB = 1024 * 1024 humanize "github.com/dustin/go-humanize"
)
// Test xlMetaV1.AddObjectPart() // Test xlMetaV1.AddObjectPart()
func TestAddObjectPart(t *testing.T) { func TestAddObjectPart(t *testing.T) {
@ -54,7 +54,7 @@ func TestAddObjectPart(t *testing.T) {
for _, testCase := range testCases { for _, testCase := range testCases {
if testCase.expectedIndex > -1 { if testCase.expectedIndex > -1 {
partNumString := strconv.Itoa(testCase.partNum) partNumString := strconv.Itoa(testCase.partNum)
xlMeta.AddObjectPart(testCase.partNum, "part."+partNumString, "etag."+partNumString, int64(testCase.partNum+MiB)) xlMeta.AddObjectPart(testCase.partNum, "part."+partNumString, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte))
} }
if index := objectPartIndex(xlMeta.Parts, testCase.partNum); index != testCase.expectedIndex { if index := objectPartIndex(xlMeta.Parts, testCase.partNum); index != testCase.expectedIndex {
@ -86,7 +86,7 @@ func TestObjectPartIndex(t *testing.T) {
// Add some parts for testing. // Add some parts for testing.
for _, testCase := range testCases { for _, testCase := range testCases {
partNumString := strconv.Itoa(testCase.partNum) partNumString := strconv.Itoa(testCase.partNum)
xlMeta.AddObjectPart(testCase.partNum, "part."+partNumString, "etag."+partNumString, int64(testCase.partNum+MiB)) xlMeta.AddObjectPart(testCase.partNum, "part."+partNumString, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte))
} }
// Add failure test case. // Add failure test case.
@ -115,7 +115,7 @@ func TestObjectToPartOffset(t *testing.T) {
// Total size of all parts is 5,242,899 bytes. // Total size of all parts is 5,242,899 bytes.
for _, partNum := range []int{1, 2, 4, 5, 7} { for _, partNum := range []int{1, 2, 4, 5, 7} {
partNumString := strconv.Itoa(partNum) partNumString := strconv.Itoa(partNum)
xlMeta.AddObjectPart(partNum, "part."+partNumString, "etag."+partNumString, int64(partNum+MiB)) xlMeta.AddObjectPart(partNum, "part."+partNumString, "etag."+partNumString, int64(partNum+humanize.MiByte))
} }
testCases := []struct { testCases := []struct {
@ -125,15 +125,15 @@ func TestObjectToPartOffset(t *testing.T) {
expectedErr error expectedErr error
}{ }{
{0, 0, 0, nil}, {0, 0, 0, nil},
{MiB, 0, MiB, nil}, {1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil},
{1 + MiB, 1, 0, nil}, {1 + humanize.MiByte, 1, 0, nil},
{2 + MiB, 1, 1, nil}, {2 + humanize.MiByte, 1, 1, nil},
// Its valid for zero sized object. // Its valid for zero sized object.
{-1, 0, -1, nil}, {-1, 0, -1, nil},
// Max offset is always (size - 1). // Max offset is always (size - 1).
{(1 + 2 + 4 + 5 + 7) + (5 * MiB) - 1, 4, 1048582, nil}, {(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil},
// Error if offset is size. // Error if offset is size.
{(1 + 2 + 4 + 5 + 7) + (5 * MiB), 0, 0, InvalidRange{}}, {(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}},
} }
// Test them. // Test them.

View File

@ -25,6 +25,8 @@ import (
"reflect" "reflect"
"testing" "testing"
"time" "time"
humanize "github.com/dustin/go-humanize"
) )
func TestRepeatPutObjectPart(t *testing.T) { func TestRepeatPutObjectPart(t *testing.T) {
@ -49,14 +51,14 @@ func TestRepeatPutObjectPart(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
fiveMBBytes := bytes.Repeat([]byte("a"), 5*1024*1024) fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
md5Hex := getMD5Hash(fiveMBBytes) md5Hex := getMD5Hash(fiveMBBytes)
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*1024*1024, bytes.NewReader(fiveMBBytes), md5Hex, "") _, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*humanize.MiByte, bytes.NewReader(fiveMBBytes), md5Hex, "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930 // PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*1024*1024, bytes.NewReader(fiveMBBytes), md5Hex, "") _, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*humanize.MiByte, bytes.NewReader(fiveMBBytes), md5Hex, "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -279,7 +281,7 @@ func TestHealing(t *testing.T) {
bucket := "bucket" bucket := "bucket"
object := "object" object := "object"
data := make([]byte, 1*1024*1024) data := make([]byte, 1*humanize.MiByte)
length := int64(len(data)) length := int64(len(data))
_, err = rand.Read(data) _, err = rand.Read(data)
if err != nil { if err != nil {

View File

@ -23,6 +23,7 @@ import (
"strings" "strings"
"sync" "sync"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/objcache" "github.com/minio/minio/pkg/objcache"
) )
@ -42,7 +43,7 @@ const (
uploadsJSONFile = "uploads.json" uploadsJSONFile = "uploads.json"
// 8GiB cache by default. // 8GiB cache by default.
maxCacheSize = 8 * 1024 * 1024 * 1024 maxCacheSize = 8 * humanize.GiByte
// Maximum erasure blocks. // Maximum erasure blocks.
maxErasureBlocks = 16 maxErasureBlocks = 16