Further fixes -

- All test files have been renamed to their respective <package>_test name,
    this is done in accordance with
      - https://github.com/golang/go/wiki/CodeReviewComments#import-dot
        dot imports are largely used in testing, but we avoid them here to
        prevent namespace collisions and circular dependencies

  - Never use _* in package names other than "_test"; change fragment_v1 to expose
    fragment, just like 'gopkg.in/check.v1'
This commit is contained in:
Harshavardhana 2015-03-06 01:50:51 -08:00
parent 02ccf123c9
commit e5af8a3f5d
24 changed files with 245 additions and 285 deletions

View File

@ -55,7 +55,11 @@ Building Libraries
* When you're ready to create a pull request, be sure to:
- Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
- Run `go fmt`
- Run `golint` (`go get github.com/golang/lint/golint`)
- Run `golint`
```
$ go get github.com/golang/lint/golint
$ golint ./...
```
- Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
- Make sure `go test -race ./...` and `go build` completes.
* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project

View File

@ -16,6 +16,7 @@ getdeps: checkdeps checkgopath
verifier: getdeps
@echo "Checking for offending code"
@go run buildscripts/verifier.go ${PWD}
@go vet ./...
build-all: verifier
@echo "Building Libraries"

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package minioapi
package minioapi_test
import (
"bytes"
@ -28,8 +28,10 @@ import (
"testing"
"time"
"github.com/minio-io/minio/pkg/api/minioapi"
mstorage "github.com/minio-io/minio/pkg/storage"
"github.com/minio-io/minio/pkg/storage/inmemory"
. "gopkg.in/check.v1"
)
@ -41,7 +43,7 @@ var _ = Suite(&MySuite{})
func (s *MySuite) TestNonExistantObject(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -53,7 +55,7 @@ func (s *MySuite) TestNonExistantObject(c *C) {
func (s *MySuite) TestEmptyObject(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -78,7 +80,7 @@ func (s *MySuite) TestEmptyObject(c *C) {
func (s *MySuite) TestObject(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -101,7 +103,7 @@ func (s *MySuite) TestObject(c *C) {
func (s *MySuite) TestMultipleObjects(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -181,7 +183,7 @@ func (s *MySuite) TestMultipleObjects(c *C) {
func (s *MySuite) TestNotImplemented(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -192,7 +194,7 @@ func (s *MySuite) TestNotImplemented(c *C) {
func (s *MySuite) TestHeader(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -215,7 +217,7 @@ func (s *MySuite) TestHeader(c *C) {
func (s *MySuite) TestPutBucket(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -240,7 +242,7 @@ func (s *MySuite) TestPutBucket(c *C) {
func (s *MySuite) TestPutObject(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -297,7 +299,7 @@ func (s *MySuite) TestPutObject(c *C) {
func (s *MySuite) TestListBuckets(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -337,8 +339,8 @@ func (s *MySuite) TestListBuckets(c *C) {
c.Assert(listResponse.Buckets.Bucket[1].Name, Equals, "foo")
}
func readListBucket(reader io.Reader) (BucketListResponse, error) {
var results BucketListResponse
func readListBucket(reader io.Reader) (minioapi.BucketListResponse, error) {
var results minioapi.BucketListResponse
decoder := xml.NewDecoder(reader)
err := decoder.Decode(&results)
return results, err
@ -376,7 +378,7 @@ func verifyHeaders(c *C, header http.Header, date time.Time, size int, contentTy
func (s *MySuite) TestXMLNameNotInBucketListJson(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -401,7 +403,7 @@ func (s *MySuite) TestXMLNameNotInBucketListJson(c *C) {
func (s *MySuite) TestXMLNameNotInObjectListJson(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()
@ -426,7 +428,7 @@ func (s *MySuite) TestXMLNameNotInObjectListJson(c *C) {
func (s *MySuite) TestContentTypePersists(c *C) {
_, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage)
httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler)
defer testServer.Close()

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package erasure_v1
package erasure
import (
"bytes"

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package erasure_v1
package erasure
import (
"bytes"

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package fragment_v1
package fragment
import (
"bytes"

View File

@ -1,2 +0,0 @@
donut_gen
hello

View File

@ -1,59 +0,0 @@
package main
import (
"bytes"
"fmt"
"os"
"reflect"
"github.com/minio-io/minio/pkg/storage/donut/fragment/fragment_v1"
)
func main() {
fmt.Println("--start")
file, err := os.OpenFile("newfile", os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
panic(err)
}
data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.")
dataBuffer := bytes.NewBuffer(data)
err = fragment_v1.WriteFrame(file, dataBuffer, uint64(dataBuffer.Len()))
if err != nil {
panic(err)
}
file.Close()
fmt.Println("--closed")
fmt.Println("--verify")
stat, _ := os.Stat("newfile")
fileSize := stat.Size()
rfile, _ := os.OpenFile("newfile", os.O_RDONLY, 0666)
blockStart := make([]byte, 4)
blockStartCheck := []byte{'M', 'I', 'N', 'I'}
_, err = rfile.Read(blockStart)
if err != nil {
panic(err)
}
blockEnd := make([]byte, 4)
start := fileSize - 4
blockEndCheck := []byte{'I', 'N', 'I', 'M'}
rfile.ReadAt(blockEnd, start)
rfile.Close()
if !reflect.DeepEqual(blockStart, blockStartCheck) {
panic("Corrupted donut file")
}
if !reflect.DeepEqual(blockEnd, blockEndCheck) {
panic("Corrupted donut file")
}
fmt.Println("--verified")
fmt.Println("--end")
}

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package fragment_v1
package fragment_test
import (
"bytes"
@ -22,8 +22,8 @@ import (
"encoding/binary"
"testing"
"github.com/minio-io/minio/pkg/storage/donut/fragment/fragment_v1"
"github.com/minio-io/minio/pkg/utils/checksum/crc32c"
. "gopkg.in/check.v1"
)
@ -39,7 +39,7 @@ func (s *MySuite) TestSingleWrite(c *C) {
testData := "Hello, World"
testLength := uint64(len(testData))
err := WriteFrame(&testBuffer, bytes.NewBufferString(testData), testLength)
err := fragment.WriteFrame(&testBuffer, bytes.NewBufferString(testData), testLength)
c.Assert(err, IsNil)
testBufferLength := uint64(testBuffer.Len())
@ -112,7 +112,7 @@ func (s *MySuite) TestSingleWrite(c *C) {
func (s *MySuite) TestLengthMismatchInWrite(c *C) {
var testData bytes.Buffer
err := WriteFrame(&testData, bytes.NewBufferString("hello, world"), 5)
err := fragment.WriteFrame(&testData, bytes.NewBufferString("hello, world"), 5)
c.Assert(err, Not(IsNil))
}
@ -122,7 +122,7 @@ func benchmarkSize(b *testing.B, size int) {
b.SetBytes(int64(size))
target := new(bytes.Buffer)
for i := 0; i < b.N; i++ {
WriteFrame(target, bytes.NewReader(buf[:size]), uint64(size))
fragment.WriteFrame(target, bytes.NewReader(buf[:size]), uint64(size))
}
}

View File

@ -14,12 +14,14 @@
* limitations under the License.
*/
package erasure
package erasure_test
import (
"bytes"
. "gopkg.in/check.v1"
"testing"
"github.com/minio-io/minio/pkg/storage/erasure"
. "gopkg.in/check.v1"
)
type MySuite struct{}
@ -29,11 +31,11 @@ var _ = Suite(&MySuite{})
func Test(t *testing.T) { TestingT(t) }
func (s *MySuite) TestCauchyDecode(c *C) {
ep, _ := ParseEncoderParams(10, 5, Cauchy)
ep, _ := erasure.ParseEncoderParams(10, 5, erasure.Cauchy)
data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.")
e := NewEncoder(ep)
e := erasure.NewEncoder(ep)
chunks, length := e.Encode(data)
c.Assert(length, Equals, len(data))

View File

@ -14,19 +14,21 @@
* limitations under the License.
*/
package erasure
package erasure_test
import (
"bytes"
"github.com/minio-io/minio/pkg/storage/erasure"
. "gopkg.in/check.v1"
)
func (s *MySuite) TestVanderMondeDecode(c *C) {
ep, _ := ParseEncoderParams(10, 5, Vandermonde)
ep, _ := erasure.ParseEncoderParams(10, 5, erasure.Vandermonde)
data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.")
e := NewEncoder(ep)
e := erasure.NewEncoder(ep)
chunks, length := e.Encode(data)
c.Logf("chunks length: %d", len(chunks))
c.Logf("length: %d", length)

View File

@ -33,7 +33,8 @@ import (
"github.com/minio-io/minio/pkg/utils/policy"
)
type storage struct {
// Storage - fs local variables
type Storage struct {
root string
lock *sync.Mutex
}
@ -44,17 +45,17 @@ type SerializedMetadata struct {
}
// Start filesystem channel
func Start(root string) (chan<- string, <-chan error, *storage) {
func Start(root string) (chan<- string, <-chan error, *Storage) {
ctrlChannel := make(chan string)
errorChannel := make(chan error)
s := storage{}
s := Storage{}
s.root = root
s.lock = new(sync.Mutex)
go start(ctrlChannel, errorChannel, &s)
return ctrlChannel, errorChannel, &s
}
func start(ctrlChannel <-chan string, errorChannel chan<- error, s *storage) {
func start(ctrlChannel <-chan string, errorChannel chan<- error, s *Storage) {
err := os.MkdirAll(s.root, 0700)
errorChannel <- err
close(errorChannel)
@ -71,8 +72,8 @@ func appendUniq(slice []string, i string) []string {
/// Bucket Operations
// GET - Service
func (storage *storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
// ListBuckets - Get service
func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
files, err := ioutil.ReadDir(storage.root)
if err != nil {
return []mstorage.BucketMetadata{}, mstorage.EmbedError("bucket", "", err)
@ -96,8 +97,8 @@ func (storage *storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
return metadataList, nil
}
// PUT - Bucket
func (storage *storage) StoreBucket(bucket string) error {
// StoreBucket - PUT Bucket
func (storage *Storage) StoreBucket(bucket string) error {
storage.lock.Lock()
defer storage.lock.Unlock()
@ -124,8 +125,8 @@ func (storage *storage) StoreBucket(bucket string) error {
return nil
}
// GET - Bucket policy
func (storage *storage) GetBucketPolicy(bucket string) (interface{}, error) {
// GetBucketPolicy - GET bucket policy
func (storage *Storage) GetBucketPolicy(bucket string) (interface{}, error) {
storage.lock.Lock()
defer storage.lock.Unlock()
@ -169,8 +170,8 @@ func (storage *storage) GetBucketPolicy(bucket string) (interface{}, error) {
}
// PUT - Bucket policy
func (storage *storage) StoreBucketPolicy(bucket string, policy interface{}) error {
// StoreBucketPolicy - PUT bucket policy
func (storage *Storage) StoreBucketPolicy(bucket string, policy interface{}) error {
storage.lock.Lock()
defer storage.lock.Unlock()
@ -212,8 +213,8 @@ func (storage *storage) StoreBucketPolicy(bucket string, policy interface{}) err
/// Object Operations
// GET Object
func (storage *storage) CopyObjectToWriter(w io.Writer, bucket string, object string) (int64, error) {
// CopyObjectToWriter - GET object
func (storage *Storage) CopyObjectToWriter(w io.Writer, bucket string, object string) (int64, error) {
// validate bucket
if mstorage.IsValidBucket(bucket) == false {
return 0, mstorage.BucketNameInvalid{Bucket: bucket}
@ -254,8 +255,8 @@ func (storage *storage) CopyObjectToWriter(w io.Writer, bucket string, object st
return count, nil
}
// HEAD Object
func (storage *storage) GetObjectMetadata(bucket string, object string) (mstorage.ObjectMetadata, error) {
// GetObjectMetadata - HEAD object
func (storage *Storage) GetObjectMetadata(bucket string, object string) (mstorage.ObjectMetadata, error) {
if mstorage.IsValidBucket(bucket) == false {
return mstorage.ObjectMetadata{}, mstorage.BucketNameInvalid{Bucket: bucket}
}
@ -352,8 +353,8 @@ func (b byObjectKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// Less
func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
// GET bucket (list objects)
func (storage *storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
// ListObjects - GET bucket (list objects)
func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
p := bucketDir{}
p.files = make(map[string]os.FileInfo)
@ -454,8 +455,8 @@ ret:
return metadataList, resources, nil
}
// PUT object
func (storage *storage) StoreObject(bucket, key, contentType string, data io.Reader) error {
// StoreObject - PUT object
func (storage *Storage) StoreObject(bucket, key, contentType string, data io.Reader) error {
// TODO Commits should stage then move instead of writing directly
storage.lock.Lock()
defer storage.lock.Unlock()

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package fs
package fs_test
import (
"io/ioutil"
@ -22,6 +22,7 @@ import (
"testing"
mstorage "github.com/minio-io/minio/pkg/storage"
"github.com/minio-io/minio/pkg/storage/fs"
. "gopkg.in/check.v1"
)
@ -38,7 +39,7 @@ func (s *MySuite) TestAPISuite(c *C) {
path, err := ioutil.TempDir(os.TempDir(), "minio-fs-")
c.Check(err, IsNil)
storageList = append(storageList, path)
_, _, store := Start(path)
_, _, store := fs.Start(path)
return store
}
mstorage.APITestSuite(c, create)

View File

@ -30,7 +30,8 @@ import (
"github.com/minio-io/minio/pkg/utils/policy"
)
type storage struct {
// Storage - local variables
type Storage struct {
bucketdata map[string]storedBucket
objectdata map[string]storedObject
lock *sync.RWMutex
@ -48,11 +49,11 @@ type storedObject struct {
}
// Start inmemory object server
func Start() (chan<- string, <-chan error, *storage) {
func Start() (chan<- string, <-chan error, *Storage) {
ctrlChannel := make(chan string)
errorChannel := make(chan error)
go start(ctrlChannel, errorChannel)
return ctrlChannel, errorChannel, &storage{
return ctrlChannel, errorChannel, &Storage{
bucketdata: make(map[string]storedBucket),
objectdata: make(map[string]storedObject),
lock: new(sync.RWMutex),
@ -63,8 +64,8 @@ func start(ctrlChannel <-chan string, errorChannel chan<- error) {
close(errorChannel)
}
// GET object from memory buffer
func (storage *storage) CopyObjectToWriter(w io.Writer, bucket string, object string) (int64, error) {
// CopyObjectToWriter - GET object from memory buffer
func (storage *Storage) CopyObjectToWriter(w io.Writer, bucket string, object string) (int64, error) {
// TODO synchronize access
// get object
key := bucket + ":" + object
@ -76,18 +77,18 @@ func (storage *storage) CopyObjectToWriter(w io.Writer, bucket string, object st
return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
}
// Not implemented
func (storage *storage) StoreBucketPolicy(bucket string, policy interface{}) error {
// StoreBucketPolicy - Not implemented
func (storage *Storage) StoreBucketPolicy(bucket string, policy interface{}) error {
return mstorage.APINotImplemented{API: "PutBucketPolicy"}
}
// Not implemented
func (storage *storage) GetBucketPolicy(bucket string) (interface{}, error) {
// GetBucketPolicy - Not implemented
func (storage *Storage) GetBucketPolicy(bucket string) (interface{}, error) {
return policy.BucketPolicy{}, mstorage.APINotImplemented{API: "GetBucketPolicy"}
}
// PUT object to memory buffer
func (storage *storage) StoreObject(bucket, key, contentType string, data io.Reader) error {
// StoreObject - PUT object to memory buffer
func (storage *Storage) StoreObject(bucket, key, contentType string, data io.Reader) error {
storage.lock.Lock()
defer storage.lock.Unlock()
@ -127,8 +128,8 @@ func (storage *storage) StoreObject(bucket, key, contentType string, data io.Rea
return nil
}
// Create Bucket in memory
func (storage *storage) StoreBucket(bucketName string) error {
// StoreBucket - create bucket in memory
func (storage *Storage) StoreBucket(bucketName string) error {
storage.lock.Lock()
defer storage.lock.Unlock()
if !mstorage.IsValidBucket(bucketName) {
@ -148,8 +149,8 @@ func (storage *storage) StoreBucket(bucketName string) error {
return nil
}
// List objects in memory
func (storage *storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
// ListObjects - list objects from memory
func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
if _, ok := storage.bucketdata[bucket]; ok == false {
return []mstorage.ObjectMetadata{}, mstorage.BucketResourcesMetadata{IsTruncated: false}, mstorage.BucketNotFound{Bucket: bucket}
}
@ -186,8 +187,8 @@ func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// Less
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// List buckets
func (storage *storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
// ListBuckets - List buckets from memory
func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
var results []mstorage.BucketMetadata
for _, bucket := range storage.bucketdata {
results = append(results, bucket.metadata)
@ -196,8 +197,8 @@ func (storage *storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
return results, nil
}
// HEAD object
func (storage *storage) GetObjectMetadata(bucket, key string) (mstorage.ObjectMetadata, error) {
// GetObjectMetadata - get object metadata from memory
func (storage *Storage) GetObjectMetadata(bucket, key string) (mstorage.ObjectMetadata, error) {
objectKey := bucket + ":" + key
if object, ok := storage.objectdata[objectKey]; ok == true {

View File

@ -14,12 +14,13 @@
* limitations under the License.
*/
package inmemory
package inmemory_test
import (
"testing"
mstorage "github.com/minio-io/minio/pkg/storage"
"github.com/minio-io/minio/pkg/storage/inmemory"
. "gopkg.in/check.v1"
)
@ -32,7 +33,7 @@ var _ = Suite(&MySuite{})
func (s *MySuite) TestAPISuite(c *C) {
create := func() mstorage.Storage {
_, _, store := Start()
_, _, store := inmemory.Start()
return store
}

View File

@ -21,11 +21,11 @@ import (
"math/rand"
"strconv"
. "gopkg.in/check.v1"
"gopkg.in/check.v1"
)
// APITestSuite - collection of API tests
func APITestSuite(c *C, create func() Storage) {
func APITestSuite(c *check.C, create func() Storage) {
testCreateBucket(c, create)
testMultipleObjectCreation(c, create)
testPaging(c, create)
@ -41,15 +41,15 @@ func APITestSuite(c *C, create func() Storage) {
testDefaultContentType(c, create)
}
func testCreateBucket(c *C, create func() Storage) {
func testCreateBucket(c *check.C, create func() Storage) {
// TODO
}
func testMultipleObjectCreation(c *C, create func() Storage) {
func testMultipleObjectCreation(c *check.C, create func() Storage) {
objects := make(map[string][]byte)
storage := create()
err := storage.StoreBucket("bucket")
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
for i := 0; i < 10; i++ {
randomPerm := rand.Perm(10)
randomString := ""
@ -59,7 +59,7 @@ func testMultipleObjectCreation(c *C, create func() Storage) {
key := "obj" + strconv.Itoa(i)
objects[key] = []byte(randomString)
err := storage.StoreObject("bucket", key, "", bytes.NewBufferString(randomString))
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
}
// ensure no duplicate etags
@ -67,162 +67,162 @@ func testMultipleObjectCreation(c *C, create func() Storage) {
for key, value := range objects {
var byteBuffer bytes.Buffer
storage.CopyObjectToWriter(&byteBuffer, "bucket", key)
c.Assert(bytes.Equal(value, byteBuffer.Bytes()), Equals, true)
c.Assert(bytes.Equal(value, byteBuffer.Bytes()), check.Equals, true)
metadata, err := storage.GetObjectMetadata("bucket", key)
c.Assert(err, IsNil)
c.Assert(metadata.Size, Equals, int64(len(value)))
c.Assert(err, check.IsNil)
c.Assert(metadata.Size, check.Equals, int64(len(value)))
_, ok := etags[metadata.ETag]
c.Assert(ok, Equals, false)
c.Assert(ok, check.Equals, false)
etags[metadata.ETag] = metadata.ETag
}
}
func testPaging(c *C, create func() Storage) {
func testPaging(c *check.C, create func() Storage) {
storage := create()
storage.StoreBucket("bucket")
resources := BucketResourcesMetadata{}
objects, resources, err := storage.ListObjects("bucket", resources)
c.Assert(len(objects), Equals, 0)
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(err, IsNil)
// check before paging occurs
c.Assert(len(objects), check.Equals, 0)
c.Assert(resources.IsTruncated, check.Equals, false)
c.Assert(err, check.IsNil)
// check before paging occurs
for i := 0; i < 5; i++ {
key := "obj" + strconv.Itoa(i)
storage.StoreObject("bucket", key, "", bytes.NewBufferString(key))
resources.Maxkeys = 5
objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(len(objects), Equals, i+1)
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(err, IsNil)
c.Assert(len(objects), check.Equals, i+1)
c.Assert(resources.IsTruncated, check.Equals, false)
c.Assert(err, check.IsNil)
}
// check after paging occurs pages work
// check after paging occurs pages work
for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i)
storage.StoreObject("bucket", key, "", bytes.NewBufferString(key))
resources.Maxkeys = 5
objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(len(objects), Equals, 5)
c.Assert(resources.IsTruncated, Equals, true)
c.Assert(err, IsNil)
c.Assert(len(objects), check.Equals, 5)
c.Assert(resources.IsTruncated, check.Equals, true)
c.Assert(err, check.IsNil)
}
// check paging with prefix at end returns less objects
// check paging with prefix at end returns fewer objects
{
storage.StoreObject("bucket", "newPrefix", "", bytes.NewBufferString("prefix1"))
storage.StoreObject("bucket", "newPrefix2", "", bytes.NewBufferString("prefix2"))
resources.Prefix = "new"
resources.Maxkeys = 5
objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(len(objects), Equals, 2)
c.Assert(len(objects), check.Equals, 2)
}
// check ordering of pages
// check ordering of pages
{
resources.Prefix = ""
resources.Maxkeys = 1000
objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(objects[0].Key, Equals, "newPrefix")
c.Assert(objects[1].Key, Equals, "newPrefix2")
c.Assert(objects[2].Key, Equals, "obj0")
c.Assert(objects[3].Key, Equals, "obj1")
c.Assert(objects[4].Key, Equals, "obj10")
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
c.Assert(objects[2].Key, check.Equals, "obj0")
c.Assert(objects[3].Key, check.Equals, "obj1")
c.Assert(objects[4].Key, check.Equals, "obj10")
}
// check ordering of results with prefix
// check ordering of results with prefix
{
resources.Prefix = "obj"
resources.Maxkeys = 1000
objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(objects[0].Key, Equals, "obj0")
c.Assert(objects[1].Key, Equals, "obj1")
c.Assert(objects[2].Key, Equals, "obj10")
c.Assert(objects[3].Key, Equals, "obj2")
c.Assert(objects[4].Key, Equals, "obj3")
c.Assert(objects[0].Key, check.Equals, "obj0")
c.Assert(objects[1].Key, check.Equals, "obj1")
c.Assert(objects[2].Key, check.Equals, "obj10")
c.Assert(objects[3].Key, check.Equals, "obj2")
c.Assert(objects[4].Key, check.Equals, "obj3")
}
// check ordering of results with prefix and no paging
// check ordering of results with prefix and no paging
{
resources.Prefix = "new"
resources.Maxkeys = 5
objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(objects[0].Key, Equals, "newPrefix")
c.Assert(objects[1].Key, Equals, "newPrefix2")
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
}
}
func testObjectOverwriteFails(c *C, create func() Storage) {
func testObjectOverwriteFails(c *check.C, create func() Storage) {
storage := create()
storage.StoreBucket("bucket")
err := storage.StoreObject("bucket", "object", "", bytes.NewBufferString("one"))
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
err = storage.StoreObject("bucket", "object", "", bytes.NewBufferString("three"))
c.Assert(err, Not(IsNil))
c.Assert(err, check.Not(check.IsNil))
var bytesBuffer bytes.Buffer
length, err := storage.CopyObjectToWriter(&bytesBuffer, "bucket", "object")
c.Assert(length, Equals, int64(len("one")))
c.Assert(err, IsNil)
c.Assert(string(bytesBuffer.Bytes()), Equals, "one")
c.Assert(length, check.Equals, int64(len("one")))
c.Assert(err, check.IsNil)
c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one")
}
func testNonExistantBucketOperations(c *C, create func() Storage) {
func testNonExistantBucketOperations(c *check.C, create func() Storage) {
storage := create()
err := storage.StoreObject("bucket", "object", "", bytes.NewBufferString("one"))
c.Assert(err, Not(IsNil))
c.Assert(err, check.Not(check.IsNil))
}
func testBucketRecreateFails(c *C, create func() Storage) {
func testBucketRecreateFails(c *check.C, create func() Storage) {
storage := create()
err := storage.StoreBucket("string")
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
err = storage.StoreBucket("string")
c.Assert(err, Not(IsNil))
c.Assert(err, check.Not(check.IsNil))
}
func testPutObjectInSubdir(c *C, create func() Storage) {
func testPutObjectInSubdir(c *check.C, create func() Storage) {
storage := create()
err := storage.StoreBucket("bucket")
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
err = storage.StoreObject("bucket", "dir1/dir2/object", "", bytes.NewBufferString("hello world"))
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
var bytesBuffer bytes.Buffer
length, err := storage.CopyObjectToWriter(&bytesBuffer, "bucket", "dir1/dir2/object")
c.Assert(len(bytesBuffer.Bytes()), Equals, len("hello world"))
c.Assert(int64(len(bytesBuffer.Bytes())), Equals, length)
c.Assert(err, IsNil)
c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
c.Assert(err, check.IsNil)
}
func testListBuckets(c *C, create func() Storage) {
func testListBuckets(c *check.C, create func() Storage) {
storage := create()
// test empty list
buckets, err := storage.ListBuckets()
c.Assert(len(buckets), Equals, 0)
c.Assert(err, IsNil)
c.Assert(len(buckets), check.Equals, 0)
c.Assert(err, check.IsNil)
// add one and test exists
err = storage.StoreBucket("bucket1")
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
buckets, err = storage.ListBuckets()
c.Assert(len(buckets), Equals, 1)
c.Assert(err, IsNil)
c.Assert(len(buckets), check.Equals, 1)
c.Assert(err, check.IsNil)
// add two and test exists
err = storage.StoreBucket("bucket2")
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
buckets, err = storage.ListBuckets()
c.Assert(len(buckets), Equals, 2)
c.Assert(err, IsNil)
c.Assert(len(buckets), check.Equals, 2)
c.Assert(err, check.IsNil)
// add three and test exists + prefix
err = storage.StoreBucket("bucket22")
buckets, err = storage.ListBuckets()
c.Assert(len(buckets), Equals, 3)
c.Assert(err, IsNil)
c.Assert(len(buckets), check.Equals, 3)
c.Assert(err, check.IsNil)
}
func testListBucketsOrder(c *C, create func() Storage) {
func testListBucketsOrder(c *check.C, create func() Storage) {
// if implementation contains a map, order of map keys will vary.
// this ensures they return in the same order each time
for i := 0; i < 10; i++ {
@ -232,107 +232,107 @@ func testListBucketsOrder(c *C, create func() Storage) {
storage.StoreBucket("bucket2")
buckets, err := storage.ListBuckets()
c.Assert(len(buckets), Equals, 2)
c.Assert(err, IsNil)
c.Assert(buckets[0].Name, Equals, "bucket1")
c.Assert(buckets[1].Name, Equals, "bucket2")
c.Assert(len(buckets), check.Equals, 2)
c.Assert(err, check.IsNil)
c.Assert(buckets[0].Name, check.Equals, "bucket1")
c.Assert(buckets[1].Name, check.Equals, "bucket2")
}
}
func testListObjectsTestsForNonExistantBucket(c *C, create func() Storage) {
func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Storage) {
storage := create()
resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000}
objects, resources, err := storage.ListObjects("bucket", resources)
c.Assert(err, Not(IsNil))
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(len(objects), Equals, 0)
c.Assert(err, check.Not(check.IsNil))
c.Assert(resources.IsTruncated, check.Equals, false)
c.Assert(len(objects), check.Equals, 0)
}
func testNonExistantObjectInBucket(c *C, create func() Storage) {
func testNonExistantObjectInBucket(c *check.C, create func() Storage) {
storage := create()
err := storage.StoreBucket("bucket")
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
var byteBuffer bytes.Buffer
length, err := storage.CopyObjectToWriter(&byteBuffer, "bucket", "dir1")
c.Assert(length, Equals, int64(0))
c.Assert(err, Not(IsNil))
c.Assert(len(byteBuffer.Bytes()), Equals, 0)
c.Assert(length, check.Equals, int64(0))
c.Assert(err, check.Not(check.IsNil))
c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
switch err := err.(type) {
case ObjectNotFound:
{
c.Assert(err, ErrorMatches, "Object not Found: bucket#dir1")
c.Assert(err, check.ErrorMatches, "Object not Found: bucket#dir1")
}
default:
{
c.Assert(err, Equals, "fails")
c.Assert(err, check.Equals, "fails")
}
}
}
func testGetDirectoryReturnsObjectNotFound(c *C, create func() Storage) {
func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Storage) {
storage := create()
err := storage.StoreBucket("bucket")
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
err = storage.StoreObject("bucket", "dir1/dir2/object", "", bytes.NewBufferString("hello world"))
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
var byteBuffer bytes.Buffer
length, err := storage.CopyObjectToWriter(&byteBuffer, "bucket", "dir1")
c.Assert(length, Equals, int64(0))
c.Assert(length, check.Equals, int64(0))
switch err := err.(type) {
case ObjectNotFound:
{
c.Assert(err.Bucket, Equals, "bucket")
c.Assert(err.Object, Equals, "dir1")
c.Assert(err.Bucket, check.Equals, "bucket")
c.Assert(err.Object, check.Equals, "dir1")
}
default:
{
// force a failure with a line number
c.Assert(err, Equals, "ObjectNotFound")
c.Assert(err, check.Equals, "ObjectNotFound")
}
}
c.Assert(len(byteBuffer.Bytes()), Equals, 0)
c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
var byteBuffer2 bytes.Buffer
length, err = storage.CopyObjectToWriter(&byteBuffer, "bucket", "dir1/")
c.Assert(length, Equals, int64(0))
c.Assert(length, check.Equals, int64(0))
switch err := err.(type) {
case ObjectNotFound:
{
c.Assert(err.Bucket, Equals, "bucket")
c.Assert(err.Object, Equals, "dir1/")
c.Assert(err.Bucket, check.Equals, "bucket")
c.Assert(err.Object, check.Equals, "dir1/")
}
default:
{
// force a failure with a line number
c.Assert(err, Equals, "ObjectNotFound")
c.Assert(err, check.Equals, "ObjectNotFound")
}
}
c.Assert(len(byteBuffer2.Bytes()), Equals, 0)
c.Assert(len(byteBuffer2.Bytes()), check.Equals, 0)
}
func testDefaultContentType(c *C, create func() Storage) {
func testDefaultContentType(c *check.C, create func() Storage) {
storage := create()
err := storage.StoreBucket("bucket")
c.Assert(err, IsNil)
c.Assert(err, check.IsNil)
// test empty
err = storage.StoreObject("bucket", "one", "", bytes.NewBufferString("one"))
metadata, err := storage.GetObjectMetadata("bucket", "one")
c.Assert(err, IsNil)
c.Assert(metadata.ContentType, Equals, "application/octet-stream")
c.Assert(err, check.IsNil)
c.Assert(metadata.ContentType, check.Equals, "application/octet-stream")
// test custom
storage.StoreObject("bucket", "two", "application/text", bytes.NewBufferString("two"))
metadata, err = storage.GetObjectMetadata("bucket", "two")
c.Assert(err, IsNil)
c.Assert(metadata.ContentType, Equals, "application/text")
c.Assert(err, check.IsNil)
c.Assert(metadata.ContentType, check.Equals, "application/text")
// test trim space
storage.StoreObject("bucket", "three", "\tapplication/json ", bytes.NewBufferString("three"))
metadata, err = storage.GetObjectMetadata("bucket", "three")
c.Assert(err, IsNil)
c.Assert(metadata.ContentType, Equals, "application/json")
c.Assert(err, check.IsNil)
c.Assert(metadata.ContentType, check.Equals, "application/json")
}

View File

@ -25,7 +25,7 @@ var castanagoliTable = crc32.MakeTable(crc32.Castagnoli)
/// Convenience functions
// Single caller crc helper
// Sum32 - single caller crc helper
func Sum32(buffer []byte) uint32 {
crc := crc32.New(castanagoliTable)
crc.Reset()
@ -33,7 +33,7 @@ func Sum32(buffer []byte) uint32 {
return crc.Sum32()
}
// Low memory footprint io.Reader based crc helper
// Sum - io.Reader based crc helper
func Sum(reader io.Reader) (uint32, error) {
h := New()
var err error

View File

@ -27,9 +27,9 @@ import (
// Config context
type Config struct {
configPath string
configFile string
configLock *sync.RWMutex
ConfigPath string
ConfigFile string
ConfigLock *sync.RWMutex
Users map[string]User
}
@ -52,22 +52,22 @@ func (c *Config) SetupConfig() error {
return err
}
c.configPath = confPath
c.configFile = path.Join(c.configPath, "config.json")
if _, err := os.Stat(c.configFile); os.IsNotExist(err) {
_, err = os.Create(c.configFile)
c.ConfigPath = confPath
c.ConfigFile = path.Join(c.ConfigPath, "config.json")
if _, err := os.Stat(c.ConfigFile); os.IsNotExist(err) {
_, err = os.Create(c.ConfigFile)
if err != nil {
return err
}
}
c.configLock = new(sync.RWMutex)
c.ConfigLock = new(sync.RWMutex)
return nil
}
// GetConfigPath config file location
func (c *Config) GetConfigPath() string {
return c.configPath
return c.ConfigPath
}
// IsUserExists verify if user exists
@ -104,13 +104,13 @@ func (c *Config) AddUser(user User) {
// WriteConfig - write encoded json in config file
func (c *Config) WriteConfig() error {
c.configLock.Lock()
defer c.configLock.Unlock()
c.ConfigLock.Lock()
defer c.ConfigLock.Unlock()
var file *os.File
var err error
file, err = os.OpenFile(c.configFile, os.O_WRONLY, 0666)
file, err = os.OpenFile(c.ConfigFile, os.O_WRONLY, 0666)
defer file.Close()
if err != nil {
return err
@ -123,13 +123,13 @@ func (c *Config) WriteConfig() error {
// ReadConfig - read json config file and decode
func (c *Config) ReadConfig() error {
c.configLock.RLock()
defer c.configLock.RUnlock()
c.ConfigLock.RLock()
defer c.ConfigLock.RUnlock()
var file *os.File
var err error
file, err = os.OpenFile(c.configFile, os.O_RDONLY, 0666)
file, err = os.OpenFile(c.ConfigFile, os.O_RDONLY, 0666)
defer file.Close()
if err != nil {
return err

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package config
package config_test
import (
"io/ioutil"
@ -23,6 +23,7 @@ import (
"sync"
"testing"
"github.com/minio-io/minio/pkg/utils/config"
"github.com/minio-io/minio/pkg/utils/crypto/keys"
. "gopkg.in/check.v1"
)
@ -34,22 +35,22 @@ var _ = Suite(&MySuite{})
func Test(t *testing.T) { TestingT(t) }
func (s *MySuite) TestConfig(c *C) {
conf := Config{}
conf.configPath, _ = ioutil.TempDir("/tmp", "minio-test-")
defer os.RemoveAll(conf.configPath)
conf.configFile = path.Join(conf.configPath, "config.json")
if _, err := os.Stat(conf.configFile); os.IsNotExist(err) {
_, err = os.Create(conf.configFile)
conf := config.Config{}
conf.ConfigPath, _ = ioutil.TempDir("/tmp", "minio-test-")
defer os.RemoveAll(conf.ConfigPath)
conf.ConfigFile = path.Join(conf.ConfigPath, "config.json")
if _, err := os.Stat(conf.ConfigFile); os.IsNotExist(err) {
_, err = os.Create(conf.ConfigFile)
if err != nil {
c.Fatal(err)
}
}
conf.configLock = new(sync.RWMutex)
conf.ConfigLock = new(sync.RWMutex)
accesskey, _ := keys.GenerateRandomAlphaNumeric(keys.MinioAccessID)
secretkey, _ := keys.GenerateRandomBase64(keys.MinioSecretID)
user := User{
user := config.User{
Name: "gnubot",
AccessKey: string(accesskey),
SecretKey: string(secretkey),
@ -64,7 +65,7 @@ func (s *MySuite) TestConfig(c *C) {
accesskey, _ = keys.GenerateRandomAlphaNumeric(keys.MinioAccessID)
secretkey, _ = keys.GenerateRandomBase64(keys.MinioSecretID)
user = User{
user = config.User{
Name: "minio",
AccessKey: string(accesskey),
SecretKey: string(secretkey),

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package cpu
package cpu_test
import (
"errors"
@ -23,6 +23,7 @@ import (
"strings"
"testing"
"github.com/minio-io/minio/pkg/utils/cpu"
. "gopkg.in/check.v1"
)
@ -49,7 +50,7 @@ func hasCPUFeatureFromOS(feature string) (bool, error) {
func (s *MySuite) TestHasSSE41(c *C) {
if runtime.GOOS == "linux" {
var flag = HasSSE41()
var flag = cpu.HasSSE41()
osCheck, err := hasCPUFeatureFromOS("sse4_1")
c.Assert(err, IsNil)
c.Check(flag, Equals, osCheck)
@ -58,7 +59,7 @@ func (s *MySuite) TestHasSSE41(c *C) {
func (s *MySuite) TestHasAVX(c *C) {
if runtime.GOOS == "linux" {
var flag = HasAVX()
var flag = cpu.HasAVX()
osFlag, err := hasCPUFeatureFromOS("avx")
c.Assert(err, IsNil)
c.Check(osFlag, Equals, flag)
@ -67,7 +68,7 @@ func (s *MySuite) TestHasAVX(c *C) {
func (s *MySuite) TestHasAVX2(c *C) {
if runtime.GOOS == "linux" {
var flag = HasAVX2()
var flag = cpu.HasAVX2()
osFlag, err := hasCPUFeatureFromOS("avx2")
c.Assert(err, IsNil)
c.Check(osFlag, Equals, flag)

View File

@ -14,11 +14,12 @@
* limitations under the License.
*/
package keys
package keys_test
import (
"testing"
"github.com/minio-io/minio/pkg/utils/crypto/keys"
. "gopkg.in/check.v1"
)
@ -28,11 +29,11 @@ type MySuite struct{}
var _ = Suite(&MySuite{})
func (s *MySuite) Testing(c *C) {
value, err := GenerateRandomBase64(MinioSecretID)
func (s *MySuite) TestingKeys(c *C) {
value, err := keys.GenerateRandomBase64(keys.MinioSecretID)
c.Assert(err, IsNil)
alphanum, err := GenerateRandomAlphaNumeric(MinioAccessID)
alphanum, err := keys.GenerateRandomAlphaNumeric(keys.MinioAccessID)
c.Assert(err, IsNil)
c.Log(string(value))

View File

@ -1,10 +1,11 @@
package md5
package md5_test
import (
"bytes"
"encoding/hex"
"testing"
"github.com/minio-io/minio/pkg/utils/crypto/md5"
. "gopkg.in/check.v1"
)
@ -17,7 +18,7 @@ var _ = Suite(&MySuite{})
func (s *MySuite) TestMd5sum(c *C) {
testString := []byte("Test string")
expectedHash, _ := hex.DecodeString("0fd3dbec9730101bff92acc820befc34")
hash, err := Sum(bytes.NewBuffer(testString))
hash, err := md5.Sum(bytes.NewBuffer(testString))
c.Assert(err, IsNil)
c.Assert(bytes.Equal(expectedHash, hash), Equals, true)
}

View File

@ -14,12 +14,13 @@
* limitations under the License.
*/
package x509
package x509_test
import (
"testing"
"time"
"github.com/minio-io/minio/pkg/utils/crypto/x509"
. "gopkg.in/check.v1"
)
@ -30,8 +31,8 @@ type MySuite struct{}
var _ = Suite(&MySuite{})
func (s *MySuite) Testing(c *C) {
certObj := Certificates{}
params := Params{
certObj := x509.Certificates{}
params := x509.Params{
Hostname: "example.com",
IsCA: false,
EcdsaCurve: "P224",

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package split
package split_test
import (
"bufio"
@ -24,6 +24,7 @@ import (
"strconv"
"testing"
"github.com/minio-io/minio/pkg/utils/split"
. "gopkg.in/check.v1"
)
@ -41,7 +42,7 @@ func (s *MySuite) TestSplitStream(c *C) {
}
bytesWriter.Flush()
reader := bytes.NewReader(bytesBuffer.Bytes())
ch := Stream(reader, 25)
ch := split.Stream(reader, 25)
var resultsBuffer bytes.Buffer
resultsWriter := bufio.NewWriter(&resultsBuffer)
for chunk := range ch {
@ -52,17 +53,17 @@ func (s *MySuite) TestSplitStream(c *C) {
}
func (s *MySuite) TestFileSplitJoin(c *C) {
err := FileWithPrefix("test-data/TESTFILE", 1024, "TESTPREFIX")
err := split.FileWithPrefix("test-data/TESTFILE", 1024, "TESTPREFIX")
c.Assert(err, IsNil)
err = FileWithPrefix("test-data/TESTFILE", 1024, "")
err = split.FileWithPrefix("test-data/TESTFILE", 1024, "")
c.Assert(err, Not(IsNil))
devnull, err := os.OpenFile(os.DevNull, 2, os.ModeAppend)
defer devnull.Close()
reader := JoinFiles(".", "ERROR")
reader := split.JoinFiles(".", "ERROR")
_, err = io.Copy(devnull, reader)
c.Assert(err, Not(IsNil))
reader = JoinFiles(".", "TESTPREFIX")
reader = split.JoinFiles(".", "TESTPREFIX")
_, err = io.Copy(devnull, reader)
c.Assert(err, IsNil)
}