Fix issues with multipart upload

Harshavardhana 2015-07-06 16:18:39 -07:00
parent 474954022e
commit 8b94c53345
4 changed files with 26 additions and 19 deletions

View File

@@ -85,8 +85,6 @@ func (r *Cache) SetMaxSize(maxSize uint64) {
 // Stats get current cache statistics
 func (r *Cache) Stats() Stats {
-r.Lock()
-defer r.Unlock()
 return Stats{
 Bytes: r.currentSize,
 Items: r.items.Len(),
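For reference, a minimal sketch of what a caller of Stats() sees; only the Bytes and Items fields appear in the hunk above, so the struct below is a trimmed stand-in rather than the package's full definition.

package main

import "fmt"

// Stats here is a stand-in limited to the two fields visible in the hunk;
// the real cache may expose more.
type Stats struct {
    Bytes uint64 // total size of cached data, in bytes
    Items int    // number of entries currently held
}

func main() {
    s := Stats{Bytes: 4096, Items: 3}
    fmt.Printf("cache holds %d items (%d bytes)\n", s.Items, s.Bytes)
}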

View File

@@ -113,7 +113,12 @@ func New() (Interface, error) {
 return nil, iodine.New(err, nil)
 }
 for k, v := range buckets {
-a.storedBuckets.Set(k, v)
+var newBucket = storedBucket{}
+newBucket.objectMetadata = make(map[string]ObjectMetadata)
+newBucket.multiPartSession = make(map[string]MultiPartSession)
+newBucket.partMetadata = make(map[int]PartMetadata)
+newBucket.bucketMetadata = v
+a.storedBuckets.Set(k, newBucket)
 }
 }
 return a, nil
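For context, the shape of storedBucket this initialization implies, reconstructed from the field accesses above. The field names and map types come from the make() calls in the hunk; treating the range value v as a BucketMetadata is an assumption, and ObjectMetadata, PartMetadata and MultiPartSession are taken to be the package's existing metadata types.

// Sketch only, reconstructed from the initializers above.
type storedBucket struct {
    bucketMetadata   BucketMetadata              // assumed type of the v from the range loop
    objectMetadata   map[string]ObjectMetadata   // keyed by object name
    partMetadata     map[int]PartMetadata        // keyed by part number
    multiPartSession map[string]MultiPartSession // keyed by upload session
}

Assigning to an entry of a nil map panics at runtime, so creating the three maps up front is what lets later writes of part metadata and multipart sessions succeed; that is presumably the multipart issue the commit title refers to.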

View File

@ -22,6 +22,8 @@ import (
"encoding/base64"
"encoding/hex"
"io/ioutil"
"os"
"path/filepath"
"testing"
. "github.com/minio/check"
@@ -29,13 +31,20 @@ import (
 func TestCache(t *testing.T) { TestingT(t) }
-type MyCacheSuite struct{}
+type MyCacheSuite struct {
+root string
+}
 var _ = Suite(&MyCacheSuite{})
 var dc Interface
 func (s *MyCacheSuite) SetUpSuite(c *C) {
+root, err := ioutil.TempDir(os.TempDir(), "donut-")
+c.Assert(err, IsNil)
+s.root = root
+customConfigPath = filepath.Join(root, "donut.json")
-var err error
 dc, err = New()
 c.Assert(err, IsNil)
@@ -46,6 +55,10 @@ func (s *MyCacheSuite) SetUpSuite(c *C) {
 c.Assert(len(buckets), Equals, 0)
 }
+func (s *MyDonutSuite) TearDownSuite(c *C) {
+os.RemoveAll(s.root)
+}
 // test make bucket without name
 func (s *MyCacheSuite) TestBucketWithoutNameFails(c *C) {
 // fail to create new bucket without a name
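As a standalone illustration of the setup/teardown pattern the test file adopts above: create one scratch directory for the whole suite, remember it on the suite, and remove it at the end. The suite name, package name and temp-dir prefix below are placeholders; only the ioutil/os calls and the gocheck-style hooks mirror the diff.

package cache

import (
    "io/ioutil"
    "os"
    "testing"

    . "github.com/minio/check"
)

func Test(t *testing.T) { TestingT(t) }

type MySuite struct {
    root string // scratch directory created once for the whole suite
}

var _ = Suite(&MySuite{})

func (s *MySuite) SetUpSuite(c *C) {
    root, err := ioutil.TempDir(os.TempDir(), "example-")
    c.Assert(err, IsNil)
    s.root = root // remembered so TearDownSuite can delete it
}

func (s *MySuite) TearDownSuite(c *C) {
    os.RemoveAll(s.root) // clean up everything written under root
}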

View File

@@ -25,7 +25,7 @@ type app struct {
 errors chan error
 }
-func newApp(servers []*http.Server) *app {
+func newApp(servers ...*http.Server) *app {
 return &app{
 servers: servers,
 net: &nimbleNet{},
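A small sketch of what the variadic signature changes at the call sites; newApp below is a stub with the same parameter shape, not the package's real constructor. Inside the function, servers is an ordinary []*http.Server either way, which is why servers: servers still compiles unchanged.

package main

import (
    "fmt"
    "net/http"
)

// Stub with the same parameter shape as the new newApp above.
func newApp(servers ...*http.Server) int {
    return len(servers)
}

func main() {
    s1 := &http.Server{Addr: ":9000"}
    s2 := &http.Server{Addr: ":9001"}

    fmt.Println(newApp(s1, s2))                    // callers can list servers directly
    fmt.Println(newApp([]*http.Server{s1, s2}...)) // or forward an existing slice, as ListenAndServe now does
}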
@@ -64,7 +64,7 @@ func (a *app) wait() {
 go func(s httpdown.Server) {
 defer wg.Done()
 if err := s.Wait(); err != nil {
-a.errors <- err
+a.errors <- iodine.New(err, nil)
 }
 }(s)
 }
@@ -76,7 +76,7 @@ func (a *app) term(wg *sync.WaitGroup) {
 go func(s httpdown.Server) {
 defer wg.Done()
 if err := s.Stop(); err != nil {
-a.errors <- err
+a.errors <- iodine.New(err, nil)
 }
 }(s)
 }
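For context on the two a.errors sends above: iodine.New(err, nil) annotates an error before it is handed off, instead of forwarding the bare error. A minimal sketch of that pattern follows; only the iodine.New call itself is taken from the diff, and the import path is assumed.

package main

import (
    "errors"
    "fmt"

    "github.com/minio/minio/pkg/iodine" // assumed import path
)

func main() {
    errCh := make(chan error, 1)

    if err := errors.New("listener closed"); err != nil {
        // Wrap before sending so the receiver gets the annotated error,
        // not just the bare string.
        errCh <- iodine.New(err, nil)
    }

    fmt.Println(<-errCh)
}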
@@ -84,7 +84,7 @@ func (a *app) term(wg *sync.WaitGroup) {
 func (a *app) signalHandler(wg *sync.WaitGroup) {
 ch := make(chan os.Signal, 10)
-signal.Notify(ch, syscall.SIGTERM, syscall.SIGUSR2)
+signal.Notify(ch, syscall.SIGTERM, os.Interrupt)
 for {
 sig := <-ch
 switch sig {
@@ -94,7 +94,7 @@ func (a *app) signalHandler(wg *sync.WaitGroup) {
 signal.Stop(ch)
 a.term(wg)
 return
-case syscall.SIGUSR2:
+case os.Interrupt:
 // we only return here if there's an error, otherwise the new process
 // will send us a TERM when it's ready to trigger the actual shutdown.
 if _, err := a.net.StartProcess(); err != nil {
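A standalone sketch of the signal plumbing after these two hunks: SIGTERM still drives graceful termination, while os.Interrupt (Ctrl-C) takes over the restart role that syscall.SIGUSR2 used to have. The handler body is simplified; only the Notify call and the case labels mirror the diff.

package main

import (
    "fmt"
    "os"
    "os/signal"
    "syscall"
)

func main() {
    ch := make(chan os.Signal, 10)
    signal.Notify(ch, syscall.SIGTERM, os.Interrupt)

    for sig := range ch {
        switch sig {
        case syscall.SIGTERM:
            signal.Stop(ch) // no more signals; begin graceful shutdown
            fmt.Println("terminating")
            return
        case os.Interrupt:
            // In the real handler this is where the replacement process
            // is started before waiting for its SIGTERM.
            fmt.Println("interrupt: would start replacement process")
            return
        }
    }
}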
@@ -107,9 +107,7 @@ func (a *app) signalHandler(wg *sync.WaitGroup) {
 // ListenAndServe will serve the given http.Servers and will monitor for signals
 // allowing for graceful termination (SIGTERM) or restart (SIGHUP).
 func ListenAndServe(servers ...*http.Server) error {
-ppid := os.Getppid()
-a := newApp(servers)
+a := newApp(servers...)
 // Acquire Listeners
 if err := a.listen(); err != nil {
@@ -119,13 +117,6 @@ func ListenAndServe(servers ...*http.Server) error {
 // Start serving.
 a.serve()
-// Close the parent if we inherited and it wasn't init that started us.
-if os.Getenv("LISTEN_FDS") != "" && ppid != 1 {
-if err := syscall.Kill(ppid, syscall.SIGTERM); err != nil {
-return iodine.New(err, nil)
-}
-}
 waitDone := make(chan struct{})
 go func() {
 defer close(waitDone)
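Taken together, the changes in this file leave the public entry point unchanged for callers. A hedged usage sketch follows: only the ListenAndServe(servers ...*http.Server) error signature is taken from the diff, while the import path of the package is assumed.

package main

import (
    "log"
    "net/http"

    nimble "github.com/minio/minio/pkg/server/nimble" // assumed import path
)

func main() {
    api := &http.Server{Addr: ":9000", Handler: http.NotFoundHandler()}
    web := &http.Server{Addr: ":9001", Handler: http.NotFoundHandler()}

    // Both servers are passed variadically, which is why newApp(servers...)
    // above forwards the slice with "...".
    if err := nimble.ListenAndServe(api, web); err != nil {
        log.Fatal(err)
    }
}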