XL: Make allocations simpler, avoid redundant allocs. (#1961)

- Reduce the 10MiB buffers for loopy calls to 128KiB.
- Start using a 128KiB buffer where needed.
Harshavardhana
2016-06-24 02:06:23 -07:00
committed by GitHub
parent ff9fc22c72
commit e8990e42c2
11 changed files with 374 additions and 179 deletions


@@ -17,6 +17,7 @@
 package main
 
 import (
+	"bytes"
 	"encoding/json"
 	"path"
 	"sort"
@@ -69,16 +70,25 @@ func (u uploadsV1) Index(uploadID string) int {
 // readUploadsJSON - get all the saved uploads JSON.
 func readUploadsJSON(bucket, object string, disk StorageAPI) (uploadIDs uploadsV1, err error) {
+	// Staging buffer of 128KiB kept for reading `uploads.json`.
+	var buf = make([]byte, 128*1024)
+	// Writer holding `uploads.json` content.
+	var buffer = new(bytes.Buffer)
 	uploadJSONPath := path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile)
-	// Read all of 'uploads.json'
-	buffer, rErr := readAll(disk, minioMetaBucket, uploadJSONPath)
-	if rErr != nil {
-		return uploadsV1{}, rErr
+	// Reads entire `uploads.json`.
+	if err = copyBuffer(buffer, disk, minioMetaBucket, uploadJSONPath, buf); err != nil {
+		return uploadsV1{}, err
 	}
-	rErr = json.Unmarshal(buffer, &uploadIDs)
-	if rErr != nil {
-		return uploadsV1{}, rErr
+	// Decode `uploads.json`.
+	d := json.NewDecoder(buffer)
+	if err = d.Decode(&uploadIDs); err != nil {
+		return uploadsV1{}, err
 	}
+	// Success.
 	return uploadIDs, nil
 }
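
The copyBuffer helper called by the new code is presumably defined elsewhere in this changeset and is not part of the hunk above. As a rough sketch of the pattern, assuming StorageAPI exposes a ReadFile(volume, path string, offset int64, buf []byte) (int64, error) method (that signature, the io.EOF handling, and the package/interface names below are assumptions for illustration, not taken from this diff), it could look like:

package storage

import "io"

// Assumed minimal view of StorageAPI for this sketch; the real interface
// in the repository has more methods.
type StorageAPI interface {
	ReadFile(volume, path string, offset int64, buf []byte) (int64, error)
}

// copyBuffer sketch: stream a file from `disk` into `w`, reusing the
// caller-supplied staging buffer instead of allocating one per call.
func copyBuffer(w io.Writer, disk StorageAPI, volume, filePath string, buf []byte) error {
	var offset int64
	for {
		n, rErr := disk.ReadFile(volume, filePath, offset, buf)
		if n > 0 {
			// Flush whatever was read before checking for errors.
			if _, wErr := w.Write(buf[:n]); wErr != nil {
				return wErr
			}
			offset += n
		}
		if rErr != nil {
			if rErr == io.EOF {
				// Reached the end of the file.
				return nil
			}
			return rErr
		}
		if n < int64(len(buf)) {
			// Short read without an error: nothing more to read.
			return nil
		}
	}
}

The point of the buf parameter is that the 128KiB staging buffer is allocated once by the caller and reused for every chunk, rather than each read path allocating a fresh multi-megabyte slice.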