Merge pull request #821 from harshavardhana/shadow

Avoid shadowing variables and enable build-time checks to detect shadowing
This commit is contained in:
Harshavardhana 2015-09-09 15:18:21 -07:00
commit 17f5df689e
5 changed files with 30 additions and 25 deletions

View File

@ -17,8 +17,10 @@ verifiers: getdeps vet fmt lint cyclo
vet:
@echo "Running $@:"
@GO15VENDOREXPERIMENT=1 go vet .
@GO15VENDOREXPERIMENT=1 go vet github.com/minio/minio/pkg...
@GO15VENDOREXPERIMENT=1 go tool vet -all *.go
@GO15VENDOREXPERIMENT=1 go tool vet -all ./pkg
@GO15VENDOREXPERIMENT=1 go tool vet -shadow=true *.go
@GO15VENDOREXPERIMENT=1 go tool vet -shadow=true ./pkg
fmt:
@echo "Running $@:"

View File

@ -525,7 +525,8 @@ func (donut API) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error
disks := make(map[int]disk.Disk)
var err *probe.Error
for _, node := range donut.nodes {
nDisks, err := node.ListDisks()
nDisks := make(map[int]disk.Disk)
nDisks, err = node.ListDisks()
if err != nil {
return nil, err.Trace()
}
@ -596,14 +597,15 @@ func (donut API) makeDonutBucket(bucketName, acl string) *probe.Error {
if _, ok := donut.buckets[bucketName]; ok {
return probe.NewError(BucketExists{Bucket: bucketName})
}
bucket, bucketMetadata, err := newBucket(bucketName, acl, donut.config.DonutName, donut.nodes)
bkt, bucketMetadata, err := newBucket(bucketName, acl, donut.config.DonutName, donut.nodes)
if err != nil {
return err.Trace()
}
nodeNumber := 0
donut.buckets[bucketName] = bucket
donut.buckets[bucketName] = bkt
for _, node := range donut.nodes {
disks, err := node.ListDisks()
disks := make(map[int]disk.Disk)
disks, err = node.ListDisks()
if err != nil {
return err.Trace()
}
@ -666,11 +668,11 @@ func (donut API) listDonutBuckets() *probe.Error {
}
bucketName := splitDir[0]
// we don't need this once we cache from makeDonutBucket()
bucket, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes)
bkt, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes)
if err != nil {
return err.Trace()
}
donut.buckets[bucketName] = bucket
donut.buckets[bucketName] = bkt
}
return nil
}

View File

@ -24,8 +24,8 @@ import (
// Info - return info about donut configuration
func (donut API) Info() (nodeDiskMap map[string][]string, err *probe.Error) {
nodeDiskMap = make(map[string][]string)
for nodeName, node := range donut.nodes {
disks, err := node.ListDisks()
for nodeName, n := range donut.nodes {
disks, err := n.ListDisks()
if err != nil {
return nil, err.Trace()
}
@ -43,11 +43,11 @@ func (donut API) AttachNode(hostname string, disks []string) *probe.Error {
if hostname == "" || len(disks) == 0 {
return probe.NewError(InvalidArgument{})
}
node, err := newNode(hostname)
n, err := newNode(hostname)
if err != nil {
return err.Trace()
}
donut.nodes[hostname] = node
donut.nodes[hostname] = n
for i, d := range disks {
newDisk, err := disk.New(d)
if err != nil {
@ -56,7 +56,7 @@ func (donut API) AttachNode(hostname string, disks []string) *probe.Error {
if err := newDisk.MakeDir(donut.config.DonutName); err != nil {
return err.Trace()
}
if err := node.AttachDisk(newDisk, i); err != nil {
if err := n.AttachDisk(newDisk, i); err != nil {
return err.Trace()
}
}

View File

@ -62,18 +62,18 @@ func setCommonHeaders(w http.ResponseWriter, acceptsType string, contentLength i
// Write error response headers
func encodeErrorResponse(response interface{}, acceptsType contentType) []byte {
var bytesBuffer bytes.Buffer
var encoder encoder
var e encoder
// write common headers
switch acceptsType {
case xmlContentType:
encoder = xml.NewEncoder(&bytesBuffer)
e = xml.NewEncoder(&bytesBuffer)
case jsonContentType:
encoder = json.NewEncoder(&bytesBuffer)
e = json.NewEncoder(&bytesBuffer)
// by default, even if an unknown Accept header is received, handle it by sending an XML content-type response
default:
encoder = xml.NewEncoder(&bytesBuffer)
e = xml.NewEncoder(&bytesBuffer)
}
encoder.Encode(response)
e.Encode(response)
return bytesBuffer.Bytes()
}
@ -105,14 +105,14 @@ func setObjectHeaders(w http.ResponseWriter, metadata donut.ObjectMetadata, cont
}
func encodeSuccessResponse(response interface{}, acceptsType contentType) []byte {
var encoder encoder
var e encoder
var bytesBuffer bytes.Buffer
switch acceptsType {
case xmlContentType:
encoder = xml.NewEncoder(&bytesBuffer)
e = xml.NewEncoder(&bytesBuffer)
case jsonContentType:
encoder = json.NewEncoder(&bytesBuffer)
e = json.NewEncoder(&bytesBuffer)
}
encoder.Encode(response)
e.Encode(response)
return bytesBuffer.Bytes()
}

View File

@ -67,13 +67,14 @@ func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
metadata, err := api.Donut.GetObjectMetadata(bucket, object, signature)
if err == nil {
httpRange, err := getRequestedRange(req.Header.Get("Range"), metadata.Size)
var hrange *httpRange
hrange, err = getRequestedRange(req.Header.Get("Range"), metadata.Size)
if err != nil {
writeErrorResponse(w, req, InvalidRange, acceptsContentType, req.URL.Path)
return
}
setObjectHeaders(w, metadata, httpRange)
if _, err := api.Donut.GetObject(w, bucket, object, httpRange.start, httpRange.length); err != nil {
setObjectHeaders(w, metadata, hrange)
if _, err = api.Donut.GetObject(w, bucket, object, hrange.start, hrange.length); err != nil {
// unable to write headers, we've already printed data. Just close the connection.
log.Error.Println(err.Trace())
return