Mirror of https://github.com/minio/minio.git (synced 2025-04-01 10:13:42 -04:00)
Merge pull request #713 from harshavardhana/pr_out_across_donut_split_nimble_some_code_cleanup
This commit is contained in: commit fadadf0e1a
commands.go | 16
@@ -1,3 +1,19 @@
+/*
+ * Minimalist Object Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package main
 
 import (
pkg/donut/.gitignore (vendored) | 2
@@ -1,2 +0,0 @@
-donut
-build-constants.go
pkg/donut/acl.go (new file) | 47
@@ -0,0 +1,47 @@
+package donut
+
+// BucketACL - bucket level access control
+type BucketACL string
+
+// different types of ACL's currently supported for buckets
+const (
+	BucketPrivate         = BucketACL("private")
+	BucketPublicRead      = BucketACL("public-read")
+	BucketPublicReadWrite = BucketACL("public-read-write")
+)
+
+func (b BucketACL) String() string {
+	return string(b)
+}
+
+// IsPrivate - is acl Private
+func (b BucketACL) IsPrivate() bool {
+	return b == BucketACL("private")
+}
+
+// IsPublicRead - is acl PublicRead
+func (b BucketACL) IsPublicRead() bool {
+	return b == BucketACL("public-read")
+}
+
+// IsPublicReadWrite - is acl PublicReadWrite
+func (b BucketACL) IsPublicReadWrite() bool {
+	return b == BucketACL("public-read-write")
+}
+
+// IsValidBucketACL - is provided acl string supported
+func IsValidBucketACL(acl string) bool {
+	switch acl {
+	case "private":
+		fallthrough
+	case "public-read":
+		fallthrough
+	case "public-read-write":
+		return true
+	case "":
+		// by default its "private"
+		return true
+	default:
+		return false
+	}
+}
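Note (not part of the patch): a minimal sketch of how the new BucketACL helpers in pkg/donut/acl.go might be used by a caller, assuming the in-repo import path github.com/minio/minio/pkg/donut; the validate-then-convert flow shown here is illustrative only.

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/donut" // assumed in-repo import path
)

func main() {
	// An ACL usually arrives as a plain string (e.g. from a request header);
	// validate it before converting to the typed BucketACL.
	requested := "public-read"
	if !donut.IsValidBucketACL(requested) {
		fmt.Println("unsupported ACL:", requested)
		return
	}
	acl := donut.BucketACL(requested)
	fmt.Println(acl.IsPrivate())         // false
	fmt.Println(acl.IsPublicRead())      // true
	fmt.Println(acl.IsPublicReadWrite()) // false
	fmt.Println(acl.String())            // public-read
}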
@@ -444,7 +444,7 @@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta
 		writer.CloseWithError(iodine.New(err, nil))
 		return
 	}
-	_, err = io.Copy(mwriter, bytes.NewBuffer(decodedData))
+	_, err = io.Copy(mwriter, bytes.NewReader(decodedData))
 	if err != nil {
 		writer.CloseWithError(iodine.New(err, nil))
 		return
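Aside (not part of the patch): the bytes.NewBuffer -> bytes.NewReader swap above keeps the same io.Copy behavior while using the lighter, read-only bytes.Reader; a standalone sketch of the pattern:

package main

import (
	"bytes"
	"io"
	"os"
)

func main() {
	decodedData := []byte("decoded block contents")

	// bytes.NewReader yields a read-only, seekable io.Reader over the slice;
	// bytes.NewBuffer would also satisfy io.Reader here, but carries write
	// machinery this code path never needs.
	if _, err := io.Copy(os.Stdout, bytes.NewReader(decodedData)); err != nil {
		panic(err)
	}
}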
@@ -473,7 +473,7 @@ func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers []io.ReadC
 	if blockSize < totalLeft {
 		curBlockSize = blockSize
 	} else {
-		curBlockSize = totalLeft // cast is safe, blockSize in if protects
+		curBlockSize = totalLeft
 	}
 	curChunkSize, err := encoder.GetEncodedBlockLen(int(curBlockSize))
 	if err != nil {
@@ -20,10 +20,52 @@ import (
 	"bufio"
 	"bytes"
 	"io"
+	"regexp"
 	"sort"
 	"strings"
+	"unicode/utf8"
 )
 
+// IsValidBucket - verify bucket name in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func IsValidBucket(bucket string) bool {
+	if len(bucket) < 3 || len(bucket) > 63 {
+		return false
+	}
+	if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
+		return false
+	}
+	if match, _ := regexp.MatchString("\\.\\.", bucket); match == true {
+		return false
+	}
+	// We don't support buckets with '.' in them
+	match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket)
+	return match
+}
+
+// IsValidObjectName - verify object name in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func IsValidObjectName(object string) bool {
+	if strings.TrimSpace(object) == "" {
+		return false
+	}
+	if len(object) > 1024 || len(object) == 0 {
+		return false
+	}
+	if !utf8.ValidString(object) {
+		return false
+	}
+	return true
+}
+
+// IsValidPrefix - verify prefix name is correct, an empty prefix is valid
+func IsValidPrefix(prefix string) bool {
+	if strings.TrimSpace(prefix) == "" {
+		return true
+	}
+	return IsValidObjectName(prefix)
+}
+
 // ProxyWriter implements io.Writer to trap written bytes
 type ProxyWriter struct {
 	writer io.Writer
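Note (not part of the patch): a hedged example of calling the name validators added above, assuming they are exported from package donut as in the removed file further down:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/donut" // assumed in-repo import path
)

func main() {
	fmt.Println(donut.IsValidBucket("my-bucket"))      // true
	fmt.Println(donut.IsValidBucket("ab"))             // false: shorter than 3 characters
	fmt.Println(donut.IsValidBucket("has.dots"))       // false: '.' is not supported
	fmt.Println(donut.IsValidObjectName("photos/cat")) // true
	fmt.Println(donut.IsValidPrefix(""))               // true: empty prefix is valid
}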
@@ -1,78 +0,0 @@
-/*
- * Minimalist Object Storage, (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package donut
-
-import (
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-// Date - [0000-00-00]
-type Date struct {
-	Year  int16
-	Month byte
-	Day   byte
-}
-
-// String output in yyyy-mm-dd format
-func (d Date) String() string {
-	return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
-}
-
-// IsZero true if date is 0000-00-00
-func (d Date) IsZero() bool {
-	return d.Day == 0 && d.Month == 0 && d.Year == 0
-}
-
-// Convert string date in format YYYY-MM-DD to Date.
-// Leading and trailing spaces are ignored. If format is invalid returns zero.
-func parseDate(str string) (d Date, err error) {
-	str = strings.TrimSpace(str)
-	if str == "0000-00-00" {
-		return
-	}
-	var (
-		y, m, n int
-	)
-	if len(str) != 10 || str[4] != '-' || str[7] != '-' {
-		err = errors.New("Invalid 0000-00-000 style DATE string: " + str)
-		return
-	}
-	if y, err = strconv.Atoi(str[0:4]); err != nil {
-		return
-	}
-	if m, err = strconv.Atoi(str[5:7]); err != nil {
-		return
-	}
-	if m < 1 || m > 12 {
-		err = errors.New("Invalid 0000-00-000 style DATE string: " + str)
-		return
-	}
-	if n, err = strconv.Atoi(str[8:10]); err != nil {
-		return
-	}
-	if n < 1 || n > 31 {
-		err = errors.New("Invalid 0000-00-000 style DATE string: " + str)
-		return
-	}
-	d.Year = int16(y)
-	d.Month = byte(m)
-	d.Day = byte(n)
-	return
-}
@@ -152,7 +152,7 @@ func (donut API) GetObject(w io.Writer, bucket string, object string) (int64, er
 		return 0, iodine.New(err, nil)
 	}
 	/// cache object read from disk
-	ok := donut.objects.Set(objectKey, pw.writtenBytes)
+	ok := donut.objects.Append(objectKey, pw.writtenBytes)
 	pw.writtenBytes = nil
 	go debug.FreeOSMemory()
 	if !ok {
@@ -208,7 +208,7 @@ func (donut API) GetPartialObject(w io.Writer, bucket, object string, start, len
 	if err != nil {
 		return 0, iodine.New(err, nil)
 	}
-	ok := donut.objects.Set(objectKey, pw.writtenBytes)
+	ok := donut.objects.Append(objectKey, pw.writtenBytes)
 	pw.writtenBytes = nil
 	go debug.FreeOSMemory()
 	if !ok {
@@ -25,6 +25,8 @@ import (
 	"os"
 	"strconv"
 	"strings"
+
+	"github.com/minio/minio/pkg/iodine"
 )
 
 // Message - message structure for results from the Stream goroutine
@@ -52,7 +54,9 @@ func Stream(reader io.Reader, chunkSize uint64) <-chan Message {
 	return ch
 }
 
-func splitStreamGoRoutine(reader io.Reader, chunkSize uint64, ch chan Message) {
+func splitStreamGoRoutine(reader io.Reader, chunkSize uint64, ch chan<- Message) {
+	defer close(ch)
+
 	// we read until EOF or another error
 	var readError error
 
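Note (not part of the patch): the chan Message -> chan<- Message change narrows the goroutine's side of the channel to send-only, and defer close(ch) replaces the explicit close removed in the next hunk, guaranteeing the channel closes on every return path. A standalone sketch of the same producer pattern with illustrative names:

package main

import "fmt"

// produce sends values on a send-only channel and closes it on every return
// path, so a range loop on the receiving side terminates cleanly.
func produce(ch chan<- int) {
	defer close(ch)
	for i := 0; i < 3; i++ {
		ch <- i
	}
}

func main() {
	ch := make(chan int)
	go produce(ch)
	for v := range ch {
		fmt.Println(v) // 0, 1, 2
	}
}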
@@ -88,8 +92,6 @@ func splitStreamGoRoutine(reader io.Reader, chunkSize uint64, ch chan Message) {
 	if readError != io.EOF {
 		ch <- Message{nil, readError}
 	}
-	// close the channel, signaling the channel reader that the stream is complete
-	close(ch)
 }
 
 // JoinFiles reads from a given directory, joins data in chunks with prefix and sends
|
|||||||
// fmt.Println(buf)
|
// fmt.Println(buf)
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
func JoinFiles(dirname string, inputPrefix string) (io.Reader, error) {
|
func JoinFiles(dirname string, inputPrefix string) io.Reader {
|
||||||
reader, writer := io.Pipe()
|
reader, writer := io.Pipe()
|
||||||
fileInfos, readError := ioutil.ReadDir(dirname)
|
fileInfos, readError := ioutil.ReadDir(dirname)
|
||||||
if readError != nil {
|
if readError != nil {
|
||||||
writer.CloseWithError(readError)
|
writer.CloseWithError(readError)
|
||||||
return nil, readError
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var newfileInfos []os.FileInfo
|
var newfileInfos []os.FileInfo
|
||||||
@ -119,16 +121,16 @@ func JoinFiles(dirname string, inputPrefix string) (io.Reader, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(newfileInfos) == 0 {
|
if len(newfileInfos) == 0 {
|
||||||
nofilesError := errors.New("no files found for given prefix " + inputPrefix)
|
nofilesError := iodine.New(errors.New("no files found for given prefix "+inputPrefix), nil)
|
||||||
writer.CloseWithError(nofilesError)
|
writer.CloseWithError(nofilesError)
|
||||||
return nil, nofilesError
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
go joinFilesGoRoutine(newfileInfos, writer)
|
go joinFilesInGoRoutine(newfileInfos, writer)
|
||||||
return reader, nil
|
return reader
|
||||||
}
|
}
|
||||||
|
|
||||||
func joinFilesGoRoutine(fileInfos []os.FileInfo, writer *io.PipeWriter) {
|
func joinFilesInGoRoutine(fileInfos []os.FileInfo, writer *io.PipeWriter) {
|
||||||
for _, fileInfo := range fileInfos {
|
for _, fileInfo := range fileInfos {
|
||||||
file, err := os.Open(fileInfo.Name())
|
file, err := os.Open(fileInfo.Name())
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
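Note (not part of the patch): with JoinFiles no longer returning an error, callers check for a nil reader and let later failures surface from reads on the pipe, as the updated test further down also shows. A hedged caller sketch; the import path is an assumption:

package main

import (
	"io"
	"os"

	"github.com/minio/minio/pkg/utils/split" // assumed in-repo import path for the split package
)

func main() {
	// nil means the directory could not be read or no chunk files matched
	// the prefix; later join errors arrive through the pipe on Read.
	reader := split.JoinFiles(".", "TESTPREFIX")
	if reader == nil {
		os.Exit(1)
	}
	if _, err := io.Copy(os.Stdout, reader); err != nil {
		panic(err)
	}
}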
@@ -159,14 +161,11 @@ func FileWithPrefix(filename string, chunkSize uint64, outputPrefix string) erro
 		return errors.New("Invalid argument outputPrefix cannot be empty string")
 	}
 
-	// start stream splitting goroutine
-	ch := Stream(file, chunkSize)
-
 	// used to write each chunk out as a separate file. {{outputPrefix}}.{{i}}
 	i := 0
 
 	// write each chunk out to a separate file
-	for chunk := range ch {
+	for chunk := range Stream(file, chunkSize) {
 		if chunk.Err != nil {
 			return chunk.Err
 		}
@@ -62,11 +62,11 @@ func (s *MySuite) TestFileSplitJoin(c *C) {
 	defer devnull.Close()
 
 	var reader io.Reader
-	reader, err = split.JoinFiles(".", "ERROR")
-	c.Assert(err, Not(IsNil))
+	reader = split.JoinFiles(".", "ERROR")
+	c.Assert(reader, IsNil)
 
-	reader, err = split.JoinFiles(".", "TESTPREFIX")
-	c.Assert(err, IsNil)
+	reader = split.JoinFiles(".", "TESTPREFIX")
+	c.Assert(reader, Not(IsNil))
 	_, err = io.Copy(devnull, reader)
 	c.Assert(err, IsNil)
 }
@@ -1,109 +0,0 @@
-/*
- * Minimalist Object Storage, (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package donut
-
-import (
-	"regexp"
-	"strings"
-	"unicode/utf8"
-)
-
-// BucketACL - bucket level access control
-type BucketACL string
-
-// different types of ACL's currently supported for buckets
-const (
-	BucketPrivate         = BucketACL("private")
-	BucketPublicRead      = BucketACL("public-read")
-	BucketPublicReadWrite = BucketACL("public-read-write")
-)
-
-func (b BucketACL) String() string {
-	return string(b)
-}
-
-// IsPrivate - is acl Private
-func (b BucketACL) IsPrivate() bool {
-	return b == BucketACL("private")
-}
-
-// IsPublicRead - is acl PublicRead
-func (b BucketACL) IsPublicRead() bool {
-	return b == BucketACL("public-read")
-}
-
-// IsPublicReadWrite - is acl PublicReadWrite
-func (b BucketACL) IsPublicReadWrite() bool {
-	return b == BucketACL("public-read-write")
-}
-
-// IsValidBucketACL - is provided acl string supported
-func IsValidBucketACL(acl string) bool {
-	switch acl {
-	case "private":
-		fallthrough
-	case "public-read":
-		fallthrough
-	case "public-read-write":
-		return true
-	case "":
-		// by default its "private"
-		return true
-	default:
-		return false
-	}
-}
-
-// IsValidBucket - verify bucket name in accordance with
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
-func IsValidBucket(bucket string) bool {
-	if len(bucket) < 3 || len(bucket) > 63 {
-		return false
-	}
-	if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
-		return false
-	}
-	if match, _ := regexp.MatchString("\\.\\.", bucket); match == true {
-		return false
-	}
-	// We don't support buckets with '.' in them
-	match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket)
-	return match
-}
-
-// IsValidObjectName - verify object name in accordance with
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
-func IsValidObjectName(object string) bool {
-	if strings.TrimSpace(object) == "" {
-		return false
-	}
-	if len(object) > 1024 || len(object) == 0 {
-		return false
-	}
-	if !utf8.ValidString(object) {
-		return false
-	}
-	return true
-}
-
-// IsValidPrefix - verify prefix name is correct, an empty prefix is valid
-func IsValidPrefix(prefix string) bool {
-	if strings.TrimSpace(prefix) == "" {
-		return true
-	}
-	return IsValidObjectName(prefix)
-}
@@ -1,3 +1,19 @@
+/*
+ * Minimalist Object Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 // Package nimble provides easy to use graceful restart for a set of HTTP services
 //
 // This package originally from https://github.com/facebookgo/grace
@@ -1,3 +1,19 @@
+/*
+ * Minimalist Object Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package nimble
 
 import (