diff --git a/pkg/madmin/api.go b/pkg/madmin/api.go index ef010583b..713ea647b 100644 --- a/pkg/madmin/api.go +++ b/pkg/madmin/api.go @@ -446,7 +446,7 @@ func (c AdminClient) newRequest(method string, reqData requestData) (req *http.R } // Add signature version '4' authorization header. - req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location) + req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "", location) // Return request. return req, nil diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 000000000..ac034e525 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,12 @@ +.PHONY: build test bench vet + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. -test.benchmem + +vet: + go vet diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 000000000..85947422d --- /dev/null +++ b/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,740 @@ +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +To use a tagged revision: + + go get gopkg.in/ini.v1 + +To use with latest changes: + + go get github.com/go-ini/ini + +Please add `-u` flag to update in the future. + +### Testing + +If you want to test on your machine, please apply `-t` flag: + + go get -t gopkg.in/ini.v1 + +Please add `-u` flag to update in the future. + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data in type `[]byte`, a file name with type `string` or `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error. 
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is, whenever a file becomes available while you're calling the `Reload` method, it will be loaded as usual.
+
+#### Ignore cases of key name
+
+When you do not care about the case of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 are exactly the same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 are exactly the same key object
+key1, err := cfg.Section("").GetKey("Key")
+key2, err := cfg.Section("").GetKey("KeY")
+```
+
+#### MySQL-like boolean key
+
+MySQL's configuration allows a key without a value as follows:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default, this is considered a missing value. But if you know you're going to deal with those cases, you can assign advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of such keys is always `true`, and when you save to a file, they keep the same format as they were read in.
+
+To generate such keys in your program, you could use `NewBooleanKey`:
+
+```go
+key, err := sec.NewBooleanKey("skip-host-cache")
+```
+
+#### Comment
+
+Note that the following content is treated as comments:
+
+1. Lines beginning with `#` or `;`
+2. Words after `#` or `;`
+3. Words after a section name (i.e. words after `[some section name]`)
+
+If you want to save a value containing `#` or `;`, quote it with ``` ` ``` or ``` """ ```.
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just give an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code could make your life easier:
+
+```go
+section := cfg.Section("section name")
+```
+
+What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
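+
+Here is a minimal sketch of that auto-create behavior (the section name `stats` and the key name `missing` are made-up placeholders for illustration):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"gopkg.in/ini.v1"
+)
+
+func main() {
+	cfg := ini.Empty()
+
+	// "stats" does not exist yet, so Section creates it on the fly.
+	sec := cfg.Section("stats")
+	fmt.Println(cfg.SectionStrings()) // [DEFAULT stats]
+
+	// Key behaves the same way: a missing key is created with an empty value.
+	fmt.Println(sec.Key("missing").String() == "") // true
+}
+```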
+ +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To check if a key exists: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.Section("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +To check if raw value exists: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Well, I hate continuation lines, how do I disable that? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +Holy crap! + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. + +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. + +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### Auto-split values into a slice + +To use zero value of type for invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +To exclude invalid values out of result slice: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +Or to return nothing but error when have invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### Save your configuration + +Finally, it's time to save 
your configuration somewhere.
+
+A typical way to save the configuration is to write it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is to write to an `io.Writer`:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+By default, spaces are used to align the "=" sign between keys and values. To disable that:
+
+```go
+ini.PrettyFormat = false
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section; `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try its parent section, and so on, until there is no parent section.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### Retrieve parent keys available to a child section
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### Unparseable Sections
+
+Sometimes you have sections that do not contain key-value pairs but raw content. To handle such cases, you can use `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
+<1> This slide has the fuel listed in the wrong units `)
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1> This slide has the fuel listed in the wrong units
+------ end --- */
+```
+
+### Auto-increment Key Names
+
+If a key name is `-` in the data source, it is treated as the special syntax for auto-increment key names, with the counter starting from 1, and each section keeps its own counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+	Content string
+	Cities  []string
+}
+
+type Person struct {
+	Name string
+	Age  int `ini:"age"`
+	Male bool
+	Born time.Time
+	Note
+	Created time.Time `ini:"-"`
+}
+
+func main() {
+	cfg, err := ini.Load("path/to/ini")
+	// ...
+	p := new(Person)
+	err = cfg.MapTo(p)
+	// ...
+
+	// Things can be simpler.
+	err = ini.MapTo(p, "path/to/ini")
+	// ...
+
+	// Just map a section? Fine.
+	n := new(Note)
+	err = cfg.Section("Note").MapTo(n)
+	// ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map to the struct. The value will be kept as it is if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. + +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Value Mapper + +To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? 
+ +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. + +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 000000000..163432db9 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,727 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + +使用一个特定版本: + + go get gopkg.in/ini.v1 + +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +#### 忽略键名的大小写 + +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 和 sec2 指向同一个分区对象 +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 和 key2 指向同一个键对象 +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +#### 类似 MySQL 配置中的布尔值键 + +MySQL 的配置文件中会出现没有具体值的布尔类型的键: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 + +如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### 关于注释 + +下述几种情况的内容将被视为注释: + +1. 所有以 `#` 或 `;` 开头的行 +2. 所有在 `#` 或 `;` 之后的内容 +3. 
分区标签后的文字 (即 `[分区名]` 之后的内容) + +如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("section name") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +判断某个键是否存在: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.Section("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判断某个原值是否存在: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! 
+ +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇靠给力啊! + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: + +```go +ini.PrettyFormat = false +``` + +## 高级用法 + +### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 获取上级父分区下的所有键名 + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### 无法解析的分区 + +如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... 
+} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 值映射器(Value Mapper) + +值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? + +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 000000000..80afe7431 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,32 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "fmt" +) + +type ErrDelimiterNotFound struct { + Line string +} + +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 000000000..5211d5abc --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,556 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. + DEFAULT_SECTION = "DEFAULT" + + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + _VERSION = "1.27.0" +) + +// Version returns current package version literal. +func Version() string { + return _VERSION +} + +var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. 
+type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + options LoadOptions + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + options: opts, + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. + IgnoreInlineComment bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string +} + +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. 
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it allows have shadow keys. +func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) 
+ delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 || DefaultHeader { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + if sec.isRawSection { + if _, err = buf.WriteString(sec.rawBody); err != nil { + return 0, err + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. 
+ alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KEY_LIST: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + for _, val := range key.ValueWithShadows() { + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { + return 0, err + } + } + } + + // Put a line between sections + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 000000000..838356af0 --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,699 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + name string + value string + isAutoIncrement bool + isBooleanType bool + + isShadow bool + shadows []*Key + + Comment string +} + +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. +func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + return []string{k.value} + } + vals := make([]string, len(k.shadows)+1) + vals[0] = k.value + for i := range k.shadows { + vals[i+1] = k.shadows[i].value + } + return vals +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. 
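+//
+// Illustrative examples (not part of the upstream comment):
+//
+//	parseBool("on")   // true, nil
+//	parseBool("Off")  // false, nil
+//	parseBool("2")    // false, error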
+func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. 
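+//
+// Illustrative usage (hypothetical section and key names):
+//
+//	// Falls back to 1024 when the raw value is missing or not a valid uint64.
+//	maxConns := cfg.Section("limits").Key("max-conns").MustUint64(1024)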
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. 
Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. 
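+//
+// Illustrative behaviour (hypothetical key name): for a raw value of "1,2,x",
+//
+//	ids, err := cfg.Section("").Key("ids").StrictInt64s(",")
+//
+// returns a non-nil err, whereas Int64s yields []int64{1, 2, 0} and ValidInt64s []int64{1, 2}.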
+func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseUints transforms strings to uints. +func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseTimesFormat transforms strings to times in given format. 
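+//
+// Illustrative note (not upstream documentation): the exported list getters share a
+// pattern implemented by the parse* helpers in this file: plain getters (Ints, Times, ...)
+// keep invalid entries as zero values (addInvalid=true), Valid* getters drop them, and
+// Strict* getters stop at the first invalid entry and return its error (returnOnInvalid=true).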
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 000000000..6c0b10745 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,361 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
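+	// e.g. (illustrative): `my=key` = value   or   """my`key""" = value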
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + + // Check continuation lines when desired + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !ignoreInlineComment { + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. 
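+//
+// Illustrative note (not upstream documentation): Load and Reload drive this method via
+// File.reload, so options such as AllowBooleanKeys, IgnoreContinuation and
+// IgnoreInlineComment take effect here.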
+func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + var inUnparseableSection bool + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 + closeIdx := bytes.LastIndex(line, []byte("]")) + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + // Treat as boolean key when desired, and whole line is key name. + if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { + kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + if err != nil { + return err + } + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 000000000..94f7375ed --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,248 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. 
+ key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + "." + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]) + } + } + return children +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 000000000..031c78b8e --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,450 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
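+	// e.g. (illustrative) "MaxSize" becomes "max_size".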
+ TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, _ = key.parseInts(strs, true, false) + case reflect.Int64: + vals, _ = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, _ = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, _ = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, _ = key.parseFloat64s(strs, true, false) + case reflectTime: + vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. 
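+//
+// Illustrative example (hypothetical field name, not upstream documentation): a struct
+// field declared as
+//
+//	Ports []int `ini:"ports" delim:","`
+//
+// arrives here with delim set to "," and is handled by setSliceWithProperType.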
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil || intVal == 0 { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. 
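+//
+// Illustrative usage from client code (hypothetical file and struct names); with
+// AllCapsUnderscore, a field such as MaxSize is looked up as the key MAX_SIZE.
+//
+//	err := ini.MapToWithMapper(&settings, ini.AllCapsUnderscore, "app.ini")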
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. +func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + sliceOf := field.Type().Elem().Kind() + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
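+//
+// Illustrative effect (not upstream documentation): with a tag such as
+// `ini:"name,omitempty"`, reflectFrom skips the field whenever isEmptyValue
+// reports true, so no key is written for it.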
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflectTime:
+		return v.Interface().(time.Time).IsZero()
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		opts := strings.SplitN(tag, ",", 2)
+		if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, opts[0])
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+			// Note: The only error here is that the section doesn't exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: Same reason as section above.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+		}
+
+	}
+	return nil
+}
+
+// ReflectFrom reflects section from given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot reflect from non-pointer struct")
+	}
+
+	return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+	return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+	cfg.NameMapper = mapper
+	return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
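+//
+// Illustrative usage from client code (hypothetical variable names):
+//
+//	err := ini.ReflectFrom(cfg, &appSettings)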
+func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go index ff8b8b109..e0019a334 100644 --- a/vendor/github.com/minio/minio-go/api-error-response.go +++ b/vendor/github.com/minio/minio-go/api-error-response.go @@ -161,6 +161,9 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) if errResp.Region == "" { errResp.Region = resp.Header.Get("x-amz-bucket-region") } + if errResp.Code == "InvalidRegion" && errResp.Region != "" { + errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region) + } // Save headers returned in the API XML error errResp.Headers = resp.Header diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go index 477a0969f..c4193e934 100644 --- a/vendor/github.com/minio/minio-go/api-get-object-file.go +++ b/vendor/github.com/minio/minio-go/api-get-object-file.go @@ -20,15 +20,17 @@ import ( "io" "os" "path/filepath" + + "github.com/minio/minio-go/pkg/s3utils" ) // FGetObject - download contents of an object to a local file. func (c Client) FGetObject(bucketName, objectName, filePath string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go index 2abd4608e..7dae0c837 100644 --- a/vendor/github.com/minio/minio-go/api-get-object.go +++ b/vendor/github.com/minio/minio-go/api-get-object.go @@ -26,11 +26,12 @@ import ( "time" "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/pkg/s3utils" ) -// GetEncryptedObject deciphers and streams data stored in the server after applying a specifed encryption materiels -func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.Reader, error) { - +// GetEncryptedObject deciphers and streams data stored in the server after applying a specifed encryption materials, +// returned stream should be closed by the caller. +func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) { if encryptMaterials == nil { return nil, ErrInvalidArgument("Unable to recognize empty encryption properties") } @@ -57,10 +58,10 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria // GetObject - returns an seekable, readable object. func (c Client) GetObject(bucketName, objectName string) (*Object, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } @@ -328,14 +329,14 @@ func (o *Object) setOffset(bytesRead int64) error { // Update the currentOffset. o.currOffset += bytesRead - if o.currOffset >= o.objectInfo.Size { + if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { return io.EOF } return nil } // Read reads up to len(b) bytes into b. 
It returns the number of -// bytes read (0 <= n <= len(p)) and any error encountered. Returns +// bytes read (0 <= n <= len(b)) and any error encountered. Returns // io.EOF upon end of file. func (o *Object) Read(b []byte) (n int, err error) { if o == nil { @@ -442,7 +443,7 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { if o.objectInfoSet { // If offset is negative than we return io.EOF. // If offset is greater than or equal to object size we return io.EOF. - if offset >= o.objectInfo.Size || offset < 0 { + if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 { return 0, io.EOF } } @@ -542,16 +543,20 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) { default: return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) case 0: - if offset > o.objectInfo.Size { + if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { return 0, io.EOF } o.currOffset = offset case 1: - if o.currOffset+offset > o.objectInfo.Size { + if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { return 0, io.EOF } o.currOffset += offset case 2: + // If we don't know the object size return an error for io.SeekEnd + if o.objectInfo.Size < 0 { + return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown") + } // Seeking to positive offset is valid for whence '2', but // since we are backing a Reader we have reached 'EOF' if // offset is positive. @@ -623,10 +628,10 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { // Validate input arguments. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, ObjectInfo{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return nil, ObjectInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go index 7491df330..10ccdc66b 100644 --- a/vendor/github.com/minio/minio-go/api-get-policy.go +++ b/vendor/github.com/minio/minio-go/api-get-policy.go @@ -23,15 +23,16 @@ import ( "net/url" "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio-go/pkg/s3utils" ) // GetBucketPolicy - get bucket policy at a given path. func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return policy.BucketPolicyNone, err } - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { return policy.BucketPolicyNone, err } policyInfo, err := c.getBucketPolicy(bucketName) @@ -48,10 +49,10 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p // ListBucketPolicies - list all policies for a given prefix and all its children. func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) { // Input validation. 
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return map[string]policy.BucketPolicy{}, err } - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { return map[string]policy.BucketPolicy{}, err } policyInfo, err := c.getBucketPolicy(bucketName) diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go index 6a228179e..f9d178413 100644 --- a/vendor/github.com/minio/minio-go/api-list.go +++ b/vendor/github.com/minio/minio-go/api-list.go @@ -21,6 +21,8 @@ import ( "net/http" "net/url" "strings" + + "github.com/minio/minio-go/pkg/s3utils" ) // ListBuckets list all buckets owned by this authenticated user. @@ -69,7 +71,7 @@ func (c Client) ListBuckets() ([]BucketInfo, error) { // // Create a done channel. // doneCh := make(chan struct{}) // defer close(doneCh) -// // Recurively list all objects in 'mytestbucket' +// // Recursively list all objects in 'mytestbucket' // recursive := true // for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) { // fmt.Println(message) @@ -87,7 +89,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d // Return object owner information by default fetchOwner := true // Validate bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -95,7 +97,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d return objectStatCh } // Validate incoming object prefix. - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -170,11 +172,11 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d // ?max-keys - Sets the maximum number of keys returned in the response body. func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { // Validate bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketV2Result{}, err } // Validate object prefix. - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { return ListBucketV2Result{}, err } // Get resources properly escaped and lined up before @@ -266,7 +268,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don delimiter = "" } // Validate bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -274,7 +276,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don return objectStatCh } // Validate incoming object prefix. 
- if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -350,11 +352,11 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // ?max-keys - Sets the maximum number of keys returned in the response body. func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) { // Validate bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketResult{}, err } // Validate object prefix. - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { return ListBucketResult{}, err } // Get resources properly escaped and lined up before @@ -442,7 +444,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive delimiter = "" } // Validate bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectMultipartStatCh) objectMultipartStatCh <- ObjectMultipartInfo{ Err: err, @@ -450,7 +452,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive return objectMultipartStatCh } // Validate incoming object prefix. - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { defer close(objectMultipartStatCh) objectMultipartStatCh <- ObjectMultipartInfo{ Err: err, diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go index cbea1c6da..43f078cd6 100644 --- a/vendor/github.com/minio/minio-go/api-notification.go +++ b/vendor/github.com/minio/minio-go/api-notification.go @@ -30,7 +30,7 @@ import ( // GetBucketNotification - get bucket notification at a given path. func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return BucketNotification{}, err } notification, err := c.getBucketNotification(bucketName) @@ -140,7 +140,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even defer close(notificationInfoCh) // Validate the bucket name. 
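// Illustrative usage sketch (not from the patched sources) for
// ListenBucketNotification; the trailing events-slice and done-channel
// parameters are assumed from this library version's public API, and `clnt`
// plus the event names are placeholders.
//
//     doneCh := make(chan struct{})
//     defer close(doneCh)
//     for info := range clnt.ListenBucketNotification("mybucket", "photos/", ".jpg",
//         []string{"s3:ObjectCreated:*"}, doneCh) {
//         if info.Err != nil {
//             log.Println("notification error:", info.Err)
//             break
//         }
//         log.Println(info)
//     }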
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { notificationInfoCh <- NotificationInfo{ Err: err, } diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go index f9d05ab9b..8cfcb55fb 100644 --- a/vendor/github.com/minio/minio-go/api-presigned.go +++ b/vendor/github.com/minio/minio-go/api-presigned.go @@ -42,10 +42,10 @@ func (c Client) presignURL(method string, bucketName string, objectName string, if method == "" { return nil, ErrInvalidArgument("method cannot be empty.") } - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } if err := isValidExpiry(expires); err != nil { @@ -122,21 +122,38 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str return nil, nil, err } + // Get credentials from the configured credentials provider. + credValues, err := c.credsProvider.Get() + if err != nil { + return nil, nil, err + } + + var ( + signerType = credValues.SignerType + sessionToken = credValues.SessionToken + accessKeyID = credValues.AccessKeyID + secretAccessKey = credValues.SecretAccessKey + ) + + if signerType.IsAnonymous() { + return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials") + } + // Keep time. t := time.Now().UTC() // For signature version '2' handle here. - if c.signature.isV2() { + if signerType.IsV2() { policyBase64 := p.base64() p.formData["policy"] = policyBase64 // For Google endpoint set this value to be 'GoogleAccessId'. if s3utils.IsGoogleEndpoint(c.endpointURL) { - p.formData["GoogleAccessId"] = c.accessKeyID + p.formData["GoogleAccessId"] = accessKeyID } else { // For all other endpoints set this value to be 'AWSAccessKeyId'. - p.formData["AWSAccessKeyId"] = c.accessKeyID + p.formData["AWSAccessKeyId"] = accessKeyID } // Sign the policy. - p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey) + p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey) return u, p.formData, nil } @@ -159,7 +176,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str } // Add a credential policy. - credential := s3signer.GetCredential(c.accessKeyID, location, t) + credential := s3signer.GetCredential(accessKeyID, location, t) if err = p.addNewPolicy(policyCondition{ matchType: "eq", condition: "$x-amz-credential", @@ -168,13 +185,27 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str return nil, nil, err } + if sessionToken != "" { + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-security-token", + value: sessionToken, + }); err != nil { + return nil, nil, err + } + } + // Get base64 encoded policy. policyBase64 := p.base64() + // Fill in the form data. 
p.formData["policy"] = policyBase64 p.formData["x-amz-algorithm"] = signV4Algorithm p.formData["x-amz-credential"] = credential p.formData["x-amz-date"] = t.Format(iso8601DateFormat) - p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location) + if sessionToken != "" { + p.formData["x-amz-security-token"] = sessionToken + } + p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) return u, p.formData, nil } diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go index 001da6de3..fd37dc192 100644 --- a/vendor/github.com/minio/minio-go/api-put-bucket.go +++ b/vendor/github.com/minio/minio-go/api-put-bucket.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,18 +19,14 @@ package minio import ( "bytes" - "encoding/base64" - "encoding/hex" "encoding/json" "encoding/xml" "fmt" - "io/ioutil" "net/http" "net/url" - "path" "github.com/minio/minio-go/pkg/policy" - "github.com/minio/minio-go/pkg/s3signer" + "github.com/minio/minio-go/pkg/s3utils" ) /// Bucket operations @@ -50,95 +47,23 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) { }() // Validate the input arguments. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { return err } // If location is empty, treat is a default region 'us-east-1'. if location == "" { location = "us-east-1" - } - - // Try creating bucket with the provided region, in case of - // invalid region error let's guess the appropriate region - // from S3 API headers - - // Create a done channel to control 'newRetryTimer' go routine. - doneCh := make(chan struct{}, 1) - - // Indicate to our routine to exit cleanly upon return. - defer close(doneCh) - - // Blank indentifier is kept here on purpose since 'range' without - // blank identifiers is only supported since go1.4 - // https://golang.org/doc/go1.4#forrange. - for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { - // Initialize the makeBucket request. - req, err := c.makeBucketRequest(bucketName, location) - if err != nil { - return err + // For custom region clients, default + // to custom region instead not 'us-east-1'. + if c.region != "" { + location = c.region } - - // Execute make bucket request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - err := httpRespToErrorResponse(resp, bucketName, "") - errResp := ToErrorResponse(err) - if errResp.Code == "InvalidRegion" && errResp.Region != "" { - // Fetch bucket region found in headers - // of S3 error response, attempt bucket - // create again. - location = errResp.Region - continue - } - // Nothing to retry, fail. - return err - } - - // Control reaches here when bucket create was successful, - // break out. - break } - - // Success. - return nil -} - -// Low level wrapper API For makeBucketRequest. -func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) { - // Validate input arguments. 
- if err := isValidBucketName(bucketName); err != nil { - return nil, err - } - - // In case of Amazon S3. The make bucket issued on - // already existing bucket would fail with - // 'AuthorizationMalformed' error if virtual style is - // used. So we default to 'path style' as that is the - // preferred method here. The final location of the - // 'bucket' is provided through XML LocationConstraint - // data with the request. - targetURL := c.endpointURL - targetURL.Path = path.Join(bucketName, "") + "/" - - // get a new HTTP request for the method. - req, err := http.NewRequest("PUT", targetURL.String(), nil) - if err != nil { - return nil, err - } - - // set UserAgent for the request. - c.setUserAgent(req) - - // set sha256 sum for signature calculation only with - // signature version '4'. - if c.signature.isV4() { - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) + // PUT bucket request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + bucketLocation: location, } // If location is not 'us-east-1' create bucket location config. @@ -148,30 +73,29 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req var createBucketConfigBytes []byte createBucketConfigBytes, err = xml.Marshal(createBucketConfig) if err != nil { - return nil, err + return err } - createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes) - req.Body = ioutil.NopCloser(createBucketConfigBuffer) - req.ContentLength = int64(len(createBucketConfigBytes)) - // Set content-md5. - req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes))) - if c.signature.isV4() { - // Set sha256. - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes))) + reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes) + reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes) + reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) + reqMetadata.contentLength = int64(len(createBucketConfigBytes)) + } + + // Execute PUT to create a new bucket. + resp, err := c.executeMethod("PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") } } - // Sign the request. - if c.signature.isV4() { - // Signature calculated for MakeBucket request should be for 'us-east-1', - // regardless of the bucket's location constraint. - req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") - } else if c.signature.isV2() { - req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) - } - - // Return signed request. - return req, nil + // Success. + return nil } // SetBucketPolicy set the access permissions on an existing bucket. @@ -184,10 +108,10 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req // writeonly - anonymous put/delete access to a given object prefix. func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error { // Input validation. 
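// Illustrative sketch (not part of the vendored change) of the new MakeBucket
// defaulting behavior: for a client constructed with a custom region, an empty
// location now falls back to that region instead of "us-east-1". Endpoint and
// keys are placeholders.
//
//     clnt, err := minio.NewWithRegion("s3.amazonaws.com", "ACCESSKEY", "SECRETKEY", true, "eu-west-1")
//     if err != nil {
//         log.Fatalln(err)
//     }
//     // Empty location: the bucket is created in "eu-west-1", the client's region.
//     if err = clnt.MakeBucket("mybucket", ""); err != nil {
//         log.Fatalln(err)
//     }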
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { return err } @@ -216,7 +140,7 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo // Saves a new bucket policy. func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } @@ -262,7 +186,7 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces // Removes all policies on a bucket. func (c Client) removeBucketPolicy(bucketName string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } // Get resources properly escaped and lined up before @@ -286,7 +210,7 @@ func (c Client) removeBucketPolicy(bucketName string) error { // SetBucketNotification saves a new bucket notification. func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go index 68a459f4a..1d9e70ca0 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/api-put-object-common.go @@ -23,6 +23,8 @@ import ( "io/ioutil" "math" "os" + + "github.com/minio/minio-go/pkg/s3utils" ) // Verify if reader is *os.File @@ -168,10 +170,10 @@ func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, // or initiate a new request to fetch a new upload id. func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return "", err } diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go index 56978d427..d9e2f1b57 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-copy.go +++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go @@ -25,10 +25,10 @@ import ( // CopyObject - copy a source object into a new object with the provided name in the provided bucket func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error { // Input validation. 
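// Illustrative usage sketch (not from the patched sources) for SetBucketPolicy
// and GetBucketPolicy above; policy.BucketPolicyReadOnly is assumed from the
// policy package (only BucketPolicyNone appears in this patch), and `clnt` is a
// configured client.
//
//     if err := clnt.SetBucketPolicy("mybucket", "public/", policy.BucketPolicyReadOnly); err != nil {
//         log.Fatalln(err)
//     }
//     current, err := clnt.GetBucketPolicy("mybucket", "public/")
//     if err != nil {
//         log.Fatalln(err)
//     }
//     log.Println("policy on public/ is now:", current)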
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } if objectSource == "" { diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go index 09fec769d..4fa8f86d7 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-file.go +++ b/vendor/github.com/minio/minio-go/api-put-object-file.go @@ -35,10 +35,10 @@ import ( // FPutObject - Create an object in a bucket, with contents from file at filePath. func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } @@ -77,17 +77,8 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) objMetadata["Content-Type"] = []string{contentType} // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs. - // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers. if s3utils.IsGoogleEndpoint(c.endpointURL) { - if fileSize > int64(maxSinglePutObjectSize) { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize), - Key: objectName, - BucketName: bucketName, - } - } - // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size. + // Do not compute MD5 for Google Cloud Storage. return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil) } @@ -125,10 +116,10 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) // specific sections and not having to create temporary files. func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } @@ -182,7 +173,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe hashAlgos := make(map[string]hash.Hash) hashSums := make(map[string][]byte) hashAlgos["md5"] = md5.New() - if c.signature.isV4() && !c.secure { + if c.overrideSignerType.IsV4() && !c.secure { hashAlgos["sha256"] = sha256.New() } diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go index 3a299f65b..0dc8a8441 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -32,6 +32,8 @@ import ( "sort" "strconv" "strings" + + "github.com/minio/minio-go/pkg/s3utils" ) // Comprehensive put object operation involving multipart resumable uploads. 
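// Illustrative sketch (not part of the vendored change) of FPutObject against a
// Google Cloud Storage endpoint after this change: the client-side 5GiB guard
// is removed and the upload goes through putObjectNoChecksum as a single PUT,
// leaving any size limits to the server. Keys and paths are placeholders.
//
//     gcs, err := minio.New("storage.googleapis.com", "ACCESSKEY", "SECRETKEY", true)
//     if err != nil {
//         log.Fatalln(err)
//     }
//     n, err := gcs.FPutObject("mybucket", "backup.tar.gz", "/tmp/backup.tar.gz", "application/gzip")
//     if err != nil {
//         log.Fatalln(err)
//     }
//     log.Println("uploaded", n, "bytes")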
@@ -74,10 +76,10 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } @@ -176,10 +178,10 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string // special case where size is unknown i.e '-1'. func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } @@ -213,7 +215,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i hashSums := make(map[string][]byte) hashAlgos := make(map[string]hash.Hash) hashAlgos["md5"] = md5.New() - if c.signature.isV4() && !c.secure { + if c.overrideSignerType.IsV4() && !c.secure { hashAlgos["sha256"] = sha256.New() } @@ -305,10 +307,10 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return initiateMultipartUploadResult{}, err } @@ -359,10 +361,10 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData // uploadPart - Uploads a part in a multipart upload. func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectPart{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectPart{}, err } if size > maxPartSize { @@ -419,10 +421,10 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) { // Input validation. 
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return completeMultipartUploadResult{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return completeMultipartUploadResult{}, err } diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go index f3844127e..fc4c40ad4 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-progress.go +++ b/vendor/github.com/minio/minio-go/api-put-object-progress.go @@ -20,6 +20,7 @@ import ( "io" "strings" + "github.com/minio/minio-go/pkg/credentials" "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) @@ -57,10 +58,10 @@ func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Read // PutObjectWithMetadata - with metadata. func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } if reader == nil { @@ -82,20 +83,8 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R } // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. - // So we fall back to single PUT operation with the maximum limit of 5GiB. if s3utils.IsGoogleEndpoint(c.endpointURL) { - if size <= -1 { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.", - Key: objectName, - BucketName: bucketName, - } - } - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size. + // Do not compute MD5 for Google Cloud Storage. return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress) } @@ -103,6 +92,7 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R if size < minPartSize && size >= 0 { return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress) } + // For all sizes greater than 5MiB do multipart. n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress) if err != nil { @@ -143,8 +133,8 @@ func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, re BucketName: bucketName, } } - // This method should return error with signature v2 minioClient. - if c.signature.isV2() { + + if c.overrideSignerType.IsV2() { return 0, ErrorResponse{ Code: "NotImplemented", Message: "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2", @@ -173,8 +163,8 @@ func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, re return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress) } - // Set signature type to streaming signature v4. - c.signature = SignatureV4Streaming + // Set streaming signature. 
+ c.overrideSignerType = credentials.SignatureV4Streaming if size < minPartSize && size >= 0 { return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go index ebf422638..dda81e7b1 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-readat.go +++ b/vendor/github.com/minio/minio-go/api-put-object-readat.go @@ -25,6 +25,8 @@ import ( "io" "io/ioutil" "sort" + + "github.com/minio/minio-go/pkg/s3utils" ) // uploadedPartRes - the response received from a part upload. @@ -65,10 +67,10 @@ func shouldUploadPartReadAt(objPart ObjectPart, uploadReq uploadPartReq) bool { // stream after uploading all the contents successfully. func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } @@ -146,7 +148,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read hashSums := make(map[string][]byte) hashAlgos := make(map[string]hash.Hash) hashAlgos["md5"] = md5.New() - if c.signature.isV4() && !c.secure { + if c.overrideSignerType.IsV4() && !c.secure { hashAlgos["sha256"] = sha256.New() } diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go index e218075df..8ff2f2ab5 100644 --- a/vendor/github.com/minio/minio-go/api-put-object.go +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -17,7 +17,6 @@ package minio import ( - "bytes" "crypto/md5" "crypto/sha256" "hash" @@ -28,6 +27,8 @@ import ( "reflect" "runtime" "strings" + + "github.com/minio/minio-go/pkg/s3utils" ) // toInt - converts go value to its integer representation based @@ -109,14 +110,24 @@ func getReaderSize(reader io.Reader) (size int64, err error) { case "|0", "|1": return } - size = st.Size() + var pos int64 + pos, err = v.Seek(0, 1) // SeekCurrent. + if err != nil { + return -1, err + } + size = st.Size() - pos case *Object: var st ObjectInfo st, err = v.Stat() if err != nil { return } - size = st.Size + var pos int64 + pos, err = v.Seek(0, 1) // SeekCurrent. + if err != nil { + return -1, err + } + size = st.Size - pos } } // Returns the size here. @@ -151,14 +162,17 @@ func (c Client) PutObject(bucketName, objectName string, reader io.Reader, conte // is used for Google Cloud Storage since Google's multipart API is not S3 compatible. func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. 
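// Illustrative sketch (not from the patched sources) of the getReaderSize change
// above: for *os.File and *Object readers the size is now measured from the
// current seek offset, so a pre-seeked file uploads only its remaining bytes.
// `clnt` and the paths are placeholders.
//
//     f, err := os.Open("/tmp/payload.bin")
//     if err != nil {
//         log.Fatalln(err)
//     }
//     defer f.Close()
//     if _, err = f.Seek(1024, 0); err != nil { // skip a 1KiB header
//         log.Fatalln(err)
//     }
//     // Only (file size - 1024) bytes are read and uploaded.
//     n, err := clnt.PutObject("mybucket", "payload.bin", f, "application/octet-stream")
//     if err != nil {
//         log.Fatalln(err)
//     }
//     log.Println("uploaded", n, "bytes")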
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + if size > 0 { + readerAt, ok := reader.(io.ReaderAt) + if ok { + reader = io.NewSectionReader(readerAt, 0, size) + } } // Update progress reader appropriately to the latest offset as we @@ -181,10 +195,10 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea // This special function is used as a fallback when multipart upload fails. func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } if size > maxSinglePutObjectSize { @@ -200,38 +214,29 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, hashAlgos := make(map[string]hash.Hash) hashSums := make(map[string][]byte) hashAlgos["md5"] = md5.New() - if c.signature.isV4() && !c.secure { + if c.overrideSignerType.IsV4() && !c.secure { hashAlgos["sha256"] = sha256.New() } - if size <= minPartSize { - // Initialize a new temporary buffer. - tmpBuffer := new(bytes.Buffer) - size, err = hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, size) - reader = bytes.NewReader(tmpBuffer.Bytes()) - tmpBuffer.Reset() - } else { - // Initialize a new temporary file. - var tmpFile *tempFile - tmpFile, err = newTempFile("single$-putobject-single") - if err != nil { - return 0, err - } - defer tmpFile.Close() - size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size) - if err != nil { - return 0, err - } - // Seek back to beginning of the temporary file. - if _, err = tmpFile.Seek(0, 0); err != nil { - return 0, err - } - reader = tmpFile + // Initialize a new temporary file. + tmpFile, err := newTempFile("single$-putobject-single") + if err != nil { + return 0, err } + defer tmpFile.Close() + + size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size) // Return error if its not io.EOF. if err != nil && err != io.EOF { return 0, err } + + // Seek back to beginning of the temporary file. + if _, err = tmpFile.Seek(0, 0); err != nil { + return 0, err + } + reader = tmpFile + // Execute put object. st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData) if err != nil { @@ -253,21 +258,13 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, // NOTE: You must have WRITE permissions on a bucket to add an object to it. func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) { // Input validation. 
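// Illustrative usage sketch (not part of the vendored change) for
// PutObjectWithMetadata, which feeds into the putObjectSingle /
// putObjectNoChecksum paths above; the metadata keys shown ("Content-Type" plus
// an x-amz-meta-* user header) are illustrative.
//
//     data := strings.NewReader("hello world")
//     meta := map[string][]string{
//         "Content-Type":      {"text/plain"},
//         "X-Amz-Meta-Author": {"docs-team"},
//     }
//     n, err := clnt.PutObjectWithMetadata("mybucket", "hello.txt", data, meta, nil)
//     if err != nil {
//         log.Fatalln(err)
//     }
//     log.Println("uploaded", n, "bytes")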
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, err } - if size <= -1 { - return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName) - } - - if size > maxSinglePutObjectSize { - return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Set headers. customHeader := make(http.Header) diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go index 73790f002..88dd48661 100644 --- a/vendor/github.com/minio/minio-go/api-remove.go +++ b/vendor/github.com/minio/minio-go/api-remove.go @@ -22,6 +22,8 @@ import ( "io" "net/http" "net/url" + + "github.com/minio/minio-go/pkg/s3utils" ) // RemoveBucket deletes the bucket name. @@ -30,7 +32,7 @@ import ( // in the bucket must be deleted before successfully attempting this request. func (c Client) RemoveBucket(bucketName string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } // Execute DELETE on bucket. @@ -57,10 +59,10 @@ func (c Client) RemoveBucket(bucketName string) error { // RemoveObject remove an object from a bucket. func (c Client) RemoveObject(bucketName, objectName string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } // Execute DELETE on objectName. @@ -132,7 +134,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan errorCh := make(chan RemoveObjectError, 1) // Validate if bucket name is valid. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(errorCh) errorCh <- RemoveObjectError{ Err: err, @@ -212,10 +214,10 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan // RemoveIncompleteUpload aborts an partially uploaded object. func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } // Find multipart upload id of the object to be aborted. @@ -237,10 +239,10 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { // uploadID, all previously uploaded parts are deleted. func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error { // Input validation. 
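// Illustrative usage sketch (not from the patched sources) for the batched
// RemoveObjects API above: object names are streamed in on a channel and any
// failures come back on the returned RemoveObjectError channel. `clnt` and the
// object names are placeholders.
//
//     objectsCh := make(chan string)
//     go func() {
//         defer close(objectsCh)
//         for _, name := range []string{"logs/2017-01.gz", "logs/2017-02.gz"} {
//             objectsCh <- name
//         }
//     }()
//     for rmErr := range clnt.RemoveObjects("mybucket", objectsCh) {
//         log.Println("failed to remove:", rmErr.Err)
//     }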
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go index 5b3dfe1b4..d4fb9c65c 100644 --- a/vendor/github.com/minio/minio-go/api-stat.go +++ b/vendor/github.com/minio/minio-go/api-stat.go @@ -28,7 +28,7 @@ import ( // BucketExists verify if bucket exists and you have permission to access it. func (c Client) BucketExists(bucketName string) (bool, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return false, err } @@ -80,10 +80,10 @@ func extractObjMetadata(header http.Header) http.Header { // StatObject verifies if object exists and you have permission to access. func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, err } reqHeaders := NewHeadReqHeaders() @@ -93,10 +93,10 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) { // Lower level API for statObject supporting pre-conditions and range headers. func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, err } @@ -126,12 +126,13 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") md5sum = strings.TrimSuffix(md5sum, "\"") - // Content-Length is not valid for Google Cloud Storage, do not verify. + // Parse content length is exists var size int64 = -1 - if !s3utils.IsGoogleEndpoint(c.endpointURL) { - // Parse content length. - size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + contentLengthStr := resp.Header.Get("Content-Length") + if contentLengthStr != "" { + size, err = strconv.ParseInt(contentLengthStr, 10, 64) if err != nil { + // Content-Length is not valid return ObjectInfo{}, ErrorResponse{ Code: "InternalError", Message: "Content-Length is invalid. " + reportIssue, diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index 46d69214a..e2479805a 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -25,16 +26,17 @@ import ( "io" "io/ioutil" "math/rand" + "net" "net/http" "net/http/httputil" "net/url" "os" - "regexp" "runtime" "strings" "sync" "time" + "github.com/minio/minio-go/pkg/credentials" "github.com/minio/minio-go/pkg/s3signer" "github.com/minio/minio-go/pkg/s3utils" ) @@ -46,14 +48,11 @@ type Client struct { // Parsed endpoint url provided by the user. endpointURL url.URL - // AccessKeyID required for authorized requests. - accessKeyID string - // SecretAccessKey required for authorized requests. - secretAccessKey string - // Choose a signature type if necessary. - signature SignatureType - // Set to 'true' if Client has no access and secret keys. - anonymous bool + // Holds various credential providers. + credsProvider *credentials.Credentials + + // Custom signerType value overrides all credentials. + overrideSignerType credentials.SignatureType // User supplied. appInfo struct { @@ -85,7 +84,7 @@ type Client struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "2.0.4" + libraryVersion = "2.1.0" ) // User Agent should always following the below style. @@ -100,58 +99,58 @@ const ( // NewV2 - instantiate minio client with Amazon S3 signature version // '2' compatibility. func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) + creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "") if err != nil { return nil, err } - - // Set to use signature version '2'. - clnt.signature = SignatureV2 + clnt.overrideSignerType = credentials.SignatureV2 return clnt, nil } // NewV4 - instantiate minio client with Amazon S3 signature version // '4' compatibility. func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) + creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "") if err != nil { return nil, err } - - // Set to use signature version '4'. - clnt.signature = SignatureV4 + clnt.overrideSignerType = credentials.SignatureV4 return clnt, nil } -// New - instantiate minio client Client, adds automatic verification of signature. +// New - instantiate minio client, adds automatic verification of signature. func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - return NewWithRegion(endpoint, accessKeyID, secretAccessKey, secure, "") + creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "") + if err != nil { + return nil, err + } + // Google cloud storage should be set to signature V2, force it if not. + if s3utils.IsGoogleEndpoint(clnt.endpointURL) { + clnt.overrideSignerType = credentials.SignatureV2 + } + // If Amazon S3 set to signature v4. + if s3utils.IsAmazonEndpoint(clnt.endpointURL) { + clnt.overrideSignerType = credentials.SignatureV4 + } + return clnt, nil +} + +// NewWithCredentials - instantiate minio client with credentials provider +// for retrieving credentials from various credentials provider such as +// IAM, File, Env etc. +func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { + return privateNew(endpoint, creds, secure, region) } // NewWithRegion - instantiate minio client, with region configured. 
Unlike New(), // NewWithRegion avoids bucket-location lookup operations and it is slightly faster. // Use this function when if your application deals with single region. func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) { - clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) - if err != nil { - return nil, err - } - - // Google cloud storage should be set to signature V2, force it if not. - if s3utils.IsGoogleEndpoint(clnt.endpointURL) { - clnt.signature = SignatureV2 - } - - // If Amazon S3 set to signature v2.n - if s3utils.IsAmazonEndpoint(clnt.endpointURL) { - clnt.signature = SignatureV4 - } - - // Sets custom region, if region is empty bucket location cache is used automatically. - clnt.region = region - - // Success.. - return clnt, nil + creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") + return privateNew(endpoint, creds, secure, region) } // lockedRandSource provides protected rand source, implements rand.Source interface. @@ -188,7 +187,7 @@ func redirectHeaders(req *http.Request, via []*http.Request) error { return nil } -func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { +func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { // construct endpoint. endpointURL, err := getEndpointURL(endpoint, secure) if err != nil { @@ -197,8 +196,9 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl // instantiate new Client. clnt := new(Client) - clnt.accessKeyID = accessKeyID - clnt.secretAccessKey = secretAccessKey + + // Save the credentials. + clnt.credsProvider = creds // Remember whether we are using https or not clnt.secure = secure @@ -212,7 +212,10 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl CheckRedirect: redirectHeaders, } - // Instantiae bucket location cache. + // Sets custom region, if region is empty bucket location cache is used automatically. + clnt.region = region + + // Instantiate bucket location cache. clnt.bucketLocCache = newBucketLocationCache() // Introduce a new locked random seed. @@ -306,40 +309,6 @@ type requestMetadata struct { contentMD5Bytes []byte } -// regCred matches credential string in HTTP header -var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/") - -// regCred matches signature string in HTTP header -var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)") - -// Filter out signature value from Authorization header. -func (c Client) filterSignature(req *http.Request) { - if _, ok := req.Header["Authorization"]; !ok { - return - } - // Handle if Signature V2. - if c.signature.isV2() { - // Set a temporary redacted auth - req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**") - return - } - - /// Signature V4 authorization header. - - // Save the original auth. - origAuth := req.Header.Get("Authorization") - // Strip out accessKeyID from: - // Credential=////aws4_request - newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") - - // Strip out 256-bit signature from: Signature=<256-bit signature> - newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") - - // Set a temporary redacted auth - req.Header.Set("Authorization", newAuth) - return -} - // dumpHTTP - dump HTTP request and response. func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { // Starts http dump. 
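// Illustrative sketch (not part of the vendored change) of the new
// credentials-provider based constructor: the static keys previously passed to
// New()/NewV4() can be wrapped in a credentials.Credentials value and handed to
// NewWithCredentials. Endpoint, keys and region are placeholders.
//
//     creds := credentials.NewStaticV4("ACCESSKEY", "SECRETKEY", "")
//     clnt, err := minio.NewWithCredentials("s3.amazonaws.com", creds, true, "us-east-1")
//     if err != nil {
//         log.Fatalln(err)
//     }
//     buckets, err := clnt.ListBuckets()
//     if err != nil {
//         log.Fatalln(err)
//     }
//     log.Println(buckets)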
@@ -349,7 +318,10 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { } // Filter out Signature field from Authorization header. - c.filterSignature(req) + origAuth := req.Header.Get("Authorization") + if origAuth != "" { + req.Header.Set("Authorization", redactSignature(origAuth)) + } // Only display request header. reqTrace, err := httputil.DumpRequestOut(req, false) @@ -553,9 +525,14 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt // Bucket region if set in error response and the error // code dictates invalid region, we can retry the request // with the new region. - if errResponse.Code == "InvalidRegion" && errResponse.Region != "" { - c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) - continue // Retry. + // + // Additionally we should only retry if bucketLocation and custom + // region is empty. + if metadata.bucketLocation == "" && c.region == "" { + if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" { + c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) + continue // Retry. + } } // Verify if error response code is retryable. @@ -581,29 +558,19 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R method = "POST" } - // Default all requests to "us-east-1" or "cn-north-1" (china region) - location := "us-east-1" - if s3utils.IsAmazonChinaEndpoint(c.endpointURL) { - // For china specifically we need to set everything to - // cn-north-1 for now, there is no easier way until AWS S3 - // provides a cleaner compatible API across "us-east-1" and - // China region. - location = "cn-north-1" - } - + var location string // Gather location only if bucketName is present. - if metadata.bucketName != "" { + if metadata.bucketName != "" && metadata.bucketLocation == "" { location, err = c.getBucketLocation(metadata.bucketName) if err != nil { return nil, err } + } else { + location = metadata.bucketLocation } - // Save location. - metadata.bucketLocation = location - // Construct a new target URL. - targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues) + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues) if err != nil { return nil, err } @@ -614,20 +581,41 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R return nil, err } - // Anonymous request. - anonymous := c.accessKeyID == "" || c.secretAccessKey == "" + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.Get() + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } // Generate presign url if needed, return right here. 
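// User-level sketch (not from the patched sources) of the presign path handled
// just below: with signing credentials a presigned URL is produced, while
// anonymous credentials now fail with ErrInvalidArgument. PresignedGetObject's
// signature (object name, expiry, optional request parameters) is assumed from
// this library version's public API; `clnt` is a configured client.
//
//     presignedURL, err := clnt.PresignedGetObject("mybucket", "myobject", 15*time.Minute, nil)
//     if err != nil {
//         log.Fatalln(err) // e.g. anonymous credentials cannot presign.
//     }
//     log.Println(presignedURL)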
if metadata.expires != 0 && metadata.presignURL { - if anonymous { - return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.") + if signerType.IsAnonymous() { + return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") } - if c.signature.isV2() { + if signerType.IsV2() { // Presign URL with signature v2. - req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires) - } else if c.signature.isV4() { + req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires) + } else if signerType.IsV4() { // Presign URL with signature v4. - req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires) + req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) } return req, nil } @@ -640,9 +628,11 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R req.Header.Set(k, v[0]) } - // set incoming content-length. - if metadata.contentLength > 0 { - req.ContentLength = metadata.contentLength + // Set incoming content-length. + req.ContentLength = metadata.contentLength + if req.ContentLength <= -1 { + // For unknown content length, we upload using transfer-encoding: chunked. + req.TransferEncoding = []string{"chunked"} } // set md5Sum for content protection. @@ -650,17 +640,18 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) } - if anonymous { + // For anonymous requests just return. + if signerType.IsAnonymous() { return req, nil - } // Sign the request for all authenticated requests. + } switch { - case c.signature.isV2(): + case signerType.IsV2(): // Add signature version '2' authorization header. - req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) - case c.signature.isStreamingV4() && method == "PUT": - req = s3signer.StreamingSignV4(req, c.accessKeyID, - c.secretAccessKey, location, metadata.contentLength, time.Now().UTC()) + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) + case signerType.IsStreamingV4() && method == "PUT": + req = s3signer.StreamingSignV4(req, accessKeyID, + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) default: // Set sha256 sum for signature calculation only with signature version '4'. shaHeader := unsignedPayload @@ -670,7 +661,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R req.Header.Set("X-Amz-Content-Sha256", shaHeader) // Add signature version '4' authorization header. - req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location) + req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) } // Return request. @@ -701,14 +692,26 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html host = c.s3AccelerateEndpoint } else { - // Fetch new host based on the bucket location. - host = getS3Endpoint(bucketLocation) + // Do not change the host if the endpoint URL is a FIPS S3 endpoint. + if !s3utils.IsAmazonFIPSGovCloudEndpoint(c.endpointURL) { + // Fetch new host based on the bucket location. + host = getS3Endpoint(bucketLocation) + } } } // Save scheme. scheme := c.endpointURL.Scheme + // Strip port 80 and 443 so we won't send these ports in Host header. 
+ // The reason is that browsers and curl automatically remove :80 and :443 + // with the generated presigned urls, then a signature mismatch error. + if h, p, err := net.SplitHostPort(host); err == nil { + if scheme == "http" && p == "80" || scheme == "https" && p == "443" { + host = h + } + } + urlStr := scheme + "://" + host + "/" // Make URL only if bucketName is available, otherwise use the // endpoint URL. @@ -732,13 +735,16 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que } } } + // If there are any query values, add them to the end. if len(queryValues) > 0 { urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) } + u, err := url.Parse(urlStr) if err != nil { return nil, err } + return u, nil } diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml index be746a7bf..4f5c1b390 100644 --- a/vendor/github.com/minio/minio-go/appveyor.yml +++ b/vendor/github.com/minio/minio-go/appveyor.yml @@ -17,6 +17,8 @@ install: - go version - go env - go get -u github.com/golang/lint/golint + - go get -u github.com/go-ini/ini + - go get -u github.com/minio/go-homedir - go get -u github.com/remyoudompheng/go-misc/deadcode - go get -u github.com/gordonklaus/ineffassign diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go index 28799c69d..8592e5cdf 100644 --- a/vendor/github.com/minio/minio-go/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/bucket-cache.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,6 +24,7 @@ import ( "path" "sync" + "github.com/minio/minio-go/pkg/credentials" "github.com/minio/minio-go/pkg/s3signer" "github.com/minio/minio-go/pkg/s3utils" ) @@ -71,7 +73,7 @@ func (r *bucketLocationCache) Delete(bucketName string) { // GetBucketLocation - get location for the bucket name from location cache, if not // fetch freshly by making a new request. func (c Client) GetBucketLocation(bucketName string) (string, error) { - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } return c.getBucketLocation(bucketName) @@ -80,7 +82,7 @@ func (c Client) GetBucketLocation(bucketName string) (string, error) { // getBucketLocation - Get location for the bucketName from location map cache, if not // fetch freshly by making a new request. func (c Client) getBucketLocation(bucketName string) (string, error) { - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } @@ -90,6 +92,12 @@ func (c Client) getBucketLocation(bucketName string) (string, error) { // provides a cleaner compatible API across "us-east-1" and // China region. return "cn-north-1", nil + } else if s3utils.IsAmazonGovCloudEndpoint(c.endpointURL) { + // For us-gov specifically we need to set everything to + // us-gov-west-1 for now, there is no easier way until AWS S3 + // provides a cleaner compatible API across "us-east-1" and + // Gov cloud region. + return "us-gov-west-1", nil } // Region set then no need to fetch bucket location. 
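// Illustrative sketch (not part of the vendored change) of the location
// shortcuts above: for AWS China and the newly added GovCloud case the region is
// answered locally, without a round trip to fetch the bucket location. The
// GovCloud endpoint host and keys are placeholders.
//
//     gov, err := minio.New("s3-us-gov-west-1.amazonaws.com", "ACCESSKEY", "SECRETKEY", true)
//     if err != nil {
//         log.Fatalln(err)
//     }
//     location, err := gov.GetBucketLocation("mybucket")
//     if err != nil {
//         log.Fatalln(err)
//     }
//     log.Println(location) // expected: "us-gov-west-1"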
@@ -181,8 +189,33 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro // Set UserAgent for the request. c.setUserAgent(req) + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.Get() + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + // Set sha256 sum for signature calculation only with signature version '4'. - if c.signature.isV4() { + switch { + case signerType.IsV4(): var contentSha256 string if c.secure { contentSha256 = unsignedPayload @@ -190,13 +223,10 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro contentSha256 = hex.EncodeToString(sum256([]byte{})) } req.Header.Set("X-Amz-Content-Sha256", contentSha256) + req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") + case signerType.IsV2(): + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) } - // Sign the request. - if c.signature.isV4() { - req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") - } else if c.signature.isV2() { - req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) - } return req, nil } diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go new file mode 100644 index 000000000..6b0e57440 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go @@ -0,0 +1,89 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "fmt" + +// A Chain will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The Chain provides a way of chaining multiple providers together +// which will pick the first available using priority order of the +// Providers in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error, collecting all errors from all providers. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvAWSS3{}, +// &credentials.EnvMinio{}, +// }) +// +// // Usage of ChainCredentials. 
+// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") +// if err != nil { +// log.Fatalln(err) +// } +// +type Chain struct { + Providers []Provider + curr Provider +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return New(&Chain{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *Chain) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err != nil { + errs = append(errs, err) + continue + } // Success. + c.curr = p + return creds, nil + } + c.curr = nil + return Value{}, fmt.Errorf("No valid providers found %v", errs) +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *Chain) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample new file mode 100644 index 000000000..130746f4b --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample @@ -0,0 +1,17 @@ +{ + "version": "8", + "hosts": { + "play": { + "url": "https://play.minio.io:9000", + "accessKey": "Q3AM3UQ867SPQQA43P2F", + "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "api": "S3v2" + }, + "s3": { + "url": "https://s3.amazonaws.com", + "accessKey": "accessKey", + "secretKey": "secret", + "api": "S3v4" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go new file mode 100644 index 000000000..cc3000532 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go @@ -0,0 +1,175 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "sync" + "time" +) + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Signature Type. + SignerType SignatureType +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. 
+ // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type IAMCredentialProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// Credentials - A container for synchronous safe retrieval of credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sync.Mutex + + creds Value + forceRefresh bool + provider Provider +} + +// New returns a pointer to a new Credentials with the provider set. +func New(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.Lock() + defer c.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.Lock() + defer c.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be refreshed. 
+// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.Lock() + defer c.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample new file mode 100644 index 000000000..7fc91d9d2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go new file mode 100644 index 000000000..fa1908aeb --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go @@ -0,0 +1,45 @@ +// Package credentials provides credential retrieval and management +// for S3 compatible object storage. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewFromEnv() +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewFromIAM("") +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go new file mode 100644 index 000000000..11934433c --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go @@ -0,0 +1,71 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// A EnvAWS retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
+// * Secret Token: AWS_SESSION_TOKEN.
+type EnvAWS struct {
+ retrieved bool
+}
+
+// NewEnvAWS returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvAWS() *Credentials {
+ return New(&EnvAWS{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvAWS) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvAWS) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
new file mode 100644
index 000000000..791087ef5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
@@ -0,0 +1,62 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// A EnvMinio retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: MINIO_ACCESS_KEY.
+// * Secret Access Key: MINIO_SECRET_KEY.
+type EnvMinio struct {
+ retrieved bool
+}
+
+// NewEnvMinio returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvMinio() *Credentials {
+ return New(&EnvMinio{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvMinio) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("MINIO_ACCESS_KEY") + secret := os.Getenv("MINIO_SECRET_KEY") + + signerType := SignatureV4 + if id == "" || secret == "" { + signerType = SignatureAnonymous + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SignerType: signerType, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvMinio) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go new file mode 100644 index 000000000..1be621385 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go @@ -0,0 +1,120 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "os" + "path/filepath" + + "github.com/go-ini/ini" + homedir "github.com/minio/go-homedir" +) + +// A FileAWSCredentials retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type FileAWSCredentials struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileAWSCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewFileAWSCredentials(filename string, profile string) *Credentials { + return New(&FileAWSCredentials{ + filename: filename, + profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *FileAWSCredentials) Retrieve() (Value, error) { + if p.filename == "" { + p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + if p.filename == "" { + homeDir, err := homedir.Dir() + if err != nil { + return Value{}, err + } + p.filename = filepath.Join(homeDir, ".aws", "credentials") + } + } + if p.profile == "" { + p.profile = os.Getenv("AWS_PROFILE") + if p.profile == "" { + p.profile = "default" + } + } + + p.retrieved = false + + iniProfile, err := loadProfile(p.filename, p.profile) + if err != nil { + return Value{}, err + } + + // Default to empty string if not found. + id := iniProfile.Key("aws_access_key_id") + // Default to empty string if not found. 
+ secret := iniProfile.Key("aws_secret_access_key") + // Default to empty string if not found. + token := iniProfile.Key("aws_session_token") + + p.retrieved = true + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + SignerType: SignatureV4, + }, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *FileAWSCredentials) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (*ini.Section, error) { + config, err := ini.Load(filename) + if err != nil { + return nil, err + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return nil, err + } + return iniProfile, nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go new file mode 100644 index 000000000..9e26dd302 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go @@ -0,0 +1,129 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + homedir "github.com/minio/go-homedir" +) + +// A FileMinioClient retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Configuration file example: $HOME/.mc/config.json +type FileMinioClient struct { + // Path to the shared credentials file. + // + // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.mc/config.json" + // Windows: "%USERALIAS%\mc\config.json" + filename string + + // Minio Alias to extract credentials from the shared credentials file. If empty + // will default to environment variable "MINIO_ALIAS" or "default" if + // environment variable is also not set. + alias string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileMinioClient returns a pointer to a new Credentials object +// wrapping the Alias file provider. +func NewFileMinioClient(filename string, alias string) *Credentials { + return New(&FileMinioClient{ + filename: filename, + alias: alias, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. 
+func (p *FileMinioClient) Retrieve() (Value, error) { + if p.filename == "" { + homeDir, err := homedir.Dir() + if err != nil { + return Value{}, err + } + p.filename = filepath.Join(homeDir, ".mc", "config.json") + if runtime.GOOS == "windows" { + p.filename = filepath.Join(homeDir, "mc", "config.json") + } + } + + if p.alias == "" { + p.alias = os.Getenv("MINIO_ALIAS") + if p.alias == "" { + p.alias = "s3" + } + } + + p.retrieved = false + + hostCfg, err := loadAlias(p.filename, p.alias) + if err != nil { + return Value{}, err + } + + p.retrieved = true + return Value{ + AccessKeyID: hostCfg.AccessKey, + SecretAccessKey: hostCfg.SecretKey, + SignerType: parseSignatureType(hostCfg.API), + }, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *FileMinioClient) IsExpired() bool { + return !p.retrieved +} + +// hostConfig configuration of a host. +type hostConfig struct { + URL string `json:"url"` + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + API string `json:"api"` +} + +// config config version. +type config struct { + Version string `json:"version"` + Hosts map[string]hostConfig `json:"hosts"` +} + +// loadAliass loads from the file pointed to by shared credentials filename for alias. +// The credentials retrieved from the alias will be returned or error. Error will be +// returned if it fails to read from the file. +func loadAlias(filename, alias string) (hostConfig, error) { + cfg := &config{} + configBytes, err := ioutil.ReadFile(filename) + if err != nil { + return hostConfig{}, err + } + if err = json.Unmarshal(configBytes, cfg); err != nil { + return hostConfig{}, err + } + return cfg.Hosts[alias], nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go new file mode 100644 index 000000000..ee24a213b --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go @@ -0,0 +1,196 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bufio" + "encoding/json" + "errors" + "net/http" + "net/url" + "path" + "time" +) + +// DefaultExpiryWindow - Default expiry window. +// ExpiryWindow will allow the credentials to trigger refreshing +// prior to the credentials actually expiring. This is beneficial +// so race conditions with expiring credentials do not cause +// request to fail unexpectedly due to ExpiredTokenException exceptions. +const DefaultExpiryWindow = time.Second * 10 // 10 secs + +// A IAM retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +type IAM struct { + Expiry + + // Required http Client to use when connecting to IAM metadata service. + Client *http.Client + + // Custom endpoint in place of + endpoint string +} + +// redirectHeaders copies all headers when following a redirect URL. 
+// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800) +func redirectHeaders(req *http.Request, via []*http.Request) error { + if len(via) == 0 { + return nil + } + for key, val := range via[0].Header { + req.Header[key] = val + } + return nil +} + +// NewIAM returns a pointer to a new Credentials object wrapping +// the IAM. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewIAM(endpoint string) *Credentials { + if endpoint == "" { + // IAM Roles for Amazon EC2 + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + endpoint = "http://169.254.169.254" + } + p := &IAM{ + Client: &http.Client{ + Transport: http.DefaultTransport, + CheckRedirect: redirectHeaders, + }, + endpoint: endpoint, + } + return New(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired +func (m *IAM) Retrieve() (Value, error) { + credsList, err := requestCredList(m.Client, m.endpoint) + if err != nil { + return Value{}, err + } + + if len(credsList) == 0 { + return Value{}, errors.New("empty EC2 Role list") + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, m.endpoint, credsName) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + SignerType: SignatureV4, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/latest/meta-data/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *http.Client, endpoint string) ([]string, error) { + u, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + u.Path = iamSecurityCredsPath + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errors.New(resp.Status) + } + + credsList := []string{} + s := bufio.NewScanner(resp.Body) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, err + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. 
+func requestCred(client *http.Client, endpoint string, credsName string) (ec2RoleCredRespBody, error) { + u, err := url.Parse(endpoint) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + u.Path = path.Join(iamSecurityCredsPath, credsName) + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, errors.New(respCreds.Message) + } + + return respCreds, nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go new file mode 100644 index 000000000..c64ad6c23 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go @@ -0,0 +1,76 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "strings" + +// SignatureType is type of Authorization requested for a given HTTP request. +type SignatureType int + +// Different types of supported signatures - default is SignatureV4 or SignatureDefault. +const ( + // SignatureDefault is always set to v4. + SignatureDefault SignatureType = iota + SignatureV4 + SignatureV2 + SignatureV4Streaming + SignatureAnonymous // Anonymous signature signifies, no signature. +) + +// IsV2 - is signature SignatureV2? +func (s SignatureType) IsV2() bool { + return s == SignatureV2 +} + +// IsV4 - is signature SignatureV4? +func (s SignatureType) IsV4() bool { + return s == SignatureV4 || s == SignatureDefault +} + +// IsStreamingV4 - is signature SignatureV4Streaming? +func (s SignatureType) IsStreamingV4() bool { + return s == SignatureV4Streaming +} + +// IsAnonymous - is signature empty? +func (s SignatureType) IsAnonymous() bool { + return s == SignatureAnonymous +} + +// Stringer humanized version of signature type, +// strings returned here are case insensitive. 
+func (s SignatureType) String() string {
+ if s.IsV2() {
+ return "S3v2"
+ } else if s.IsV4() {
+ return "S3v4"
+ } else if s.IsStreamingV4() {
+ return "S3v4Streaming"
+ }
+ return "Anonymous"
+}
+
+func parseSignatureType(str string) SignatureType {
+ if strings.EqualFold(str, "S3v4") {
+ return SignatureV4
+ } else if strings.EqualFold(str, "S3v2") {
+ return SignatureV2
+ } else if strings.EqualFold(str, "S3v4Streaming") {
+ return SignatureV4Streaming
+ }
+ return SignatureAnonymous
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/pkg/credentials/static.go
new file mode 100644
index 000000000..25aff5696
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/static.go
@@ -0,0 +1,67 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Static is a set of credentials which are set programmatically,
+// and will never expire.
+type Static struct {
+ Value
+}
+
+// NewStaticV2 returns a pointer to a new Credentials object
+// wrapping a static credentials value provider, signature is
+// set to v2. If access and secret are not specified then,
+// regardless of the signature type set, Value will return
+// as anonymous.
+func NewStaticV2(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV2)
+}
+
+// NewStaticV4 is similar to NewStaticV2 with similar considerations.
+func NewStaticV4(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV4)
+}
+
+// NewStatic returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
+ return New(&Static{
+ Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ SignerType: signerType,
+ },
+ })
+}
+
+// Retrieve returns the static credentials.
+func (s *Static) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ // Anonymous is not an error
+ return Value{SignerType: SignatureAnonymous}, nil
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For Static, the credentials never expire.
+func (s *Static) IsExpired() bool {
+ return false
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
index 7670e68f4..be45e52f4 100644
--- a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
@@ -89,6 +89,15 @@ func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) {
 }
 
+// Close closes the internal stream.
+func (s *CBCSecureMaterials) Close() error { + closer, ok := s.stream.(io.Closer) + if ok { + return closer.Close() + } + return nil +} + // SetupEncryptMode - tells CBC that we are going to encrypt data func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error { // Set mode to encrypt diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go index 2fd75033f..8b8554336 100644 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go @@ -25,6 +25,9 @@ import "io" // Materials - provides generic interface to encrypt any stream of data. type Materials interface { + // Closes the wrapped stream properly, initiated by the caller. + Close() error + // Returns encrypted/decrypted data, io.Reader compatible. Read(b []byte) (int, error) diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go index cbb889d8d..b2d46e178 100644 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go @@ -583,7 +583,7 @@ func GetPolicies(statements []Statement, bucketName string) map[string]BucketPol r = r[:len(r)-1] asterisk = "*" } - objectPath := r[len(awsResourcePrefix+bucketName)+1 : len(r)] + objectPath := r[len(awsResourcePrefix+bucketName)+1:] p := GetPolicy(statements, bucketName, objectPath) policyRules[bucketName+"/"+objectPath+asterisk] = p } diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go index 755fd1ac5..c2f0baee6 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go @@ -92,9 +92,12 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [ // prepareStreamingRequest - prepares a request with appropriate // headers before computing the seed signature. -func prepareStreamingRequest(req *http.Request, dataLen int64, timestamp time.Time) { +func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { // Set x-amz-content-sha256 header. req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } req.Header.Set("Content-Encoding", streamingEncoding) req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) @@ -138,6 +141,7 @@ func (s *StreamingReader) setSeedSignature(req *http.Request) { type StreamingReader struct { accessKeyID string secretAccessKey string + sessionToken string region string prevSignature string seedSignature string @@ -195,16 +199,17 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { // StreamingSignV4 - provides chunked upload signatureV4 support by // implementing io.Reader. -func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, +func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, region string, dataLen int64, reqTime time.Time) *http.Request { // Set headers needed for streaming signature. 
- prepareStreamingRequest(req, dataLen, reqTime) + prepareStreamingRequest(req, sessionToken, dataLen, reqTime) stReader := &StreamingReader{ baseReadCloser: req.Body, accessKeyID: accessKeyID, secretAccessKey: secretAccessKey, + sessionToken: sessionToken, region: region, reqTime: reqTime, chunkBuf: make([]byte, payloadChunkSize), diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go index 245fb08c3..0d75dc162 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go @@ -206,7 +206,7 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string { // PreSignV4 presign the request, in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. -func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request { +func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request { // Presign is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -228,6 +228,10 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) query.Set("X-Amz-SignedHeaders", signedHeaders) query.Set("X-Amz-Credential", credential) + // Set session token if available. + if sessionToken != "" { + query.Set("X-Amz-Security-Token", sessionToken) + } req.URL.RawQuery = query.Encode() // Get canonical request. @@ -260,7 +264,7 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l // SignV4 sign the request before Do(), in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. -func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { +func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -272,6 +276,11 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht // Set x-amz-date. req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + // Set session token if available. + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + // Get canonical request. canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go index a3b6ed845..9d6ac4d81 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go +++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go @@ -19,6 +19,7 @@ package s3utils import ( "bytes" "encoding/hex" + "errors" "net" "net/url" "regexp" @@ -84,10 +85,29 @@ func IsAmazonEndpoint(endpointURL url.URL) bool { if IsAmazonChinaEndpoint(endpointURL) { return true } - + if IsAmazonGovCloudEndpoint(endpointURL) { + return true + } return endpointURL.Host == "s3.amazonaws.com" } +// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint. 
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || + IsAmazonFIPSGovCloudEndpoint(endpointURL)) +} + +// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint. +func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" +} + // IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint. // Customers who wish to use the new Beijing Region are required // to sign up for a separate set of account credentials unique to @@ -181,3 +201,74 @@ func EncodePath(pathName string) string { } return encodedPathname } + +// We support '.' with bucket names but we fallback to using path +// style requests instead for such buckets. +var ( + validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-]{1,61}[A-Za-z0-9]$`) + validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) + ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +) + +// Common checker for both stricter and basic validation. +func checkBucketNameCommon(bucketName string, strict bool) (err error) { + if strings.TrimSpace(bucketName) == "" { + return errors.New("Bucket name cannot be empty") + } + if len(bucketName) < 3 { + return errors.New("Bucket name cannot be smaller than 3 characters") + } + if len(bucketName) > 63 { + return errors.New("Bucket name cannot be greater than 63 characters") + } + if ipAddress.MatchString(bucketName) { + return errors.New("Bucket name cannot be an ip address") + } + if strings.Contains(bucketName, "..") { + return errors.New("Bucket name contains invalid characters") + } + if strict { + if !validBucketNameStrict.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err + } + if !validBucketName.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err +} + +// CheckValidBucketName - checks if we have a valid input bucket name. +// This is a non stricter version. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func CheckValidBucketName(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, false) +} + +// CheckValidBucketNameStrict - checks if we have a valid input bucket name. +// This is a stricter version. +func CheckValidBucketNameStrict(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, true) +} + +// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectNamePrefix(objectName string) error { + if len(objectName) > 1024 { + return errors.New("Object name cannot be greater than 1024 characters") + } + if !utf8.ValidString(objectName) { + return errors.New("Object name with non UTF-8 strings are not supported") + } + return nil +} + +// CheckValidObjectName - checks if we have a valid input object name. 
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectName(objectName string) error { + if strings.TrimSpace(objectName) == "" { + return errors.New("Object name cannot be empty") + } + return CheckValidObjectNamePrefix(objectName) +} diff --git a/vendor/github.com/minio/minio-go/request-headers.go b/vendor/github.com/minio/minio-go/request-headers.go index fa23b2fe3..76c87202d 100644 --- a/vendor/github.com/minio/minio-go/request-headers.go +++ b/vendor/github.com/minio/minio-go/request-headers.go @@ -48,7 +48,7 @@ func (c RequestHeaders) SetMatchETag(etag string) error { if etag == "" { return ErrInvalidArgument("ETag cannot be empty.") } - c.Set("If-Match", etag) + c.Set("If-Match", "\""+etag+"\"") return nil } @@ -57,7 +57,7 @@ func (c RequestHeaders) SetMatchETagExcept(etag string) error { if etag == "" { return ErrInvalidArgument("ETag cannot be empty.") } - c.Set("If-None-Match", etag) + c.Set("If-None-Match", "\""+etag+"\"") return nil } diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go index d7fa5e038..c02f3f1fa 100644 --- a/vendor/github.com/minio/minio-go/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/s3-endpoints.go @@ -33,6 +33,7 @@ var awsS3EndpointMap = map[string]string{ "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com", "ap-northeast-2": "s3-ap-northeast-2.amazonaws.com", "sa-east-1": "s3-sa-east-1.amazonaws.com", + "us-gov-west-1": "s3-us-gov-west-1.amazonaws.com", "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", } diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go index 11b40a0f8..c5aff9bbc 100644 --- a/vendor/github.com/minio/minio-go/s3-error.go +++ b/vendor/github.com/minio/minio-go/s3-error.go @@ -25,7 +25,7 @@ var s3ErrorResponseMap = map[string]string{ "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", "InternalError": "We encountered an internal error, please try again.", - "InvalidAccessKeyID": "The access key ID you provided does not exist in our records.", + "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", "InvalidBucketName": "The specified bucket is not valid.", "InvalidDigest": "The Content-Md5 you specified is not valid.", "InvalidRange": "The requested range is not satisfiable", diff --git a/vendor/github.com/minio/minio-go/signature-type.go b/vendor/github.com/minio/minio-go/signature-type.go deleted file mode 100644 index f9a57c3f1..000000000 --- a/vendor/github.com/minio/minio-go/signature-type.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -// SignatureType is type of Authorization requested for a given HTTP request. 
-type SignatureType int - -// Different types of supported signatures - default is Latest i.e SignatureV4. -const ( - Latest SignatureType = iota - SignatureV4 - SignatureV2 - SignatureV4Streaming -) - -var emptySHA256 = sum256(nil) - -// isV2 - is signature SignatureV2? -func (s SignatureType) isV2() bool { - return s == SignatureV2 -} - -// isV4 - is signature SignatureV4? -func (s SignatureType) isV4() bool { - return s == SignatureV4 || s == Latest -} - -// isStreamingV4 - is signature SignatureV4Streaming? -func (s SignatureType) isStreamingV4() bool { - return s == SignatureV4Streaming -} diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go index 93cd1712f..d06f1f52c 100644 --- a/vendor/github.com/minio/minio-go/utils.go +++ b/vendor/github.com/minio/minio-go/utils.go @@ -28,7 +28,6 @@ import ( "regexp" "strings" "time" - "unicode/utf8" "github.com/minio/minio-go/pkg/s3utils" ) @@ -110,6 +109,8 @@ func closeResponse(resp *http.Response) { } } +var emptySHA256 = sum256(nil) + // Sentinel URL is the default url value which is invalid. var sentinelURL = url.URL{} @@ -146,63 +147,6 @@ func isValidExpiry(expires time.Duration) error { return nil } -// We support '.' with bucket names but we fallback to using path -// style requests instead for such buckets. -var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) - -// Invalid bucket name with double dot. -var invalidDotBucketName = regexp.MustCompile(`\.\.`) - -// isValidBucketName - verify bucket name in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html -func isValidBucketName(bucketName string) error { - if strings.TrimSpace(bucketName) == "" { - return ErrInvalidBucketName("Bucket name cannot be empty.") - } - if len(bucketName) < 3 { - return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.") - } - if len(bucketName) > 63 { - return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.") - } - if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' { - return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.") - } - if invalidDotBucketName.MatchString(bucketName) { - return ErrInvalidBucketName("Bucket name cannot have successive periods.") - } - if !validBucketName.MatchString(bucketName) { - return ErrInvalidBucketName("Bucket name contains invalid characters.") - } - return nil -} - -// isValidObjectName - verify object name in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func isValidObjectName(objectName string) error { - if strings.TrimSpace(objectName) == "" { - return ErrInvalidObjectName("Object name cannot be empty.") - } - if len(objectName) > 1024 { - return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.") - } - if !utf8.ValidString(objectName) { - return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.") - } - return nil -} - -// isValidObjectPrefix - verify if object prefix is valid. 
-func isValidObjectPrefix(objectPrefix string) error { - if len(objectPrefix) > 1024 { - return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.") - } - if !utf8.ValidString(objectPrefix) { - return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.") - } - return nil -} - // make a copy of http.Header func cloneHeader(h http.Header) http.Header { h2 := make(http.Header, len(h)) @@ -225,3 +169,26 @@ func filterHeader(header http.Header, filterKeys []string) (filteredHeader http. } return filteredHeader } + +// regCred matches credential string in HTTP header +var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/") + +// regCred matches signature string in HTTP header +var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)") + +// Redact out signature value from authorization string. +func redactSignature(origAuth string) string { + if !strings.HasPrefix(origAuth, signV4Algorithm) { + // Set a temporary redacted auth + return "AWS **REDACTED**:**REDACTED**" + } + + /// Signature V4 authorization header. + + // Strip out accessKeyID from: + // Credential=////aws4_request + newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") + + // Strip out 256-bit signature from: Signature=<256-bit signature> + return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") +} diff --git a/vendor/vendor.json b/vendor/vendor.json index e7ec8ca6d..c281a3f9b 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -93,6 +93,12 @@ "revision": "0d253a66e6e1349f4581d6d2b300ee434ee2da9f", "revisionTime": "2017-02-16T21:49:44Z" }, + { + "checksumSHA1": "+IH9gXMht4fL/fxKRZ4sqGBps1g=", + "path": "github.com/go-ini/ini", + "revision": "e7fea39b01aea8d5671f6858f0532f56e8bff3a5", + "revisionTime": "2017-03-28T15:39:02Z" + }, { "checksumSHA1": "QD6LqgLz2JMxXqns8TaxtK9AuHs=", "path": "github.com/go-sql-driver/mysql", @@ -228,40 +234,46 @@ "revisionTime": "2016-02-29T08:42:30-08:00" }, { - "checksumSHA1": "pvBoasrJ8jrJJG0JNJvxyCZighw=", + "checksumSHA1": "o6xvhvD7RCOkBZ5pUPOsPcsz/B8=", "path": "github.com/minio/minio-go", - "revision": "75a218a15d5413c9a961318160f4c12a29bf9147", - "revisionTime": "2017-05-04T18:42:21Z" + "revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835", + "revisionTime": "2017-06-19T22:00:32Z" }, { - "checksumSHA1": "lsxCcRcNUDxhQyO999SOdvKzzfM=", + "checksumSHA1": "wDNvEYgDy1gOkzJ81WuuYore3dw=", + "path": "github.com/minio/minio-go/pkg/credentials", + "revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835", + "revisionTime": "2017-06-19T22:00:32Z" + }, + { + "checksumSHA1": "pggIpSePizRBQ7ybhB0CuaSQydw=", "path": "github.com/minio/minio-go/pkg/encrypt", - "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", - "revisionTime": "2017-04-26T18:23:05Z" + "revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835", + "revisionTime": "2017-06-19T22:00:32Z" }, { - "checksumSHA1": "neH34/65OXeKHM/MlV8MbhcdFBc=", + "checksumSHA1": "3tl2ehmod/EzXE9o9WJ5HM2AQPE=", "path": "github.com/minio/minio-go/pkg/policy", - "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", - "revisionTime": "2017-04-26T18:23:05Z" + "revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835", + "revisionTime": "2017-06-19T22:00:32Z" }, { - "checksumSHA1": "/5IXp1nGKqOfHn8Piiod3OCkG2U=", + "checksumSHA1": "uvEv7QS9WamqQHyru27ugQGzyLU=", "path": "github.com/minio/minio-go/pkg/s3signer", - "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", - "revisionTime": "2017-04-26T18:23:05Z" + "revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835", + 
"revisionTime": "2017-06-19T22:00:32Z" }, { - "checksumSHA1": "gRnCFKb4x83GBLVUZXoOjujd+U0=", + "checksumSHA1": "XTEUN/pAWAusSXT3yn6UznCl3iA=", "path": "github.com/minio/minio-go/pkg/s3utils", - "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", - "revisionTime": "2017-04-26T18:23:05Z" + "revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835", + "revisionTime": "2017-06-19T22:00:32Z" }, { "checksumSHA1": "maUy+dbN6VfTTnfErrAW2lLit1w=", "path": "github.com/minio/minio-go/pkg/set", - "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", - "revisionTime": "2017-04-26T18:23:05Z" + "revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835", + "revisionTime": "2017-06-19T22:00:32Z" }, { "checksumSHA1": "URVle4qtadmW9w9BulDRHY3kxnA=",