diff --git a/cmd/api-headers.go b/cmd/api-headers.go index 94a5ebb2e..2a0517bcf 100644 --- a/cmd/api-headers.go +++ b/cmd/api-headers.go @@ -70,6 +70,14 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *h w.Header().Set("ETag", "\""+objInfo.ETag+"\"") } + if objInfo.ContentType != "" { + w.Header().Set("Content-Type", objInfo.ContentType) + } + + if objInfo.ContentEncoding != "" { + w.Header().Set("Content-Encoding", objInfo.ContentEncoding) + } + // Set all other user defined metadata. for k, v := range objInfo.UserDefined { w.Header().Set(k, v) diff --git a/cmd/gateway-b2-anonymous.go b/cmd/gateway-b2-anonymous.go new file mode 100644 index 000000000..1389ee6a3 --- /dev/null +++ b/cmd/gateway-b2-anonymous.go @@ -0,0 +1,133 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// mkRange converts offset, size into Range header equivalent. +func mkRange(offset, size int64) string { + if offset == 0 && size == 0 { + return "" + } + if size == 0 { + return fmt.Sprintf("%s%d-", byteRangePrefix, offset) + } + return fmt.Sprintf("%s%d-%d", byteRangePrefix, offset, offset+size-1) +} + +// AnonGetObject - performs a plain http GET request on a public resource, +// fails if the resource is not public. +func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error { + uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object) + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return b2ToObjectError(traceError(err), bucket, object) + } + rng := mkRange(startOffset, length) + if rng != "" { + req.Header.Set("Range", rng) + } + resp, err := l.anonClient.Do(req) + if err != nil { + return b2ToObjectError(traceError(err), bucket, object) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return b2ToObjectError(traceError(errors.New(resp.Status)), bucket, object) + } + _, err = io.Copy(writer, resp.Body) + return b2ToObjectError(traceError(err), bucket, object) +} + +// Converts http Header into ObjectInfo. This function looks for all the +// standard Backblaze B2 headers to convert into ObjectInfo. +// +// Content-Length is converted to Size. +// X-Bz-Upload-Timestamp is converted to ModTime. +// X-Bz-Info-
<header>: <value> is converted to <header>
: +// Content-Type is converted to ContentType. +// X-Bz-Content-Sha1 is converted to ETag. +func headerToObjectInfo(bucket, object string, header http.Header) (objInfo ObjectInfo, err error) { + clen, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + + // Converting upload timestamp in milliseconds to a time.Time value for ObjectInfo.ModTime. + timeStamp, err := strconv.ParseInt(header.Get("X-Bz-Upload-Timestamp"), 10, 64) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + + // Populate user metadata by looking for all the X-Bz-Info- + // HTTP headers, ignore other headers since they have their own + // designated meaning, for more details refer B2 API documentation. + userMetadata := make(map[string]string) + for key := range header { + if strings.HasPrefix(key, "X-Bz-Info-") { + var name string + name, err = url.QueryUnescape(strings.TrimPrefix(key, "X-Bz-Info-")) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + var val string + val, err = url.QueryUnescape(header.Get(key)) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + userMetadata[name] = val + } + } + + return ObjectInfo{ + Bucket: bucket, + Name: object, + ContentType: header.Get("Content-Type"), + ModTime: time.Unix(0, 0).Add(time.Duration(timeStamp) * time.Millisecond), + Size: clen, + ETag: header.Get("X-Bz-File-Id"), + UserDefined: userMetadata, + }, nil +} + +// AnonGetObjectInfo - performs a plain http HEAD request on a public resource, +// fails if the resource is not public. +func (l *b2Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { + uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object) + req, err := http.NewRequest("HEAD", uri, nil) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + resp, err := l.anonClient.Do(req) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return objInfo, b2ToObjectError(traceError(errors.New(resp.Status)), bucket, object) + } + return headerToObjectInfo(bucket, object, resp.Header) +} diff --git a/cmd/gateway-b2.go b/cmd/gateway-b2.go new file mode 100644 index 000000000..6545ea58e --- /dev/null +++ b/cmd/gateway-b2.go @@ -0,0 +1,703 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "crypto/sha1" + "fmt" + "hash" + "io" + "io/ioutil" + "net/http" + "strings" + "sync" + "time" + + b2 "github.com/minio/blazer/base" + "github.com/minio/minio-go/pkg/policy" +) + +// Supported bucket types by B2 backend. 
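+// A third bucket type, "snapshot", also exists but can only be set through the
+// B2 web console; this gateway treats such buckets as having no bucket policy.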
+const ( + bucketTypePrivate = "allPrivate" + bucketTypeReadOnly = "allPublic" +) + +// b2Object implements gateway for Minio and BackBlaze B2 compatible object storage servers. +type b2Objects struct { + gatewayUnsupported + mu sync.Mutex + creds credential + b2Client *b2.B2 + anonClient *http.Client + ctx context.Context +} + +// newB2Gateway returns b2 gateway layer, implements GatewayLayer interface to +// talk to B2 remote backend. +func newB2Gateway() (GatewayLayer, error) { + ctx := context.Background() + creds := serverConfig.GetCredential() + + client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(newCustomHTTPTransport())) + if err != nil { + return nil, err + } + + return &b2Objects{ + creds: creds, + b2Client: client, + anonClient: &http.Client{ + Transport: newCustomHTTPTransport(), + }, + ctx: ctx, + }, nil +} + +// Convert B2 errors to minio object layer errors. +func b2ToObjectError(err error, params ...string) error { + if err == nil { + return nil + } + + e, ok := err.(*Error) + if !ok { + // Code should be fixed if this function is called without doing traceError() + // Else handling different situations in this function makes this function complicated. + errorIf(err, "Expected type *Error") + return err + } + + err = e.e + bucket := "" + object := "" + uploadID := "" + if len(params) >= 1 { + bucket = params[0] + } + if len(params) == 2 { + object = params[1] + } + if len(params) == 3 { + uploadID = params[2] + } + + // Following code is a non-exhaustive check to convert + // B2 errors into S3 compatible errors. + // + // For a more complete information - https://www.backblaze.com/b2/docs/ + statusCode, code, msg := b2.Code(err) + if statusCode == 0 { + // We don't interpret non B2 errors. B2 errors have statusCode + // to help us convert them to S3 object errors. + return e + } + + switch code { + case "duplicate_bucket_name": + err = BucketAlreadyOwnedByYou{Bucket: bucket} + case "bad_request": + if object != "" { + err = ObjectNameInvalid{bucket, object} + } else if bucket != "" { + err = BucketNotFound{Bucket: bucket} + } + case "bad_bucket_id": + err = BucketNotFound{Bucket: bucket} + case "file_not_present", "not_found": + err = ObjectNotFound{bucket, object} + case "cannot_delete_non_empty_bucket": + err = BucketNotEmpty{bucket, ""} + } + + // Special interpretation like this is required for Multipart sessions. + if strings.Contains(msg, "No active upload for") && uploadID != "" { + err = InvalidUploadID{uploadID} + } + + e.e = err + return e +} + +// Shutdown saves any gateway metadata to disk +// if necessary and reload upon next restart. +func (l *b2Objects) Shutdown() error { + // TODO + return nil +} + +// StorageInfo is not relevant to B2 backend. +func (l *b2Objects) StorageInfo() (si StorageInfo) { + return si +} + +// MakeBucket creates a new container on B2 backend. +func (l *b2Objects) MakeBucketWithLocation(bucket, location string) error { + // location is ignored for B2 backend. + + // All buckets are set to private by default. 
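+	// SetBucketPolicies can later flip the bucket type to allPublic to allow anonymous reads.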
+ _, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil) + return b2ToObjectError(traceError(err), bucket) +} + +func (l *b2Objects) reAuthorizeAccount() error { + client, err := b2.AuthorizeAccount(l.ctx, l.creds.AccessKey, l.creds.SecretKey, b2.Transport(newCustomHTTPTransport())) + if err != nil { + return err + } + l.mu.Lock() + l.b2Client.Update(client) + l.mu.Unlock() + return nil +} + +// listBuckets is a wrapper similar to ListBuckets, which re-authorizes +// the account and updates the B2 client safely. Once successfully +// authorized performs the call again and returns list of buckets. +// For any errors which are not actionable we return an error. +func (l *b2Objects) listBuckets(err error) ([]*b2.Bucket, error) { + if err != nil { + if b2.Action(err) != b2.ReAuthenticate { + return nil, err + } + if rerr := l.reAuthorizeAccount(); rerr != nil { + return nil, rerr + } + } + bktList, lerr := l.b2Client.ListBuckets(l.ctx) + if lerr != nil { + return l.listBuckets(lerr) + } + return bktList, nil +} + +// Bucket - is a helper which provides a *Bucket instance +// for performing an API operation. B2 API doesn't +// provide a direct way to access the bucket so we need +// to employ following technique. +func (l *b2Objects) Bucket(bucket string) (*b2.Bucket, error) { + bktList, err := l.listBuckets(nil) + if err != nil { + return nil, b2ToObjectError(traceError(err), bucket) + } + for _, bkt := range bktList { + if bkt.Name == bucket { + return bkt, nil + } + } + return nil, traceError(BucketNotFound{Bucket: bucket}) +} + +// GetBucketInfo gets bucket metadata.. +func (l *b2Objects) GetBucketInfo(bucket string) (bi BucketInfo, err error) { + if _, err = l.Bucket(bucket); err != nil { + return bi, err + } + return BucketInfo{ + Name: bucket, + Created: time.Unix(0, 0), + }, nil +} + +// ListBuckets lists all B2 buckets +func (l *b2Objects) ListBuckets() ([]BucketInfo, error) { + bktList, err := l.listBuckets(nil) + if err != nil { + return nil, err + } + var bktInfo []BucketInfo + for _, bkt := range bktList { + bktInfo = append(bktInfo, BucketInfo{ + Name: bkt.Name, + Created: time.Unix(0, 0), + }) + } + return bktInfo, nil +} + +// DeleteBucket deletes a bucket on B2 +func (l *b2Objects) DeleteBucket(bucket string) error { + bkt, err := l.Bucket(bucket) + if err != nil { + return err + } + err = bkt.DeleteBucket(l.ctx) + return b2ToObjectError(traceError(err), bucket) +} + +// ListObjects lists all objects in B2 bucket filtered by prefix, returns upto at max 1000 entries at a time. +func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { + bkt, err := l.Bucket(bucket) + if err != nil { + return loi, err + } + loi = ListObjectsInfo{} + files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter) + if lerr != nil { + return loi, b2ToObjectError(traceError(lerr), bucket) + } + loi.IsTruncated = next != "" + loi.NextMarker = next + for _, file := range files { + switch file.Status { + case "folder": + loi.Prefixes = append(loi.Prefixes, file.Name) + case "upload": + loi.Objects = append(loi.Objects, ObjectInfo{ + Bucket: bucket, + Name: file.Name, + ModTime: file.Timestamp, + Size: file.Size, + ETag: file.Info.ID, + ContentType: file.Info.ContentType, + UserDefined: file.Info.Info, + }) + } + } + return loi, nil +} + +// ListObjectsV2 lists all objects in B2 bucket filtered by prefix, returns upto max 1000 entries at a time. 
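+// The S3 continuationToken is passed straight through as B2's file name marker,
+// and B2's next marker comes back as NextContinuationToken. A caller pages
+// roughly like this (sketch; bucket and prefix are assumed names):
+//
+//	token := ""
+//	for {
+//		res, err := l.ListObjectsV2(bucket, prefix, token, "", 1000, false, "")
+//		if err != nil {
+//			break
+//		}
+//		// consume res.Objects and res.Prefixes ...
+//		if !res.IsTruncated {
+//			break
+//		}
+//		token = res.NextContinuationToken
+//	}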
+func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, + fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) { + // fetchOwner, startAfter are not supported and unused. + bkt, err := l.Bucket(bucket) + if err != nil { + return loi, err + } + loi = ListObjectsV2Info{} + files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter) + if lerr != nil { + return loi, b2ToObjectError(traceError(lerr), bucket) + } + loi.IsTruncated = next != "" + loi.ContinuationToken = continuationToken + loi.NextContinuationToken = next + for _, file := range files { + switch file.Status { + case "folder": + loi.Prefixes = append(loi.Prefixes, file.Name) + case "upload": + loi.Objects = append(loi.Objects, ObjectInfo{ + Bucket: bucket, + Name: file.Name, + ModTime: file.Timestamp, + Size: file.Size, + ETag: file.Info.ID, + ContentType: file.Info.ContentType, + UserDefined: file.Info.Info, + }) + } + } + return loi, nil +} + +// GetObject reads an object from B2. Supports additional +// parameters like offset and length which are synonymous with +// HTTP Range requests. +// +// startOffset indicates the starting read location of the object. +// length indicates the total length of the object. +func (l *b2Objects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error { + bkt, err := l.Bucket(bucket) + if err != nil { + return err + } + reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length) + if err != nil { + return b2ToObjectError(traceError(err), bucket, object) + } + defer reader.Close() + _, err = io.Copy(writer, reader) + return b2ToObjectError(traceError(err), bucket, object) +} + +// GetObjectInfo reads object info and replies back ObjectInfo +func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { + bkt, err := l.Bucket(bucket) + if err != nil { + return objInfo, err + } + f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + f.Close() + fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + objInfo = ObjectInfo{ + Bucket: bucket, + Name: object, + ETag: fi.ID, + Size: fi.Size, + ModTime: fi.Timestamp, + ContentType: fi.ContentType, + UserDefined: fi.Info, + } + return objInfo, nil +} + +// In B2 - You must always include the X-Bz-Content-Sha1 header with +// your upload request. The value you provide can be: +// (1) the 40-character hex checksum of the file, +// (2) the string hex_digits_at_end, or +// (3) the string do_not_verify. +// For more reference - https://www.backblaze.com/b2/docs/uploading.html +// +const ( + sha1NoVerify = "do_not_verify" + sha1AtEOF = "hex_digits_at_end" +) + +// With the second option mentioned above, you append the 40-character hex sha1 +// to the end of the request body, immediately after the contents of the file +// being uploaded. Note that the content length is the size of the file plus 40 +// of the original size of the reader. +// +// newB2Reader implements a B2 compatible reader by wrapping the HashReader into +// a new io.Reader which will emit out the sha1 hex digits at io.EOF. +// It also means that your overall content size is now original size + 40 bytes. 
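+// For example, a 10 byte payload is sent as a 50 byte request body:
+//
+//	[10 content bytes][40 hex characters of their SHA1]
+//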
+// Additionally this reader also verifies Hash encapsulated inside HashReader +// at io.EOF if the verification failed we return an error and do not send +// the content to server. +func newB2Reader(r *HashReader, size int64) *B2Reader { + return &B2Reader{ + r: r, + size: size, + sha1Hash: sha1.New(), + } +} + +// B2Reader - is a Reader wraps the HashReader which will emit out the sha1 +// hex digits at io.EOF. It also means that your overall content size is +// now original size + 40 bytes. Additionally this reader also verifies +// Hash encapsulated inside HashReader at io.EOF if the verification +// failed we return an error and do not send the content to server. +type B2Reader struct { + r *HashReader + size int64 + sha1Hash hash.Hash + + isEOF bool + buf *strings.Reader +} + +// Size - Returns the total size of Reader. +func (nb *B2Reader) Size() int64 { return nb.size + 40 } +func (nb *B2Reader) Read(p []byte) (int, error) { + if nb.isEOF { + return nb.buf.Read(p) + } + // Read into hash to update the on going checksum. + n, err := io.TeeReader(nb.r, nb.sha1Hash).Read(p) + if err == io.EOF { + // Verify checksum at io.EOF + if err = nb.r.Verify(); err != nil { + return n, err + } + // Stream is not corrupted on this end + // now fill in the last 40 bytes of sha1 hex + // so that the server can verify the stream on + // their end. + err = nil + nb.isEOF = true + nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.sha1Hash.Sum(nil))) + } + return n, err +} + +// PutObject uploads the single upload to B2 backend by using *b2_upload_file* API, uploads upto 5GiB. +func (l *b2Objects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (ObjectInfo, error) { + var objInfo ObjectInfo + bkt, err := l.Bucket(bucket) + if err != nil { + return objInfo, err + } + contentType := metadata["content-type"] + delete(metadata, "content-type") + delete(metadata, "etag") + + var u *b2.URL + u, err = bkt.GetUploadURL(l.ctx) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + + hr := newB2Reader(data, data.Size()) + var f *b2.File + f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, metadata) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + + var fi *b2.FileInfo + fi, err = f.GetFileInfo(l.ctx) + if err != nil { + return objInfo, b2ToObjectError(traceError(err), bucket, object) + } + + return ObjectInfo{ + Bucket: bucket, + Name: object, + ETag: fi.ID, + Size: fi.Size, + ModTime: fi.Timestamp, + ContentType: fi.ContentType, + UserDefined: fi.Info, + }, nil +} + +// CopyObject copies a blob from source container to destination container. +func (l *b2Objects) CopyObject(srcBucket string, srcObject string, dstBucket string, + dstObject string, metadata map[string]string) (objInfo ObjectInfo, err error) { + return objInfo, traceError(NotImplemented{}) +} + +// DeleteObject deletes a blob in bucket +func (l *b2Objects) DeleteObject(bucket string, object string) error { + bkt, err := l.Bucket(bucket) + if err != nil { + return err + } + reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1) + if err != nil { + return b2ToObjectError(traceError(err), bucket, object) + } + io.Copy(ioutil.Discard, reader) + reader.Close() + err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx) + return b2ToObjectError(traceError(err), bucket, object) +} + +// ListMultipartUploads lists all multipart uploads. 
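+// Unfinished B2 large files stand in for S3 multipart uploads here, so the
+// large file ID doubles as the S3 uploadID.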
+func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, + delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) { + // keyMarker, prefix, delimiter are all ignored, Backblaze B2 doesn't support any + // of these parameters only equivalent parameter is uploadIDMarker. + bkt, err := l.Bucket(bucket) + if err != nil { + return lmi, err + } + // The maximum number of files to return from this call. + // The default value is 100, and the maximum allowed is 100. + if maxUploads > 100 { + maxUploads = 100 + } + largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads) + if err != nil { + return lmi, b2ToObjectError(traceError(err), bucket) + } + lmi = ListMultipartsInfo{ + MaxUploads: maxUploads, + } + if nextMarker != "" { + lmi.IsTruncated = true + lmi.NextUploadIDMarker = nextMarker + } + for _, largeFile := range largeFiles { + lmi.Uploads = append(lmi.Uploads, uploadMetadata{ + Object: largeFile.Name, + UploadID: largeFile.ID, + Initiated: largeFile.Timestamp, + }) + } + return lmi, nil +} + +// NewMultipartUpload upload object in multiple parts, uses B2's LargeFile upload API. +// Large files can range in size from 5MB to 10TB. +// Each large file must consist of at least 2 parts, and all of the parts except the +// last one must be at least 5MB in size. The last part must contain at least one byte. +// For more information - https://www.backblaze.com/b2/docs/large_files.html +func (l *b2Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (string, error) { + var uploadID string + bkt, err := l.Bucket(bucket) + if err != nil { + return uploadID, err + } + + contentType := metadata["content-type"] + delete(metadata, "content-type") + lf, err := bkt.StartLargeFile(l.ctx, object, contentType, metadata) + if err != nil { + return uploadID, b2ToObjectError(traceError(err), bucket, object) + } + + return lf.ID, nil +} + +// CopyObjectPart copy part of object to other bucket and object. +func (l *b2Objects) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, + uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) { + return PartInfo{}, traceError(NotImplemented{}) +} + +// PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API. +func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *HashReader) (pi PartInfo, err error) { + bkt, err := l.Bucket(bucket) + if err != nil { + return pi, err + } + + fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx) + if err != nil { + return pi, b2ToObjectError(traceError(err), bucket, object, uploadID) + } + + hr := newB2Reader(data, data.Size()) + sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID) + if err != nil { + return pi, b2ToObjectError(traceError(err), bucket, object, uploadID) + } + + return PartInfo{ + PartNumber: partID, + LastModified: UTCNow(), + ETag: sha1, + Size: data.Size(), + }, nil +} + +// ListObjectParts returns all object parts for specified object in specified bucket, uses B2's LargeFile upload API. 
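+// B2 part numbers are 1-based: the S3 partNumberMarker is incremented by one
+// below to obtain B2's startPartNumber, which must lie in the range 1-10000.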
+func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) { + bkt, err := l.Bucket(bucket) + if err != nil { + return lpi, err + } + lpi = ListPartsInfo{ + Bucket: bucket, + Object: object, + UploadID: uploadID, + MaxParts: maxParts, + PartNumberMarker: partNumberMarker, + } + // startPartNumber must be in the range 1 - 10000 for B2. + partNumberMarker++ + partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts) + if err != nil { + return lpi, b2ToObjectError(traceError(err), bucket, object, uploadID) + } + if next != 0 { + lpi.IsTruncated = true + lpi.NextPartNumberMarker = next + } + for _, part := range partsList { + lpi.Parts = append(lpi.Parts, PartInfo{ + PartNumber: part.Number, + ETag: part.SHA1, + Size: part.Size, + }) + } + return lpi, nil +} + +// AbortMultipartUpload aborts a on going multipart upload, uses B2's LargeFile upload API. +func (l *b2Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error { + bkt, err := l.Bucket(bucket) + if err != nil { + return err + } + err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx) + return b2ToObjectError(traceError(err), bucket, object, uploadID) +} + +// CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API. +func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (oi ObjectInfo, err error) { + bkt, err := l.Bucket(bucket) + if err != nil { + return oi, err + } + hashes := make(map[int]string) + for i, uploadedPart := range uploadedParts { + // B2 requires contigous part numbers starting with 1, they do not support + // hand picking part numbers, we return an S3 compatible error instead. + if i+1 != uploadedPart.PartNumber { + return oi, b2ToObjectError(traceError(InvalidPart{}), bucket, object, uploadID) + } + hashes[uploadedPart.PartNumber] = uploadedPart.ETag + } + + if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil { + return oi, b2ToObjectError(traceError(err), bucket, object, uploadID) + } + + return l.GetObjectInfo(bucket, object) +} + +// SetBucketPolicies - B2 supports 2 types of bucket policies: +// bucketType.AllPublic - bucketTypeReadOnly means that anybody can download the files is the bucket; +// bucketType.AllPrivate - bucketTypePrivate means that you need an authorization token to download them. +// Default is AllPrivate for all buckets. +func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { + var policies []BucketAccessPolicy + + for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) { + policies = append(policies, BucketAccessPolicy{ + Prefix: prefix, + Policy: policy, + }) + } + prefix := bucket + "/*" // For all objects inside the bucket. + if len(policies) != 1 { + return traceError(NotImplemented{}) + } + if policies[0].Prefix != prefix { + return traceError(NotImplemented{}) + } + if policies[0].Policy != policy.BucketPolicyReadOnly { + return traceError(NotImplemented{}) + } + bkt, err := l.Bucket(bucket) + if err != nil { + return err + } + bkt.Type = bucketTypeReadOnly + _, err = bkt.Update(l.ctx) + return b2ToObjectError(traceError(err)) +} + +// GetBucketPolicies, returns the current bucketType from B2 backend and convert +// it into S3 compatible bucket policy info. 
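+// Only the allPublic bucket type is surfaced as an S3 policy (readonly on the
+// whole bucket); every other type is reported as PolicyNotFound.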
+func (l *b2Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) { + policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"} + bkt, err := l.Bucket(bucket) + if err != nil { + return policyInfo, err + } + if bkt.Type == bucketTypeReadOnly { + policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "") + return policyInfo, nil + } + // bkt.Type can also be snapshot, but it is only allowed through B2 browser console, + // just return back as policy not found for all cases. + // CreateBucket always sets the value to allPrivate by default. + return policy.BucketAccessPolicy{}, traceError(PolicyNotFound{Bucket: bucket}) +} + +// DeleteBucketPolicies - resets the bucketType of bucket on B2 to 'allPrivate'. +func (l *b2Objects) DeleteBucketPolicies(bucket string) error { + bkt, err := l.Bucket(bucket) + if err != nil { + return err + } + bkt.Type = bucketTypePrivate + _, err = bkt.Update(l.ctx) + return b2ToObjectError(traceError(err)) +} diff --git a/cmd/gateway-b2_test.go b/cmd/gateway-b2_test.go new file mode 100644 index 000000000..bdfaa4b5a --- /dev/null +++ b/cmd/gateway-b2_test.go @@ -0,0 +1,104 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "net/http" + "testing" +) + +// Tests headerToObjectInfo +func TestHeaderToObjectInfo(t *testing.T) { + testCases := []struct { + bucket, object string + header http.Header + objInfo ObjectInfo + }{ + { + bucket: "bucket", + object: "object", + header: http.Header{ + "Content-Length": []string{"10"}, + "Content-Type": []string{"application/javascript"}, + "X-Bz-Upload-Timestamp": []string{"1000"}, + "X-Bz-Info-X-Amz-Meta-1": []string{"test1"}, + "X-Bz-File-Id": []string{"xxxxx"}, + }, + objInfo: ObjectInfo{ + Bucket: "bucket", + Name: "object", + ContentType: "application/javascript", + Size: 10, + UserDefined: map[string]string{ + "X-Amz-Meta-1": "test1", + }, + ETag: "xxxxx", + }, + }, + } + for i, testCase := range testCases { + gotObjInfo, err := headerToObjectInfo(testCase.bucket, testCase.object, testCase.header) + if err != nil { + t.Fatalf("Test %d: %s", i+1, err) + } + if gotObjInfo.Bucket != testCase.objInfo.Bucket { + t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.Bucket, gotObjInfo.Bucket) + } + if gotObjInfo.Name != testCase.objInfo.Name { + t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.Name, gotObjInfo.Name) + } + if gotObjInfo.ContentType != testCase.objInfo.ContentType { + t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.ContentType, gotObjInfo.ContentType) + } + if gotObjInfo.ETag != testCase.objInfo.ETag { + t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.ETag, gotObjInfo.ETag) + } + } +} + +// Tests mkRange test. +func TestMkRange(t *testing.T) { + testCases := []struct { + offset, size int64 + expectedRng string + }{ + // No offset set, size not set. 
+ { + offset: 0, + size: 0, + expectedRng: "", + }, + // Offset set, size not set. + { + offset: 10, + size: 0, + expectedRng: "bytes=10-", + }, + // Offset set, size set. + { + offset: 10, + size: 11, + expectedRng: "bytes=10-20", + }, + } + for i, testCase := range testCases { + gotRng := mkRange(testCase.offset, testCase.size) + if gotRng != testCase.expectedRng { + t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedRng, gotRng) + } + } +} diff --git a/cmd/gateway-gcs-errors.go b/cmd/gateway-gcs-errors.go deleted file mode 100644 index a4cb06b89..000000000 --- a/cmd/gateway-gcs-errors.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import "errors" - -var ( - // Project ID format is not valid. - errGCSInvalidProjectID = errors.New("GCS project id is either empty or invalid") - - // Project ID not found - errGCSProjectIDNotFound = errors.New("unknown project id") -) diff --git a/cmd/gateway-gcs.go b/cmd/gateway-gcs.go index 2259648d3..6c986e3d4 100644 --- a/cmd/gateway-gcs.go +++ b/cmd/gateway-gcs.go @@ -21,6 +21,7 @@ import ( "encoding/base64" "encoding/hex" "encoding/json" + "errors" "fmt" "io" "math" @@ -39,6 +40,14 @@ import ( "github.com/minio/minio-go/pkg/policy" ) +var ( + // Project ID format is not valid. + errGCSInvalidProjectID = errors.New("GCS project id is either empty or invalid") + + // Project ID not found + errGCSProjectIDNotFound = errors.New("unknown project id") +) + const ( // Path where multipart objects are saved. // If we change the backend format we will use a different url path like /multipart/v2 diff --git a/cmd/gateway-main.go b/cmd/gateway-main.go index cafbaa113..f4802433d 100644 --- a/cmd/gateway-main.go +++ b/cmd/gateway-main.go @@ -125,6 +125,31 @@ EXAMPLES: ` +const b2GatewayTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} +{{if .VisibleFlags}} +FLAGS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +ENVIRONMENT VARIABLES: + ACCESS: + MINIO_ACCESS_KEY: B2 account id. + MINIO_SECRET_KEY: B2 application key. + + BROWSER: + MINIO_BROWSER: To disable web browser access, set this value to "off". + +EXAMPLES: + 1. Start minio gateway server for B2 backend. 
+ $ export MINIO_ACCESS_KEY=accountID + $ export MINIO_SECRET_KEY=applicationKey + $ {{.HelpName}} + +` + var ( azureBackendCmd = cli.Command{ Name: "azure", @@ -143,6 +168,7 @@ var ( Flags: append(serverFlags, globalFlags...), HideHelpCommand: true, } + gcsBackendCmd = cli.Command{ Name: "gcs", Usage: "Google Cloud Storage.", @@ -152,12 +178,21 @@ var ( HideHelpCommand: true, } + b2BackendCmd = cli.Command{ + Name: "b2", + Usage: "Backblaze B2.", + Action: b2GatewayMain, + CustomHelpTemplate: b2GatewayTemplate, + Flags: append(serverFlags, globalFlags...), + HideHelpCommand: true, + } + gatewayCmd = cli.Command{ Name: "gateway", Usage: "Start object storage gateway.", Flags: append(serverFlags, globalFlags...), HideHelpCommand: true, - Subcommands: []cli.Command{azureBackendCmd, s3BackendCmd, gcsBackendCmd}, + Subcommands: []cli.Command{azureBackendCmd, s3BackendCmd, gcsBackendCmd, b2BackendCmd}, } ) @@ -168,6 +203,7 @@ const ( azureBackend gatewayBackend = "azure" s3Backend gatewayBackend = "s3" gcsBackend gatewayBackend = "gcs" + b2Backend gatewayBackend = "b2" // Add more backends here. ) @@ -177,6 +213,7 @@ const ( // - Azure Blob Storage. // - AWS S3. // - Google Cloud Storage. +// - Backblaze B2. // - Add your favorite backend here. func newGatewayLayer(backendType gatewayBackend, arg string) (GatewayLayer, error) { switch backendType { @@ -189,6 +226,11 @@ func newGatewayLayer(backendType gatewayBackend, arg string) (GatewayLayer, erro // will be removed when gcs is ready for production use. log.Println(colorYellow("\n *** Warning: Not Ready for Production ***")) return newGCSGateway(arg) + case b2Backend: + // FIXME: The following print command is temporary and + // will be removed when B2 is ready for production use. + log.Println(colorYellow("\n *** Warning: Not Ready for Production ***")) + return newB2Gateway() } return nil, fmt.Errorf("Unrecognized backend type %s", backendType) @@ -285,6 +327,17 @@ func gcsGatewayMain(ctx *cli.Context) { gatewayMain(ctx, gcsBackend) } +func b2GatewayMain(ctx *cli.Context) { + if ctx.Args().Present() && ctx.Args().First() == "help" { + cli.ShowCommandHelpAndExit(ctx, "b2", 1) + } + + // Validate gateway arguments. + fatalIf(validateGatewayArguments(ctx.GlobalString("address"), ctx.Args().First()), "Invalid argument") + + gatewayMain(ctx, b2Backend) +} + // Handler for 'minio gateway'. func gatewayMain(ctx *cli.Context, backendType gatewayBackend) { // Get quiet flag from command line argument. @@ -393,6 +446,8 @@ func gatewayMain(ctx *cli.Context, backendType gatewayBackend) { mode = globalMinioModeGatewayGCS case s3Backend: mode = globalMinioModeGatewayS3 + case b2Backend: + mode = globalMinioModeGatewayB2 } // Check update mode. diff --git a/cmd/gateway-unsupported.go b/cmd/gateway-unsupported.go index c257005ff..93fe9e8cd 100644 --- a/cmd/gateway-unsupported.go +++ b/cmd/gateway-unsupported.go @@ -26,12 +26,6 @@ func (a gatewayUnsupported) CopyObjectPart(srcBucket, srcObject, destBucket, des return info, traceError(NotImplemented{}) } -// AnonPutObject creates a new object anonymously with the incoming data, -func (a gatewayUnsupported) AnonPutObject(bucket, object string, size int64, data io.Reader, - metadata map[string]string, sha256sum string) (ObjectInfo, error) { - return ObjectInfo{}, traceError(NotImplemented{}) -} - // HealBucket - Not relevant. 
func (a gatewayUnsupported) HealBucket(bucket string) error { return traceError(NotImplemented{}) @@ -57,3 +51,26 @@ func (a gatewayUnsupported) ListUploadsHeal(bucket, prefix, marker, uploadIDMark delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { return lmi, traceError(NotImplemented{}) } + +// AnonListObjects - List objects anonymously +func (a gatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string, + maxKeys int) (loi ListObjectsInfo, err error) { + return loi, traceError(NotImplemented{}) +} + +// AnonListObjectsV2 - List objects in V2 mode, anonymously +func (a gatewayUnsupported) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, + fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) { + return loi, traceError(NotImplemented{}) +} + +// AnonGetBucketInfo - Get bucket metadata anonymously. +func (a gatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err error) { + return bi, traceError(NotImplemented{}) +} + +// AnonPutObject creates a new object anonymously with the incoming data. +func (a gatewayUnsupported) AnonPutObject(bucket, object string, size int64, data io.Reader, + metadata map[string]string, sha256sum string) (ObjectInfo, error) { + return ObjectInfo{}, traceError(NotImplemented{}) +} diff --git a/cmd/globals.go b/cmd/globals.go index ba5dc4d52..d958d6713 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -53,6 +53,7 @@ const ( globalMinioModeGatewayAzure = "mode-gateway-azure" globalMinioModeGatewayS3 = "mode-gateway-s3" globalMinioModeGatewayGCS = "mode-gateway-gcs" + globalMinioModeGatewayB2 = "mode-gateway-b2" // globalMinioSysTmp prefix is used in Azure/GCS gateway for save metadata sent by Initialize Multipart Upload API. globalMinioSysTmp = "minio.sys.tmp/" diff --git a/docs/gateway/README.md b/docs/gateway/README.md index b7087fa5d..4fcf415a4 100644 --- a/docs/gateway/README.md +++ b/docs/gateway/README.md @@ -2,8 +2,8 @@ Minio Gateway adds Amazon S3 compatibility to third party cloud storage providers. - [Microsoft Azure Blob Storage](https://github.com/minio/minio/blob/master/docs/gateway/azure.md) - [Google Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/gcs.md) _Alpha release_ +- [Backblaze B2](https://github.com/minio/minio/blob/master/docs/gateway/b2.md) _Alpha release_ ## Roadmap -* Minio & AWS S3 * Edge Caching - Disk based proxy caching support diff --git a/docs/gateway/b2.md b/docs/gateway/b2.md new file mode 100644 index 000000000..932d2ebe1 --- /dev/null +++ b/docs/gateway/b2.md @@ -0,0 +1,48 @@ +# Minio B2 Gateway [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) +Minio Gateway adds Amazon S3 compatibility to Backblaze B2 Cloud Storage. + +## Run Minio Gateway for Backblaze B2 Cloud Storage +Please follow this [guide](https://www.backblaze.com/b2/docs/quick_account.html) to create an account on backblaze.com to obtain your access credentials for B2 Cloud Storage. + +### Using Binary +Please download the test binary for gateway B2 support from https://data.minio.io:10000/minio-b2/linux-amd64/minio-b2 + +``` +export MINIO_ACCESS_KEY=b2_account_id +export MINIO_SECRET_KEY=b2_application_key +minio gateway b2 +``` +## Test using Minio Browser +Minio Gateway comes with an embedded web-based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.
+ +![Screenshot](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/minio-browser-gateway.png) + +## Test using Minio Client `mc` +`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. + +### Configure `mc` +``` +mc config host add myb2 http://gateway-ip:9000 b2_account_id b2_application_key +``` + +### List buckets on Backblaze B2 +``` +mc ls myb2 +[2017-02-22 01:50:43 PST] 0B ferenginar/ +[2017-02-26 21:43:51 PST] 0B my-bucket/ +[2017-02-26 22:10:11 PST] 0B test-bucket1/ +``` + +### Known limitations +Gateway inherits the following B2 limitations: +- No support for CopyObject S3 API (There are no equivalent APIs available on Backblaze B2). +- No support for CopyObjectPart S3 API (There are no equivalent APIs available on Backblaze B2). +- Only read-only bucket policy supported at bucket level, all other variations will return API Notimplemented error. + +Other limitations: +- Bucket notification APIs are not supported on Gateway. + +## Explore Further +- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide) +- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio) +- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide) \ No newline at end of file diff --git a/vendor/github.com/minio/blazer/LICENSE b/vendor/github.com/minio/blazer/LICENSE new file mode 100644 index 000000000..88755c6ad --- /dev/null +++ b/vendor/github.com/minio/blazer/LICENSE @@ -0,0 +1,13 @@ +Copyright 2016, Google + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/minio/blazer/base/base.go b/vendor/github.com/minio/blazer/base/base.go new file mode 100644 index 000000000..cc20aba1b --- /dev/null +++ b/vendor/github.com/minio/blazer/base/base.go @@ -0,0 +1,1204 @@ +// Copyright 2016, Google +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package base provides a very low-level interface on top of the B2 v1 API. +// It is not intended to be used directly. 
+// +// It currently lacks support for the following APIs: +// +// b2_download_file_by_id +// b2_list_unfinished_large_files +package base + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/minio/blazer/internal/b2types" + "github.com/minio/blazer/internal/blog" +) + +const ( + APIBase = "https://api.backblazeb2.com" + DefaultUserAgent = "blazer/0.1.1" +) + +type b2err struct { + msg string + method string + retry int + statusCode int + code string +} + +func (e b2err) Error() string { + if e.method == "" { + return fmt.Sprintf("b2 error: %s", e.msg) + } + return fmt.Sprintf("%s: %d: %s: %s", e.method, e.statusCode, e.code, e.msg) +} + +// Action checks an error and returns a recommended course of action. +func Action(err error) ErrAction { + e, ok := err.(b2err) + if !ok { + return Punt + } + if e.retry > 0 { + return Retry + } + if e.statusCode >= http.StatusInternalServerError && e.statusCode < 600 { + if e.method == "b2_upload_file" || e.method == "b2_upload_part" { + return AttemptNewUpload + } + } + switch e.statusCode { + case http.StatusUnauthorized: + if e.method == "b2_authorize_account" { + return Punt + } + if e.method == "b2_upload_file" || e.method == "b2_upload_part" { + return AttemptNewUpload + } + return ReAuthenticate + case http.StatusBadRequest: + // See restic/restic#1207 + if e.method == "b2_upload_file" && strings.HasPrefix(e.msg, "more than one upload using auth token") { + return AttemptNewUpload + } + return Punt + case http.StatusRequestTimeout: + return AttemptNewUpload + case http.StatusTooManyRequests, http.StatusInternalServerError, http.StatusServiceUnavailable: + return Retry + } + return Punt +} + +// ErrAction is an action that a caller can take when any function returns an +// error. +type ErrAction int + +// Code returns the error code and message. +func Code(err error) (int, string, string) { + e, ok := err.(b2err) + if !ok { + return 0, "", "" + } + return e.statusCode, e.code, e.msg +} + +const ( + // ReAuthenticate indicates that the B2 account authentication tokens have + // expired, and should be refreshed with a new call to AuthorizeAccount. + ReAuthenticate ErrAction = iota + + // AttemptNewUpload indicates that an upload's authentication token (or URL + // endpoint) has expired, and that users should request new ones with a call + // to GetUploadURL or GetUploadPartURL. + AttemptNewUpload + + // Retry indicates that the caller should wait an appropriate amount of time, + // and then reattempt the RPC. + Retry + + // Punt means that there is no useful action to be taken on this error, and + // that it should be displayed to the user. + Punt +) + +func mkErr(resp *http.Response) error { + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + logResponse(resp, data) + msg := &b2types.ErrorMessage{} + if err := json.Unmarshal(data, msg); err != nil { + return err + } + var retryAfter int + retry := resp.Header.Get("Retry-After") + if retry != "" { + r, err := strconv.ParseInt(retry, 10, 64) + if err != nil { + return err + } + retryAfter = int(r) + } + return b2err{ + msg: msg.Msg, + retry: retryAfter, + statusCode: resp.StatusCode, + code: msg.Code, + method: resp.Request.Header.Get("X-Blazer-Method"), + } +} + +// Backoff returns an appropriate amount of time to wait, given an error, if +// any was returned by the server. 
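+// For example, a 503 response carrying a "Retry-After: 5" header yields a five
+// second backoff.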
If the return value is 0, but Action +// indicates Retry, the user should implement their own exponential backoff, +// beginning with one second. +func Backoff(err error) time.Duration { + e, ok := err.(b2err) + if !ok { + return 0 + } + return time.Duration(e.retry) * time.Second +} + +func logRequest(req *http.Request, args []byte) { + if !blog.V(2) { + return + } + var headers []string + for k, v := range req.Header { + if k == "Authorization" || k == "X-Blazer-Method" { + continue + } + headers = append(headers, fmt.Sprintf("%s: %s", k, strings.Join(v, ","))) + } + hstr := strings.Join(headers, ";") + method := req.Header.Get("X-Blazer-Method") + if args != nil { + blog.V(2).Infof(">> %s uri: %v headers: {%s} args: (%s)", method, req.URL, hstr, string(args)) + return + } + blog.V(2).Infof(">> %s uri: %v {%s} (no args)", method, req.URL, hstr) +} + +var authRegexp = regexp.MustCompile(`"authorizationToken": ".[^"]*"`) + +func logResponse(resp *http.Response, reply []byte) { + if !blog.V(2) { + return + } + var headers []string + for k, v := range resp.Header { + headers = append(headers, fmt.Sprintf("%s: %s", k, strings.Join(v, ","))) + } + hstr := strings.Join(headers, "; ") + method := resp.Request.Header.Get("X-Blazer-Method") + id := resp.Request.Header.Get("X-Blazer-Request-ID") + if reply != nil { + safe := string(authRegexp.ReplaceAll(reply, []byte(`"authorizationToken": "[redacted]"`))) + blog.V(2).Infof("<< %s (%s) %s {%s} (%s)", method, id, resp.Status, hstr, safe) + return + } + blog.V(2).Infof("<< %s (%s) %s {%s} (no reply)", method, id, resp.Status, hstr) +} + +func millitime(t int64) time.Time { + return time.Unix(t/1000, t%1000*1e6) +} + +type b2Options struct { + transport http.RoundTripper + failSomeUploads bool + expireTokens bool + capExceeded bool + apiBase string + userAgent string +} + +func (o *b2Options) getAPIBase() string { + if o.apiBase != "" { + return o.apiBase + } + return APIBase +} + +func (o *b2Options) getUserAgent() string { + if o.userAgent != "" { + return fmt.Sprintf("%s %s", o.userAgent, DefaultUserAgent) + } + return DefaultUserAgent +} + +func (o *b2Options) getTransport() http.RoundTripper { + if o.transport == nil { + return http.DefaultTransport + } + return o.transport +} + +// B2 holds account information for Backblaze. +type B2 struct { + accountID string + authToken string + apiURI string + DownloadURI string + MinPartSize int + opts *b2Options +} + +// Update replaces the B2 object with a new one, in-place. +func (b *B2) Update(n *B2) { + b.accountID = n.accountID + b.authToken = n.authToken + b.apiURI = n.apiURI + b.DownloadURI = n.DownloadURI + b.MinPartSize = n.MinPartSize + b.opts = n.opts +} + +type httpReply struct { + resp *http.Response + err error +} + +func makeNetRequest(req *http.Request, rt http.RoundTripper) <-chan httpReply { + ch := make(chan httpReply) + go func() { + resp, err := rt.RoundTrip(req) + ch <- httpReply{resp, err} + close(ch) + }() + return ch +} + +type requestBody struct { + size int64 + body io.Reader +} + +func (rb *requestBody) getSize() int64 { + if rb == nil { + return 0 + } + return rb.size +} + +func (rb *requestBody) getBody() io.Reader { + if rb == nil { + return nil + } + return rb.body +} + +type keepFinalBytes struct { + r io.Reader + remain int + sha [40]byte +} + +func (k *keepFinalBytes) Read(p []byte) (int, error) { + n, err := k.r.Read(p) + if k.remain-n > 40 { + k.remain -= n + return n, err + } + // This was a whole lot harder than it looks. 
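+	// pi/pe bound the portion of p that overlaps the final 40 bytes of the
+	// stream; ki/ke mark where that portion lands in the sha buffer.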
+ pi := -40 + k.remain + if pi < 0 { + pi = 0 + } + pe := n + ki := 40 - k.remain + if ki < 0 { + ki = 0 + } + ke := n - k.remain + 40 + copy(k.sha[ki:ke], p[pi:pe]) + k.remain -= n + return n, err +} + +var reqID int64 + +func (o *b2Options) makeRequest(ctx context.Context, method, verb, uri string, b2req, b2resp interface{}, headers map[string]string, body *requestBody) error { + var args []byte + if b2req != nil { + enc, err := json.Marshal(b2req) + if err != nil { + return err + } + args = enc + body = &requestBody{ + body: bytes.NewBuffer(enc), + size: int64(len(enc)), + } + } + req, err := http.NewRequest(verb, uri, body.getBody()) + if err != nil { + return err + } + req.ContentLength = body.getSize() + for k, v := range headers { + if strings.HasPrefix(k, "X-Bz-Info") || strings.HasPrefix(k, "X-Bz-File-Name") { + v = escape(v) + } + req.Header.Set(k, v) + } + req.Header.Set("User-Agent", o.getUserAgent()) + req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1))) + req.Header.Set("X-Blazer-Method", method) + if o.failSomeUploads { + req.Header.Add("X-Bz-Test-Mode", "fail_some_uploads") + } + if o.expireTokens { + req.Header.Add("X-Bz-Test-Mode", "expire_some_account_authorization_tokens") + } + if o.capExceeded { + req.Header.Add("X-Bz-Test-Mode", "force_cap_exceeded") + } + cancel := make(chan struct{}) + req.Cancel = cancel + logRequest(req, args) + ch := makeNetRequest(req, o.getTransport()) + var reply httpReply + select { + case reply = <-ch: + case <-ctx.Done(): + close(cancel) + return ctx.Err() + } + if reply.err != nil { + // Connection errors are retryable. + blog.V(2).Infof(">> %s uri: %v err: %v", method, req.URL, reply.err) + return b2err{ + msg: reply.err.Error(), + retry: 1, + } + } + resp := reply.resp + defer resp.Body.Close() + if resp.StatusCode != 200 { + return mkErr(resp) + } + var replyArgs []byte + if b2resp != nil { + rbuf := &bytes.Buffer{} + r := io.TeeReader(resp.Body, rbuf) + decoder := json.NewDecoder(r) + if err := decoder.Decode(b2resp); err != nil { + return err + } + replyArgs = rbuf.Bytes() + } else { + replyArgs, err = ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + } + logResponse(resp, replyArgs) + return nil +} + +// AuthorizeAccount wraps b2_authorize_account. +func AuthorizeAccount(ctx context.Context, account, key string, opts ...AuthOption) (*B2, error) { + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", account, key))) + b2resp := &b2types.AuthorizeAccountResponse{} + headers := map[string]string{ + "Authorization": fmt.Sprintf("Basic %s", auth), + } + b2opts := &b2Options{} + for _, f := range opts { + f(b2opts) + } + if err := b2opts.makeRequest(ctx, "b2_authorize_account", "GET", b2opts.getAPIBase()+b2types.V1api+"b2_authorize_account", nil, b2resp, headers, nil); err != nil { + return nil, err + } + return &B2{ + accountID: b2resp.AccountID, + authToken: b2resp.AuthToken, + apiURI: b2resp.URI, + DownloadURI: b2resp.DownloadURI, + MinPartSize: b2resp.MinPartSize, + opts: b2opts, + }, nil +} + +// An AuthOption allows callers to choose per-session settings. +type AuthOption func(*b2Options) + +// UserAgent sets the User-Agent HTTP header. The default header is +// "blazer/"; the value set here will be prepended to that. This can +// be set multiple times. 
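+// For example, a single UserAgent("gateway") call makes the client send
+// "gateway blazer/0.1.1".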
+func UserAgent(agent string) AuthOption { + return func(o *b2Options) { + if o.userAgent == "" { + o.userAgent = agent + return + } + o.userAgent = fmt.Sprintf("%s %s", agent, o.userAgent) + } +} + +// Transport returns an AuthOption that sets the underlying HTTP mechanism. +func Transport(rt http.RoundTripper) AuthOption { + return func(o *b2Options) { + o.transport = rt + } +} + +// FailSomeUploads requests intermittent upload failures from the B2 service. +// This is mostly useful for testing. +func FailSomeUploads() AuthOption { + return func(o *b2Options) { + o.failSomeUploads = true + } +} + +// ExpireSomeAuthTokens requests intermittent authentication failures from the +// B2 service. +func ExpireSomeAuthTokens() AuthOption { + return func(o *b2Options) { + o.expireTokens = true + } +} + +// ForceCapExceeded requests a cap limit from the B2 service. This causes all +// uploads to be treated as if they would exceed the configure B2 capacity. +func ForceCapExceeded() AuthOption { + return func(o *b2Options) { + o.capExceeded = true + } +} + +type LifecycleRule struct { + Prefix string + DaysNewUntilHidden int + DaysHiddenUntilDeleted int +} + +// CreateBucket wraps b2_create_bucket. +func (b *B2) CreateBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (*Bucket, error) { + if btype != "allPublic" { + btype = "allPrivate" + } + var b2rules []b2types.LifecycleRule + for _, rule := range rules { + b2rules = append(b2rules, b2types.LifecycleRule{ + Prefix: rule.Prefix, + DaysNewUntilHidden: rule.DaysNewUntilHidden, + DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, + }) + } + b2req := &b2types.CreateBucketRequest{ + AccountID: b.accountID, + Name: name, + Type: btype, + Info: info, + LifecycleRules: b2rules, + } + b2resp := &b2types.CreateBucketResponse{} + headers := map[string]string{ + "Authorization": b.authToken, + } + if err := b.opts.makeRequest(ctx, "b2_create_bucket", "POST", b.apiURI+b2types.V1api+"b2_create_bucket", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + var respRules []LifecycleRule + for _, rule := range b2resp.LifecycleRules { + respRules = append(respRules, LifecycleRule{ + Prefix: rule.Prefix, + DaysNewUntilHidden: rule.DaysNewUntilHidden, + DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, + }) + } + return &Bucket{ + Name: name, + Info: b2resp.Info, + LifecycleRules: respRules, + id: b2resp.BucketID, + rev: b2resp.Revision, + b2: b, + }, nil +} + +// DeleteBucket wraps b2_delete_bucket. +func (b *Bucket) DeleteBucket(ctx context.Context) error { + b2req := &b2types.DeleteBucketRequest{ + AccountID: b.b2.accountID, + BucketID: b.id, + } + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + return b.b2.opts.makeRequest(ctx, "b2_delete_bucket", "POST", b.b2.apiURI+b2types.V1api+"b2_delete_bucket", b2req, nil, headers, nil) +} + +// Bucket holds B2 bucket details. +type Bucket struct { + Name string + Type string + Info map[string]string + LifecycleRules []LifecycleRule + id string + rev int + b2 *B2 +} + +// Update wraps b2_update_bucket. 
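+// The stored bucket revision is sent as IfRevisionIs, so an update against a
+// stale revision fails instead of overwriting newer changes.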
+func (b *Bucket) Update(ctx context.Context) (*Bucket, error) { + var rules []b2types.LifecycleRule + for _, rule := range b.LifecycleRules { + rules = append(rules, b2types.LifecycleRule{ + DaysNewUntilHidden: rule.DaysNewUntilHidden, + DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, + Prefix: rule.Prefix, + }) + } + b2req := &b2types.UpdateBucketRequest{ + AccountID: b.b2.accountID, + BucketID: b.id, + // Name: b.Name, + Type: b.Type, + Info: b.Info, + LifecycleRules: rules, + IfRevisionIs: b.rev, + } + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + b2resp := &b2types.UpdateBucketResponse{} + if err := b.b2.opts.makeRequest(ctx, "b2_update_bucket", "POST", b.b2.apiURI+b2types.V1api+"b2_update_bucket", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + var respRules []LifecycleRule + for _, rule := range b2resp.LifecycleRules { + respRules = append(respRules, LifecycleRule{ + Prefix: rule.Prefix, + DaysNewUntilHidden: rule.DaysNewUntilHidden, + DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, + }) + } + return &Bucket{ + Name: b.Name, + Type: b2resp.Type, + Info: b2resp.Info, + LifecycleRules: respRules, + id: b2resp.BucketID, + b2: b.b2, + }, nil +} + +// BaseURL returns the base part of the download URLs. +func (b *Bucket) BaseURL() string { + return b.b2.DownloadURI +} + +// ListBuckets wraps b2_list_buckets. +func (b *B2) ListBuckets(ctx context.Context) ([]*Bucket, error) { + b2req := &b2types.ListBucketsRequest{ + AccountID: b.accountID, + } + b2resp := &b2types.ListBucketsResponse{} + headers := map[string]string{ + "Authorization": b.authToken, + } + if err := b.opts.makeRequest(ctx, "b2_list_buckets", "POST", b.apiURI+b2types.V1api+"b2_list_buckets", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + var buckets []*Bucket + for _, bucket := range b2resp.Buckets { + var rules []LifecycleRule + for _, rule := range bucket.LifecycleRules { + rules = append(rules, LifecycleRule{ + Prefix: rule.Prefix, + DaysNewUntilHidden: rule.DaysNewUntilHidden, + DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, + }) + } + buckets = append(buckets, &Bucket{ + Name: bucket.Name, + Type: bucket.Type, + Info: bucket.Info, + LifecycleRules: rules, + id: bucket.BucketID, + rev: bucket.Revision, + b2: b, + }) + } + return buckets, nil +} + +// URL holds information from the b2_get_upload_url API. +type URL struct { + uri string + token string + b2 *B2 + bucket *Bucket +} + +// Reload reloads URL in-place, by reissuing a b2_get_upload_url and +// overwriting the previous values. +func (url *URL) Reload(ctx context.Context) error { + n, err := url.bucket.GetUploadURL(ctx) + if err != nil { + return err + } + url.uri = n.uri + url.token = n.token + return nil +} + +// GetUploadURL wraps b2_get_upload_url. +func (b *Bucket) GetUploadURL(ctx context.Context) (*URL, error) { + b2req := &b2types.GetUploadURLRequest{ + BucketID: b.id, + } + b2resp := &b2types.GetUploadURLResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_get_upload_url", "POST", b.b2.apiURI+b2types.V1api+"b2_get_upload_url", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + return &URL{ + uri: b2resp.URI, + token: b2resp.Token, + b2: b.b2, + bucket: b, + }, nil +} + +// File represents a B2 file. 
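+// Status carries B2's action value; the gateway above lists "upload" entries
+// as objects and "folder" entries as prefixes.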
+type File struct {
+	Name      string
+	Size      int64
+	Status    string
+	Timestamp time.Time
+	Info      *FileInfo
+	id        string
+	b2        *B2
+}
+
+// File returns a bare File struct, but with the appropriate id and b2
+// fields populated.
+func (b *Bucket) File(id, name string) *File {
+	return &File{id: id, b2: b.b2, Name: name}
+}
+
+// UploadFile wraps b2_upload_file.
+func (u *URL) UploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (*File, error) {
+	headers := map[string]string{
+		"Authorization":     u.token,
+		"X-Bz-File-Name":    name,
+		"Content-Type":      contentType,
+		"Content-Length":    fmt.Sprintf("%d", size),
+		"X-Bz-Content-Sha1": sha1,
+	}
+	for k, v := range info {
+		headers[fmt.Sprintf("X-Bz-Info-%s", k)] = v
+	}
+	b2resp := &b2types.UploadFileResponse{}
+	if err := u.b2.opts.makeRequest(ctx, "b2_upload_file", "POST", u.uri, nil, b2resp, headers, &requestBody{body: r, size: int64(size)}); err != nil {
+		return nil, err
+	}
+	return &File{
+		Name:      name,
+		Size:      int64(size),
+		Timestamp: millitime(b2resp.Timestamp),
+		Status:    b2resp.Action,
+		id:        b2resp.FileID,
+		b2:        u.b2,
+	}, nil
+}
+
+// DeleteFileVersion wraps b2_delete_file_version.
+func (f *File) DeleteFileVersion(ctx context.Context) error {
+	b2req := &b2types.DeleteFileVersionRequest{
+		Name:   f.Name,
+		FileID: f.id,
+	}
+	headers := map[string]string{
+		"Authorization": f.b2.authToken,
+	}
+	return f.b2.opts.makeRequest(ctx, "b2_delete_file_version", "POST", f.b2.apiURI+b2types.V1api+"b2_delete_file_version", b2req, nil, headers, nil)
+}
+
+// LargeFile holds information necessary to implement B2 large file support.
+type LargeFile struct {
+	ID          string
+	Timestamp   time.Time
+	Name        string
+	ContentType string
+	Info        map[string]string
+
+	b2 *B2
+
+	mu     sync.Mutex
+	size   int64
+	hashes map[int]string
+}
+
+// StartLargeFile wraps b2_start_large_file.
+func (b *Bucket) StartLargeFile(ctx context.Context, name, contentType string, info map[string]string) (*LargeFile, error) {
+	b2req := &b2types.StartLargeFileRequest{
+		BucketID:    b.id,
+		Name:        name,
+		ContentType: contentType,
+		Info:        info,
+	}
+	b2resp := &b2types.StartLargeFileResponse{}
+	headers := map[string]string{
+		"Authorization": b.b2.authToken,
+	}
+	if err := b.b2.opts.makeRequest(ctx, "b2_start_large_file", "POST", b.b2.apiURI+b2types.V1api+"b2_start_large_file", b2req, b2resp, headers, nil); err != nil {
+		return nil, err
+	}
+	return &LargeFile{
+		ID:     b2resp.ID,
+		b2:     b.b2,
+		hashes: make(map[int]string),
+	}, nil
+}
+
+// ListUnfinishedLargeFiles lists all the unfinished large files.
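+//
+// Results are paged: pass an empty continuation on the first call and
+// the returned token afterwards; an empty returned token signals the
+// end of the listing. A sketch (bkt is a *Bucket; error handling
+// elided):
+//
+//	var cont string
+//	for {
+//		files, next, err := bkt.ListUnfinishedLargeFiles(ctx, cont, 100)
+//		// ... inspect or cancel files ...
+//		if next == "" {
+//			break
+//		}
+//		cont = next
+//	}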
+func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, continuation string, count int) ([]*LargeFile, string, error) { + b2req := &b2types.ListUnfinishedLargeFilesRequest{ + BucketID: b.id, + Continuation: continuation, + Count: count, + } + b2resp := &b2types.ListUnfinishedLargeFilesResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_list_unfinished_large_files", "POST", b.b2.apiURI+b2types.V1api+"b2_list_unfinished_large_files", + b2req, b2resp, headers, nil); err != nil { + return nil, "", err + } + cont := b2resp.NextID + var largeFiles []*LargeFile + for _, f := range b2resp.Files { + largeFiles = append(largeFiles, &LargeFile{ + ID: f.ID, + Timestamp: millitime(f.Timestamp), + Name: f.Name, + Info: f.Info, + ContentType: f.ContentType, + b2: b.b2, + hashes: make(map[int]string), + }) + } + return largeFiles, cont, nil +} + +// CancelLargeFile wraps b2_cancel_large_file. +func (l *LargeFile) CancelLargeFile(ctx context.Context) error { + b2req := &b2types.CancelLargeFileRequest{ + ID: l.ID, + } + headers := map[string]string{ + "Authorization": l.b2.authToken, + } + return l.b2.opts.makeRequest(ctx, "b2_cancel_large_file", "POST", l.b2.apiURI+b2types.V1api+"b2_cancel_large_file", b2req, nil, headers, nil) +} + +// FilePart is a piece of a started, but not finished, large file upload. +type FilePart struct { + Number int + SHA1 string + Size int64 +} + +// ListParts wraps b2_list_parts. +func (f *File) ListParts(ctx context.Context, next, count int) ([]*FilePart, int, error) { + b2req := &b2types.ListPartsRequest{ + ID: f.id, + Start: next, + Count: count, + } + b2resp := &b2types.ListPartsResponse{} + headers := map[string]string{ + "Authorization": f.b2.authToken, + } + if err := f.b2.opts.makeRequest(ctx, "b2_list_parts", "POST", f.b2.apiURI+b2types.V1api+"b2_list_parts", b2req, b2resp, headers, nil); err != nil { + return nil, 0, err + } + var parts []*FilePart + for _, part := range b2resp.Parts { + parts = append(parts, &FilePart{ + Number: part.Number, + SHA1: part.SHA1, + Size: part.Size, + }) + } + return parts, b2resp.Next, nil +} + +// CompileParts returns a LargeFile that can accept new data. Seen is a +// mapping of completed part numbers to SHA1 strings; size is the total size of +// all the completed parts to this point. +func (f *File) CompileParts(size int64, seen map[int]string) *LargeFile { + s := make(map[int]string) + for k, v := range seen { + s[k] = v + } + return &LargeFile{ + ID: f.id, + b2: f.b2, + size: size, + hashes: s, + } +} + +// FileChunk holds information necessary for uploading file chunks. +type FileChunk struct { + url string + token string + file *LargeFile +} + +type getUploadPartURLRequest struct { + ID string `json:"fileId"` +} + +type getUploadPartURLResponse struct { + URL string `json:"uploadUrl"` + Token string `json:"authorizationToken"` +} + +// GetUploadPartURL wraps b2_get_upload_part_url. +func (l *LargeFile) GetUploadPartURL(ctx context.Context) (*FileChunk, error) { + b2req := &getUploadPartURLRequest{ + ID: l.ID, + } + b2resp := &getUploadPartURLResponse{} + headers := map[string]string{ + "Authorization": l.b2.authToken, + } + if err := l.b2.opts.makeRequest(ctx, "b2_get_upload_part_url", "POST", l.b2.apiURI+b2types.V1api+"b2_get_upload_part_url", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + return &FileChunk{ + url: b2resp.URL, + token: b2resp.Token, + file: l, + }, nil +} + +// Reload reloads FileChunk in-place. 
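+//
+// Upload part URLs and their auth tokens can expire server-side, so a
+// caller might refresh and retry a failed part. A sketch, assuming
+// part []byte, its hex SHA1 partSHA1, and part number 1 (error
+// handling elided):
+//
+//	if _, err := fc.UploadPart(ctx, bytes.NewReader(part), partSHA1, len(part), 1); err != nil {
+//		if fc.Reload(ctx) == nil {
+//			_, err = fc.UploadPart(ctx, bytes.NewReader(part), partSHA1, len(part), 1)
+//		}
+//	}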
+func (fc *FileChunk) Reload(ctx context.Context) error { + n, err := fc.file.GetUploadPartURL(ctx) + if err != nil { + return err + } + fc.url = n.url + fc.token = n.token + return nil +} + +// UploadPart wraps b2_upload_part. +func (fc *FileChunk) UploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (string, error) { + headers := map[string]string{ + "Authorization": fc.token, + "X-Bz-Part-Number": fmt.Sprintf("%d", index), + "Content-Length": fmt.Sprintf("%d", size), + "X-Bz-Content-Sha1": sha1, + } + b2resp := &b2types.UploadPartResponse{} + if sha1 == "hex_digits_at_end" { + r = &keepFinalBytes{r: r, remain: size} + } + if err := fc.file.b2.opts.makeRequest(ctx, "b2_upload_part", "POST", fc.url, nil, b2resp, headers, &requestBody{body: r, size: int64(size)}); err != nil { + return "", err + } + fc.file.mu.Lock() + if sha1 == "hex_digits_at_end" { + sha1 = string(r.(*keepFinalBytes).sha[:]) + } + fc.file.hashes[index] = sha1 + fc.file.size += int64(size) + fc.file.mu.Unlock() + return b2resp.SHA1, nil +} + +// FinishLargeFile wraps b2_finish_large_file. +func (l *LargeFile) FinishLargeFile(ctx context.Context) (*File, error) { + l.mu.Lock() + defer l.mu.Unlock() + b2req := &b2types.FinishLargeFileRequest{ + ID: l.ID, + Hashes: make([]string, len(l.hashes)), + } + b2resp := &b2types.FinishLargeFileResponse{} + for k, v := range l.hashes { + b2req.Hashes[k-1] = v + } + headers := map[string]string{ + "Authorization": l.b2.authToken, + } + if err := l.b2.opts.makeRequest(ctx, "b2_finish_large_file", "POST", l.b2.apiURI+b2types.V1api+"b2_finish_large_file", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + return &File{ + Name: b2resp.Name, + Size: l.size, + Timestamp: millitime(b2resp.Timestamp), + Status: b2resp.Action, + id: b2resp.FileID, + b2: l.b2, + }, nil +} + +// ListFileNames wraps b2_list_file_names. +func (b *Bucket) ListFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]*File, string, error) { + b2req := &b2types.ListFileNamesRequest{ + Count: count, + Continuation: continuation, + BucketID: b.id, + Prefix: prefix, + Delimiter: delimiter, + } + b2resp := &b2types.ListFileNamesResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_list_file_names", "POST", b.b2.apiURI+b2types.V1api+"b2_list_file_names", b2req, b2resp, headers, nil); err != nil { + return nil, "", err + } + cont := b2resp.Continuation + var files []*File + for _, f := range b2resp.Files { + files = append(files, &File{ + Name: f.Name, + Size: f.Size, + Status: f.Action, + Timestamp: millitime(f.Timestamp), + Info: &FileInfo{ + Name: f.Name, + SHA1: f.SHA1, + Size: f.Size, + ContentType: f.ContentType, + Info: f.Info, + Status: f.Action, + Timestamp: millitime(f.Timestamp), + }, + id: f.FileID, + b2: b.b2, + }) + } + return files, cont, nil +} + +// ListFileVersions wraps b2_list_file_versions. 
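+//
+// Versions are paged by a (name, id) cursor: empty values start from
+// the beginning, and empty returned values end the walk. A sketch
+// (error handling elided):
+//
+//	var name, id string
+//	for {
+//		files, nextName, nextID, err := bkt.ListFileVersions(ctx, 100, name, id, "", "")
+//		// ... use files ...
+//		if nextName == "" && nextID == "" {
+//			break
+//		}
+//		name, id = nextName, nextID
+//	}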
+func (b *Bucket) ListFileVersions(ctx context.Context, count int, startName, startID, prefix, delimiter string) ([]*File, string, string, error) { + b2req := &b2types.ListFileVersionsRequest{ + BucketID: b.id, + Count: count, + StartName: startName, + StartID: startID, + Prefix: prefix, + Delimiter: delimiter, + } + b2resp := &b2types.ListFileVersionsResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_list_file_versions", "POST", b.b2.apiURI+b2types.V1api+"b2_list_file_versions", b2req, b2resp, headers, nil); err != nil { + return nil, "", "", err + } + var files []*File + for _, f := range b2resp.Files { + files = append(files, &File{ + Name: f.Name, + Size: f.Size, + Status: f.Action, + Timestamp: millitime(f.Timestamp), + Info: &FileInfo{ + Name: f.Name, + SHA1: f.SHA1, + Size: f.Size, + ContentType: f.ContentType, + Info: f.Info, + Status: f.Action, + Timestamp: millitime(f.Timestamp), + }, + id: f.FileID, + b2: b.b2, + }) + } + return files, b2resp.NextName, b2resp.NextID, nil +} + +// GetDownloadAuthorization wraps b2_get_download_authorization. +func (b *Bucket) GetDownloadAuthorization(ctx context.Context, prefix string, valid time.Duration) (string, error) { + b2req := &b2types.GetDownloadAuthorizationRequest{ + BucketID: b.id, + Prefix: prefix, + Valid: int(valid.Seconds()), + } + b2resp := &b2types.GetDownloadAuthorizationResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_get_download_authorization", "POST", b.b2.apiURI+b2types.V1api+"b2_get_download_authorization", b2req, b2resp, headers, nil); err != nil { + return "", err + } + return b2resp.Token, nil +} + +// FileReader is an io.ReadCloser that downloads a file from B2. +type FileReader struct { + io.ReadCloser + ContentLength int + ContentType string + SHA1 string + ID string + Info map[string]string +} + +func mkRange(offset, size int64) string { + if offset == 0 && size == 0 { + return "" + } + if size == 0 { + return fmt.Sprintf("bytes=%d-", offset) + } + return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1) +} + +// DownloadFileByName wraps b2_download_file_by_name. 
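+//
+// A download sketch: an offset and size of 0, 0 fetches the whole
+// object, and the caller must close the returned reader (assumes ctx
+// and bkt; error handling elided):
+//
+//	fr, err := bkt.DownloadFileByName(ctx, "hello.txt", 0, 0)
+//	defer fr.Close()
+//	data, err := ioutil.ReadAll(fr)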
+func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, size int64) (*FileReader, error) { + uri := fmt.Sprintf("%s/file/%s/%s", b.b2.DownloadURI, b.Name, name) + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return nil, err + } + req.Header.Set("Authorization", b.b2.authToken) + req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1))) + req.Header.Set("X-Blazer-Method", "b2_download_file_by_name") + rng := mkRange(offset, size) + if rng != "" { + req.Header.Set("Range", rng) + } + cancel := make(chan struct{}) + req.Cancel = cancel + logRequest(req, nil) + ch := makeNetRequest(req, b.b2.opts.getTransport()) + var reply httpReply + select { + case reply = <-ch: + case <-ctx.Done(): + close(cancel) + return nil, ctx.Err() + } + if reply.err != nil { + return nil, reply.err + } + resp := reply.resp + logResponse(resp, nil) + if resp.StatusCode != 200 && resp.StatusCode != 206 { + defer resp.Body.Close() + return nil, mkErr(resp) + } + clen, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + resp.Body.Close() + return nil, err + } + info := make(map[string]string) + for key := range resp.Header { + if !strings.HasPrefix(key, "X-Bz-Info-") { + continue + } + name, err := unescape(strings.TrimPrefix(key, "X-Bz-Info-")) + if err != nil { + resp.Body.Close() + return nil, err + } + val, err := unescape(resp.Header.Get(key)) + if err != nil { + resp.Body.Close() + return nil, err + } + info[name] = val + } + return &FileReader{ + ReadCloser: resp.Body, + SHA1: resp.Header.Get("X-Bz-Content-Sha1"), + ID: resp.Header.Get("X-Bz-File-Id"), + ContentType: resp.Header.Get("Content-Type"), + ContentLength: int(clen), + Info: info, + }, nil +} + +// HideFile wraps b2_hide_file. +func (b *Bucket) HideFile(ctx context.Context, name string) (*File, error) { + b2req := &b2types.HideFileRequest{ + BucketID: b.id, + File: name, + } + b2resp := &b2types.HideFileResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_hide_file", "POST", b.b2.apiURI+b2types.V1api+"b2_hide_file", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + return &File{ + Status: b2resp.Action, + Name: name, + Timestamp: millitime(b2resp.Timestamp), + b2: b.b2, + id: b2resp.ID, + }, nil +} + +// FileInfo holds information about a specific file. +type FileInfo struct { + Name string + SHA1 string + ID string + Size int64 + ContentType string + Info map[string]string + Status string + Timestamp time.Time +} + +// GetFileInfo wraps b2_get_file_info. 
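+//
+// A sketch: a *File can be built from a known file ID via Bucket.File
+// and then resolved (fileID is hypothetical; error handling elided):
+//
+//	f := bkt.File(fileID, "hello.txt")
+//	fi, err := f.GetFileInfo(ctx)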
+func (f *File) GetFileInfo(ctx context.Context) (*FileInfo, error) { + b2req := &b2types.GetFileInfoRequest{ + ID: f.id, + } + b2resp := &b2types.GetFileInfoResponse{} + headers := map[string]string{ + "Authorization": f.b2.authToken, + } + if err := f.b2.opts.makeRequest(ctx, "b2_get_file_info", "POST", f.b2.apiURI+b2types.V1api+"b2_get_file_info", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + f.Status = b2resp.Action + f.Name = b2resp.Name + f.Timestamp = millitime(b2resp.Timestamp) + f.Info = &FileInfo{ + Name: b2resp.Name, + SHA1: b2resp.SHA1, + Size: b2resp.Size, + ContentType: b2resp.ContentType, + Info: b2resp.Info, + Status: b2resp.Action, + ID: b2resp.FileID, + Timestamp: millitime(b2resp.Timestamp), + } + return f.Info, nil +} diff --git a/vendor/github.com/minio/blazer/base/strings.go b/vendor/github.com/minio/blazer/base/strings.go new file mode 100644 index 000000000..88e615f3e --- /dev/null +++ b/vendor/github.com/minio/blazer/base/strings.go @@ -0,0 +1,81 @@ +// Copyright 2017, Google +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package base + +import ( + "bytes" + "errors" + "fmt" +) + +func noEscape(c byte) bool { + switch c { + case '.', '_', '-', '/', '~', '!', '$', '\'', '(', ')', '*', ';', '=', ':', '@': + return true + } + return false +} + +func escape(s string) string { + // cribbed from url.go, kinda + b := &bytes.Buffer{} + for i := 0; i < len(s); i++ { + switch c := s[i]; { + case c == '/': + b.WriteByte(c) + case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9': + b.WriteByte(c) + case noEscape(c): + b.WriteByte(c) + default: + fmt.Fprintf(b, "%%%X", c) + } + } + return b.String() +} + +func unescape(s string) (string, error) { + b := &bytes.Buffer{} + for i := 0; i < len(s); i++ { + c := s[i] + switch c { + case '/': + b.WriteString("/") + case '+': + b.WriteString(" ") + case '%': + if len(s)-i < 3 { + return "", errors.New("unescape: bad encoding") + } + b.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2])) + i += 2 + default: + b.WriteByte(c) + } + } + return b.String(), nil +} + +func unhex(c byte) byte { + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 0 +} diff --git a/vendor/github.com/minio/blazer/internal/b2types/b2types.go b/vendor/github.com/minio/blazer/internal/b2types/b2types.go new file mode 100644 index 000000000..c1f78c47c --- /dev/null +++ b/vendor/github.com/minio/blazer/internal/b2types/b2types.go @@ -0,0 +1,255 @@ +// Copyright 2016, Google +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package b2types implements internal types common to the B2 API. +package b2types + +// You know what would be amazing? If I could autogen this from like a JSON +// file. Wouldn't that be amazing? That would be amazing. + +const ( + V1api = "/b2api/v1/" +) + +type ErrorMessage struct { + Status int `json:"status"` + Code string `json:"code"` + Msg string `json:"message"` +} + +type AuthorizeAccountResponse struct { + AccountID string `json:"accountId"` + AuthToken string `json:"authorizationToken"` + URI string `json:"apiUrl"` + DownloadURI string `json:"downloadUrl"` + MinPartSize int `json:"minimumPartSize"` +} + +type LifecycleRule struct { + DaysHiddenUntilDeleted int `json:"daysFromHidingToDeleting,omitempty"` + DaysNewUntilHidden int `json:"daysFromUploadingToHiding,omitempty"` + Prefix string `json:"fileNamePrefix"` +} + +type CreateBucketRequest struct { + AccountID string `json:"accountId"` + Name string `json:"bucketName"` + Type string `json:"bucketType"` + Info map[string]string `json:"bucketInfo"` + LifecycleRules []LifecycleRule `json:"lifecycleRules"` +} + +type CreateBucketResponse struct { + BucketID string `json:"bucketId"` + Name string `json:"bucketName"` + Type string `json:"bucketType"` + Info map[string]string `json:"bucketInfo"` + LifecycleRules []LifecycleRule `json:"lifecycleRules"` + Revision int `json:"revision"` +} + +type DeleteBucketRequest struct { + AccountID string `json:"accountId"` + BucketID string `json:"bucketId"` +} + +type ListBucketsRequest struct { + AccountID string `json:"accountId"` +} + +type ListBucketsResponse struct { + Buckets []CreateBucketResponse `json:"buckets"` +} + +type UpdateBucketRequest struct { + AccountID string `json:"accountId"` + BucketID string `json:"bucketId"` + // bucketName is a required field according to + // https://www.backblaze.com/b2/docs/b2_update_bucket.html. 
+ // + // However, actually setting it returns 400: unknown field in + // com.backblaze.modules.b2.data.UpdateBucketRequest: bucketName + // + //Name string `json:"bucketName"` + Type string `json:"bucketType,omitempty"` + Info map[string]string `json:"bucketInfo,omitempty"` + LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"` + IfRevisionIs int `json:"ifRevisionIs,omitempty"` +} + +type UpdateBucketResponse CreateBucketResponse + +type GetUploadURLRequest struct { + BucketID string `json:"bucketId"` +} + +type GetUploadURLResponse struct { + URI string `json:"uploadUrl"` + Token string `json:"authorizationToken"` +} + +type UploadFileResponse struct { + FileID string `json:"fileId"` + Timestamp int64 `json:"uploadTimestamp"` + Action string `json:"action"` +} + +type DeleteFileVersionRequest struct { + Name string `json:"fileName"` + FileID string `json:"fileId"` +} + +type StartLargeFileRequest struct { + BucketID string `json:"bucketId"` + Name string `json:"fileName"` + ContentType string `json:"contentType"` + Info map[string]string `json:"fileInfo,omitempty"` +} + +type StartLargeFileResponse struct { + ID string `json:"fileId"` +} + +type CancelLargeFileRequest struct { + ID string `json:"fileId"` +} + +type ListUnfinishedLargeFilesRequest struct { + BucketID string `json:"bucketId"` + Continuation string `json:"startFileId,omitempty"` + Count int `json:"maxFileCount,omitempty"` +} + +type ListUnfinishedLargeFilesResponse struct { + NextID string `json:"nextFileId"` + Files []struct { + AccountID string `json:"accountId"` + BucketID string `json:"bucketId"` + Name string `json:"fileName"` + ID string `json:"fileId"` + Timestamp int64 `json:"uploadTimestamp"` + ContentType string `json:"contentType"` + Info map[string]string `json:"fileInfo,omitempty"` + } `json:"files"` +} + +type ListPartsRequest struct { + ID string `json:"fileId"` + Start int `json:"startPartNumber"` + Count int `json:"maxPartCount"` +} + +type ListPartsResponse struct { + Next int `json:"nextPartNumber"` + Parts []struct { + ID string `json:"fileId"` + Number int `json:"partNumber"` + SHA1 string `json:"contentSha1"` + Size int64 `json:"contentLength"` + } `json:"parts"` +} + +type getUploadPartURLRequest struct { + ID string `json:"fileId"` +} + +type getUploadPartURLResponse struct { + URL string `json:"uploadUrl"` + Token string `json:"authorizationToken"` +} + +type UploadPartResponse struct { + ID string `json:"fileId"` + PartNumber int `json:"partNumber"` + Size int64 `json:"contentLength"` + SHA1 string `json:"contentSha1"` +} + +type FinishLargeFileRequest struct { + ID string `json:"fileId"` + Hashes []string `json:"partSha1Array"` +} + +type FinishLargeFileResponse struct { + Name string `json:"fileName"` + FileID string `json:"fileId"` + Timestamp int64 `json:"uploadTimestamp"` + Action string `json:"action"` +} + +type ListFileNamesRequest struct { + BucketID string `json:"bucketId"` + Count int `json:"maxFileCount"` + Continuation string `json:"startFileName,omitempty"` + Prefix string `json:"prefix,omitempty"` + Delimiter string `json:"delimiter,omitempty"` +} + +type ListFileNamesResponse struct { + Continuation string `json:"nextFileName"` + Files []GetFileInfoResponse `json:"files"` +} + +type ListFileVersionsRequest struct { + BucketID string `json:"bucketId"` + Count int `json:"maxFileCount"` + StartName string `json:"startFileName,omitempty"` + StartID string `json:"startFileId,omitempty"` + Prefix string `json:"prefix,omitempty"` + Delimiter string `json:"delimiter,omitempty"` 
+} + +type ListFileVersionsResponse struct { + NextName string `json:"nextFileName"` + NextID string `json:"nextFileId"` + Files []GetFileInfoResponse `json:"files"` +} + +type HideFileRequest struct { + BucketID string `json:"bucketId"` + File string `json:"fileName"` +} + +type HideFileResponse struct { + ID string `json:"fileId"` + Timestamp int64 `json:"uploadTimestamp"` + Action string `json:"action"` +} + +type GetFileInfoRequest struct { + ID string `json:"fileId"` +} + +type GetFileInfoResponse struct { + FileID string `json:"fileId"` + Name string `json:"fileName"` + SHA1 string `json:"contentSha1"` + Size int64 `json:"contentLength"` + ContentType string `json:"contentType"` + Info map[string]string `json:"fileInfo"` + Action string `json:"action"` + Timestamp int64 `json:"uploadTimestamp"` +} + +type GetDownloadAuthorizationRequest struct { + BucketID string `json:"bucketId"` + Prefix string `json:"fileNamePrefix"` + Valid int `json:"validDurationInSeconds"` +} + +type GetDownloadAuthorizationResponse struct { + BucketID string `json:"bucketId"` + Prefix string `json:"fileNamePrefix"` + Token string `json:"authorizationToken"` +} diff --git a/vendor/github.com/minio/blazer/internal/blog/blog.go b/vendor/github.com/minio/blazer/internal/blog/blog.go new file mode 100644 index 000000000..6ffe5cbf0 --- /dev/null +++ b/vendor/github.com/minio/blazer/internal/blog/blog.go @@ -0,0 +1,54 @@ +// Copyright 2017, Google +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package blog implements a private logger, in the manner of glog, without +// polluting the flag namespace or leaving files all over /tmp. +// +// It has almost no features, and a bunch of global state. +package blog + +import ( + "log" + "os" + "strconv" +) + +var level int32 + +type Verbose bool + +func init() { + lvl := os.Getenv("B2_LOG_LEVEL") + i, err := strconv.ParseInt(lvl, 10, 32) + if err != nil { + return + } + level = int32(i) +} + +func (v Verbose) Info(a ...interface{}) { + if v { + log.Print(a...) + } +} + +func (v Verbose) Infof(format string, a ...interface{}) { + if v { + log.Printf(format, a...) + } +} + +func V(target int32) Verbose { + return Verbose(target <= level) +} diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go deleted file mode 100644 index d7031b694..000000000 --- a/vendor/golang.org/x/text/internal/gen/code.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gen - -import ( - "bytes" - "encoding/gob" - "fmt" - "hash" - "hash/fnv" - "io" - "log" - "os" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -// This file contains utilities for generating code. - -// TODO: other write methods like: -// - slices, maps, types, etc. - -// CodeWriter is a utility for writing structured code. It computes the content -// hash and size of written content. 
It ensures there are newlines between -// written code blocks. -type CodeWriter struct { - buf bytes.Buffer - Size int - Hash hash.Hash32 // content hash - gob *gob.Encoder - // For comments we skip the usual one-line separator if they are followed by - // a code block. - skipSep bool -} - -func (w *CodeWriter) Write(p []byte) (n int, err error) { - return w.buf.Write(p) -} - -// NewCodeWriter returns a new CodeWriter. -func NewCodeWriter() *CodeWriter { - h := fnv.New32() - return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} -} - -// WriteGoFile appends the buffer with the total size of all created structures -// and writes it as a Go file to the the given file with the given package name. -func (w *CodeWriter) WriteGoFile(filename, pkg string) { - f, err := os.Create(filename) - if err != nil { - log.Fatalf("Could not create file %s: %v", filename, err) - } - defer f.Close() - if _, err = w.WriteGo(f, pkg); err != nil { - log.Fatalf("Error writing file %s: %v", filename, err) - } -} - -// WriteGo appends the buffer with the total size of all created structures and -// writes it as a Go file to the the given writer with the given package name. -func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) { - sz := w.Size - w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32()) - defer w.buf.Reset() - return WriteGo(out, pkg, w.buf.Bytes()) -} - -func (w *CodeWriter) printf(f string, x ...interface{}) { - fmt.Fprintf(w, f, x...) -} - -func (w *CodeWriter) insertSep() { - if w.skipSep { - w.skipSep = false - return - } - // Use at least two newlines to ensure a blank space between the previous - // block. WriteGoFile will remove extraneous newlines. - w.printf("\n\n") -} - -// WriteComment writes a comment block. All line starts are prefixed with "//". -// Initial empty lines are gobbled. The indentation for the first line is -// stripped from consecutive lines. -func (w *CodeWriter) WriteComment(comment string, args ...interface{}) { - s := fmt.Sprintf(comment, args...) - s = strings.Trim(s, "\n") - - // Use at least two newlines to ensure a blank space between the previous - // block. WriteGoFile will remove extraneous newlines. - w.printf("\n\n// ") - w.skipSep = true - - // strip first indent level. - sep := "\n" - for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { - sep += s[:1] - } - - strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) - - w.printf("\n") -} - -func (w *CodeWriter) writeSizeInfo(size int) { - w.printf("// Size: %d bytes\n", size) -} - -// WriteConst writes a constant of the given name and value. -func (w *CodeWriter) WriteConst(name string, x interface{}) { - w.insertSep() - v := reflect.ValueOf(x) - - switch v.Type().Kind() { - case reflect.String: - w.printf("const %s %s = ", name, typeName(x)) - w.WriteString(v.String()) - w.printf("\n") - default: - w.printf("const %s = %#v\n", name, x) - } -} - -// WriteVar writes a variable of the given name and value. 
-func (w *CodeWriter) WriteVar(name string, x interface{}) { - w.insertSep() - v := reflect.ValueOf(x) - oldSize := w.Size - sz := int(v.Type().Size()) - w.Size += sz - - switch v.Type().Kind() { - case reflect.String: - w.printf("var %s %s = ", name, typeName(x)) - w.WriteString(v.String()) - case reflect.Struct: - w.gob.Encode(x) - fallthrough - case reflect.Slice, reflect.Array: - w.printf("var %s = ", name) - w.writeValue(v) - w.writeSizeInfo(w.Size - oldSize) - default: - w.printf("var %s %s = ", name, typeName(x)) - w.gob.Encode(x) - w.writeValue(v) - w.writeSizeInfo(w.Size - oldSize) - } - w.printf("\n") -} - -func (w *CodeWriter) writeValue(v reflect.Value) { - x := v.Interface() - switch v.Kind() { - case reflect.String: - w.WriteString(v.String()) - case reflect.Array: - // Don't double count: callers of WriteArray count on the size being - // added, so we need to discount it here. - w.Size -= int(v.Type().Size()) - w.writeSlice(x, true) - case reflect.Slice: - w.writeSlice(x, false) - case reflect.Struct: - w.printf("%s{\n", typeName(v.Interface())) - t := v.Type() - for i := 0; i < v.NumField(); i++ { - w.printf("%s: ", t.Field(i).Name) - w.writeValue(v.Field(i)) - w.printf(",\n") - } - w.printf("}") - default: - w.printf("%#v", x) - } -} - -// WriteString writes a string literal. -func (w *CodeWriter) WriteString(s string) { - s = strings.Replace(s, `\`, `\\`, -1) - io.WriteString(w.Hash, s) // content hash - w.Size += len(s) - - const maxInline = 40 - if len(s) <= maxInline { - w.printf("%q", s) - return - } - - // We will render the string as a multi-line string. - const maxWidth = 80 - 4 - len(`"`) - len(`" +`) - - // When starting on its own line, go fmt indents line 2+ an extra level. - n, max := maxWidth, maxWidth-4 - - // As per https://golang.org/issue/18078, the compiler has trouble - // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, - // for large N. We insert redundant, explicit parentheses to work around - // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + - // ... + s127) + etc + (etc + ... + sN). - explicitParens, extraComment := len(s) > 128*1024, "" - if explicitParens { - w.printf(`(`) - extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" - } - - // Print "" +\n, if a string does not start on its own line. - b := w.buf.Bytes() - if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { - w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) - n, max = maxWidth, maxWidth - } - - w.printf(`"`) - - for sz, p, nLines := 0, 0, 0; p < len(s); { - var r rune - r, sz = utf8.DecodeRuneInString(s[p:]) - out := s[p : p+sz] - chars := 1 - if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { - switch sz { - case 1: - out = fmt.Sprintf("\\x%02x", s[p]) - case 2, 3: - out = fmt.Sprintf("\\u%04x", r) - case 4: - out = fmt.Sprintf("\\U%08x", r) - } - chars = len(out) - } - if n -= chars; n < 0 { - nLines++ - if explicitParens && nLines&63 == 63 { - w.printf("\") + (\"") - } - w.printf("\" +\n\"") - n = max - len(out) - } - w.printf("%s", out) - p += sz - } - w.printf(`"`) - if explicitParens { - w.printf(`)`) - } -} - -// WriteSlice writes a slice value. -func (w *CodeWriter) WriteSlice(x interface{}) { - w.writeSlice(x, false) -} - -// WriteArray writes an array value. 
-func (w *CodeWriter) WriteArray(x interface{}) { - w.writeSlice(x, true) -} - -func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { - v := reflect.ValueOf(x) - w.gob.Encode(v.Len()) - w.Size += v.Len() * int(v.Type().Elem().Size()) - name := typeName(x) - if isArray { - name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) - } - if isArray { - w.printf("%s{\n", name) - } else { - w.printf("%s{ // %d elements\n", name, v.Len()) - } - - switch kind := v.Type().Elem().Kind(); kind { - case reflect.String: - for _, s := range x.([]string) { - w.WriteString(s) - w.printf(",\n") - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - // nLine and nBlock are the number of elements per line and block. - nLine, nBlock, format := 8, 64, "%d," - switch kind { - case reflect.Uint8: - format = "%#02x," - case reflect.Uint16: - format = "%#04x," - case reflect.Uint32: - nLine, nBlock, format = 4, 32, "%#08x," - case reflect.Uint, reflect.Uint64: - nLine, nBlock, format = 4, 32, "%#016x," - case reflect.Int8: - nLine = 16 - } - n := nLine - for i := 0; i < v.Len(); i++ { - if i%nBlock == 0 && v.Len() > nBlock { - w.printf("// Entry %X - %X\n", i, i+nBlock-1) - } - x := v.Index(i).Interface() - w.gob.Encode(x) - w.printf(format, x) - if n--; n == 0 { - n = nLine - w.printf("\n") - } - } - w.printf("\n") - case reflect.Struct: - zero := reflect.Zero(v.Type().Elem()).Interface() - for i := 0; i < v.Len(); i++ { - x := v.Index(i).Interface() - w.gob.EncodeValue(v) - if !reflect.DeepEqual(zero, x) { - line := fmt.Sprintf("%#v,\n", x) - line = line[strings.IndexByte(line, '{'):] - w.printf("%d: ", i) - w.printf(line) - } - } - case reflect.Array: - for i := 0; i < v.Len(); i++ { - w.printf("%d: %#v,\n", i, v.Index(i).Interface()) - } - default: - panic("gen: slice elem type not supported") - } - w.printf("}") -} - -// WriteType writes a definition of the type of the given value and returns the -// type name. -func (w *CodeWriter) WriteType(x interface{}) string { - t := reflect.TypeOf(x) - w.printf("type %s struct {\n", t.Name()) - for i := 0; i < t.NumField(); i++ { - w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) - } - w.printf("}\n") - return t.Name() -} - -// typeName returns the name of the go type of x. -func typeName(x interface{}) string { - t := reflect.ValueOf(x).Type() - return strings.Replace(fmt.Sprint(t), "main.", "", 1) -} diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go deleted file mode 100644 index 2acb0355a..000000000 --- a/vendor/golang.org/x/text/internal/gen/gen.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gen contains common code for the various code generation tools in the -// text repository. Its usage ensures consistency between tools. -// -// This package defines command line flags that are common to most generation -// tools. The flags allow for specifying specific Unicode and CLDR versions -// in the public Unicode data repository (http://www.unicode.org/Public). -// -// A local Unicode data mirror can be set through the flag -local or the -// environment variable UNICODE_DIR. The former takes precedence. The local -// directory should follow the same structure as the public repository. 
-// -// IANA data can also optionally be mirrored by putting it in the iana directory -// rooted at the top of the local mirror. Beware, though, that IANA data is not -// versioned. So it is up to the developer to use the right version. -package gen // import "golang.org/x/text/internal/gen" - -import ( - "bytes" - "flag" - "fmt" - "go/build" - "go/format" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "path" - "path/filepath" - "sync" - "unicode" - - "golang.org/x/text/unicode/cldr" -) - -var ( - url = flag.String("url", - "http://www.unicode.org/Public", - "URL of Unicode database directory") - iana = flag.String("iana", - "http://www.iana.org", - "URL of the IANA repository") - unicodeVersion = flag.String("unicode", - getEnv("UNICODE_VERSION", unicode.Version), - "unicode version to use") - cldrVersion = flag.String("cldr", - getEnv("CLDR_VERSION", cldr.Version), - "cldr version to use") -) - -func getEnv(name, def string) string { - if v := os.Getenv(name); v != "" { - return v - } - return def -} - -// Init performs common initialization for a gen command. It parses the flags -// and sets up the standard logging parameters. -func Init() { - log.SetPrefix("") - log.SetFlags(log.Lshortfile) - flag.Parse() -} - -const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package %s - -` - -// UnicodeVersion reports the requested Unicode version. -func UnicodeVersion() string { - return *unicodeVersion -} - -// UnicodeVersion reports the requested CLDR version. -func CLDRVersion() string { - return *cldrVersion -} - -// IsLocal reports whether data files are available locally. -func IsLocal() bool { - dir, err := localReadmeFile() - if err != nil { - return false - } - if _, err = os.Stat(dir); err != nil { - return false - } - return true -} - -// OpenUCDFile opens the requested UCD file. The file is specified relative to -// the public Unicode root directory. It will call log.Fatal if there are any -// errors. -func OpenUCDFile(file string) io.ReadCloser { - return openUnicode(path.Join(*unicodeVersion, "ucd", file)) -} - -// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there -// are any errors. -func OpenCLDRCoreZip() io.ReadCloser { - return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") -} - -// OpenUnicodeFile opens the requested file of the requested category from the -// root of the Unicode data archive. The file is specified relative to the -// public Unicode root directory. If version is "", it will use the default -// Unicode version. It will call log.Fatal if there are any errors. -func OpenUnicodeFile(category, version, file string) io.ReadCloser { - if version == "" { - version = UnicodeVersion() - } - return openUnicode(path.Join(category, version, file)) -} - -// OpenIANAFile opens the requested IANA file. The file is specified relative -// to the IANA root, which is typically either http://www.iana.org or the -// iana directory in the local mirror. It will call log.Fatal if there are any -// errors. 
-func OpenIANAFile(path string) io.ReadCloser { - return Open(*iana, "iana", path) -} - -var ( - dirMutex sync.Mutex - localDir string -) - -const permissions = 0755 - -func localReadmeFile() (string, error) { - p, err := build.Import("golang.org/x/text", "", build.FindOnly) - if err != nil { - return "", fmt.Errorf("Could not locate package: %v", err) - } - return filepath.Join(p.Dir, "DATA", "README"), nil -} - -func getLocalDir() string { - dirMutex.Lock() - defer dirMutex.Unlock() - - readme, err := localReadmeFile() - if err != nil { - log.Fatal(err) - } - dir := filepath.Dir(readme) - if _, err := os.Stat(readme); err != nil { - if err := os.MkdirAll(dir, permissions); err != nil { - log.Fatalf("Could not create directory: %v", err) - } - ioutil.WriteFile(readme, []byte(readmeTxt), permissions) - } - return dir -} - -const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT. - -This directory contains downloaded files used to generate the various tables -in the golang.org/x/text subrepo. - -Note that the language subtag repo (iana/assignments/language-subtag-registry) -and all other times in the iana subdirectory are not versioned and will need -to be periodically manually updated. The easiest way to do this is to remove -the entire iana directory. This is mostly of concern when updating the language -package. -` - -// Open opens subdir/path if a local directory is specified and the file exists, -// where subdir is a directory relative to the local root, or fetches it from -// urlRoot/path otherwise. It will call log.Fatal if there are any errors. -func Open(urlRoot, subdir, path string) io.ReadCloser { - file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path)) - return open(file, urlRoot, path) -} - -func openUnicode(path string) io.ReadCloser { - file := filepath.Join(getLocalDir(), filepath.FromSlash(path)) - return open(file, *url, path) -} - -// TODO: automatically periodically update non-versioned files. - -func open(file, urlRoot, path string) io.ReadCloser { - if f, err := os.Open(file); err == nil { - return f - } - r := get(urlRoot, path) - defer r.Close() - b, err := ioutil.ReadAll(r) - if err != nil { - log.Fatalf("Could not download file: %v", err) - } - os.MkdirAll(filepath.Dir(file), permissions) - if err := ioutil.WriteFile(file, b, permissions); err != nil { - log.Fatalf("Could not create file: %v", err) - } - return ioutil.NopCloser(bytes.NewReader(b)) -} - -func get(root, path string) io.ReadCloser { - url := root + "/" + path - fmt.Printf("Fetching %s...", url) - defer fmt.Println(" done.") - resp, err := http.Get(url) - if err != nil { - log.Fatalf("HTTP GET: %v", err) - } - if resp.StatusCode != 200 { - log.Fatalf("Bad GET status for %q: %q", url, resp.Status) - } - return resp.Body -} - -// TODO: use Write*Version in all applicable packages. - -// WriteUnicodeVersion writes a constant for the Unicode version from which the -// tables are generated. -func WriteUnicodeVersion(w io.Writer) { - fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") - fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) -} - -// WriteCLDRVersion writes a constant for the CLDR version from which the -// tables are generated. 
-func WriteCLDRVersion(w io.Writer) { - fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") - fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) -} - -// WriteGoFile prepends a standard file comment and package statement to the -// given bytes, applies gofmt, and writes them to a file with the given name. -// It will call log.Fatal if there are any errors. -func WriteGoFile(filename, pkg string, b []byte) { - w, err := os.Create(filename) - if err != nil { - log.Fatalf("Could not create file %s: %v", filename, err) - } - defer w.Close() - if _, err = WriteGo(w, pkg, b); err != nil { - log.Fatalf("Error writing file %s: %v", filename, err) - } -} - -// WriteGo prepends a standard file comment and package statement to the given -// bytes, applies gofmt, and writes them to w. -func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) { - src := []byte(fmt.Sprintf(header, pkg)) - src = append(src, b...) - formatted, err := format.Source(src) - if err != nil { - // Print the generated code even in case of an error so that the - // returned error can be meaningfully interpreted. - n, _ = w.Write(src) - return n, err - } - return w.Write(formatted) -} - -// Repackage rewrites a Go file from belonging to package main to belonging to -// the given package. -func Repackage(inFile, outFile, pkg string) { - src, err := ioutil.ReadFile(inFile) - if err != nil { - log.Fatalf("reading %s: %v", inFile, err) - } - const toDelete = "package main\n\n" - i := bytes.Index(src, []byte(toDelete)) - if i < 0 { - log.Fatalf("Could not find %q in %s.", toDelete, inFile) - } - w := &bytes.Buffer{} - w.Write(src[i+len(toDelete):]) - WriteGoFile(outFile, pkg, w.Bytes()) -} diff --git a/vendor/golang.org/x/text/internal/triegen/compact.go b/vendor/golang.org/x/text/internal/triegen/compact.go deleted file mode 100644 index 397b975c1..000000000 --- a/vendor/golang.org/x/text/internal/triegen/compact.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package triegen - -// This file defines Compacter and its implementations. - -import "io" - -// A Compacter generates an alternative, more space-efficient way to store a -// trie value block. A trie value block holds all possible values for the last -// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block -// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0). -type Compacter interface { - // Size returns whether the Compacter could encode the given block as well - // as its size in case it can. len(v) is always 64. - Size(v []uint64) (sz int, ok bool) - - // Store stores the block using the Compacter's compression method. - // It returns a handle with which the block can be retrieved. - // len(v) is always 64. - Store(v []uint64) uint32 - - // Print writes the data structures associated to the given store to w. - Print(w io.Writer) error - - // Handler returns the name of a function that gets called during trie - // lookup for blocks generated by the Compacter. The function should be of - // the form func (n uint32, b byte) uint64, where n is the index returned by - // the Compacter's Store method and b is the last byte of the UTF-8 - // encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the - // block. - Handler() string -} - -// simpleCompacter is the default Compacter used by builder. 
It implements a -// normal trie block. -type simpleCompacter builder - -func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { - return blockSize * b.ValueSize, true -} - -func (b *simpleCompacter) Store(v []uint64) uint32 { - h := uint32(len(b.ValueBlocks) - blockOffset) - b.ValueBlocks = append(b.ValueBlocks, v) - return h -} - -func (b *simpleCompacter) Print(io.Writer) error { - // Structures are printed in print.go. - return nil -} - -func (b *simpleCompacter) Handler() string { - panic("Handler should be special-cased for this Compacter") -} diff --git a/vendor/golang.org/x/text/internal/triegen/print.go b/vendor/golang.org/x/text/internal/triegen/print.go deleted file mode 100644 index 8d9f120bc..000000000 --- a/vendor/golang.org/x/text/internal/triegen/print.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package triegen - -import ( - "bytes" - "fmt" - "io" - "strings" - "text/template" -) - -// print writes all the data structures as well as the code necessary to use the -// trie to w. -func (b *builder) print(w io.Writer) error { - b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize - b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize - b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize - b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize - b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize - - // If we only have one root trie, all starter blocks are at position 0 and - // we can access the arrays directly. - if len(b.Trie) == 1 { - // At this point we cannot refer to the generated tables directly. - b.ASCIIBlock = b.Name + "Values" - b.StarterBlock = b.Name + "Index" - } else { - // Otherwise we need to have explicit starter indexes in the trie - // structure. 
- b.ASCIIBlock = "t.ascii" - b.StarterBlock = "t.utf8Start" - } - - b.SourceType = "[]byte" - if err := lookupGen.Execute(w, b); err != nil { - return err - } - - b.SourceType = "string" - if err := lookupGen.Execute(w, b); err != nil { - return err - } - - if err := trieGen.Execute(w, b); err != nil { - return err - } - - for _, c := range b.Compactions { - if err := c.c.Print(w); err != nil { - return err - } - } - - return nil -} - -func printValues(n int, values []uint64) string { - w := &bytes.Buffer{} - boff := n * blockSize - fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) - var newline bool - for i, v := range values { - if i%6 == 0 { - newline = true - } - if v != 0 { - if newline { - fmt.Fprintf(w, "\n") - newline = false - } - fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) - } - } - return w.String() -} - -func printIndex(b *builder, nr int, n *node) string { - w := &bytes.Buffer{} - boff := nr * blockSize - fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff) - var newline bool - for i, c := range n.children { - if i%8 == 0 { - newline = true - } - if c != nil { - v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index) - if v != 0 { - if newline { - fmt.Fprintf(w, "\n") - newline = false - } - fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v) - } - } - } - return w.String() -} - -var ( - trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{ - "printValues": printValues, - "printIndex": printIndex, - "title": strings.Title, - "dec": func(x int) int { return x - 1 }, - "psize": func(n int) string { - return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024) - }, - }).Parse(trieTemplate)) - lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate)) -) - -// TODO: consider the return type of lookup. It could be uint64, even if the -// internal value type is smaller. We will have to verify this with the -// performance of unicode/norm, which is very sensitive to such changes. -const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}} -// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}. -type {{.Name}}Trie struct { {{if $multi}} - ascii []{{.ValueType}} // index for ASCII bytes - utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0 -{{end}}} - -func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}} - h := {{.Name}}TrieHandles[i] - return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] } -} - -type {{.Name}}TrieHandle struct { - ascii, multi {{.IndexType}} -} - -// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes -var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{ -{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}} -{{end}}}{{else}} - return &{{.Name}}Trie{} -} -{{end}} -// lookupValue determines the type of block n and looks up the value for b. -func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} { - switch { {{range $i, $c := .Compactions}} - {{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}} - n -= {{$c.Offset}}{{end}} - return {{print $b.ValueType}}({{$c.Handler}}){{end}} - } -} - -// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes -// The third block is the zero block. 
-var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} { -{{range $i, $v := .ValueBlocks}}{{printValues $i $v}} -{{end}}} - -// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes -// Block 0 is the zero block. -var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} { -{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}} -{{end}}} -` - -// TODO: consider allowing zero-length strings after evaluating performance with -// unicode/norm. -const lookupTemplate = ` -// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return {{.ASCIIBlock}}[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := {{.StarterBlock}}[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := {{.StarterBlock}}[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = {{.Name}}Index[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := {{.StarterBlock}}[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = {{.Name}}Index[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = {{.Name}}Index[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return {{.ASCIIBlock}}[c0] - } - i := {{.StarterBlock}}[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} -` diff --git a/vendor/golang.org/x/text/internal/triegen/triegen.go b/vendor/golang.org/x/text/internal/triegen/triegen.go deleted file mode 100644 index adb010812..000000000 --- a/vendor/golang.org/x/text/internal/triegen/triegen.go +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package triegen implements a code generator for a trie for associating -// unsigned integer values with UTF-8 encoded runes. -// -// Many of the go.text packages use tries for storing per-rune information. A -// trie is especially useful if many of the runes have the same value. If this -// is the case, many blocks can be expected to be shared allowing for -// information on many runes to be stored in little space. -// -// As most of the lookups are done directly on []byte slices, the tries use the -// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to -// runes and contributes a little bit to better performance. It also naturally -// provides a fast path for ASCII. -// -// Space is also an issue. There are many code points defined in Unicode and as -// a result tables can get quite large. So every byte counts. The triegen -// package automatically chooses the smallest integer values to represent the -// tables. Compacters allow further compression of the trie by allowing for -// alternative representations of individual trie blocks. -// -// triegen allows generating multiple tries as a single structure. This is -// useful when, for example, one wants to generate tries for several languages -// that have a lot of values in common. Some existing libraries for -// internationalization store all per-language data as a dynamically loadable -// chunk. The go.text packages are designed with the assumption that the user -// typically wants to compile in support for all supported languages, in line -// with the approach common to Go to create a single standalone binary. The -// multi-root trie approach can give significant storage savings in this -// scenario. -// -// triegen generates both tables and code. The code is optimized to use the -// automatically chosen data types. The following code is generated for a Trie -// or multiple Tries named "foo": -// - type fooTrie -// The trie type. -// -// - func newFooTrie(x int) *fooTrie -// Trie constructor, where x is the index of the trie passed to Gen. -// -// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int) -// The lookup method, where uintX is automatically chosen. -// -// - func lookupString, lookupUnsafe and lookupStringUnsafe -// Variants of the above. -// -// - var fooValues and fooIndex and any tables generated by Compacters. -// The core trie data. -// -// - var fooTrieHandles -// Indexes of starter blocks in case of multiple trie roots. -// -// It is recommended that users test the generated trie by checking the returned -// value for every rune. Such exhaustive tests are possible as the the number of -// runes in Unicode is limited. -package triegen // import "golang.org/x/text/internal/triegen" - -// TODO: Arguably, the internally optimized data types would not have to be -// exposed in the generated API. We could also investigate not generating the -// code, but using it through a package. We would have to investigate the impact -// on performance of making such change, though. For packages like unicode/norm, -// small changes like this could tank performance. - -import ( - "encoding/binary" - "fmt" - "hash/crc64" - "io" - "log" - "unicode/utf8" -) - -// builder builds a set of tries for associating values with runes. The set of -// tries can share common index and value blocks. -type builder struct { - Name string - - // ValueType is the type of the trie values looked up. - ValueType string - - // ValueSize is the byte size of the ValueType. 
- ValueSize int - - // IndexType is the type of trie index values used for all UTF-8 bytes of - // a rune except the last one. - IndexType string - - // IndexSize is the byte size of the IndexType. - IndexSize int - - // SourceType is used when generating the lookup functions. If the user - // requests StringSupport, all lookup functions will be generated for - // string input as well. - SourceType string - - Trie []*Trie - - IndexBlocks []*node - ValueBlocks [][]uint64 - Compactions []compaction - Checksum uint64 - - ASCIIBlock string - StarterBlock string - - indexBlockIdx map[uint64]int - valueBlockIdx map[uint64]nodeIndex - asciiBlockIdx map[uint64]int - - // Stats are used to fill out the template. - Stats struct { - NValueEntries int - NValueBytes int - NIndexEntries int - NIndexBytes int - NHandleBytes int - } - - err error -} - -// A nodeIndex encodes the index of a node, which is defined by the compaction -// which stores it and an index within the compaction. For internal nodes, the -// compaction is always 0. -type nodeIndex struct { - compaction int - index int -} - -// compaction keeps track of stats used for the compaction. -type compaction struct { - c Compacter - blocks []*node - maxHandle uint32 - totalSize int - - // Used by template-based generator and thus exported. - Cutoff uint32 - Offset uint32 - Handler string -} - -func (b *builder) setError(err error) { - if b.err == nil { - b.err = err - } -} - -// An Option can be passed to Gen. -type Option func(b *builder) error - -// Compact configures the trie generator to use the given Compacter. -func Compact(c Compacter) Option { - return func(b *builder) error { - b.Compactions = append(b.Compactions, compaction{ - c: c, - Handler: c.Handler() + "(n, b)"}) - return nil - } -} - -// Gen writes Go code for a shared trie lookup structure to w for the given -// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will -// return the *nameTrie for tries[x]. A value can be looked up by using one of -// the various lookup methods defined on nameTrie. It returns the table size of -// the generated trie. -func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) { - // The index contains two dummy blocks, followed by the zero block. The zero - // block is at offset 0x80, so that the offset for the zero block for - // continuation bytes is 0. - b := &builder{ - Name: name, - Trie: tries, - IndexBlocks: []*node{{}, {}, {}}, - Compactions: []compaction{{ - Handler: name + "Values[n<<6+uint32(b)]", - }}, - // The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero - // block. - indexBlockIdx: map[uint64]int{0: 0}, - valueBlockIdx: map[uint64]nodeIndex{0: {}}, - asciiBlockIdx: map[uint64]int{}, - } - b.Compactions[0].c = (*simpleCompacter)(b) - - for _, f := range opts { - if err := f(b); err != nil { - return 0, err - } - } - b.build() - if b.err != nil { - return 0, b.err - } - if err = b.print(w); err != nil { - return 0, err - } - return b.Size(), nil -} - -// A Trie represents a single root node of a trie. A builder may build several -// overlapping tries at once. -type Trie struct { - root *node - - hiddenTrie -} - -// hiddenTrie contains values we want to be visible to the template generator, -// but hidden from the API documentation. -type hiddenTrie struct { - Name string - Checksum uint64 - ASCIIIndex int - StarterIndex int -} - -// NewTrie returns a new trie root. 
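Before NewTrie's definition below, a hedged sketch of how a generator program might drive this API end to end with NewTrie, Insert, and Gen. The widthClass property is invented for illustration, and since triegen is an internal package, real callers of this flow live inside x/text itself.

package main

import (
	"bytes"
	"log"
	"unicode"

	"golang.org/x/text/internal/triegen"
)

// widthClass is an invented per-rune property; a real generator derives
// its values from UCD data instead. Zero values need not be inserted.
func widthClass(r rune) uint64 {
	if unicode.Is(unicode.Han, r) {
		return 2
	}
	return 0
}

func main() {
	t := triegen.NewTrie("width")
	for r := rune(0); r <= unicode.MaxRune; r++ {
		if v := widthClass(r); v != 0 {
			t.Insert(r, v)
		}
	}
	var buf bytes.Buffer
	sz, err := t.Gen(&buf) // emits widthTrie, widthValues, widthIndex, ...
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("generated %d bytes of table data", sz)
	// buf now holds the Go source for a tables file.
}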
-func NewTrie(name string) *Trie {
-	return &Trie{
-		&node{
-			children: make([]*node, blockSize),
-			values:   make([]uint64, utf8.RuneSelf),
-		},
-		hiddenTrie{Name: name},
-	}
-}
-
-// Gen is a convenience wrapper around the Gen func passing t as the only trie
-// and uses the name passed to NewTrie. It returns the size of the generated
-// tables.
-func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) {
-	return Gen(w, t.Name, []*Trie{t}, opts...)
-}
-
-// node is a node of the intermediate trie structure.
-type node struct {
-	// children holds this node's children. It is always of length 64.
-	// A child node may be nil.
-	children []*node
-
-	// values contains the values of this node. If it is non-nil, this node is
-	// either a root or leaf node:
-	// For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F].
-	// For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF].
-	values []uint64
-
-	index nodeIndex
-}
-
-// Insert associates value with the given rune. Insert will panic if a non-zero
-// value is passed for an invalid rune.
-func (t *Trie) Insert(r rune, value uint64) {
-	if value == 0 {
-		return
-	}
-	s := string(r)
-	if []rune(s)[0] != r && value != 0 {
-		// Note: The UCD tables will always assign what amounts to a zero value
-		// to a surrogate. Allowing a zero value for an illegal rune allows
-		// users to iterate over [0..MaxRune] without having to explicitly
-		// exclude surrogates, which would be tedious.
-		panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r))
-	}
-	if len(s) == 1 {
-		// It is a root node value (ASCII).
-		t.root.values[s[0]] = value
-		return
-	}
-
-	n := t.root
-	for ; len(s) > 1; s = s[1:] {
-		if n.children == nil {
-			n.children = make([]*node, blockSize)
-		}
-		p := s[0] % blockSize
-		c := n.children[p]
-		if c == nil {
-			c = &node{}
-			n.children[p] = c
-		}
-		if len(s) > 2 && c.values != nil {
-			log.Fatalf("triegen: insert(%U): found internal node with values", r)
-		}
-		n = c
-	}
-	if n.values == nil {
-		n.values = make([]uint64, blockSize)
-	}
-	if n.children != nil {
-		log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r)
-	}
-	n.values[s[0]-0x80] = value
-}
-
-// Size returns the number of bytes the generated trie will take to store. It
-// needs to be exported as it is used in the templates.
-func (b *builder) Size() int {
-	// Index blocks.
-	sz := len(b.IndexBlocks) * blockSize * b.IndexSize
-
-	// Skip the first compaction, which represents the normal value blocks, as
-	// its totalSize does not account for the ASCII blocks, which are managed
-	// separately.
-	sz += len(b.ValueBlocks) * blockSize * b.ValueSize
-	for _, c := range b.Compactions[1:] {
-		sz += c.totalSize
-	}
-
-	// TODO: this computation does not account for the fixed overhead of using
-	// a compaction, either code or data. As for data, though, the typical
-	// overhead of data is in the order of bytes (2 bytes for cases). Further,
-	// the savings of using a compaction should anyway be substantial for it to
-	// be worth it.
-
-	// For multi-root tries, we also need to account for the handles.
-	if len(b.Trie) > 1 {
-		sz += 2 * b.IndexSize * len(b.Trie)
-	}
-	return sz
-}
-
-func (b *builder) build() {
-	// Compute the sizes of the values.
-	var vmax uint64
-	for _, t := range b.Trie {
-		vmax = maxValue(t.root, vmax)
-	}
-	b.ValueType, b.ValueSize = getIntType(vmax)
-
-	// Compute all block allocations.
-	// TODO: first compute the ASCII blocks for all tries and then the other
-	// nodes.
ASCII blocks are more restricted in placement, as they require two - // blocks to be placed consecutively. Processing them first may improve - // sharing (at least one zero block can be expected to be saved.) - for _, t := range b.Trie { - b.Checksum += b.buildTrie(t) - } - - // Compute the offsets for all the Compacters. - offset := uint32(0) - for i := range b.Compactions { - c := &b.Compactions[i] - c.Offset = offset - offset += c.maxHandle + 1 - c.Cutoff = offset - } - - // Compute the sizes of indexes. - // TODO: different byte positions could have different sizes. So far we have - // not found a case where this is beneficial. - imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff) - for _, ib := range b.IndexBlocks { - if x := uint64(ib.index.index); x > imax { - imax = x - } - } - b.IndexType, b.IndexSize = getIntType(imax) -} - -func maxValue(n *node, max uint64) uint64 { - if n == nil { - return max - } - for _, c := range n.children { - max = maxValue(c, max) - } - for _, v := range n.values { - if max < v { - max = v - } - } - return max -} - -func getIntType(v uint64) (string, int) { - switch { - case v < 1<<8: - return "uint8", 1 - case v < 1<<16: - return "uint16", 2 - case v < 1<<32: - return "uint32", 4 - } - return "uint64", 8 -} - -const ( - blockSize = 64 - - // Subtract two blocks to offset 0x80, the first continuation byte. - blockOffset = 2 - - // Subtract three blocks to offset 0xC0, the first non-ASCII starter. - rootBlockOffset = 3 -) - -var crcTable = crc64.MakeTable(crc64.ISO) - -func (b *builder) buildTrie(t *Trie) uint64 { - n := t.root - - // Get the ASCII offset. For the first trie, the ASCII block will be at - // position 0. - hasher := crc64.New(crcTable) - binary.Write(hasher, binary.BigEndian, n.values) - hash := hasher.Sum64() - - v, ok := b.asciiBlockIdx[hash] - if !ok { - v = len(b.ValueBlocks) - b.asciiBlockIdx[hash] = v - - b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:]) - if v == 0 { - // Add the zero block at position 2 so that it will be assigned a - // zero reference in the lookup blocks. - // TODO: always do this? This would allow us to remove a check from - // the trie lookup, but at the expense of extra space. Analyze - // performance for unicode/norm. - b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize)) - } - } - t.ASCIIIndex = v - - // Compute remaining offsets. - t.Checksum = b.computeOffsets(n, true) - // We already subtracted the normal blockOffset from the index. Subtract the - // difference for starter bytes. - t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset) - return t.Checksum -} - -func (b *builder) computeOffsets(n *node, root bool) uint64 { - // For the first trie, the root lookup block will be at position 3, which is - // the offset for UTF-8 non-ASCII starter bytes. - first := len(b.IndexBlocks) == rootBlockOffset - if first { - b.IndexBlocks = append(b.IndexBlocks, n) - } - - // We special-case the cases where all values recursively are 0. This allows - // for the use of a zero block to which all such values can be directed. 
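	// The crc64 hashing below is the mechanism behind block sharing:
	// children are hashed post-order, so a child's hash feeds its parent's,
	// and identical subtrees collapse to a single entry in indexBlockIdx or
	// valueBlockIdx. A node whose values are all zero keeps hash == 0,
	// which Gen pre-seeded to point at the shared zero block.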
-	hash := uint64(0)
-	if n.children != nil || n.values != nil {
-		hasher := crc64.New(crcTable)
-		for _, c := range n.children {
-			var v uint64
-			if c != nil {
-				v = b.computeOffsets(c, false)
-			}
-			binary.Write(hasher, binary.BigEndian, v)
-		}
-		binary.Write(hasher, binary.BigEndian, n.values)
-		hash = hasher.Sum64()
-	}
-
-	if first {
-		b.indexBlockIdx[hash] = rootBlockOffset - blockOffset
-	}
-
-	// Compacters don't apply to internal nodes.
-	if n.children != nil {
-		v, ok := b.indexBlockIdx[hash]
-		if !ok {
-			v = len(b.IndexBlocks) - blockOffset
-			b.IndexBlocks = append(b.IndexBlocks, n)
-			b.indexBlockIdx[hash] = v
-		}
-		n.index = nodeIndex{0, v}
-	} else {
-		h, ok := b.valueBlockIdx[hash]
-		if !ok {
-			bestI, bestSize := 0, blockSize*b.ValueSize
-			for i, c := range b.Compactions[1:] {
-				if sz, ok := c.c.Size(n.values); ok && bestSize > sz {
-					bestI, bestSize = i+1, sz
-				}
-			}
-			c := &b.Compactions[bestI]
-			c.totalSize += bestSize
-			v := c.c.Store(n.values)
-			if c.maxHandle < v {
-				c.maxHandle = v
-			}
-			h = nodeIndex{bestI, int(v)}
-			b.valueBlockIdx[hash] = h
-		}
-		n.index = h
-	}
-	return hash
-}
diff --git a/vendor/golang.org/x/text/internal/ucd/ucd.go b/vendor/golang.org/x/text/internal/ucd/ucd.go
deleted file mode 100644
index 309e8d8b1..000000000
--- a/vendor/golang.org/x/text/internal/ucd/ucd.go
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ucd provides a parser for Unicode Character Database files, the
-// format of which is defined in http://www.unicode.org/reports/tr44/. See
-// http://www.unicode.org/Public/UCD/latest/ucd/ for example files.
-//
-// It currently does not support substitutions of missing fields.
-package ucd // import "golang.org/x/text/internal/ucd"
-
-import (
-	"bufio"
-	"bytes"
-	"errors"
-	"io"
-	"log"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-// UnicodeData.txt fields.
-const (
-	CodePoint = iota
-	Name
-	GeneralCategory
-	CanonicalCombiningClass
-	BidiClass
-	DecompMapping
-	DecimalValue
-	DigitValue
-	NumericValue
-	BidiMirrored
-	Unicode1Name
-	ISOComment
-	SimpleUppercaseMapping
-	SimpleLowercaseMapping
-	SimpleTitlecaseMapping
-)
-
-// Parse calls f for each entry in the given reader of a UCD file. It will
-// close the reader upon return. It will call log.Fatal if any error occurs.
-//
-// This implements the most common usage pattern of using Parser.
-func Parse(r io.ReadCloser, f func(p *Parser)) {
-	defer r.Close()
-
-	p := New(r)
-	for p.Next() {
-		f(p)
-	}
-	if err := p.Err(); err != nil {
-		r.Close() // os.Exit will cause defers not to be called.
-		log.Fatal(err)
-	}
-}
-
-// An Option is used to configure a Parser.
-type Option func(p *Parser)
-
-func keepRanges(p *Parser) {
-	p.keepRanges = true
-}
-
-var (
-	// KeepRanges prevents the expansion of ranges. The raw ranges can be
-	// obtained by calling Range(0) on the parser.
-	KeepRanges Option = keepRanges
-)
-
-// The Part option registers a handler for lines starting with a '@'. The text
-// after a '@' is available as the first field. Comments are handled as usual.
-func Part(f func(p *Parser)) Option {
-	return func(p *Parser) {
-		p.partHandler = f
-	}
-}
-
-// The CommentHandler option passes comments that are on a line by themselves
-// to a given handler.
-func CommentHandler(f func(s string)) Option {
-	return func(p *Parser) {
-		p.commentHandler = f
-	}
-}
-
-// A Parser parses Unicode Character Database (UCD) files.
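Before the Parser type itself, which follows, here is a minimal sketch of the "most common usage pattern" that Parse implements. The input path is illustrative; the field accessors used are the ones defined further below.

package main

import (
	"log"
	"os"

	"golang.org/x/text/internal/ucd"
)

func main() {
	f, err := os.Open("UnicodeData.txt") // illustrative local path
	if err != nil {
		log.Fatal(err)
	}
	// Parse closes f and calls log.Fatal itself on a scan error.
	ucd.Parse(f, func(p *ucd.Parser) {
		r := p.Rune(0)                            // code point; ranges are expanded
		name := p.String(ucd.Name)                // field 1
		ccc := p.Int(ucd.CanonicalCombiningClass) // field 3
		_, _, _ = r, name, ccc
	})
}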
-type Parser struct { - scanner *bufio.Scanner - - keepRanges bool // Don't expand rune ranges in field 0. - - err error - comment []byte - field [][]byte - // parsedRange is needed in case Range(0) is called more than once for one - // field. In some cases this requires scanning ahead. - parsedRange bool - rangeStart, rangeEnd rune - - partHandler func(p *Parser) - commentHandler func(s string) -} - -func (p *Parser) setError(err error) { - if p.err == nil { - p.err = err - } -} - -func (p *Parser) getField(i int) []byte { - if i >= len(p.field) { - return nil - } - return p.field[i] -} - -// Err returns a non-nil error if any error occurred during parsing. -func (p *Parser) Err() error { - return p.err -} - -// New returns a Parser for the given Reader. -func New(r io.Reader, o ...Option) *Parser { - p := &Parser{ - scanner: bufio.NewScanner(r), - } - for _, f := range o { - f(p) - } - return p -} - -// Next parses the next line in the file. It returns true if a line was parsed -// and false if it reached the end of the file. -func (p *Parser) Next() bool { - if !p.keepRanges && p.rangeStart < p.rangeEnd { - p.rangeStart++ - return true - } - p.comment = nil - p.field = p.field[:0] - p.parsedRange = false - - for p.scanner.Scan() { - b := p.scanner.Bytes() - if len(b) == 0 { - continue - } - if b[0] == '#' { - if p.commentHandler != nil { - p.commentHandler(strings.TrimSpace(string(b[1:]))) - } - continue - } - - // Parse line - if i := bytes.IndexByte(b, '#'); i != -1 { - p.comment = bytes.TrimSpace(b[i+1:]) - b = b[:i] - } - if b[0] == '@' { - if p.partHandler != nil { - p.field = append(p.field, bytes.TrimSpace(b[1:])) - p.partHandler(p) - p.field = p.field[:0] - } - p.comment = nil - continue - } - for { - i := bytes.IndexByte(b, ';') - if i == -1 { - p.field = append(p.field, bytes.TrimSpace(b)) - break - } - p.field = append(p.field, bytes.TrimSpace(b[:i])) - b = b[i+1:] - } - if !p.keepRanges { - p.rangeStart, p.rangeEnd = p.getRange(0) - } - return true - } - p.setError(p.scanner.Err()) - return false -} - -func parseRune(b []byte) (rune, error) { - if len(b) > 2 && b[0] == 'U' && b[1] == '+' { - b = b[2:] - } - x, err := strconv.ParseUint(string(b), 16, 32) - return rune(x), err -} - -func (p *Parser) parseRune(b []byte) rune { - x, err := parseRune(b) - p.setError(err) - return x -} - -// Rune parses and returns field i as a rune. -func (p *Parser) Rune(i int) rune { - if i > 0 || p.keepRanges { - return p.parseRune(p.getField(i)) - } - return p.rangeStart -} - -// Runes interprets and returns field i as a sequence of runes. -func (p *Parser) Runes(i int) (runes []rune) { - add := func(b []byte) { - if b = bytes.TrimSpace(b); len(b) > 0 { - runes = append(runes, p.parseRune(b)) - } - } - for b := p.getField(i); ; { - i := bytes.IndexByte(b, ' ') - if i == -1 { - add(b) - break - } - add(b[:i]) - b = b[i+1:] - } - return -} - -var ( - errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") - - // reRange matches one line of a legacy rune range. - reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") -) - -// Range parses and returns field i as a rune range. A range is inclusive at -// both ends. If the field only has one rune, first and last will be identical. -// It supports the legacy format for ranges used in UnicodeData.txt. 
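As a companion sketch, the KeepRanges path through Range, which is defined next. Blocks.txt is an illustrative input whose first field carries raw ranges such as 0000..007F.

package main

import (
	"log"
	"os"

	"golang.org/x/text/internal/ucd"
)

func main() {
	f, err := os.Open("Blocks.txt") // illustrative file name
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	p := ucd.New(f, ucd.KeepRanges)
	for p.Next() {
		first, last := p.Range(0) // raw, unexpanded range
		log.Printf("%U..%U: %s", first, last, p.String(1))
	}
	if err := p.Err(); err != nil {
		log.Fatal(err)
	}
}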
-func (p *Parser) Range(i int) (first, last rune) {
-	if !p.keepRanges {
-		return p.rangeStart, p.rangeStart
-	}
-	return p.getRange(i)
-}
-
-func (p *Parser) getRange(i int) (first, last rune) {
-	b := p.getField(i)
-	if k := bytes.Index(b, []byte("..")); k != -1 {
-		return p.parseRune(b[:k]), p.parseRune(b[k+2:])
-	}
-	// The first field may not be a rune, in which case we may ignore any error
-	// and set the range as 0..0.
-	x, err := parseRune(b)
-	if err != nil {
-		// Disable range parsing henceforth. This ensures that an error will be
-		// returned if the user subsequently tries to parse this field as a Rune.
-		p.keepRanges = true
-	}
-	// Special case for UnicodeData that was retained for backwards compatibility.
-	if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) {
-		if p.parsedRange {
-			return p.rangeStart, p.rangeEnd
-		}
-		mf := reRange.FindStringSubmatch(p.scanner.Text())
-		if mf == nil || !p.scanner.Scan() {
-			p.setError(errIncorrectLegacyRange)
-			return x, x
-		}
-		// Using Bytes would be more efficient here, but Text is a lot easier
-		// and this is not a frequent case.
-		ml := reRange.FindStringSubmatch(p.scanner.Text())
-		if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] {
-			p.setError(errIncorrectLegacyRange)
-			return x, x
-		}
-		p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])])
-		p.parsedRange = true
-		return p.rangeStart, p.rangeEnd
-	}
-	return x, x
-}
-
-// bools recognizes all valid UCD boolean values.
-var bools = map[string]bool{
-	"":      false,
-	"N":     false,
-	"No":    false,
-	"F":     false,
-	"False": false,
-	"Y":     true,
-	"Yes":   true,
-	"T":     true,
-	"True":  true,
-}
-
-// Bool parses and returns field i as a boolean value.
-func (p *Parser) Bool(i int) bool {
-	b := p.getField(i)
-	for s, v := range bools {
-		if bstrEq(b, s) {
-			return v
-		}
-	}
-	p.setError(strconv.ErrSyntax)
-	return false
-}
-
-// Int parses and returns field i as an integer value.
-func (p *Parser) Int(i int) int {
-	x, err := strconv.ParseInt(string(p.getField(i)), 10, 64)
-	p.setError(err)
-	return int(x)
-}
-
-// Uint parses and returns field i as an unsigned integer value.
-func (p *Parser) Uint(i int) uint {
-	x, err := strconv.ParseUint(string(p.getField(i)), 10, 64)
-	p.setError(err)
-	return uint(x)
-}
-
-// Float parses and returns field i as a decimal value.
-func (p *Parser) Float(i int) float64 {
-	x, err := strconv.ParseFloat(string(p.getField(i)), 64)
-	p.setError(err)
-	return x
-}
-
-// String parses and returns field i as a string value.
-func (p *Parser) String(i int) string {
-	return string(p.getField(i))
-}
-
-// Strings parses and returns field i as a space-separated list of strings.
-func (p *Parser) Strings(i int) []string {
-	ss := strings.Split(string(p.getField(i)), " ")
-	for i, s := range ss {
-		ss[i] = strings.TrimSpace(s)
-	}
-	return ss
-}
-
-// Comment returns the comments for the current line.
-func (p *Parser) Comment() string {
-	return string(p.comment)
-}
-
-var errUndefinedEnum = errors.New("ucd: undefined enum value")
-
-// Enum interprets and returns field i as a value that must be one of the
-// values in enum.
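A short hedged sketch of these accessors on a single parsed line; Enum is defined next. The field positions and the enum set are invented for illustration, and the imports match the earlier ucd sketches.

// handleLine sketches Bool and Enum on one parsed property-file line.
func handleLine(p *ucd.Parser) {
	dep := p.Bool(1)               // "", N, No, F, False => false; Y, Yes, T, True => true
	qc := p.Enum(2, "Y", "N", "M") // any other value records an error on p
	log.Printf("deprecated=%v qc=%q", dep, qc)
}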
-func (p *Parser) Enum(i int, enum ...string) string { - b := p.getField(i) - for _, s := range enum { - if bstrEq(b, s) { - return s - } - } - p.setError(errUndefinedEnum) - return "" -} - -func bstrEq(b []byte, s string) bool { - if len(b) != len(s) { - return false - } - for i, c := range b { - if c != s[i] { - return false - } - } - return true -} diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go deleted file mode 100644 index 2382f4d6d..000000000 --- a/vendor/golang.org/x/text/unicode/cldr/base.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cldr - -import ( - "encoding/xml" - "regexp" - "strconv" -) - -// Elem is implemented by every XML element. -type Elem interface { - setEnclosing(Elem) - setName(string) - enclosing() Elem - - GetCommon() *Common -} - -type hidden struct { - CharData string `xml:",chardata"` - Alias *struct { - Common - Source string `xml:"source,attr"` - Path string `xml:"path,attr"` - } `xml:"alias"` - Def *struct { - Common - Choice string `xml:"choice,attr,omitempty"` - Type string `xml:"type,attr,omitempty"` - } `xml:"default"` -} - -// Common holds several of the most common attributes and sub elements -// of an XML element. -type Common struct { - XMLName xml.Name - name string - enclElem Elem - Type string `xml:"type,attr,omitempty"` - Reference string `xml:"reference,attr,omitempty"` - Alt string `xml:"alt,attr,omitempty"` - ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` - Draft string `xml:"draft,attr,omitempty"` - hidden -} - -// Default returns the default type to select from the enclosed list -// or "" if no default value is specified. -func (e *Common) Default() string { - if e.Def == nil { - return "" - } - if e.Def.Choice != "" { - return e.Def.Choice - } else if e.Def.Type != "" { - // Type is still used by the default element in collation. - return e.Def.Type - } - return "" -} - -// GetCommon returns e. It is provided such that Common implements Elem. -func (e *Common) GetCommon() *Common { - return e -} - -// Data returns the character data accumulated for this element. -func (e *Common) Data() string { - e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) - return e.CharData -} - -func (e *Common) setName(s string) { - e.name = s -} - -func (e *Common) enclosing() Elem { - return e.enclElem -} - -func (e *Common) setEnclosing(en Elem) { - e.enclElem = en -} - -// Escape characters that can be escaped without further escaping the string. -var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) - -// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string. -// It assumes the input string is correctly formatted. -func replaceUnicode(s string) string { - if s[1] == '#' { - r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) - return string(r) - } - r, _, _, _ := strconv.UnquoteChar(s, 0) - return string(r) -} diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go deleted file mode 100644 index 2197f8ac2..000000000 --- a/vendor/golang.org/x/text/unicode/cldr/cldr.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
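The escape normalization in base.go above is compact enough to misread, so here is a small runnable illustration of the two escape families charRe matches, using the same standard-library calls that replaceUnicode relies on (the helper itself is unexported).

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// XML numeric character references are parsed as hex, as in replaceUnicode:
	n, _ := strconv.ParseInt("1F600", 16, 32) // from "&#x1F600;"
	fmt.Println(string(rune(n)))              // emoji U+1F600
	// Backslash escapes go through strconv.UnquoteChar, also as above:
	r, _, _, _ := strconv.UnquoteChar(`\u00E9`, 0)
	fmt.Println(string(r)) // é
}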
- -//go:generate go run makexml.go -output xml.go - -// Package cldr provides a parser for LDML and related XML formats. -// This package is intended to be used by the table generation tools -// for the various internationalization-related packages. -// As the XML types are generated from the CLDR DTD, and as the CLDR standard -// is periodically amended, this package may change considerably over time. -// This mostly means that data may appear and disappear between versions. -// That is, old code should keep compiling for newer versions, but data -// may have moved or changed. -// CLDR version 22 is the first version supported by this package. -// Older versions may not work. -package cldr // import "golang.org/x/text/unicode/cldr" - -import ( - "fmt" - "sort" -) - -// CLDR provides access to parsed data of the Unicode Common Locale Data Repository. -type CLDR struct { - parent map[string][]string - locale map[string]*LDML - resolved map[string]*LDML - bcp47 *LDMLBCP47 - supp *SupplementalData -} - -func makeCLDR() *CLDR { - return &CLDR{ - parent: make(map[string][]string), - locale: make(map[string]*LDML), - resolved: make(map[string]*LDML), - bcp47: &LDMLBCP47{}, - supp: &SupplementalData{}, - } -} - -// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned. -func (cldr *CLDR) BCP47() *LDMLBCP47 { - return nil -} - -// Draft indicates the draft level of an element. -type Draft int - -const ( - Approved Draft = iota - Contributed - Provisional - Unconfirmed -) - -var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""} - -// ParseDraft returns the Draft value corresponding to the given string. The -// empty string corresponds to Approved. -func ParseDraft(level string) (Draft, error) { - if level == "" { - return Approved, nil - } - for i, s := range drafts { - if level == s { - return Unconfirmed - Draft(i), nil - } - } - return Approved, fmt.Errorf("cldr: unknown draft level %q", level) -} - -func (d Draft) String() string { - return drafts[len(drafts)-1-int(d)] -} - -// SetDraftLevel sets which draft levels to include in the evaluated LDML. -// Any draft element for which the draft level is higher than lev will be excluded. -// If multiple draft levels are available for a single element, the one with the -// lowest draft level will be selected, unless preferDraft is true, in which case -// the highest draft will be chosen. -// It is assumed that the underlying LDML is canonicalized. -func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) { - // TODO: implement - cldr.resolved = make(map[string]*LDML) -} - -// RawLDML returns the LDML XML for id in unresolved form. -// id must be one of the strings returned by Locales. -func (cldr *CLDR) RawLDML(loc string) *LDML { - return cldr.locale[loc] -} - -// LDML returns the fully resolved LDML XML for loc, which must be one of -// the strings returned by Locales. -func (cldr *CLDR) LDML(loc string) (*LDML, error) { - return cldr.resolve(loc) -} - -// Supplemental returns the parsed supplemental data. If no such data was parsed, -// nil is returned. -func (cldr *CLDR) Supplemental() *SupplementalData { - return cldr.supp -} - -// Locales returns the locales for which there exist files. -// Valid sublocales for which there is no file are not included. -// The root locale is always sorted first. 
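Tying the accessors together before Locales, which is defined next: a hedged sketch of typical use. cldr.Decoder comes from a part of the package outside this hunk, and the data path is illustrative.

package main

import (
	"log"

	"golang.org/x/text/unicode/cldr"
)

func main() {
	var d cldr.Decoder
	data, err := d.DecodePath("data/common") // unpacked CLDR distribution
	if err != nil {
		log.Fatal(err)
	}
	for _, loc := range data.Locales() { // "root" always sorts first
		ldml := data.RawLDML(loc) // unresolved form; LDML(loc) resolves
		_ = ldml
	}
}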
-func (cldr *CLDR) Locales() []string { - loc := []string{"root"} - hasRoot := false - for l, _ := range cldr.locale { - if l == "root" { - hasRoot = true - continue - } - loc = append(loc, l) - } - sort.Strings(loc[1:]) - if !hasRoot { - return loc[1:] - } - return loc -} - -// Get fills in the fields of x based on the XPath path. -func Get(e Elem, path string) (res Elem, err error) { - return walkXPath(e, path) -} diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go deleted file mode 100644 index 80ee28d79..000000000 --- a/vendor/golang.org/x/text/unicode/cldr/collate.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cldr - -import ( - "bufio" - "encoding/xml" - "errors" - "fmt" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// RuleProcessor can be passed to Collator's Process method, which -// parses the rules and calls the respective method for each rule found. -type RuleProcessor interface { - Reset(anchor string, before int) error - Insert(level int, str, context, extend string) error - Index(id string) -} - -const ( - // cldrIndex is a Unicode-reserved sentinel value used to mark the start - // of a grouping within an index. - // We ignore any rule that starts with this rune. - // See http://unicode.org/reports/tr35/#Collation_Elements for details. - cldrIndex = "\uFDD0" - - // specialAnchor is the format in which to represent logical reset positions, - // such as "first tertiary ignorable". - specialAnchor = "<%s/>" -) - -// Process parses the rules for the tailorings of this collation -// and calls the respective methods of p for each rule found. -func (c Collation) Process(p RuleProcessor) (err error) { - if len(c.Cr) > 0 { - if len(c.Cr) > 1 { - return fmt.Errorf("multiple cr elements, want 0 or 1") - } - return processRules(p, c.Cr[0].Data()) - } - if c.Rules.Any != nil { - return c.processXML(p) - } - return errors.New("no tailoring data") -} - -// processRules parses rules in the Collation Rule Syntax defined in -// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings. -func processRules(p RuleProcessor, s string) (err error) { - chk := func(s string, e error) string { - if err == nil { - err = e - } - return s - } - i := 0 // Save the line number for use after the loop. - scanner := bufio.NewScanner(strings.NewReader(s)) - for ; scanner.Scan() && err == nil; i++ { - for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) { - level := 5 - var ch byte - switch ch, s = s[0], s[1:]; ch { - case '&': // followed by or '[' ']' - if s = skipSpace(s); consume(&s, '[') { - s = chk(parseSpecialAnchor(p, s)) - } else { - s = chk(parseAnchor(p, 0, s)) - } - case '<': // sort relation '<'{1,4}, optionally followed by '*'. - for level = 1; consume(&s, '<'); level++ { - } - if level > 4 { - err = fmt.Errorf("level %d > 4", level) - } - fallthrough - case '=': // identity relation, optionally followed by *. - if consume(&s, '*') { - s = chk(parseSequence(p, level, s)) - } else { - s = chk(parseOrder(p, level, s)) - } - default: - chk("", fmt.Errorf("illegal operator %q", ch)) - break - } - } - } - if chk("", scanner.Err()); err != nil { - return fmt.Errorf("%d: %v", i, err) - } - return nil -} - -// parseSpecialAnchor parses the anchor syntax which is either of the form -// ['before' ] -// or -// [