Mirror of https://github.com/minio/minio.git (synced 2025-02-20 10:02:31 -05:00)
Remove gateway implementations for manta, sia and b2 (#7115)
This commit is contained in:
parent 4fdacb8b14
commit 3265112d04
@@ -34,17 +34,6 @@ var (
// MustGetUUID function alias.
MustGetUUID = mustGetUUID

// IsMinAllowedPartSize function alias.
IsMinAllowedPartSize = isMinAllowedPartSize

// GetCompleteMultipartMD5 function alias.
GetCompleteMultipartMD5 = getCompleteMultipartMD5

// Contains function alias.
Contains = contains

// ExtractETag provides extractETag function alias.
ExtractETag = extractETag
// CleanMetadataKeys provides cleanMetadataKeys function alias.
CleanMetadataKeys = cleanMetadataKeys
)
@@ -1,850 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package b2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
b2 "github.com/minio/blazer/base"
|
||||
"github.com/minio/cli"
|
||||
miniogopolicy "github.com/minio/minio-go/pkg/policy"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
h2 "github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/policy"
|
||||
"github.com/minio/minio/pkg/policy/condition"
|
||||
|
||||
minio "github.com/minio/minio/cmd"
|
||||
)
|
||||
|
||||
// Supported bucket types by B2 backend.
|
||||
const (
|
||||
bucketTypePrivate = "allPrivate"
|
||||
bucketTypeReadOnly = "allPublic"
|
||||
b2Backend = "b2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
const b2GatewayTemplate = `NAME:
|
||||
{{.HelpName}} - {{.Usage}}
|
||||
|
||||
USAGE:
|
||||
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}}
|
||||
{{if .VisibleFlags}}
|
||||
FLAGS:
|
||||
{{range .VisibleFlags}}{{.}}
|
||||
{{end}}{{end}}
|
||||
ENVIRONMENT VARIABLES:
|
||||
ACCESS:
|
||||
MINIO_ACCESS_KEY: B2 account id.
|
||||
MINIO_SECRET_KEY: B2 application key.
|
||||
|
||||
BROWSER:
|
||||
MINIO_BROWSER: To disable web browser access, set this value to "off".
|
||||
|
||||
DOMAIN:
|
||||
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name.
|
||||
|
||||
CACHE:
|
||||
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
|
||||
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
|
||||
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
|
||||
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
|
||||
|
||||
EXAMPLES:
|
||||
1. Start minio gateway server for B2 backend.
|
||||
$ export MINIO_ACCESS_KEY=accountID
|
||||
$ export MINIO_SECRET_KEY=applicationKey
|
||||
$ {{.HelpName}}
|
||||
|
||||
2. Start minio gateway server for B2 backend with edge caching enabled.
|
||||
$ export MINIO_ACCESS_KEY=accountID
|
||||
$ export MINIO_SECRET_KEY=applicationKey
|
||||
$ export MINIO_CACHE_DRIVES="/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
|
||||
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
|
||||
$ export MINIO_CACHE_EXPIRY=40
|
||||
$ export MINIO_CACHE_MAXUSE=80
|
||||
$ {{.HelpName}}
|
||||
`
|
||||
minio.RegisterGatewayCommand(cli.Command{
|
||||
Name: b2Backend,
|
||||
Usage: "Backblaze B2",
|
||||
Action: b2GatewayMain,
|
||||
CustomHelpTemplate: b2GatewayTemplate,
|
||||
HideHelpCommand: true,
|
||||
})
|
||||
}
|
||||
|
||||
// Handler for 'minio gateway b2' command line.
|
||||
func b2GatewayMain(ctx *cli.Context) {
|
||||
minio.StartGateway(ctx, &B2{})
|
||||
}
|
||||
|
||||
// B2 implements Minio Gateway
|
||||
type B2 struct{}
|
||||
|
||||
// Name implements Gateway interface.
|
||||
func (g *B2) Name() string {
|
||||
return b2Backend
|
||||
}
|
||||
|
||||
// NewGatewayLayer returns b2 gateway layer, implements ObjectLayer interface to
|
||||
// talk to B2 remote backend.
|
||||
func (g *B2) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
|
||||
ctx := context.Background()
|
||||
client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &b2Objects{
|
||||
creds: creds,
|
||||
b2Client: client,
|
||||
ctx: ctx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Production - Ready for production use.
|
||||
func (g *B2) Production() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// b2Objects implements a gateway for Minio and Backblaze B2 compatible object storage servers.
|
||||
type b2Objects struct {
|
||||
minio.GatewayUnsupported
|
||||
mu sync.Mutex
|
||||
creds auth.Credentials
|
||||
b2Client *b2.B2
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// Convert B2 errors to minio object layer errors.
|
||||
func b2ToObjectError(err error, params ...string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
bucket := ""
|
||||
object := ""
|
||||
uploadID := ""
|
||||
if len(params) >= 1 {
|
||||
bucket = params[0]
|
||||
}
|
||||
if len(params) == 2 {
|
||||
object = params[1]
|
||||
}
|
||||
if len(params) == 3 {
|
||||
uploadID = params[2]
|
||||
}
|
||||
|
||||
// Following code is a non-exhaustive check to convert
|
||||
// B2 errors into S3 compatible errors.
|
||||
//
|
||||
// For more complete information see - https://www.backblaze.com/b2/docs/
|
||||
statusCode, code, msg := b2.Code(err)
|
||||
if statusCode == 0 {
|
||||
// We don't interpret non B2 errors. B2 errors have statusCode
|
||||
// to help us convert them to S3 object errors.
|
||||
return err
|
||||
}
|
||||
|
||||
switch code {
|
||||
case "duplicate_bucket_name":
|
||||
err = minio.BucketAlreadyOwnedByYou{Bucket: bucket}
|
||||
case "bad_request":
|
||||
if object != "" {
|
||||
err = minio.ObjectNameInvalid{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
} else if bucket != "" {
|
||||
err = minio.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
case "bad_json":
|
||||
if object != "" {
|
||||
err = minio.ObjectNameInvalid{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
} else if bucket != "" {
|
||||
err = minio.BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
case "bad_bucket_id":
|
||||
err = minio.BucketNotFound{Bucket: bucket}
|
||||
case "file_not_present", "not_found":
|
||||
err = minio.ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
case "cannot_delete_non_empty_bucket":
|
||||
err = minio.BucketNotEmpty{Bucket: bucket}
|
||||
}
|
||||
|
||||
// Special interpretation like this is required for Multipart sessions.
|
||||
if strings.Contains(msg, "No active upload for") && uploadID != "" {
|
||||
err = minio.InvalidUploadID{UploadID: uploadID}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Shutdown saves any gateway metadata to disk
|
||||
// if necessary and reloads it upon the next restart.
|
||||
func (l *b2Objects) Shutdown(ctx context.Context) error {
|
||||
// TODO
|
||||
return nil
|
||||
}
|
||||
|
||||
// StorageInfo is not relevant to B2 backend.
|
||||
func (l *b2Objects) StorageInfo(ctx context.Context) (si minio.StorageInfo) {
|
||||
return si
|
||||
}
|
||||
|
||||
// MakeBucket creates a new container on B2 backend.
|
||||
func (l *b2Objects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
|
||||
// location is ignored for B2 backend.
|
||||
|
||||
// All buckets are set to private by default.
|
||||
_, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil)
|
||||
logger.LogIf(ctx, err)
|
||||
return b2ToObjectError(err, bucket)
|
||||
}
|
||||
|
||||
func (l *b2Objects) reAuthorizeAccount(ctx context.Context) error {
|
||||
client, err := b2.AuthorizeAccount(l.ctx, l.creds.AccessKey, l.creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.mu.Lock()
|
||||
l.b2Client.Update(client)
|
||||
l.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// listBuckets is a wrapper similar to ListBuckets, which re-authorizes
|
||||
// the account and updates the B2 client safely. Once successfully
|
||||
// authorized, it performs the call again and returns the list of buckets.
|
||||
// For any errors which are not actionable we return an error.
|
||||
func (l *b2Objects) listBuckets(ctx context.Context, err error) ([]*b2.Bucket, error) {
|
||||
if err != nil {
|
||||
if b2.Action(err) != b2.ReAuthenticate {
|
||||
return nil, err
|
||||
}
|
||||
if rerr := l.reAuthorizeAccount(ctx); rerr != nil {
|
||||
return nil, rerr
|
||||
}
|
||||
}
|
||||
bktList, lerr := l.b2Client.ListBuckets(l.ctx)
|
||||
if lerr != nil {
|
||||
return l.listBuckets(ctx, lerr)
|
||||
}
|
||||
return bktList, nil
|
||||
}
|
||||
|
||||
// Bucket - is a helper which provides a *Bucket instance
|
||||
// for performing an API operation. B2 API doesn't
|
||||
// provide a direct way to access the bucket so we need
|
||||
// to employ the following technique.
|
||||
func (l *b2Objects) Bucket(ctx context.Context, bucket string) (*b2.Bucket, error) {
|
||||
bktList, err := l.listBuckets(ctx, nil)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return nil, b2ToObjectError(err, bucket)
|
||||
}
|
||||
for _, bkt := range bktList {
|
||||
if bkt.Name == bucket {
|
||||
return bkt, nil
|
||||
}
|
||||
}
|
||||
return nil, minio.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
// GetBucketInfo gets bucket metadata.
|
||||
func (l *b2Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
|
||||
if _, err = l.Bucket(ctx, bucket); err != nil {
|
||||
return bi, err
|
||||
}
|
||||
return minio.BucketInfo{
|
||||
Name: bucket,
|
||||
Created: time.Unix(0, 0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListBuckets lists all B2 buckets
|
||||
func (l *b2Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) {
|
||||
bktList, err := l.listBuckets(ctx, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var bktInfo []minio.BucketInfo
|
||||
for _, bkt := range bktList {
|
||||
bktInfo = append(bktInfo, minio.BucketInfo{
|
||||
Name: bkt.Name,
|
||||
Created: time.Unix(0, 0),
|
||||
})
|
||||
}
|
||||
return bktInfo, nil
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket on B2
|
||||
func (l *b2Objects) DeleteBucket(ctx context.Context, bucket string) error {
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = bkt.DeleteBucket(l.ctx)
|
||||
logger.LogIf(ctx, err)
|
||||
return b2ToObjectError(err, bucket)
|
||||
}
|
||||
|
||||
// ListObjects lists all objects in a B2 bucket filtered by prefix, returning up to 1000 entries at a time.
|
||||
func (l *b2Objects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return loi, err
|
||||
}
|
||||
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
|
||||
if lerr != nil {
|
||||
logger.LogIf(ctx, lerr)
|
||||
return loi, b2ToObjectError(lerr, bucket)
|
||||
}
|
||||
loi.IsTruncated = next != ""
|
||||
loi.NextMarker = next
|
||||
for _, file := range files {
|
||||
switch file.Status {
|
||||
case "folder":
|
||||
loi.Prefixes = append(loi.Prefixes, file.Name)
|
||||
case "upload":
|
||||
loi.Objects = append(loi.Objects, minio.ObjectInfo{
|
||||
Bucket: bucket,
|
||||
Name: file.Name,
|
||||
ModTime: file.Timestamp,
|
||||
Size: file.Size,
|
||||
ETag: minio.ToS3ETag(file.Info.ID),
|
||||
ContentType: file.Info.ContentType,
|
||||
UserDefined: file.Info.Info,
|
||||
})
|
||||
}
|
||||
}
|
||||
return loi, nil
|
||||
}
|
||||
|
||||
// ListObjectsV2 lists all objects in a B2 bucket filtered by prefix, returning up to 1000 entries at a time.
|
||||
func (l *b2Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int,
|
||||
fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
|
||||
// fetchOwner is not supported and unused.
|
||||
marker := continuationToken
|
||||
if marker == "" {
|
||||
// B2's continuation token is an object name to "start at" rather than "start after"
|
||||
// startAfter plus the lowest character B2 supports is used so that the startAfter
|
||||
// object isn't included in the results
|
||||
marker = startAfter + " "
|
||||
}
|
||||
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return loi, err
|
||||
}
|
||||
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
|
||||
if lerr != nil {
|
||||
logger.LogIf(ctx, lerr)
|
||||
return loi, b2ToObjectError(lerr, bucket)
|
||||
}
|
||||
loi.IsTruncated = next != ""
|
||||
loi.ContinuationToken = continuationToken
|
||||
loi.NextContinuationToken = next
|
||||
for _, file := range files {
|
||||
switch file.Status {
|
||||
case "folder":
|
||||
loi.Prefixes = append(loi.Prefixes, file.Name)
|
||||
case "upload":
|
||||
loi.Objects = append(loi.Objects, minio.ObjectInfo{
|
||||
Bucket: bucket,
|
||||
Name: file.Name,
|
||||
ModTime: file.Timestamp,
|
||||
Size: file.Size,
|
||||
ETag: minio.ToS3ETag(file.Info.ID),
|
||||
ContentType: file.Info.ContentType,
|
||||
UserDefined: file.Info.Info,
|
||||
})
|
||||
}
|
||||
}
|
||||
return loi, nil
|
||||
}
|
||||
|
||||
// GetObjectNInfo - returns object info and locked object ReadCloser
|
||||
func (l *b2Objects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
|
||||
var objInfo minio.ObjectInfo
|
||||
objInfo, err = l.GetObjectInfo(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var startOffset, length int64
|
||||
startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
|
||||
pw.CloseWithError(err)
|
||||
}()
|
||||
// Setup cleanup function to cause the above go-routine to
|
||||
// exit in case of partial read
|
||||
pipeCloser := func() { pr.Close() }
|
||||
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
|
||||
}
|
||||
|
||||
// GetObject reads an object from B2. Supports additional
|
||||
// parameters like offset and length which are synonymous with
|
||||
// HTTP Range requests.
|
||||
//
|
||||
// startOffset indicates the starting read location of the object.
|
||||
// length indicates the total length of the object.
|
||||
func (l *b2Objects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return b2ToObjectError(err, bucket, object)
|
||||
}
|
||||
defer reader.Close()
|
||||
_, err = io.Copy(writer, reader)
|
||||
logger.LogIf(ctx, err)
|
||||
return b2ToObjectError(err, bucket, object)
|
||||
}
|
||||
|
||||
// GetObjectInfo reads object info and replies back ObjectInfo
|
||||
func (l *b2Objects) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return objInfo, err
|
||||
}
|
||||
f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return objInfo, b2ToObjectError(err, bucket, object)
|
||||
}
|
||||
f.Close()
|
||||
fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return objInfo, b2ToObjectError(err, bucket, object)
|
||||
}
|
||||
return minio.ObjectInfo{
|
||||
Bucket: bucket,
|
||||
Name: object,
|
||||
ETag: minio.ToS3ETag(fi.ID),
|
||||
Size: fi.Size,
|
||||
ModTime: fi.Timestamp,
|
||||
ContentType: fi.ContentType,
|
||||
UserDefined: fi.Info,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// In B2 - You must always include the X-Bz-Content-Sha1 header with
|
||||
// your upload request. The value you provide can be:
|
||||
// (1) the 40-character hex checksum of the file,
|
||||
// (2) the string hex_digits_at_end, or
|
||||
// (3) the string do_not_verify.
|
||||
// For more reference - https://www.backblaze.com/b2/docs/uploading.html
|
||||
//
|
||||
// In our case we are going to use option (2).
|
||||
const sha1AtEOF = "hex_digits_at_end"
|
||||
|
||||
// With the second option mentioned above, you append the 40-character hex sha1
|
||||
// to the end of the request body, immediately after the contents of the file
|
||||
// being uploaded. Note that the resulting content length is therefore the
// original size of the reader plus 40 bytes.
|
||||
//
|
||||
// newB2Reader implements a B2 compatible reader by wrapping the hash.Reader into
|
||||
// a new io.Reader which will emit out the sha1 hex digits at io.EOF.
|
||||
// It also means that your overall content size is now original size + 40 bytes.
|
||||
// Additionally, this reader verifies the hash encapsulated inside hash.Reader
// at io.EOF; if that verification fails we return an error and do not send
// the content to the server.
|
||||
func newB2Reader(r *h2.Reader, size int64) *Reader {
|
||||
return &Reader{
|
||||
r: r,
|
||||
size: size,
|
||||
sha1Hash: sha1.New(),
|
||||
}
|
||||
}
|
||||
|
||||
// Reader wraps the hash.Reader and emits the sha1 hex digits at io.EOF,
// which means the overall content size becomes the original size + 40 bytes.
// Additionally, this reader verifies the hash encapsulated inside hash.Reader
// at io.EOF; if that verification fails we return an error and do not send
// the content to the server.
|
||||
type Reader struct {
|
||||
r *h2.Reader
|
||||
size int64
|
||||
sha1Hash hash.Hash
|
||||
|
||||
isEOF bool
|
||||
buf *strings.Reader
|
||||
}
|
||||
|
||||
// Size - Returns the total size of Reader.
|
||||
func (nb *Reader) Size() int64 { return nb.size + 40 }
|
||||
func (nb *Reader) Read(p []byte) (int, error) {
|
||||
if nb.isEOF {
|
||||
return nb.buf.Read(p)
|
||||
}
|
||||
// Read into hash to update the on going checksum.
|
||||
n, err := io.TeeReader(nb.r, nb.sha1Hash).Read(p)
|
||||
if err == io.EOF {
|
||||
// Stream is not corrupted on this end
|
||||
// now fill in the last 40 bytes of sha1 hex
|
||||
// so that the server can verify the stream on
|
||||
// their end.
|
||||
err = nil
|
||||
nb.isEOF = true
|
||||
nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.sha1Hash.Sum(nil)))
|
||||
}
|
||||
return n, err
|
||||
}
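For illustration only, here is a minimal, self-contained sketch of the same trailing-SHA1 idea using just the standard library and a plain io.Reader instead of minio's hash.Reader; the names trailingSHA1Reader and newTrailingSHA1Reader are hypothetical and are not part of the gateway code.

package main

import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"strings"
)

// trailingSHA1Reader (hypothetical) streams the wrapped reader's bytes and,
// once the wrapped reader reaches io.EOF, emits the 40-character hex SHA1
// of everything that was read, mirroring B2's "hex_digits_at_end" scheme.
type trailingSHA1Reader struct {
	r     io.Reader
	h     hash.Hash
	tail  *strings.Reader
	isEOF bool
}

func newTrailingSHA1Reader(r io.Reader) *trailingSHA1Reader {
	return &trailingSHA1Reader{r: r, h: sha1.New()}
}

func (t *trailingSHA1Reader) Read(p []byte) (int, error) {
	if t.isEOF {
		return t.tail.Read(p)
	}
	// Tee every byte read into the running SHA1.
	n, err := io.TeeReader(t.r, t.h).Read(p)
	if err == io.EOF {
		// Body exhausted: switch to emitting the hex digest so the
		// server can verify the stream on its end.
		err = nil
		t.isEOF = true
		t.tail = strings.NewReader(fmt.Sprintf("%x", t.h.Sum(nil)))
	}
	return n, err
}

func main() {
	body := []byte("hello world")
	out, _ := ioutil.ReadAll(newTrailingSHA1Reader(bytes.NewReader(body)))
	// Prints true: the output is the body followed by 40 hex characters.
	fmt.Println(len(out) == len(body)+40)
}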
// PutObject uploads a single object to the B2 backend using the *b2_upload_file* API, which supports uploads of up to 5GiB.
|
||||
func (l *b2Objects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
|
||||
data := r.Reader
|
||||
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return objInfo, err
|
||||
}
|
||||
contentType := metadata["content-type"]
|
||||
delete(metadata, "content-type")
|
||||
|
||||
var u *b2.URL
|
||||
u, err = bkt.GetUploadURL(l.ctx)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return objInfo, b2ToObjectError(err, bucket, object)
|
||||
}
|
||||
|
||||
hr := newB2Reader(data, data.Size())
|
||||
var f *b2.File
|
||||
f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, metadata)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return objInfo, b2ToObjectError(err, bucket, object)
|
||||
}
|
||||
|
||||
var fi *b2.FileInfo
|
||||
fi, err = f.GetFileInfo(l.ctx)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return objInfo, b2ToObjectError(err, bucket, object)
|
||||
}
|
||||
|
||||
return minio.ObjectInfo{
|
||||
Bucket: bucket,
|
||||
Name: object,
|
||||
ETag: minio.ToS3ETag(fi.ID),
|
||||
Size: fi.Size,
|
||||
ModTime: fi.Timestamp,
|
||||
ContentType: fi.ContentType,
|
||||
UserDefined: fi.Info,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DeleteObject deletes a blob in bucket
|
||||
func (l *b2Objects) DeleteObject(ctx context.Context, bucket string, object string) error {
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return b2ToObjectError(err, bucket, object)
|
||||
}
|
||||
io.Copy(ioutil.Discard, reader)
|
||||
reader.Close()
|
||||
err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx)
|
||||
logger.LogIf(ctx, err)
|
||||
return b2ToObjectError(err, bucket, object)
|
||||
}
|
||||
|
||||
// ListMultipartUploads lists all multipart uploads.
|
||||
func (l *b2Objects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string,
|
||||
delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) {
|
||||
// keyMarker, prefix and delimiter are all ignored; Backblaze B2 doesn't support
// any of these parameters. The only equivalent parameter is uploadIDMarker.
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return lmi, err
|
||||
}
|
||||
// The maximum number of files to return from this call.
|
||||
// The default value is 100, and the maximum allowed is 100.
|
||||
if maxUploads > 100 {
|
||||
maxUploads = 100
|
||||
}
|
||||
largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return lmi, b2ToObjectError(err, bucket)
|
||||
}
|
||||
lmi = minio.ListMultipartsInfo{
|
||||
MaxUploads: maxUploads,
|
||||
}
|
||||
if nextMarker != "" {
|
||||
lmi.IsTruncated = true
|
||||
lmi.NextUploadIDMarker = nextMarker
|
||||
}
|
||||
for _, largeFile := range largeFiles {
|
||||
lmi.Uploads = append(lmi.Uploads, minio.MultipartInfo{
|
||||
Object: largeFile.Name,
|
||||
UploadID: largeFile.ID,
|
||||
Initiated: largeFile.Timestamp,
|
||||
})
|
||||
}
|
||||
return lmi, nil
|
||||
}
|
||||
|
||||
// NewMultipartUpload upload object in multiple parts, uses B2's LargeFile upload API.
|
||||
// Large files can range in size from 5MB to 10TB.
|
||||
// Each large file must consist of at least 2 parts, and all of the parts except the
|
||||
// last one must be at least 5MB in size. The last part must contain at least one byte.
|
||||
// For more information - https://www.backblaze.com/b2/docs/large_files.html
|
||||
func (l *b2Objects) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string, o minio.ObjectOptions) (string, error) {
|
||||
var uploadID string
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return uploadID, err
|
||||
}
|
||||
|
||||
contentType := metadata["content-type"]
|
||||
delete(metadata, "content-type")
|
||||
lf, err := bkt.StartLargeFile(l.ctx, object, contentType, metadata)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return uploadID, b2ToObjectError(err, bucket, object)
|
||||
}
|
||||
|
||||
return lf.ID, nil
|
||||
}
|
||||
|
||||
// PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API.
|
||||
func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, err error) {
|
||||
data := r.Reader
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return pi, err
|
||||
}
|
||||
|
||||
fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return pi, b2ToObjectError(err, bucket, object, uploadID)
|
||||
}
|
||||
|
||||
hr := newB2Reader(data, data.Size())
|
||||
sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return pi, b2ToObjectError(err, bucket, object, uploadID)
|
||||
}
|
||||
|
||||
return minio.PartInfo{
|
||||
PartNumber: partID,
|
||||
LastModified: minio.UTCNow(),
|
||||
ETag: minio.ToS3ETag(sha1),
|
||||
Size: data.Size(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListObjectParts returns all object parts for specified object in specified bucket, uses B2's LargeFile upload API.
|
||||
func (l *b2Objects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, err error) {
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return lpi, err
|
||||
}
|
||||
lpi = minio.ListPartsInfo{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
UploadID: uploadID,
|
||||
MaxParts: maxParts,
|
||||
PartNumberMarker: partNumberMarker,
|
||||
}
|
||||
// startPartNumber must be in the range 1 - 10000 for B2.
|
||||
partNumberMarker++
|
||||
partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return lpi, b2ToObjectError(err, bucket, object, uploadID)
|
||||
}
|
||||
if next != 0 {
|
||||
lpi.IsTruncated = true
|
||||
lpi.NextPartNumberMarker = next
|
||||
}
|
||||
for _, part := range partsList {
|
||||
lpi.Parts = append(lpi.Parts, minio.PartInfo{
|
||||
PartNumber: part.Number,
|
||||
ETag: minio.ToS3ETag(part.SHA1),
|
||||
Size: part.Size,
|
||||
})
|
||||
}
|
||||
return lpi, nil
|
||||
}
|
||||
|
||||
// AbortMultipartUpload aborts an ongoing multipart upload, uses B2's LargeFile upload API.
|
||||
func (l *b2Objects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
|
||||
logger.LogIf(ctx, err)
|
||||
return b2ToObjectError(err, bucket, object, uploadID)
|
||||
}
|
||||
|
||||
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API.
|
||||
func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, err error) {
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return oi, err
|
||||
}
|
||||
hashes := make(map[int]string)
|
||||
for i, uploadedPart := range uploadedParts {
|
||||
// B2 requires contiguous part numbers starting with 1; it does not support
// hand-picking part numbers, so we return an S3 compatible error instead.
|
||||
if i+1 != uploadedPart.PartNumber {
|
||||
logger.LogIf(ctx, minio.InvalidPart{})
|
||||
return oi, b2ToObjectError(minio.InvalidPart{}, bucket, object, uploadID)
|
||||
}
|
||||
|
||||
// Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag.
|
||||
hashes[uploadedPart.PartNumber] = strings.TrimSuffix(uploadedPart.ETag, "-1")
|
||||
}
|
||||
|
||||
if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return oi, b2ToObjectError(err, bucket, object, uploadID)
|
||||
}
|
||||
|
||||
return l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
|
||||
}
|
||||
|
||||
// SetBucketPolicy - B2 supports 2 types of bucket policies:
|
||||
// bucketType.AllPublic - bucketTypeReadOnly means that anybody can download the files in the bucket;
|
||||
// bucketType.AllPrivate - bucketTypePrivate means that you need an authorization token to download them.
|
||||
// Default is AllPrivate for all buckets.
|
||||
func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error {
|
||||
policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy)
|
||||
if err != nil {
|
||||
// This should not happen.
|
||||
return b2ToObjectError(err, bucket)
|
||||
}
|
||||
|
||||
var policies []minio.BucketAccessPolicy
|
||||
for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") {
|
||||
policies = append(policies, minio.BucketAccessPolicy{
|
||||
Prefix: prefix,
|
||||
Policy: policy,
|
||||
})
|
||||
}
|
||||
prefix := bucket + "/*" // For all objects inside the bucket.
|
||||
if len(policies) != 1 {
|
||||
logger.LogIf(ctx, minio.NotImplemented{})
|
||||
return minio.NotImplemented{}
|
||||
}
|
||||
if policies[0].Prefix != prefix {
|
||||
logger.LogIf(ctx, minio.NotImplemented{})
|
||||
return minio.NotImplemented{}
|
||||
}
|
||||
if policies[0].Policy != miniogopolicy.BucketPolicyReadOnly {
|
||||
logger.LogIf(ctx, minio.NotImplemented{})
|
||||
return minio.NotImplemented{}
|
||||
}
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bkt.Type = bucketTypeReadOnly
|
||||
_, err = bkt.Update(l.ctx)
|
||||
logger.LogIf(ctx, err)
|
||||
return b2ToObjectError(err)
|
||||
}
|
||||
|
||||
// GetBucketPolicy returns the current bucketType from the B2 backend and converts
// it into S3 compatible bucket policy info.
|
||||
func (l *b2Objects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// bkt.Type can also be snapshot, but it is only allowed through B2 browser console,
|
||||
// just return back as policy not found for all cases.
|
||||
// CreateBucket always sets the value to allPrivate by default.
|
||||
if bkt.Type != bucketTypeReadOnly {
|
||||
return nil, minio.BucketPolicyNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
return &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(
|
||||
policy.GetBucketLocationAction,
|
||||
policy.ListBucketAction,
|
||||
policy.GetObjectAction,
|
||||
),
|
||||
policy.NewResourceSet(
|
||||
policy.NewResource(bucket, ""),
|
||||
policy.NewResource(bucket, "*"),
|
||||
),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DeleteBucketPolicy - resets the bucketType of bucket on B2 to 'allPrivate'.
|
||||
func (l *b2Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
|
||||
bkt, err := l.Bucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bkt.Type = bucketTypePrivate
|
||||
_, err = bkt.Update(l.ctx)
|
||||
logger.LogIf(ctx, err)
|
||||
return b2ToObjectError(err)
|
||||
}
|
||||
|
||||
// IsCompressionSupported returns whether compression is applicable for this layer.
|
||||
func (l *b2Objects) IsCompressionSupported() bool {
|
||||
return false
|
||||
}
|
@@ -1,119 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package b2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
b2 "github.com/minio/blazer/base"
|
||||
|
||||
minio "github.com/minio/minio/cmd"
|
||||
)
|
||||
|
||||
// Test b2 object error.
|
||||
func TestB2ObjectError(t *testing.T) {
|
||||
testCases := []struct {
|
||||
params []string
|
||||
b2Err error
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
[]string{}, nil, nil,
|
||||
},
|
||||
{
|
||||
[]string{}, fmt.Errorf("Not *Error"), fmt.Errorf("Not *Error"),
|
||||
},
|
||||
{
|
||||
[]string{}, fmt.Errorf("Non B2 Error"), fmt.Errorf("Non B2 Error"),
|
||||
},
|
||||
{
|
||||
[]string{"bucket"}, b2.Error{
|
||||
StatusCode: 1,
|
||||
Code: "duplicate_bucket_name",
|
||||
}, minio.BucketAlreadyOwnedByYou{
|
||||
Bucket: "bucket",
|
||||
},
|
||||
},
|
||||
{
|
||||
[]string{"bucket"}, b2.Error{
|
||||
StatusCode: 1,
|
||||
Code: "bad_request",
|
||||
}, minio.BucketNotFound{
|
||||
Bucket: "bucket",
|
||||
},
|
||||
},
|
||||
{
|
||||
[]string{"bucket", "object"}, b2.Error{
|
||||
StatusCode: 1,
|
||||
Code: "bad_request",
|
||||
}, minio.ObjectNameInvalid{
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
},
|
||||
},
|
||||
{
|
||||
[]string{"bucket"}, b2.Error{
|
||||
StatusCode: 1,
|
||||
Code: "bad_bucket_id",
|
||||
}, minio.BucketNotFound{Bucket: "bucket"},
|
||||
},
|
||||
{
|
||||
[]string{"bucket", "object"}, b2.Error{
|
||||
StatusCode: 1,
|
||||
Code: "file_not_present",
|
||||
}, minio.ObjectNotFound{
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
},
|
||||
},
|
||||
{
|
||||
[]string{"bucket", "object"}, b2.Error{
|
||||
StatusCode: 1,
|
||||
Code: "not_found",
|
||||
}, minio.ObjectNotFound{
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
},
|
||||
},
|
||||
{
|
||||
[]string{"bucket"}, b2.Error{
|
||||
StatusCode: 1,
|
||||
Code: "cannot_delete_non_empty_bucket",
|
||||
}, minio.BucketNotEmpty{
|
||||
Bucket: "bucket",
|
||||
},
|
||||
},
|
||||
{
|
||||
[]string{"bucket", "object", "uploadID"}, b2.Error{
|
||||
StatusCode: 1,
|
||||
Message: "No active upload for",
|
||||
}, minio.InvalidUploadID{
|
||||
UploadID: "uploadID",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
actualErr := b2ToObjectError(testCase.b2Err, testCase.params...)
|
||||
if actualErr != nil {
|
||||
if actualErr.Error() != testCase.expectedErr.Error() {
|
||||
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedErr, actualErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@@ -19,12 +19,9 @@ package gateway
|
||||
import (
|
||||
// Import all gateways.
|
||||
_ "github.com/minio/minio/cmd/gateway/azure"
|
||||
_ "github.com/minio/minio/cmd/gateway/b2"
|
||||
_ "github.com/minio/minio/cmd/gateway/gcs"
|
||||
_ "github.com/minio/minio/cmd/gateway/manta"
|
||||
_ "github.com/minio/minio/cmd/gateway/nas"
|
||||
_ "github.com/minio/minio/cmd/gateway/oss"
|
||||
_ "github.com/minio/minio/cmd/gateway/s3"
|
||||
_ "github.com/minio/minio/cmd/gateway/sia"
|
||||
// Add your gateway here.
|
||||
)
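For context on what each blank import above pulls in: every gateway registers itself from an init() function, exactly as the removed b2 and manta packages did. A hedged, minimal skeleton of that pattern follows (a hypothetical "example" backend, not real code from this repository):

package example

import (
	"github.com/minio/cli"
	minio "github.com/minio/minio/cmd"
	"github.com/minio/minio/pkg/auth"
)

const exampleBackend = "example" // hypothetical backend name

func init() {
	// Registering in init() is what makes a blank import in gateway.go sufficient.
	minio.RegisterGatewayCommand(cli.Command{
		Name:            exampleBackend,
		Usage:           "Example Object Storage",
		Action:          exampleGatewayMain,
		HideHelpCommand: true,
	})
}

func exampleGatewayMain(ctx *cli.Context) {
	minio.StartGateway(ctx, &Example{})
}

// Example implements the minio Gateway interface.
type Example struct{}

// Name returns the backend name used on the command line.
func (g *Example) Name() string { return exampleBackend }

// Production reports whether the gateway is ready for production use.
func (g *Example) Production() bool { return false }

// NewGatewayLayer would return an ObjectLayer talking to the remote backend;
// a real implementation embeds minio.GatewayUnsupported and fills in the
// operations it supports.
func (g *Example) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
	return nil, minio.NotImplemented{}
}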
@@ -1,666 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package manta
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
triton "github.com/joyent/triton-go"
|
||||
"github.com/joyent/triton-go/authentication"
|
||||
terrors "github.com/joyent/triton-go/errors"
|
||||
"github.com/joyent/triton-go/storage"
|
||||
"github.com/minio/cli"
|
||||
minio "github.com/minio/minio/cmd"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
)
|
||||
|
||||
// stor is a namespace within manta where you store any documents that are deemed private
|
||||
// and require access credentials to read them. Within the stor namespace, you can create any
|
||||
// number of directories and objects.
|
||||
const (
|
||||
mantaBackend = "manta"
|
||||
defaultMantaRoot = "/stor"
|
||||
defaultMantaURL = "https://us-east.manta.joyent.com"
|
||||
)
|
||||
|
||||
var mantaRoot = defaultMantaRoot
|
||||
|
||||
func init() {
|
||||
const mantaGatewayTemplate = `NAME:
|
||||
{{.HelpName}} - {{.Usage}}
|
||||
USAGE:
|
||||
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT]
|
||||
{{if .VisibleFlags}}
|
||||
FLAGS:
|
||||
{{range .VisibleFlags}}{{.}}
|
||||
{{end}}{{end}}
|
||||
ENDPOINT:
|
||||
Manta server endpoint. Default ENDPOINT is https://us-east.manta.joyent.com
|
||||
|
||||
ENVIRONMENT VARIABLES:
|
||||
ACCESS:
|
||||
MINIO_ACCESS_KEY: The Manta account name.
|
||||
MINIO_SECRET_KEY: A KeyID associated with the Manta account.
|
||||
MANTA_KEY_MATERIAL: The path to the SSH Key associated with the Manta account if the MINIO_SECRET_KEY is not in SSH Agent.
|
||||
MANTA_SUBUSER: The username of a user who has limited access to your account.
|
||||
|
||||
BROWSER:
|
||||
MINIO_BROWSER: To disable web browser access, set this value to "off".
|
||||
|
||||
DOMAIN:
|
||||
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name.
|
||||
|
||||
CACHE:
|
||||
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
|
||||
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
|
||||
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
|
||||
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
|
||||
|
||||
EXAMPLES:
|
||||
1. Start minio gateway server for Manta Object Storage backend.
|
||||
$ export MINIO_ACCESS_KEY=manta_account_name
|
||||
$ export MINIO_SECRET_KEY=manta_key_id
|
||||
$ {{.HelpName}}
|
||||
|
||||
2. Start minio gateway server for Manta Object Storage backend on custom endpoint.
|
||||
$ export MINIO_ACCESS_KEY=manta_account_name
|
||||
$ export MINIO_SECRET_KEY=manta_key_id
|
||||
$ {{.HelpName}} https://us-west.manta.joyent.com
|
||||
|
||||
3. Start minio gateway server for Manta Object Storage backend without using SSH Agent.
|
||||
$ export MINIO_ACCESS_KEY=manta_account_name
|
||||
$ export MINIO_SECRET_KEY=manta_key_id
|
||||
$ export MANTA_KEY_MATERIAL=~/.ssh/custom_rsa
|
||||
$ {{.HelpName}}
|
||||
|
||||
4. Start minio gateway server for Manta Object Storage backend with edge caching enabled.
|
||||
$ export MINIO_ACCESS_KEY=manta_account_name
|
||||
$ export MINIO_SECRET_KEY=manta_key_id
|
||||
$ export MINIO_CACHE_DRIVES="/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
|
||||
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
|
||||
$ export MINIO_CACHE_EXPIRY=40
|
||||
$ export MINIO_CACHE_MAXUSE=80
|
||||
$ {{.HelpName}}
|
||||
`
|
||||
|
||||
minio.RegisterGatewayCommand(cli.Command{
|
||||
Name: mantaBackend,
|
||||
Usage: "Manta Object Storage",
|
||||
Action: mantaGatewayMain,
|
||||
CustomHelpTemplate: mantaGatewayTemplate,
|
||||
HideHelpCommand: true,
|
||||
})
|
||||
}
|
||||
|
||||
func mantaGatewayMain(ctx *cli.Context) {
|
||||
args := ctx.Args()
|
||||
if !ctx.Args().Present() {
|
||||
args = cli.Args{"https://us-east.manta.joyent.com"}
|
||||
}
|
||||
|
||||
// Validate gateway arguments.
|
||||
logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), args.First()), "Invalid argument")
|
||||
|
||||
// Start the gateway.
|
||||
minio.StartGateway(ctx, &Manta{args.First()})
|
||||
}
|
||||
|
||||
// Manta implements Gateway.
|
||||
type Manta struct {
|
||||
host string
|
||||
}
|
||||
|
||||
// Name implements Gateway interface.
|
||||
func (g *Manta) Name() string {
|
||||
return mantaBackend
|
||||
}
|
||||
|
||||
// NewGatewayLayer returns manta gateway layer, implements ObjectLayer interface to
|
||||
// talk to manta remote backend.
|
||||
func (g *Manta) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
|
||||
var err error
|
||||
var secure bool
|
||||
var signer authentication.Signer
|
||||
var endpoint = defaultMantaURL
|
||||
ctx := context.Background()
|
||||
|
||||
if g.host != "" {
|
||||
endpoint, secure, err = minio.ParseGatewayEndpoint(g.host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if secure {
|
||||
endpoint = "https://" + endpoint
|
||||
} else {
|
||||
endpoint = "http://" + endpoint
|
||||
}
|
||||
}
|
||||
if overrideRoot, ok := os.LookupEnv("MANTA_ROOT"); ok {
|
||||
mantaRoot = overrideRoot
|
||||
}
|
||||
|
||||
keyMaterial := os.Getenv("MANTA_KEY_MATERIAL")
|
||||
|
||||
if keyMaterial == "" {
|
||||
input := authentication.SSHAgentSignerInput{
|
||||
KeyID: creds.SecretKey,
|
||||
AccountName: creds.AccessKey,
|
||||
}
|
||||
if userName, ok := os.LookupEnv("MANTA_SUBUSER"); ok {
|
||||
input.Username = userName
|
||||
}
|
||||
signer, err = authentication.NewSSHAgentSigner(input)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
var keyBytes []byte
|
||||
if _, err = os.Stat(keyMaterial); err == nil {
|
||||
keyBytes, err = ioutil.ReadFile(keyMaterial)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error reading key material from %s: %s",
|
||||
keyMaterial, err)
|
||||
}
|
||||
block, _ := pem.Decode(keyBytes)
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf(
|
||||
"Failed to read key material '%s': no key found", keyMaterial)
|
||||
}
|
||||
|
||||
if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
|
||||
return nil, fmt.Errorf(
|
||||
"Failed to read key '%s': password protected keys are\n"+
|
||||
"not currently supported. Please decrypt the key prior to use.", keyMaterial)
|
||||
}
|
||||
|
||||
} else {
|
||||
keyBytes = []byte(keyMaterial)
|
||||
}
|
||||
|
||||
input := authentication.PrivateKeySignerInput{
|
||||
KeyID: creds.SecretKey,
|
||||
PrivateKeyMaterial: keyBytes,
|
||||
AccountName: creds.AccessKey,
|
||||
}
|
||||
if userName, ok := os.LookupEnv("MANTA_SUBUSER"); ok {
|
||||
input.Username = userName
|
||||
}
|
||||
|
||||
signer, err = authentication.NewPrivateKeySigner(input)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
tc, err := storage.NewClient(&triton.ClientConfig{
|
||||
MantaURL: endpoint,
|
||||
AccountName: creds.AccessKey,
|
||||
Signers: []authentication.Signer{signer},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tc.Client.HTTPClient = &http.Client{
|
||||
Transport: minio.NewCustomHTTPTransport(),
|
||||
}
|
||||
|
||||
return &tritonObjects{
|
||||
client: tc,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Production - Manta is production ready.
|
||||
func (g *Manta) Production() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// tritonObjects - Implements Object layer for Triton Manta storage
|
||||
type tritonObjects struct {
|
||||
minio.GatewayUnsupported
|
||||
client *storage.StorageClient
|
||||
}
|
||||
|
||||
// Shutdown - save any gateway metadata to disk
|
||||
// if necessary and reload upon next restart.
|
||||
func (t *tritonObjects) Shutdown(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// StorageInfo - Not relevant to Triton backend.
|
||||
func (t *tritonObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo) {
|
||||
return si
|
||||
}
|
||||
|
||||
//
|
||||
// ~~~ Buckets ~~~
|
||||
//
|
||||
|
||||
// MakeBucketWithLocation - Create a new directory within manta.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#PutDirectory
|
||||
func (t *tritonObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
|
||||
err := t.client.Dir().Put(ctx, &storage.PutDirectoryInput{
|
||||
DirectoryName: path.Join(mantaRoot, bucket),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBucketInfo - Get directory metadata.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#GetObject
|
||||
func (t *tritonObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, e error) {
|
||||
var info minio.BucketInfo
|
||||
resp, err := t.client.Objects().Get(ctx, &storage.GetObjectInput{
|
||||
ObjectPath: path.Join(mantaRoot, bucket),
|
||||
})
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
|
||||
return minio.BucketInfo{
|
||||
Name: bucket,
|
||||
Created: resp.LastModified,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListBuckets - Lists all Manta directories, uses Manta equivalent
|
||||
// ListDirectories.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#ListDirectory
|
||||
func (t *tritonObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
|
||||
dirs, err := t.client.Dir().List(ctx, &storage.ListDirectoryInput{
|
||||
DirectoryName: path.Join(mantaRoot),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, dir := range dirs.Entries {
|
||||
if dir.Type == "directory" {
|
||||
buckets = append(buckets, minio.BucketInfo{
|
||||
Name: dir.Name,
|
||||
Created: dir.ModifiedTime,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return buckets, nil
|
||||
}
|
||||
|
||||
// DeleteBucket - Delete a directory in Manta, uses Manta equivalent
|
||||
// DeleteDirectory.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#DeleteDirectory
|
||||
func (t *tritonObjects) DeleteBucket(ctx context.Context, bucket string) error {
|
||||
return t.client.Dir().Delete(ctx, &storage.DeleteDirectoryInput{
|
||||
DirectoryName: path.Join(mantaRoot, bucket),
|
||||
})
|
||||
}
|
||||
|
||||
//
|
||||
// ~~~ Objects ~~~
|
||||
//
|
||||
|
||||
// ListObjects - Lists all objects in Manta with a container filtered by prefix
|
||||
// and marker, uses Manta equivalent ListDirectory.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#ListDirectory
|
||||
func (t *tritonObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) {
|
||||
var (
|
||||
dirName string
|
||||
objs *storage.ListDirectoryOutput
|
||||
input *storage.ListDirectoryInput
|
||||
|
||||
pathBase = path.Base(prefix)
|
||||
)
|
||||
|
||||
// Make sure to only request a Dir.List for the parent "directory" for a
|
||||
// given prefix first. We don't know if our prefix is referencing a
|
||||
// directory or file name and can't send file names into Dir.List because
|
||||
// that'll cause Manta to return file content in the response body. Dir.List
|
||||
// expects to parse out directory entries in JSON. So, try the first
|
||||
// directory name of the prefix path provided.
|
||||
if pathDir := path.Dir(prefix); pathDir == "." {
|
||||
dirName = path.Join(mantaRoot, bucket)
|
||||
} else {
|
||||
dirName = path.Join(mantaRoot, bucket, pathDir)
|
||||
}
|
||||
|
||||
if marker != "" {
|
||||
// Manta uses the marker as the key to start at rather than start after
|
||||
// A space is appended to the marker so that the corresponding object is not
|
||||
// included in the results
|
||||
marker += " "
|
||||
}
|
||||
|
||||
input = &storage.ListDirectoryInput{
|
||||
DirectoryName: dirName,
|
||||
Limit: uint64(maxKeys),
|
||||
Marker: marker,
|
||||
}
|
||||
objs, err = t.client.Dir().List(ctx, input)
|
||||
if err != nil {
|
||||
if terrors.IsResourceNotFoundError(err) {
|
||||
return result, nil
|
||||
}
|
||||
logger.LogIf(ctx, err)
|
||||
return result, err
|
||||
}
|
||||
|
||||
for _, obj := range objs.Entries {
|
||||
// If the base name of our prefix was found to be of type "directory"
|
||||
// then we need to pull the directory entries for that instead.
|
||||
if obj.Name == pathBase && obj.Type == "directory" {
|
||||
input.DirectoryName = path.Join(mantaRoot, bucket, prefix)
|
||||
objs, err = t.client.Dir().List(ctx, input)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return result, err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
isTruncated := true // Always send a second request.
|
||||
if marker == "" && len(objs.Entries) < maxKeys {
|
||||
isTruncated = false
|
||||
} else if marker != "" && len(objs.Entries) < maxKeys {
|
||||
isTruncated = false
|
||||
}
|
||||
|
||||
for _, obj := range objs.Entries {
|
||||
if obj.Type == "directory" {
|
||||
result.Prefixes = append(result.Prefixes, obj.Name+delimiter)
|
||||
} else {
|
||||
result.Objects = append(result.Objects, minio.ObjectInfo{
|
||||
Name: obj.Name,
|
||||
Size: int64(obj.Size),
|
||||
ModTime: obj.ModifiedTime,
|
||||
ETag: obj.ETag,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
result.IsTruncated = isTruncated
|
||||
if isTruncated {
|
||||
result.NextMarker = result.Objects[len(result.Objects)-1].Name
|
||||
}
|
||||
return result, nil
|
||||
}
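To make the prefix handling in ListObjects above concrete, here is a small standalone illustration (standard library only, with hypothetical bucket and prefix values) of how a prefix is split into the directory that is listed first and the base name that is checked afterwards:

package main

import (
	"fmt"
	"path"
)

func main() {
	const mantaRoot = "/stor"
	bucket, prefix := "photos", "2018/vacation"

	// Dir.List is first issued against the parent directory of the prefix...
	fmt.Println(path.Join(mantaRoot, bucket, path.Dir(prefix))) // /stor/photos/2018

	// ...and if an entry named path.Base(prefix) turns out to be a
	// directory, the listing is re-issued against the prefix itself.
	fmt.Println(path.Base(prefix))                    // vacation
	fmt.Println(path.Join(mantaRoot, bucket, prefix)) // /stor/photos/2018/vacation
}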
//
|
||||
// ~~~ Objects ~~~
|
||||
//
|
||||
|
||||
// ListObjectsV2 - Lists all objects in Manta with a container filtered by prefix
|
||||
// and continuationToken, uses Manta equivalent ListDirectory.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#ListDirectory
|
||||
func (t *tritonObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) {
|
||||
var (
|
||||
dirName string
|
||||
objs *storage.ListDirectoryOutput
|
||||
input *storage.ListDirectoryInput
|
||||
|
||||
pathBase = path.Base(prefix)
|
||||
)
|
||||
|
||||
marker := continuationToken
|
||||
if marker == "" {
|
||||
marker = startAfter
|
||||
}
|
||||
|
||||
if marker != "" {
|
||||
// Manta uses the marker as the key to start at rather than start after.
|
||||
// A space is appended to the marker so that the corresponding object is not
|
||||
// included in the results
|
||||
marker += " "
|
||||
}
|
||||
|
||||
if pathDir := path.Dir(prefix); pathDir == "." {
|
||||
dirName = path.Join(mantaRoot, bucket)
|
||||
} else {
|
||||
dirName = path.Join(mantaRoot, bucket, pathDir)
|
||||
}
|
||||
|
||||
input = &storage.ListDirectoryInput{
|
||||
DirectoryName: dirName,
|
||||
Limit: uint64(maxKeys),
|
||||
Marker: marker,
|
||||
}
|
||||
objs, err = t.client.Dir().List(ctx, input)
|
||||
if err != nil {
|
||||
if terrors.IsResourceNotFoundError(err) {
|
||||
return result, nil
|
||||
}
|
||||
logger.LogIf(ctx, err)
|
||||
return result, err
|
||||
}
|
||||
|
||||
for _, obj := range objs.Entries {
|
||||
if obj.Name == pathBase && obj.Type == "directory" {
|
||||
input.DirectoryName = path.Join(mantaRoot, bucket, prefix)
|
||||
objs, err = t.client.Dir().List(ctx, input)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return result, err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
isTruncated := true // Always send a second request.
|
||||
if continuationToken == "" && len(objs.Entries) < maxKeys {
|
||||
isTruncated = false
|
||||
} else if continuationToken != "" && len(objs.Entries) < maxKeys {
|
||||
isTruncated = false
|
||||
}
|
||||
|
||||
for _, obj := range objs.Entries {
|
||||
if obj.Type == "directory" {
|
||||
result.Prefixes = append(result.Prefixes, obj.Name+delimiter)
|
||||
} else {
|
||||
result.Objects = append(result.Objects, minio.ObjectInfo{
|
||||
Name: obj.Name,
|
||||
Size: int64(obj.Size),
|
||||
ModTime: obj.ModifiedTime,
|
||||
ETag: obj.ETag,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
result.IsTruncated = isTruncated
|
||||
if isTruncated {
|
||||
result.NextContinuationToken = result.Objects[len(result.Objects)-1].Name
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetObjectNInfo - returns object info and locked object ReadCloser
|
||||
func (t *tritonObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
|
||||
var objInfo minio.ObjectInfo
|
||||
objInfo, err = t.GetObjectInfo(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var startOffset, length int64
|
||||
startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
err := t.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
|
||||
pw.CloseWithError(err)
|
||||
}()
|
||||
// Setup cleanup function to cause the above go-routine to
|
||||
// exit in case of partial read
|
||||
pipeCloser := func() { pr.Close() }
|
||||
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
|
||||
}
|
||||
|
||||
// GetObject - Reads an object from Manta. Supports additional parameters like
|
||||
// offset and length which are synonymous with HTTP Range requests.
|
||||
//
|
||||
// startOffset indicates the starting read location of the object. length
|
||||
// indicates the total length of the object.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#GetObject
|
||||
func (t *tritonObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
|
||||
// Start offset cannot be negative.
|
||||
if startOffset < 0 {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
|
||||
return fmt.Errorf("Unexpected error")
|
||||
}
|
||||
|
||||
output, err := t.client.Objects().Get(ctx, &storage.GetObjectInput{
|
||||
ObjectPath: path.Join(mantaRoot, bucket, object),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer output.ObjectReader.Close()
|
||||
|
||||
// Read until startOffset and discard, Manta object storage doesn't support range GET requests yet.
|
||||
if _, err = io.CopyN(ioutil.Discard, output.ObjectReader, startOffset); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if length > 0 {
|
||||
_, err = io.Copy(writer, io.LimitReader(output.ObjectReader, length))
|
||||
} else {
|
||||
_, err = io.Copy(writer, output.ObjectReader)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetObjectInfo - reads blob metadata properties and replies back minio.ObjectInfo,
|
||||
// uses Triton equivalent GetBlobProperties.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#GetObject
|
||||
func (t *tritonObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
|
||||
info, err := t.client.Objects().GetInfo(ctx, &storage.GetInfoInput{
|
||||
ObjectPath: path.Join(mantaRoot, bucket, object),
|
||||
})
|
||||
if err != nil {
|
||||
if terrors.IsStatusNotFoundCode(err) {
|
||||
return objInfo, minio.ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
}
|
||||
|
||||
return objInfo, err
|
||||
}
|
||||
|
||||
return minio.ObjectInfo{
|
||||
Bucket: bucket,
|
||||
ContentType: info.ContentType,
|
||||
Size: int64(info.ContentLength),
|
||||
ETag: info.ETag,
|
||||
ModTime: info.LastModified,
|
||||
UserDefined: info.Metadata,
|
||||
IsDir: strings.HasSuffix(info.ContentType, "type=directory"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type dummySeeker struct {
|
||||
io.Reader
|
||||
}
|
||||
|
||||
func (d dummySeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// PutObject - Create a new blob with the incoming data, uses Triton equivalent
|
||||
// CreateBlockBlobFromReader.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#PutObject
|
||||
func (t *tritonObjects) PutObject(ctx context.Context, bucket, object string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
|
||||
data := r.Reader
|
||||
if err = t.client.Objects().Put(ctx, &storage.PutObjectInput{
|
||||
ContentLength: uint64(data.Size()),
|
||||
ObjectPath: path.Join(mantaRoot, bucket, object),
|
||||
ContentType: metadata["content-type"],
|
||||
// TODO: Change to `string(data.md5sum)` if/when that becomes an exported field
|
||||
ContentMD5: metadata["content-md5"],
|
||||
ObjectReader: dummySeeker{data},
|
||||
ForceInsert: true,
|
||||
}); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return objInfo, err
|
||||
}
|
||||
if err = data.Verify(); err != nil {
|
||||
t.DeleteObject(ctx, bucket, object)
|
||||
logger.LogIf(ctx, err)
|
||||
return objInfo, err
|
||||
}
|
||||
|
||||
return t.GetObjectInfo(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
// CopyObject - Copies a blob from source container to destination container.
|
||||
// Uses Manta Snaplinks API.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#PutSnapLink
|
||||
func (t *tritonObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
|
||||
if err = t.client.SnapLinks().Put(ctx, &storage.PutSnapLinkInput{
|
||||
SourcePath: path.Join(mantaRoot, srcBucket, srcObject),
|
||||
LinkPath: path.Join(mantaRoot, destBucket, destObject),
|
||||
}); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return objInfo, err
|
||||
}
|
||||
|
||||
return t.GetObjectInfo(ctx, destBucket, destObject, dstOpts)
|
||||
}
|
||||
|
||||
// DeleteObject - Deletes an object in Manta using the Manta DeleteObject API.
|
||||
//
|
||||
// https://apidocs.joyent.com/manta/api.html#DeleteObject
|
||||
func (t *tritonObjects) DeleteObject(ctx context.Context, bucket, object string) error {
|
||||
if err := t.client.Objects().Delete(ctx, &storage.DeleteObjectInput{
|
||||
ObjectPath: path.Join(mantaRoot, bucket, object),
|
||||
}); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsCompressionSupported returns whether compression is applicable for this layer.
|
||||
func (t *tritonObjects) IsCompressionSupported() bool {
|
||||
return false
|
||||
}
|
@ -1,658 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package sia
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/fatih/color"
|
||||
"github.com/minio/cli"
|
||||
"github.com/minio/minio-go/pkg/set"
|
||||
minio "github.com/minio/minio/cmd"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/sha256-simd"
|
||||
)
|
||||
|
||||
const (
|
||||
siaBackend = "sia"
|
||||
)
|
||||
|
||||
type siaObjects struct {
|
||||
minio.GatewayUnsupported
|
||||
Address string // Address and port of Sia Daemon.
|
||||
TempDir string // Temporary storage location for file transfers.
|
||||
RootDir string // Root directory to store files on Sia.
|
||||
password string // Sia password for uploading content in authenticated manner.
|
||||
}
|
||||
|
||||
func init() {
|
||||
const siaGatewayTemplate = `NAME:
|
||||
{{.HelpName}} - {{.Usage}}
|
||||
|
||||
USAGE:
|
||||
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [SIA_DAEMON_ADDR]
|
||||
{{if .VisibleFlags}}
|
||||
FLAGS:
|
||||
{{range .VisibleFlags}}{{.}}
|
||||
{{end}}{{end}}
|
||||
ENVIRONMENT VARIABLES: (Default values in parenthesis)
|
||||
ACCESS:
|
||||
MINIO_ACCESS_KEY: Custom access key (Do not reuse same access keys on all instances)
|
||||
MINIO_SECRET_KEY: Custom secret key (Do not reuse same secret keys on all instances)
|
||||
|
||||
BROWSER:
|
||||
MINIO_BROWSER: To disable web browser access, set this value to "off".
|
||||
|
||||
DOMAIN:
|
||||
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name.
|
||||
|
||||
CACHE:
|
||||
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
|
||||
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
|
||||
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
|
||||
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
|
||||
|
||||
SIA_TEMP_DIR: The name of the local Sia temporary storage directory. (.sia_temp)
|
||||
SIA_API_PASSWORD: API password for Sia daemon. (default is empty)
|
||||
|
||||
EXAMPLES:
|
||||
1. Start minio gateway server for Sia backend.
|
||||
$ {{.HelpName}}
|
||||
|
||||
2. Start minio gateway server for Sia backend with edge caching enabled.
|
||||
$ export MINIO_CACHE_DRIVES="/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
|
||||
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
|
||||
$ export MINIO_CACHE_EXPIRY=40
|
||||
$ export MINIO_CACHE_MAXUSE=80
|
||||
$ {{.HelpName}}
|
||||
`
|
||||
|
||||
minio.RegisterGatewayCommand(cli.Command{
|
||||
Name: siaBackend,
|
||||
Usage: "Sia Decentralized Cloud",
|
||||
Action: siaGatewayMain,
|
||||
CustomHelpTemplate: siaGatewayTemplate,
|
||||
HideHelpCommand: true,
|
||||
})
|
||||
}
|
||||
|
||||
// Handler for 'minio gateway sia' command line.
|
||||
func siaGatewayMain(ctx *cli.Context) {
|
||||
// Validate gateway arguments.
|
||||
host := ctx.Args().First()
|
||||
|
||||
logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
|
||||
|
||||
minio.StartGateway(ctx, &Sia{host})
|
||||
}
|
||||
|
||||
// Sia implements Gateway.
|
||||
type Sia struct {
|
||||
host string // Sia daemon host address
|
||||
}
|
||||
|
||||
// Name implements Gateway interface.
|
||||
func (g *Sia) Name() string {
|
||||
return siaBackend
|
||||
}
|
||||
|
||||
// NewGatewayLayer returns Sia gateway layer, implements ObjectLayer interface to
|
||||
// talk to Sia backend.
|
||||
func (g *Sia) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
|
||||
sia := &siaObjects{
|
||||
Address: g.host,
|
||||
// RootDir uses access key directly, provides partitioning for
|
||||
// concurrent users talking to same sia daemon.
|
||||
RootDir: creds.AccessKey,
|
||||
TempDir: os.Getenv("SIA_TEMP_DIR"),
|
||||
password: os.Getenv("SIA_API_PASSWORD"),
|
||||
}
|
||||
|
||||
// If Address not provided on command line or ENV, default to:
|
||||
if sia.Address == "" {
|
||||
sia.Address = "127.0.0.1:9980"
|
||||
}
|
||||
|
||||
// If local Sia temp directory not specified, default to:
|
||||
if sia.TempDir == "" {
|
||||
sia.TempDir = ".sia_temp"
|
||||
}
|
||||
|
||||
var err error
|
||||
sia.TempDir, err = filepath.Abs(sia.TempDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the temp directory with proper permissions.
|
||||
// Ignore error when dir already exists.
|
||||
if err = os.MkdirAll(sia.TempDir, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
colorBlue := color.New(color.FgBlue).SprintfFunc()
|
||||
colorBold := color.New(color.Bold).SprintFunc()
|
||||
|
||||
formatStr := "%" + fmt.Sprintf("%ds", len(sia.Address)+7)
|
||||
logger.StartupMessage(colorBlue("\nSia Configuration:"))
|
||||
logger.StartupMessage(colorBlue(" API Address:") + colorBold(fmt.Sprintf(formatStr, sia.Address)))
|
||||
logger.StartupMessage(colorBlue(" Staging Directory:") + colorBold(fmt.Sprintf(" %s", sia.TempDir)))
|
||||
|
||||
return sia, nil
|
||||
}
|
||||
|
||||
// Production - sia gateway is not ready for production use.
|
||||
func (g *Sia) Production() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// non2xx returns true for non-success HTTP status codes.
|
||||
func non2xx(code int) bool {
|
||||
return code < 200 || code > 299
|
||||
}
|
||||
|
||||
// decodeError returns the api.Error from an API response. This method should
|
||||
// only be called if the response's status code is non-2xx. The error returned
|
||||
// may not be of type api.Error in the event of an error unmarshalling the
|
||||
// JSON.
|
||||
type siaError struct {
|
||||
// Message describes the error in English. Typically it is set to
|
||||
// `err.Error()`. This field is required.
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
func (s siaError) Error() string {
|
||||
return s.Message
|
||||
}
|
||||
|
||||
func decodeError(resp *http.Response) error {
|
||||
// Error is a type that is encoded as JSON and returned in an API response in
|
||||
// the event of an error. Only the Message field is required. More fields may
|
||||
// be added to this struct in the future for better error reporting.
|
||||
var apiErr siaError
|
||||
if err := json.NewDecoder(resp.Body).Decode(&apiErr); err != nil {
|
||||
return err
|
||||
}
|
||||
return apiErr
|
||||
}
|
||||
|
||||
// MethodNotSupported - returned if call returned error.
|
||||
type MethodNotSupported struct {
|
||||
method string
|
||||
}
|
||||
|
||||
func (s MethodNotSupported) Error() string {
|
||||
return fmt.Sprintf("API call not recognized: %s", s.method)
|
||||
}
|
||||
|
||||
// apiGet wraps a GET request with a status code check, such that if the GET does
|
||||
// not return 2xx, the error will be read and returned. The response body is
|
||||
// not closed.
|
||||
func apiGet(ctx context.Context, addr, call, apiPassword string) (*http.Response, error) {
|
||||
req, err := http.NewRequest("GET", "http://"+addr+call, nil)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("User-Agent", "Sia-Agent")
|
||||
if apiPassword != "" {
|
||||
req.SetBasicAuth("", apiPassword)
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
minio.CloseResponse(resp.Body)
|
||||
logger.LogIf(ctx, MethodNotSupported{call})
|
||||
return nil, MethodNotSupported{call}
|
||||
}
|
||||
if non2xx(resp.StatusCode) {
|
||||
err := decodeError(resp)
|
||||
minio.CloseResponse(resp.Body)
|
||||
logger.LogIf(ctx, err)
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// apiPost wraps a POST request with a status code check, such that if the POST
|
||||
// does not return 2xx, the error will be read and returned. The response body
|
||||
// is not closed.
|
||||
func apiPost(ctx context.Context, addr, call, vals, apiPassword string) (*http.Response, error) {
|
||||
req, err := http.NewRequest("POST", "http://"+addr+call, strings.NewReader(vals))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("User-Agent", "Sia-Agent")
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
if apiPassword != "" {
|
||||
req.SetBasicAuth("", apiPassword)
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
minio.CloseResponse(resp.Body)
|
||||
return nil, MethodNotSupported{call}
|
||||
}
|
||||
|
||||
if non2xx(resp.StatusCode) {
|
||||
err := decodeError(resp)
|
||||
minio.CloseResponse(resp.Body)
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// post makes an API call and discards the response. An error is returned if
|
||||
// the response status is not 2xx.
|
||||
func post(ctx context.Context, addr, call, vals, apiPassword string) error {
|
||||
resp, err := apiPost(ctx, addr, call, vals, apiPassword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
minio.CloseResponse(resp.Body)
|
||||
return nil
|
||||
}
|
||||
|
||||
// list lists all the uploaded files and decodes the JSON response.
|
||||
func list(ctx context.Context, addr string, apiPassword string, obj *renterFiles) error {
|
||||
resp, err := apiGet(ctx, addr, "/renter/files", apiPassword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer minio.CloseResponse(resp.Body)
|
||||
|
||||
if resp.StatusCode == http.StatusNoContent {
|
||||
logger.LogIf(ctx, fmt.Errorf("Expecting a response, but API returned %s", resp.Status))
|
||||
return fmt.Errorf("Expecting a response, but API returned %s", resp.Status)
|
||||
}
|
||||
err = json.NewDecoder(resp.Body).Decode(obj)
|
||||
logger.LogIf(ctx, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// get makes an API call and discards the response. An error is returned if the
|
||||
// response status is not 2xx.
|
||||
func get(ctx context.Context, addr, call, apiPassword string) error {
|
||||
resp, err := apiGet(ctx, addr, call, apiPassword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
minio.CloseResponse(resp.Body)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown saves any gateway metadata to disk
|
||||
// if necessary so that it can be reloaded upon the next restart.
|
||||
func (s *siaObjects) Shutdown(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// StorageInfo is not relevant to Sia backend.
|
||||
func (s *siaObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo) {
|
||||
return si
|
||||
}
|
||||
|
||||
// MakeBucketWithLocation creates a new bucket on the Sia backend.
|
||||
func (s *siaObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
|
||||
srcFile := path.Join(s.TempDir, minio.MustGetUUID())
|
||||
defer os.Remove(srcFile)
|
||||
|
||||
writer, err := os.Create(srcFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = io.Copy(writer, bytes.NewReader([]byte(""))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
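// Sia has no native notion of buckets, so mark the bucket's existence by uploading an empty sentinel object named after the SHA-256 hash of the bucket name.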
sha256sum := sha256.Sum256([]byte(bucket))
|
||||
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
|
||||
return post(ctx, s.Address, "/renter/upload/"+siaObj, "source="+srcFile, s.password)
|
||||
}
|
||||
|
||||
// GetBucketInfo gets bucket metadata.
|
||||
func (s *siaObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
|
||||
sha256sum := sha256.Sum256([]byte(bucket))
|
||||
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
|
||||
|
||||
dstFile := path.Join(s.TempDir, minio.MustGetUUID())
|
||||
defer os.Remove(dstFile)
|
||||
|
||||
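// Probe for the bucket's sentinel object; a successful download means the bucket exists.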
if err := get(ctx, s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
|
||||
return bi, err
|
||||
}
|
||||
return minio.BucketInfo{Name: bucket}, nil
|
||||
}
|
||||
|
||||
// ListBuckets will detect and return existing buckets on Sia.
|
||||
func (s *siaObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
|
||||
sObjs, serr := s.listRenterFiles(ctx, "")
|
||||
if serr != nil {
|
||||
return buckets, serr
|
||||
}
|
||||
|
||||
m := make(set.StringSet)
|
||||
|
||||
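// Bucket names are the first path component under the root directory of each renter file.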
prefix := s.RootDir + "/"
|
||||
for _, sObj := range sObjs {
|
||||
if strings.HasPrefix(sObj.SiaPath, prefix) {
|
||||
siaObj := strings.TrimPrefix(sObj.SiaPath, prefix)
|
||||
idx := strings.Index(siaObj, "/")
|
||||
if idx > 0 {
|
||||
m.Add(siaObj[0:idx])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, bktName := range m.ToSlice() {
|
||||
buckets = append(buckets, minio.BucketInfo{
|
||||
Name: bktName,
|
||||
Created: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
})
|
||||
}
|
||||
|
||||
return buckets, nil
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket on Sia.
|
||||
func (s *siaObjects) DeleteBucket(ctx context.Context, bucket string) error {
|
||||
sha256sum := sha256.Sum256([]byte(bucket))
|
||||
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
|
||||
|
||||
return post(ctx, s.Address, "/renter/delete/"+siaObj, "", s.password)
|
||||
}
|
||||
|
||||
func (s *siaObjects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
|
||||
siaObjs, siaErr := s.listRenterFiles(ctx, bucket)
|
||||
if siaErr != nil {
|
||||
return loi, siaErr
|
||||
}
|
||||
|
||||
loi.IsTruncated = false
|
||||
loi.NextMarker = ""
|
||||
|
||||
root := s.RootDir + "/"
|
||||
|
||||
sha256sum := sha256.Sum256([]byte(bucket))
|
||||
// FIXME(harsha) - No paginated output supported for Sia backend right now, only prefix
|
||||
// based filtering. Once list renter files API supports paginated output we can support
|
||||
// paginated results here as well - until then Listing is an expensive operation.
|
||||
for _, sObj := range siaObjs {
|
||||
name := strings.TrimPrefix(sObj.SiaPath, path.Join(root, bucket)+"/")
|
||||
// Skip the sentinel file that was created when the bucket was created.
|
||||
if name == hex.EncodeToString(sha256sum[:]) {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(name, prefix) {
|
||||
loi.Objects = append(loi.Objects, minio.ObjectInfo{
|
||||
Bucket: bucket,
|
||||
Name: name,
|
||||
Size: int64(sObj.Filesize),
|
||||
IsDir: false,
|
||||
})
|
||||
}
|
||||
}
|
||||
return loi, nil
|
||||
}
|
||||
|
||||
// GetObjectNInfo - returns object info and locked object ReadCloser
|
||||
func (s *siaObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
|
||||
var objInfo minio.ObjectInfo
|
||||
objInfo, err = s.GetObjectInfo(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var startOffset, length int64
|
||||
startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
err := s.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
|
||||
pw.CloseWithError(err)
|
||||
}()
|
||||
// Setup cleanup function to cause the above go-routine to
|
||||
// exit in case of partial read
|
||||
pipeCloser := func() { pr.Close() }
|
||||
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
|
||||
}
|
||||
|
||||
func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
|
||||
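// Download the whole object from the Sia daemon into a local temp file, then serve the requested byte range from that file.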
dstFile := path.Join(s.TempDir, minio.MustGetUUID())
|
||||
defer os.Remove(dstFile)
|
||||
|
||||
var siaObj = path.Join(s.RootDir, bucket, object)
|
||||
if err := get(ctx, s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reader, err := os.Open(dstFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer reader.Close()
|
||||
st, err := reader.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
size := st.Size()
|
||||
if _, err = reader.Seek(startOffset, os.SEEK_SET); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For negative length we read everything.
|
||||
if length < 0 {
|
||||
length = size - startOffset
|
||||
}
|
||||
|
||||
bufSize := int64(1 * humanize.MiByte)
|
||||
if bufSize > length {
|
||||
bufSize = length
|
||||
}
|
||||
|
||||
// Reply back invalid range if the input offset and length fall out of range.
|
||||
if startOffset > size || startOffset+length > size {
|
||||
logger.LogIf(ctx, minio.InvalidRange{
|
||||
OffsetBegin: startOffset,
|
||||
OffsetEnd: length,
|
||||
ResourceSize: size,
|
||||
})
|
||||
return minio.InvalidRange{
|
||||
OffsetBegin: startOffset,
|
||||
OffsetEnd: length,
|
||||
ResourceSize: size,
|
||||
}
|
||||
}
|
||||
|
||||
// Allocate a staging buffer.
|
||||
buf := make([]byte, int(bufSize))
|
||||
|
||||
_, err = io.CopyBuffer(writer, io.LimitReader(reader, length), buf)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// findSiaObject retrieves the siaObjectInfo for the Sia object with the given
|
||||
// Sia path name.
|
||||
func (s *siaObjects) findSiaObject(ctx context.Context, bucket, object string) (siaObjectInfo, error) {
|
||||
siaPath := path.Join(s.RootDir, bucket, object)
|
||||
|
||||
sObjs, err := s.listRenterFiles(ctx, "")
|
||||
if err != nil {
|
||||
return siaObjectInfo{}, err
|
||||
}
|
||||
|
||||
for _, sObj := range sObjs {
|
||||
if sObj.SiaPath == siaPath {
|
||||
return sObj, nil
|
||||
}
|
||||
}
|
||||
logger.LogIf(ctx, minio.ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
})
|
||||
return siaObjectInfo{}, minio.ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
}
|
||||
|
||||
// GetObjectInfo reads object info and replies back ObjectInfo
|
||||
func (s *siaObjects) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
|
||||
so, err := s.findSiaObject(ctx, bucket, object)
|
||||
if err != nil {
|
||||
return minio.ObjectInfo{}, err
|
||||
}
|
||||
|
||||
// Metadata about Sia objects is minimal; Sia only provides the file size.
|
||||
return minio.ObjectInfo{
|
||||
Bucket: bucket,
|
||||
Name: object,
|
||||
ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
Size: int64(so.Filesize),
|
||||
IsDir: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PutObject creates a new object with the incoming data.
|
||||
func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
|
||||
data := r.Reader
|
||||
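// Stage the incoming data in a local temp file; the Sia daemon uploads from the local filesystem, and the staged copy is removed once the upload completes.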
srcFile := path.Join(s.TempDir, minio.MustGetUUID())
|
||||
writer, err := os.Create(srcFile)
|
||||
if err != nil {
|
||||
return objInfo, err
|
||||
}
|
||||
|
||||
wsize, err := io.CopyN(writer, data, data.Size())
|
||||
if err != nil {
|
||||
os.Remove(srcFile)
|
||||
return objInfo, err
|
||||
}
|
||||
|
||||
if err = post(ctx, s.Address, "/renter/upload/"+path.Join(s.RootDir, bucket, object), "source="+srcFile, s.password); err != nil {
|
||||
os.Remove(srcFile)
|
||||
return objInfo, err
|
||||
}
|
||||
defer s.deleteTempFileWhenUploadCompletes(ctx, srcFile, bucket, object)
|
||||
|
||||
return minio.ObjectInfo{
|
||||
Name: object,
|
||||
Bucket: bucket,
|
||||
ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
Size: wsize,
|
||||
ETag: minio.GenETag(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DeleteObject deletes an object in the bucket.
|
||||
func (s *siaObjects) DeleteObject(ctx context.Context, bucket string, object string) error {
|
||||
// Tell Sia daemon to delete the object
|
||||
var siaObj = path.Join(s.RootDir, bucket, object)
|
||||
return post(ctx, s.Address, "/renter/delete/"+siaObj, "", s.password)
|
||||
}
|
||||
|
||||
// siaObjectInfo represents object info stored on Sia
|
||||
type siaObjectInfo struct {
|
||||
SiaPath string `json:"siapath"`
|
||||
LocalPath string `json:"localpath"`
|
||||
Filesize uint64 `json:"filesize"`
|
||||
Available bool `json:"available"`
|
||||
Renewing bool `json:"renewing"`
|
||||
Redundancy float64 `json:"redundancy"`
|
||||
UploadProgress float64 `json:"uploadprogress"`
|
||||
}
|
||||
|
||||
type renterFiles struct {
|
||||
Files []siaObjectInfo `json:"files"`
|
||||
}
|
||||
|
||||
// listRenterFiles will return a list of existing objects in the bucket provided
|
||||
func (s *siaObjects) listRenterFiles(ctx context.Context, bucket string) (siaObjs []siaObjectInfo, err error) {
|
||||
// Get list of all renter files
|
||||
var rf renterFiles
|
||||
if err = list(ctx, s.Address, s.password, &rf); err != nil {
|
||||
return siaObjs, err
|
||||
}
|
||||
|
||||
var prefix string
|
||||
root := s.RootDir + "/"
|
||||
if bucket == "" {
|
||||
prefix = root
|
||||
} else {
|
||||
prefix = root + bucket + "/"
|
||||
}
|
||||
|
||||
for _, f := range rf.Files {
|
||||
if strings.HasPrefix(f.SiaPath, prefix) {
|
||||
siaObjs = append(siaObjs, f)
|
||||
}
|
||||
}
|
||||
|
||||
return siaObjs, nil
|
||||
}
|
||||
|
||||
// deleteTempFileWhenUploadCompletes checks the status of a Sia file upload
|
||||
// until it reaches 100% upload progress, then deletes the local temp copy from
|
||||
// the filesystem.
|
||||
func (s *siaObjects) deleteTempFileWhenUploadCompletes(ctx context.Context, tempFile string, bucket, object string) {
|
||||
var soi siaObjectInfo
|
||||
// Wait until 100% upload instead of 1x redundancy because if we delete
|
||||
// after 1x redundancy, the user has to pay the cost of other hosts
|
||||
// redistributing the file.
|
||||
for soi.UploadProgress < 100.0 {
|
||||
var err error
|
||||
soi, err = s.findSiaObject(ctx, bucket, object)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Sleep between each check so that we're not hammering
|
||||
// the Sia daemon with requests.
|
||||
time.Sleep(15 * time.Second)
|
||||
}
|
||||
|
||||
os.Remove(tempFile)
|
||||
}
|
||||
|
||||
// IsCompressionSupported returns whether compression is applicable for this layer.
|
||||
func (s *siaObjects) IsCompressionSupported() bool {
|
||||
return false
|
||||
}
|
@ -1,32 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package sia
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSianon2xx(t *testing.T) {
|
||||
for i := 0; i < 1000; i++ {
|
||||
actual := non2xx(i)
|
||||
expected := i < 200 || i > 299
|
||||
|
||||
if actual != expected {
|
||||
t.Errorf("Test case %d: non2xx(%d) returned %t but expected %t", i+1, i, actual, expected)
|
||||
}
|
||||
}
|
||||
}
|
@ -2,8 +2,6 @@
|
||||
Minio Gateway adds Amazon S3 compatibility to third party cloud storage providers.
|
||||
- [NAS](https://github.com/minio/minio/blob/master/docs/gateway/nas.md)
|
||||
- [Microsoft Azure Blob Storage](https://github.com/minio/minio/blob/master/docs/gateway/azure.md)
|
||||
- [Google Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/gcs.md)
|
||||
- [Backblaze B2](https://github.com/minio/minio/blob/master/docs/gateway/b2.md)
|
||||
- [S3](https://github.com/minio/minio/blob/master/docs/gateway/s3.md)
|
||||
- [Sia Decentralized Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/sia.md) _Alpha release_
|
||||
- [Manta Object Storage](https://github.com/minio/minio/blob/master/docs/gateway/manta.md) _Alpha release_
|
||||
- [Google Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/gcs.md)
|
||||
- [Alibaba Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/oss.md)
|
||||
|
@ -1,58 +0,0 @@
|
||||
# Minio B2 Gateway [](https://slack.minio.io)
|
||||
Minio Gateway adds Amazon S3 compatibility to Backblaze B2 Cloud Storage.
|
||||
|
||||
## Run Minio Gateway for Backblaze B2 Cloud Storage
|
||||
Please follow this [guide](https://www.backblaze.com/b2/docs/quick_account.html) to create an account on backblaze.com to obtain your access credentials for B2 Cloud storage.
|
||||
|
||||
### Using Docker
|
||||
```
|
||||
docker run -p 9000:9000 --name b2-s3 \
|
||||
-e "MINIO_ACCESS_KEY=b2_account_id" \
|
||||
-e "MINIO_SECRET_KEY=b2_application_key" \
|
||||
minio/minio gateway b2
|
||||
```
|
||||
|
||||
### Using Binary
|
||||
```
|
||||
export MINIO_ACCESS_KEY=b2_account_id
|
||||
export MINIO_SECRET_KEY=b2_application_key
|
||||
minio gateway b2
|
||||
```
|
||||
|
||||
## Test using Minio Browser
|
||||
Minio Gateway comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure that your server has started successfully.
|
||||
|
||||

|
||||
|
||||
## Test using Minio Client `mc`
|
||||
`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services.
|
||||
|
||||
### Configure `mc`
|
||||
```
|
||||
mc config host add myb2 http://gateway-ip:9000 b2_account_id b2_application_key
|
||||
```
|
||||
|
||||
### List buckets on Backblaze B2
|
||||
```
|
||||
mc ls myb2
|
||||
[2017-02-22 01:50:43 PST] 0B ferenginar/
|
||||
[2017-02-26 21:43:51 PST] 0B my-bucket/
|
||||
[2017-02-26 22:10:11 PST] 0B test-bucket1/
|
||||
```
|
||||
|
||||
### Known limitations
|
||||
Gateway inherits the following B2 limitations:
|
||||
|
||||
- No support for CopyObject S3 API (There are no equivalent APIs available on Backblaze B2).
|
||||
- No support for CopyObjectPart S3 API (There are no equivalent APIs available on Backblaze B2).
|
||||
- Only a read-only bucket policy is supported at the bucket level; all other variations will return an API NotImplemented error.
|
||||
- DeleteObject() might not delete the object right away on Backblaze B2, so the object may remain visible for a short time after a Delete request.
|
||||
|
||||
Other limitations:
|
||||
|
||||
- Bucket notification APIs are not supported.
|
||||
|
||||
## Explore Further
|
||||
- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide)
|
||||
- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio)
|
||||
- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide)
|
@ -1,78 +0,0 @@
|
||||
# Minio Manta Gateway [](https://slack.minio.io)
|
||||
Minio Gateway adds Amazon S3 compatibility to Manta Object Storage.
|
||||
|
||||
## Run Minio Gateway for Manta Object Storage
|
||||
### Using Docker
|
||||
```
|
||||
docker run -p 9000:9000 --name manta-s3 \
|
||||
-e "MINIO_ACCESS_KEY=joyentaccountname" \
|
||||
-e "MINIO_SECRET_KEY=joyentkeyid" \
|
||||
-e "MANTA_KEY_MATERIAL=~/.ssh/id_rsa" \
|
||||
-e "MANTA_SUBUSER=devuser"
|
||||
minio/minio gateway manta
|
||||
```
|
||||
|
||||
### Using Binary
|
||||
```
|
||||
export MINIO_ACCESS_KEY=joyentaccountname
|
||||
export MINIO_SECRET_KEY=joyentkeyid
|
||||
export MANTA_KEY_MATERIAL=~/.ssh/id_rsa
|
||||
export MANTA_SUBUSER=devuser
|
||||
minio gateway manta
|
||||
```
|
||||
|
||||
## Run Minio Gateway for Manta Object Storage Custom Endpoints
|
||||
### Using Docker
|
||||
```
|
||||
docker run -p 9000:9000 --name manta-s3 \
|
||||
-e "MINIO_ACCESS_KEY=joyentaccountname" \
|
||||
-e "MINIO_SECRET_KEY=joyentkeyid" \
|
||||
-e "MANTA_KEY_MATERIAL=~/.ssh/id_rsa" \
|
||||
-e "MANTA_SUBUSER=devuser"
|
||||
minio/minio gateway manta https://manta_service_endpoint:port
|
||||
```
|
||||
|
||||
### Using Binary
|
||||
```
|
||||
export MINIO_ACCESS_KEY=joyentaccountname
|
||||
export MINIO_SECRET_KEY=joyentkeyid
|
||||
export MANTA_KEY_MATERIAL=~/.ssh/id_rsa
|
||||
export MANTA_SUBUSER=devuser
|
||||
minio gateway manta https://manta_service_endpoint:port
|
||||
```
|
||||
|
||||
## Test using Minio Browser
|
||||
Minio Gateway comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure that your server has started successfully.
|
||||
|
||||

|
||||
## Test using Minio Client `mc`
|
||||
`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services.
|
||||
|
||||
### Configure `mc`
|
||||
```
|
||||
export MANTA_KEY_MATERIAL=~/.ssh/id_rsa
|
||||
mc config host add mymanta http://gateway-ip:9000 joyentaccountname joyentkeyid
|
||||
```
|
||||
|
||||
### List containers on Manta Object Storage
|
||||
```
|
||||
mc ls mymanta
|
||||
[2017-02-22 01:50:43 PST] 0B ferenginar/
|
||||
[2017-02-26 21:43:51 PST] 0B my-container/
|
||||
[2017-02-26 22:10:11 PST] 0B test-container1/
|
||||
```
|
||||
|
||||
### Known limitations
|
||||
Gateway inherits the following Manta limitations:
|
||||
|
||||
- No support for MultiPartUpload.
|
||||
- No support for bucket policies.
|
||||
|
||||
Other limitations:
|
||||
|
||||
- Bucket notification APIs are not supported.
|
||||
|
||||
## Explore Further
|
||||
- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide)
|
||||
- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio)
|
||||
- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide)
|
@ -1,60 +0,0 @@
|
||||
# Minio Sia Gateway [](https://slack.minio.io)
|
||||
Minio Sia Gateway adds Amazon S3 compatibility to Sia Decentralized Cloud Storage.
|
||||
|
||||
## What is Sia?
|
||||
Sia is a blockchain-based decentralized storage service with built-in privacy and redundancy that costs up to 10x LESS than Amazon S3 and most other cloud providers! See [sia.tech](https://sia.tech) to learn how awesome Sia truly is.
|
||||
|
||||
## Getting Started with Sia
|
||||
|
||||
### Install Sia Daemon
|
||||
To use Sia for backend storage, Minio will need access to a running Sia daemon that:
|
||||
1. is fully synchronized with the Sia network,
|
||||
2. has sufficient rental contract allowances, and
|
||||
3. has an unlocked wallet.
|
||||
|
||||
To download and install Sia for your platform, visit [sia.tech](http://sia.tech).
|
||||
|
||||
To purchase inexpensive rental contracts with Sia, you have to possess some Siacoin in your wallet. To obtain Siacoin, you will need to purchase some on an exchange such as Bittrex using bitcoin. To obtain bitcoin, you'll need to use a service such as Coinbase to buy bitcoin using a bank account or credit card. If you need help, there are many friendly people active on [Sia's Slack](http://slackin.sia.tech).
|
||||
|
||||
### Configuration
|
||||
Once the Sia daemon is running and synchronized, with rental allowances created, you just need to configure the Minio server to use Sia. Configuration is done through environment variables and is only necessary when the default values need to be changed. On a Linux machine using the bash shell, you can set environment variables by adding export statements to the "~/.bash_profile" file. For example:
|
||||
```
|
||||
export MY_ENV_VAR=VALUE
|
||||
```
|
||||
Remember to reload the profile by executing "source ~/.bash_profile" at the command prompt.
|
||||
|
||||
#### Supported Environment Variables
|
||||
Environment Variable | Description | Default Value
|
||||
--- | --- | ---
|
||||
`MINIO_ACCESS_KEY` | The access key required to access Minio. | (empty)
|
||||
`MINIO_SECRET_KEY` | The secret key required to access Minio. | (empty)
|
||||
`SIA_TEMP_DIR` | The local directory to use for temporary storage. | .sia_temp
|
||||
`SIA_API_PASSWORD` | The API password required to access the Sia daemon, if needed. | (empty)
|
||||
|
||||
### Running Minio with Sia Gateway
|
||||
```
|
||||
export MINIO_ACCESS_KEY=minioaccesskey
|
||||
export MINIO_SECRET_KEY=miniosecretkey
|
||||
minio gateway sia [SIA_DAEMON_ADDRESS]
|
||||
```
|
||||
The [SIA_DAEMON_ADDRESS] is optional, and it defaults to "127.0.0.1:9980".
|
||||
Access information should then be presented on-screen. To connect to the server and upload files, point your web browser to the address displayed for "Browser Access," then log in using the "AccessKey" and "SecretKey" that are also displayed on-screen. You should then be able to create buckets (folders) and upload files.
|
||||
|
||||

|
||||
|
||||
### Known limitations
|
||||
|
||||
Gateway inherits the following Sia limitations:
|
||||
|
||||
- Multipart uploads are not currently supported.
|
||||
- Bucket policies are not currently supported.
|
||||
|
||||
Other limitations:
|
||||
|
||||
- Bucket notification APIs are not supported.
|
||||
|
||||
## Explore Further
|
||||
- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide)
|
||||
- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio)
|
||||
- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide)
|
||||
|
64 vendor/github.com/joyent/triton-go/CHANGELOG.md generated vendored
@ -1,64 +0,0 @@
|
||||
## Unreleased
|
||||
|
||||
- Add support for managing volumes in Triton [#100]
|
||||
- identity/policies: Add support for managing policies in Triton [#86]
|
||||
- Addition of triton-go errors package to expose unwrapping of internal errors
|
||||
- Migration from hashicorp/errwrap to pkg/errors
|
||||
- Using path.Join() for URL structures rather than fmt.Sprintf()
|
||||
|
||||
## 0.5.2 (December 28)
|
||||
|
||||
- Standardise the API SSH Signers input casing and naming
|
||||
|
||||
## 0.5.1 (December 28)
|
||||
|
||||
- Include leading '/' when working with SSH Agent signers
|
||||
|
||||
## 0.5.0 (December 28)
|
||||
|
||||
- Add support for RBAC in triton-go [#82]
|
||||
This is a breaking change. We no longer pass individual parameters to the SSH Signer funcs; we now pass an input struct. This will guard against additional parameter changes in the future.
|
||||
We also now add support for using `SDC_*` and `TRITON_*` env vars when working with the Default agent signer
|
||||
|
||||
## 0.4.2 (December 22)
|
||||
|
||||
- Fixing a panic when the user loses network connectivity while making a GET request to an instance [#81]
|
||||
|
||||
## 0.4.1 (December 15)
|
||||
|
||||
- Clean up the handling of directory sanitization. Use abs paths everywhere [#79]
|
||||
|
||||
## 0.4.0 (December 15)
|
||||
|
||||
- Fix an issue where Manta HEAD requests do not return an error resp body [#77]
|
||||
- Add support for recursively creating child directories [#78]
|
||||
|
||||
## 0.3.0 (December 14)
|
||||
|
||||
- Introduce CloudAPI's ListRulesMachines under networking
|
||||
- Enable HTTP KeepAlives by default in the client. 15s idle timeout, 2x
|
||||
connections per host, total of 10x connections per client.
|
||||
- Expose an optional Headers attribute to clients to allow them to customize
|
||||
HTTP headers when making Object requests.
|
||||
- Fix a bug in Directory ListIndex [#69](https://github.com/joyent/issues/69)
|
||||
- Inputs to Object inputs have been relaxed to `io.Reader` (formerly a
|
||||
`io.ReadSeeker`) [#73](https://github.com/joyent/issues/73).
|
||||
- Add support for ForceDelete of all children of a directory [#71](https://github.com/joyent/issues/71)
|
||||
- storage: Introduce `Objects.GetInfo` and `Objects.IsDir` using HEAD requests [#74](https://github.com/joyent/triton-go/issues/74)
|
||||
|
||||
## 0.2.1 (November 8)
|
||||
|
||||
- Fixing a bug where CreateUser and UpdateUser didn't return the UserID
|
||||
|
||||
## 0.2.0 (November 7)
|
||||
|
||||
- Introduce CloudAPI's Ping under compute
|
||||
- Introduce CloudAPI's RebootMachine under compute instances
|
||||
- Introduce CloudAPI's ListUsers, GetUser, CreateUser, UpdateUser and DeleteUser under identity package
|
||||
- Introduce CloudAPI's ListMachineSnapshots, GetMachineSnapshot, CreateSnapshot, DeleteMachineSnapshot and StartMachineFromSnapshot under compute package
|
||||
- tools: Introduce unit testing and scripts for linting, etc.
|
||||
- bug: Fix the `compute.ListMachineRules` endpoint
|
||||
|
||||
## 0.1.0 (November 2)
|
||||
|
||||
- Initial release of a versioned SDK
|
46 vendor/github.com/joyent/triton-go/GNUmakefile generated vendored
@ -1,46 +0,0 @@
|
||||
TEST?=$$(go list ./... |grep -Ev 'vendor|examples|testutils')
|
||||
GOFMT_FILES?=$$(find . -name '*.go' |grep -v vendor)
|
||||
|
||||
default: vet errcheck test
|
||||
|
||||
tools:: ## Download and install all dev/code tools
|
||||
@echo "==> Installing dev tools"
|
||||
go get -u github.com/golang/dep/cmd/dep
|
||||
go get -u github.com/golang/lint/golint
|
||||
go get -u github.com/kisielk/errcheck
|
||||
@echo "==> Installing test package dependencies"
|
||||
go test -i $(TEST) || exit 1
|
||||
|
||||
test:: ## Run unit tests
|
||||
@echo "==> Running unit test with coverage"
|
||||
@./scripts/go-test-with-coverage.sh
|
||||
|
||||
testacc:: ## Run acceptance tests
|
||||
@echo "==> Running acceptance tests"
|
||||
TRITON_TEST=1 go test $(TEST) -v $(TESTARGS) -run -timeout 120m
|
||||
|
||||
vet:: ## Check for unwanted code constructs
|
||||
@echo "go vet ."
|
||||
@go vet $$(go list ./... | grep -v vendor/) ; if [ $$? -eq 1 ]; then \
|
||||
echo ""; \
|
||||
echo "Vet found suspicious constructs. Please check the reported constructs"; \
|
||||
echo "and fix them if necessary before submitting the code for review."; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
lint:: ## Lint and vet code by common Go standards
|
||||
@bash $(CURDIR)/scripts/lint.sh
|
||||
|
||||
fmt:: ## Format as canonical Go code
|
||||
gofmt -w $(GOFMT_FILES)
|
||||
|
||||
fmtcheck:: ## Check if code format is canonical Go
|
||||
@bash $(CURDIR)/scripts/gofmtcheck.sh
|
||||
|
||||
errcheck:: ## Check for unhandled errors
|
||||
@bash $(CURDIR)/scripts/errcheck.sh
|
||||
|
||||
.PHONY: help
|
||||
help:: ## Display this help message
|
||||
@echo "GNU make(1) targets:"
|
||||
@grep -E '^[a-zA-Z_.-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}'
|
33 vendor/github.com/joyent/triton-go/Gopkg.lock generated vendored
@ -1,33 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/abdullin/seq"
|
||||
packages = ["."]
|
||||
revision = "d5467c17e7afe8d8f08f556c6c811a50c3feb28d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/sean-/seed"
|
||||
packages = ["."]
|
||||
revision = "e2103e2c35297fb7e17febb81e49b312087a2372"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","ssh","ssh/agent"]
|
||||
revision = "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "f7efd974ae38e2ee077c4d2698df74128a04797460b5f9c833853ddfaa86a0a0"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
38 vendor/github.com/joyent/triton-go/Gopkg.toml generated vendored
@ -1,38 +0,0 @@
|
||||
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/abdullin/seq"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/sean-/seed"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/errors"
|
373 vendor/github.com/joyent/triton-go/LICENSE generated vendored
@ -1,373 +0,0 @@
|
||||
Mozilla Public License Version 2.0
|
||||
==================================
|
||||
|
||||
1. Definitions
|
||||
--------------
|
||||
|
||||
1.1. "Contributor"
|
||||
means each individual or legal entity that creates, contributes to
|
||||
the creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
means the combination of the Contributions of others (if any) used
|
||||
by a Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
means Source Code Form to which the initial Contributor has attached
|
||||
the notice in Exhibit A, the Executable Form of such Source Code
|
||||
Form, and Modifications of such Source Code Form, in each case
|
||||
including portions thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
(a) that the initial Contributor has attached the notice described
|
||||
in Exhibit B to the Covered Software; or
|
||||
|
||||
(b) that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the
|
||||
terms of a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
means a work that combines Covered Software with other material, in
|
||||
a separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
means having the right to grant, to the maximum extent possible,
|
||||
whether at the time of the initial grant or subsequently, any and
|
||||
all of the rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
means any of the following:
|
||||
|
||||
(a) any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered
|
||||
Software; or
|
||||
|
||||
(b) any new file in Source Code Form that contains any Covered
|
||||
Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the
|
||||
License, by the making, using, selling, offering for sale, having
|
||||
made, import, or transfer of either its Contributions or its
|
||||
Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
means either the GNU General Public License, Version 2.0, the GNU
|
||||
Lesser General Public License, Version 2.1, the GNU Affero General
|
||||
Public License, Version 3.0, or any later versions of those
|
||||
licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that
|
||||
controls, is controlled by, or is under common control with You. For
|
||||
purposes of this definition, "control" means (a) the power, direct
|
||||
or indirect, to cause the direction or management of such entity,
|
||||
whether by contract or otherwise, or (b) ownership of more than
|
||||
fifty percent (50%) of the outstanding shares or beneficial
|
||||
ownership of such entity.
|
||||
|
||||
2. License Grants and Conditions
|
||||
--------------------------------
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
(a) under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
(b) under Patent Claims of such Contributor to make, use, sell, offer
|
||||
for sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
(a) for any code that a Contributor has removed from Covered Software;
|
||||
or
|
||||
|
||||
(b) for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
(c) under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights
|
||||
to grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
|
||||
in Section 2.1.
|
||||
|
||||
3. Responsibilities
|
||||
-------------------
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
(a) such Covered Software must also be made available in Source Code
|
||||
Form, as described in Section 3.1, and You must inform recipients of
|
||||
the Executable Form how they can obtain a copy of such Source Code
|
||||
Form by reasonable means in a timely manner, at a charge no more
|
||||
than the cost of distribution to the recipient; and
|
||||
|
||||
(b) You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter
|
||||
the recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).

3.4. Notices

You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------

If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.

5. Termination
--------------

5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.

************************************************************************
*                                                                      *
*  6. Disclaimer of Warranty                                           *
*  -------------------------                                           *
*                                                                      *
*  Covered Software is provided under this License on an "as is"       *
*  basis, without warranty of any kind, either expressed, implied, or  *
*  statutory, including, without limitation, warranties that the       *
*  Covered Software is free of defects, merchantable, fit for a        *
*  particular purpose or non-infringing. The entire risk as to the     *
*  quality and performance of the Covered Software is with You.        *
*  Should any Covered Software prove defective in any respect, You     *
*  (not any Contributor) assume the cost of any necessary servicing,   *
*  repair, or correction. This disclaimer of warranty constitutes an   *
*  essential part of this License. No use of any Covered Software is   *
*  authorized under this License except under this disclaimer.         *
*                                                                      *
************************************************************************

************************************************************************
*                                                                      *
*  7. Limitation of Liability                                          *
*  --------------------------                                          *
*                                                                      *
*  Under no circumstances and under no legal theory, whether tort      *
*  (including negligence), contract, or otherwise, shall any           *
*  Contributor, or anyone who distributes Covered Software as          *
*  permitted above, be liable to You for any direct, indirect,         *
*  special, incidental, or consequential damages of any character      *
*  including, without limitation, damages for lost profits, loss of    *
*  goodwill, work stoppage, computer failure or malfunction, or any    *
*  and all other commercial damages or losses, even if such party      *
*  shall have been informed of the possibility of such damages. This   *
*  limitation of liability shall not apply to liability for death or   *
*  personal injury resulting from such party's negligence to the       *
*  extent applicable law prohibits such limitation. Some               *
*  jurisdictions do not allow the exclusion or limitation of           *
*  incidental or consequential damages, so this exclusion and          *
*  limitation may not apply to You.                                    *
*                                                                      *
************************************************************************

8. Litigation
-------------

Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.

9. Miscellaneous
----------------

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.

10. Versions of the License
---------------------------

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice
-------------------------------------------

This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------

This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

vendor/github.com/joyent/triton-go/README.md (generated, vendored): 238 deletions
@ -1,238 +0,0 @@
# triton-go

`triton-go` is an idiomatic library exposing a client SDK for Go applications
using Joyent's Triton Compute and Storage (Manta) APIs.

[Build Status](https://travis-ci.org/joyent/triton-go) [Go Report Card](https://goreportcard.com/report/github.com/joyent/triton-go)

## Usage

Triton uses [HTTP Signature][4] to sign the Date header in each HTTP request
made to the Triton API. Currently, requests can be signed using either a private
key file loaded from disk (using an [`authentication.PrivateKeySigner`][5]), or
using a key stored with the local SSH Agent (using an [`SSHAgentSigner`][6]).

To construct a Signer, use the `New*` range of methods in the `authentication`
package. In the case of `authentication.NewSSHAgentSigner`, the parameters are
the fingerprint of the key with which to sign, and the account name (normally
stored in the `TRITON_ACCOUNT` environment variable). There is also support for
passing in a username, which allows you to use an account other than the main
Triton account. For example:

```go
input := authentication.SSHAgentSignerInput{
	KeyID:       "a4:c6:f3:75:80:27:e0:03:a9:98:79:ef:c5:0a:06:11",
	AccountName: "AccountName",
	Username:    "Username",
}
sshKeySigner, err := authentication.NewSSHAgentSigner(input)
if err != nil {
	log.Fatalf("NewSSHAgentSigner: %s", err)
}
```

An appropriate key fingerprint can be generated using `ssh-keygen`:

```
ssh-keygen -Emd5 -lf ~/.ssh/id_rsa.pub | cut -d " " -f 2 | sed 's/MD5://'
```

Each top level package (`account`, `compute`, `identity` and `network`) has its
own separate client. In order to initialize a package client, simply pass
the global `triton.ClientConfig` struct into the client's constructor function.

```go
config := &triton.ClientConfig{
	TritonURL:   os.Getenv("TRITON_URL"),
	MantaURL:    os.Getenv("MANTA_URL"),
	AccountName: accountName,
	Username:    os.Getenv("TRITON_USER"),
	Signers:     []authentication.Signer{sshKeySigner},
}

c, err := compute.NewClient(config)
if err != nil {
	log.Fatalf("compute.NewClient: %s", err)
}
```

Constructing `compute.Client` returns an interface which exposes `compute` API
resources. The same goes for all other packages. Reference their unique
documentation for more information.

The same `triton.ClientConfig` will initialize the Manta `storage` client as
well:

```go
c, err := storage.NewClient(config)
if err != nil {
	log.Fatalf("storage.NewClient: %s", err)
}
```

## Error Handling

If an error is returned by the HTTP API, the `error` returned from the function
will contain an instance of `compute.TritonError` in the chain. Error wrapping
is performed using the [errwrap][7] library from HashiCorp.

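The vendored `errors` package (`github.com/joyent/triton-go/errors`) also exposes predicate helpers such as `IsInvalidCredentialsError` and `IsInternalError`. As a minimal sketch, assuming the same environment variables and SSH agent setup as the examples above, checking for specific error conditions might look like this:

```go
package main

import (
	"context"
	"log"
	"os"

	triton "github.com/joyent/triton-go"
	"github.com/joyent/triton-go/authentication"
	"github.com/joyent/triton-go/compute"
	terrors "github.com/joyent/triton-go/errors"
)

func main() {
	// Reuse the SSH agent signer shown in the Usage section above.
	signer, err := authentication.NewSSHAgentSigner(authentication.SSHAgentSignerInput{
		KeyID:       os.Getenv("TRITON_KEY_ID"),
		AccountName: os.Getenv("TRITON_ACCOUNT"),
	})
	if err != nil {
		log.Fatalf("NewSSHAgentSigner: %s", err)
	}

	c, err := compute.NewClient(&triton.ClientConfig{
		TritonURL:   os.Getenv("TRITON_URL"),
		AccountName: os.Getenv("TRITON_ACCOUNT"),
		Signers:     []authentication.Signer{signer},
	})
	if err != nil {
		log.Fatalf("compute.NewClient: %s", err)
	}

	// Branch on the error type wrapped into the returned error chain.
	_, err = c.Instances().List(context.Background(), &compute.ListInstancesInput{})
	switch {
	case err == nil:
		log.Println("listed instances successfully")
	case terrors.IsInvalidCredentialsError(err):
		// The request was signed with a key the account does not recognize.
		log.Fatalf("invalid credentials: %s", err)
	case terrors.IsInternalError(err):
		// Server-side failure; a retry with backoff may succeed.
		log.Printf("transient server error: %s", err)
	default:
		log.Fatalf("compute.Instances.List: %v", err)
	}
}
```
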
## Acceptance Tests

Acceptance Tests run directly against the Triton API, so you will need either a
local installation of Triton or an account with Joyent's Public Cloud offering
in order to run them. The tests create real resources (and thus cost real
money)!

In order to run acceptance tests, the following environment variables must be
set:

- `TRITON_TEST` - must be set to any value in order to indicate desire to create
  resources
- `TRITON_URL` - the base endpoint for the Triton API
- `TRITON_ACCOUNT` - the account name for the Triton API
- `TRITON_KEY_ID` - the fingerprint of the SSH key identifying the key

Additionally, you may set `TRITON_KEY_MATERIAL` to the contents of an unencrypted
private key. If this is set, the PrivateKeySigner (see above) will be used; if
not, the SSHAgentSigner will be used. You can also set `TRITON_USER` to run the
tests against an account other than the main Triton account.

### Example Run

The verbose output has been removed for brevity here.

```
$ HTTP_PROXY=http://localhost:8888 \
    TRITON_TEST=1 \
    TRITON_URL=https://us-sw-1.api.joyent.com \
    TRITON_ACCOUNT=AccountName \
    TRITON_KEY_ID=a4:c6:f3:75:80:27:e0:03:a9:98:79:ef:c5:0a:06:11 \
    go test -v -run "TestAccKey"
=== RUN   TestAccKey_Create
--- PASS: TestAccKey_Create (12.46s)
=== RUN   TestAccKey_Get
--- PASS: TestAccKey_Get (4.30s)
=== RUN   TestAccKey_Delete
--- PASS: TestAccKey_Delete (15.08s)
PASS
ok      github.com/joyent/triton-go    31.861s
```

## Example API

There's an `examples/` directory available with sample code setup for many of
the APIs within this library. Most of these can be run using `go run` and
referencing your SSH key file used by your active `triton` CLI profile.

```sh
$ eval "$(triton env us-sw-1)"
$ TRITON_KEY_FILE=~/.ssh/triton-id_rsa go run examples/compute/instances.go
```

The following is a complete example of how to initialize the `compute` package
client and list all instances under an account. More detailed usage of this
library follows.

```go
package main

import (
	"context"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"
	"os"

	triton "github.com/joyent/triton-go"
	"github.com/joyent/triton-go/authentication"
	"github.com/joyent/triton-go/compute"
)

func main() {
	keyID := os.Getenv("TRITON_KEY_ID")
	accountName := os.Getenv("TRITON_ACCOUNT")
	keyMaterial := os.Getenv("TRITON_KEY_MATERIAL")
	userName := os.Getenv("TRITON_USER")

	var signer authentication.Signer
	var err error

	if keyMaterial == "" {
		input := authentication.SSHAgentSignerInput{
			KeyID:       keyID,
			AccountName: accountName,
			Username:    userName,
		}
		signer, err = authentication.NewSSHAgentSigner(input)
		if err != nil {
			log.Fatalf("Error Creating SSH Agent Signer: %s", err)
		}
	} else {
		var keyBytes []byte
		if _, err = os.Stat(keyMaterial); err == nil {
			keyBytes, err = ioutil.ReadFile(keyMaterial)
			if err != nil {
				log.Fatalf("Error reading key material from %s: %s",
					keyMaterial, err)
			}
			block, _ := pem.Decode(keyBytes)
			if block == nil {
				log.Fatalf(
					"Failed to read key material '%s': no key found", keyMaterial)
			}

			if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
				log.Fatalf(
					"Failed to read key '%s': password protected keys are\n"+
						"not currently supported. Please decrypt the key prior to use.", keyMaterial)
			}

		} else {
			keyBytes = []byte(keyMaterial)
		}

		input := authentication.PrivateKeySignerInput{
			KeyID:              keyID,
			PrivateKeyMaterial: keyBytes,
			AccountName:        accountName,
			Username:           userName,
		}
		signer, err = authentication.NewPrivateKeySigner(input)
		if err != nil {
			log.Fatalf("Error Creating SSH Private Key Signer: %s", err)
		}
	}

	config := &triton.ClientConfig{
		TritonURL:   os.Getenv("TRITON_URL"),
		AccountName: accountName,
		Username:    userName,
		Signers:     []authentication.Signer{signer},
	}

	c, err := compute.NewClient(config)
	if err != nil {
		log.Fatalf("compute.NewClient: %s", err)
	}

	listInput := &compute.ListInstancesInput{}
	instances, err := c.Instances().List(context.Background(), listInput)
	if err != nil {
		log.Fatalf("compute.Instances.List: %v", err)
	}
	numInstances := 0
	for _, instance := range instances {
		numInstances++
		fmt.Println(fmt.Sprintf("-- Instance: %v", instance.Name))
	}
}
```

[4]: https://github.com/joyent/node-http-signature/blob/master/http_signing.md
[5]: https://godoc.org/github.com/joyent/triton-go/authentication
[6]: https://godoc.org/github.com/joyent/triton-go/authentication
[7]: https://github.com/hashicorp/go-errwrap

vendor/github.com/joyent/triton-go/authentication/dummy.go (generated, vendored): 80 deletions
@ -1,80 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
//
|
||||
|
||||
package authentication
|
||||
|
||||
// DON'T USE THIS OUTSIDE TESTING ~ This key was only created to use for
|
||||
// internal unit testing. It should never be used for acceptance testing either.
|
||||
//
|
||||
// This is just a randomly generated key pair.
|
||||
var Dummy = struct {
|
||||
Fingerprint string
|
||||
PrivateKey []byte
|
||||
PublicKey []byte
|
||||
Signer Signer
|
||||
}{
|
||||
"9f:d6:65:fc:d6:60:dc:d0:4e:db:2d:75:f7:92:8c:31",
|
||||
[]byte(`-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIJKAIBAAKCAgEAui9lNjCJahHeFSFC6HXi/CNX588C/L2gJUx65bnNphVC98hW
|
||||
1wzoRvPXHx5aWnb7lEbpNhP6B0UoCBDTaPgt9hHfD/oNQ+6HT1QpDIGfZmXI91/t
|
||||
cjGVSBbxN7WaYt/HsPrGjbalwvQPChN53sMVmFkMTEDR5G3zOBOAGrOimlCT80wI
|
||||
2S5Xg0spd8jjKM5I1swDR0xtuDWnHTR1Ohin+pEQIE6glLTfYq7oQx6nmMXXBNmk
|
||||
+SaPD1FAyjkF/81im2EHXBygNEwraVrDcAxK2mKlU2XMJiogQKNYWlm3UkbNB6WP
|
||||
Le12+Ka02rmIVsSqIpc/ZCBraAlCaSWlYCkU+vJ2hH/+ypy5bXNlbaTiWZK+vuI7
|
||||
PC87T50yLNeXVuNZAynzDpBCvsjiiHrB/ZFRfVfF6PviV8CV+m7GTzfAwJhVeSbl
|
||||
rR6nts16K0HTD48v57DU0b0t5VOvC7cWPShs+afdSL3Z8ReL5EWMgU1wfvtycRKe
|
||||
hiDVGj3Ms2cf83RIANr387G+1LcTQYP7JJuB7Svy5j+R6+HjI0cgu4EMUPdWfCNG
|
||||
GyrlxwJNtPmUSfasH1xUKpqr7dC+0sN4/gfJw75WTAYrATkPzexoYNaMsGDfhuoh
|
||||
kYa3Tn2q1g3kqhsX/R0Fd5d8d5qc137qcRCxiZYz9f3bVkXQbhYmO9da3KsCAwEA
|
||||
AQKCAgAeEAURqOinPddUJhi9nDtYZwSMo3piAORY4W5+pW+1P32esLSE6MqgmkLD
|
||||
/YytSsT4fjKtzq/yeJIsKztXmasiLmSMGd4Gd/9VKcuu/0cTq5+1gcG/TI5EI6Az
|
||||
VJlnGacOxo9E1pcRUYMUJ2zoMSvNe6NmtJivf6lkBpIKvbKlpBkfkclj9/2db4d0
|
||||
lfVH43cTZ8Gnw4l70v320z+Sb+S/qqil7swy9rmTH5bVL5/0JQ3A9LuUl0tGN+J0
|
||||
RJzZXvprCFG958leaGYiDsu7zeBQPtlfC/LYvriSd02O2SmmmVQFxg/GZK9vGsvc
|
||||
/VQsXnjyOOW9bxaop8YXYELBsiB21ipTHzOwoqHT8wFnjgU9Y/7iZIv7YbZKQsCS
|
||||
DrwdlZ/Yw90wiif+ldYryIVinWfytt6ERv4Dgezc98+1XPi1Z/WB74/lIaDXFl3M
|
||||
3ypjtvLYbKew2IkIjeAwjvZJg/QpC/50RrrPtVDgeAI1Ni01ikixUhMYsHJ1kRih
|
||||
0tqLvLqSPoHmr6luFlaoKdc2eBqb+8U6K/TrXhKtT7BeUFiSbvnVfdbrH9r+AY/2
|
||||
zYtG6llzkE5DH8ZR3Qp+dx7QEDtvYhGftWhx9uasd79AN7CuGYnL54YFLKGRrWKN
|
||||
ylysqfUyOQYiitdWdNCw9PP2vGRx5JAsMMSy+ft18jjTJvNQ0QKCAQEA28M11EE6
|
||||
MpnHxfyP00Dl1+3wl2lRyNXZnZ4hgkk1f83EJGpoB2amiMTF8P1qJb7US1fXtf7l
|
||||
gkJMMk6t6iccexV1/NBh/7tDZHH/v4HPirFTXQFizflaghD8dEADy9DY4BpQYFRe
|
||||
8zGsv4/4U0txCXkUIfKcENt/FtXv2T9blJT6cDV0yTx9IAyd4Kor7Ly2FIYroSME
|
||||
uqnOQt5PwB+2qkE+9hdg4xBhFs9sW5dvyBvQvlBfX/xOmMw2ygH6vsaJlNfZ5VPa
|
||||
EP/wFP/qHyhDlCfbHdL6qF2//wUoM2QM9RgBdZNhcKU7zWuf7Ev199tmlLC5O14J
|
||||
PkQxUGftMfmWxQKCAQEA2OLKD8dwOzpwGJiPQdBmGpwCamfcCY4nDwqEaCu4vY1R
|
||||
OJR+rpYdC2hgl5PTXWH7qzJVdT/ZAz2xUQOgB1hD3Ltk7DQ+EZIA8+vJdaicQOme
|
||||
vfpMPNDxCEX9ee0AXAmAC3aET82B4cMFnjXjl1WXLLTowF/Jp/hMorm6tl2m15A2
|
||||
oTyWlB/i/W/cxHl2HFWK7o8uCNoKpKJjheNYn+emEcH1bkwrk8sxQ78cBNmqe/gk
|
||||
MLgu8qfXQ0LLKIL7wqmIUHeUpkepOod8uXcTmmN2X9saCIwFKx4Jal5hh5v5cy0G
|
||||
MkyZcUIhhnmzr7lXbepauE5V2Sj5Qp040AfRVjZcrwKCAQANe8OwuzPL6P2F20Ij
|
||||
zwaLIhEx6QdYkC5i6lHaAY3jwoc3SMQLODQdjh0q9RFvMW8rFD+q7fG89T5hk8w9
|
||||
4ppvvthXY52vqBixcAEmCdvnAYxA15XtV1BDTLGAnHDfL3gu/85QqryMpU6ZDkdJ
|
||||
LQbJcwFWN+F1c1Iv335w0N9YlW9sNQtuUWTH8544K5i4VLfDOJwyrchbf5GlLqir
|
||||
/AYkGg634KVUKSwbzywxzm/QUkyTcLD5Xayg2V6/NDHjRKEqXbgDxwpJIrrjPvRp
|
||||
ZvoGfA+Im+o/LElcZz+ZL5lP7GIiiaFf3PN3XhQY1mxIAdEgbFthFhrxFBQGf+ng
|
||||
uBSVAoIBAHl12K8pg8LHoUtE9MVoziWMxRWOAH4ha+JSg4BLK/SLlbbYAnIHg1CG
|
||||
LcH1eWNMokJnt9An54KXJBw4qYAzgB23nHdjcncoivwPSg1oVclMjCfcaqGMac+2
|
||||
UpPblF32vAyvXL3MWzZxn03Q5Bo2Rqk0zzwc6LP2rARdeyDyJaOHEfEOG03s5ZQE
|
||||
91/YnbqUdW/QI3m1kkxM3Ot4PIOgmTJMqwQQCD+GhZppBmn49C7k8m+OVkxyjm0O
|
||||
lPOlFxUXGE3oCgltDGrIwaKj+wh1Ny/LZjLvJ13UPnWhUYE+al6EEnpMx4nT/S5w
|
||||
LZ71bu8RVajtxcoN1jnmDpECL8vWOeUCggEBAIEuKoY7pVHfs5gr5dXfQeVZEtqy
|
||||
LnSdsd37/aqQZRlUpVmBrPNl1JBLiEVhk2SL3XJIDU4Er7f0idhtYLY3eE7wqZ4d
|
||||
38Iaj5tv3zBc/wb1bImPgOgXCH7QrrbW7uTiYMLScuUbMR4uSpfubLaV8Zc9WHT8
|
||||
kTJ2pKKtA1GPJ4V7HCIxuTjD2iyOK1CRkaqSC+5VUuq5gHf92CEstv9AIvvy5cWg
|
||||
gnfBQoS89m3aO035henSfRFKVJkHaEoasj8hB3pwl9FGZUJp1c2JxiKzONqZhyGa
|
||||
6tcIAM3od0QtAfDJ89tWJ5D31W8KNNysobFSQxZ62WgLUUtXrkN1LGodxGQ=
|
||||
-----END RSA PRIVATE KEY-----`),
|
||||
[]byte(`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC6L2U2MIlqEd4VIULodeL8I1fnzwL8vaAlTHrluc2mFUL3yFbXDOhG89cfHlpadvuURuk2E/oHRSgIENNo+C32Ed8P+g1D7odPVCkMgZ9mZcj3X+1yMZVIFvE3tZpi38ew+saNtqXC9A8KE3newxWYWQxMQNHkbfM4E4Aas6KaUJPzTAjZLleDSyl3yOMozkjWzANHTG24NacdNHU6GKf6kRAgTqCUtN9iruhDHqeYxdcE2aT5Jo8PUUDKOQX/zWKbYQdcHKA0TCtpWsNwDEraYqVTZcwmKiBAo1haWbdSRs0HpY8t7Xb4prTauYhWxKoilz9kIGtoCUJpJaVgKRT68naEf/7KnLltc2VtpOJZkr6+4js8LztPnTIs15dW41kDKfMOkEK+yOKIesH9kVF9V8Xo++JXwJX6bsZPN8DAmFV5JuWtHqe2zXorQdMPjy/nsNTRvS3lU68LtxY9KGz5p91IvdnxF4vkRYyBTXB++3JxEp6GINUaPcyzZx/zdEgA2vfzsb7UtxNBg/skm4HtK/LmP5Hr4eMjRyC7gQxQ91Z8I0YbKuXHAk20+ZRJ9qwfXFQqmqvt0L7Sw3j+B8nDvlZMBisBOQ/N7Ghg1oywYN+G6iGRhrdOfarWDeSqGxf9HQV3l3x3mpzXfupxELGJljP1/dtWRdBuFiY711rcqw== test-dummy-20171002140848`),
|
||||
nil,
|
||||
}
|
||||
|
||||
func init() {
|
||||
testSigner, _ := NewTestSigner()
|
||||
Dummy.Signer = testSigner
|
||||
}
|

vendor/github.com/joyent/triton-go/authentication/ecdsa_signature.go (generated, vendored): 74 deletions
@ -1,74 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
//
|
||||
|
||||
package authentication
|
||||
|
||||
import (
|
||||
"encoding/asn1"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
type ecdsaSignature struct {
|
||||
hashAlgorithm string
|
||||
R *big.Int
|
||||
S *big.Int
|
||||
}
|
||||
|
||||
func (s *ecdsaSignature) SignatureType() string {
|
||||
return fmt.Sprintf("ecdsa-%s", s.hashAlgorithm)
|
||||
}
|
||||
|
||||
func (s *ecdsaSignature) String() string {
|
||||
toEncode := struct {
|
||||
R *big.Int
|
||||
S *big.Int
|
||||
}{
|
||||
R: s.R,
|
||||
S: s.S,
|
||||
}
|
||||
|
||||
signatureBytes, err := asn1.Marshal(toEncode)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error marshaling signature: %s", err))
|
||||
}
|
||||
|
||||
return base64.StdEncoding.EncodeToString(signatureBytes)
|
||||
}
|
||||
|
||||
func newECDSASignature(signatureBlob []byte) (*ecdsaSignature, error) {
|
||||
var ecSig struct {
|
||||
R *big.Int
|
||||
S *big.Int
|
||||
}
|
||||
|
||||
if err := ssh.Unmarshal(signatureBlob, &ecSig); err != nil {
|
||||
return nil, errors.Wrap(err, "unable to unmarshall signature")
|
||||
}
|
||||
|
||||
rValue := ecSig.R.Bytes()
|
||||
var hashAlgorithm string
|
||||
switch len(rValue) {
|
||||
case 31, 32:
|
||||
hashAlgorithm = "sha256"
|
||||
case 65, 66:
|
||||
hashAlgorithm = "sha512"
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported key length: %d", len(rValue))
|
||||
}
|
||||
|
||||
return &ecdsaSignature{
|
||||
hashAlgorithm: hashAlgorithm,
|
||||
R: ecSig.R,
|
||||
S: ecSig.S,
|
||||
}, nil
|
||||
}
|

vendor/github.com/joyent/triton-go/authentication/private_key_signer.go (generated, vendored): 132 deletions
@ -1,132 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
//
|
||||
|
||||
package authentication
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
type PrivateKeySigner struct {
|
||||
formattedKeyFingerprint string
|
||||
keyFingerprint string
|
||||
algorithm string
|
||||
accountName string
|
||||
userName string
|
||||
hashFunc crypto.Hash
|
||||
|
||||
privateKey *rsa.PrivateKey
|
||||
}
|
||||
|
||||
type PrivateKeySignerInput struct {
|
||||
KeyID string
|
||||
PrivateKeyMaterial []byte
|
||||
AccountName string
|
||||
Username string
|
||||
}
|
||||
|
||||
func NewPrivateKeySigner(input PrivateKeySignerInput) (*PrivateKeySigner, error) {
|
||||
keyFingerprintMD5 := strings.Replace(input.KeyID, ":", "", -1)
|
||||
|
||||
block, _ := pem.Decode(input.PrivateKeyMaterial)
|
||||
if block == nil {
|
||||
return nil, errors.New("Error PEM-decoding private key material: nil block received")
|
||||
}
|
||||
|
||||
rsakey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to parse private key")
|
||||
}
|
||||
|
||||
sshPublicKey, err := ssh.NewPublicKey(rsakey.Public())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to parse SSH key from private key")
|
||||
}
|
||||
|
||||
matchKeyFingerprint := formatPublicKeyFingerprint(sshPublicKey, false)
|
||||
displayKeyFingerprint := formatPublicKeyFingerprint(sshPublicKey, true)
|
||||
if matchKeyFingerprint != keyFingerprintMD5 {
|
||||
return nil, errors.New("Private key file does not match public key fingerprint")
|
||||
}
|
||||
|
||||
signer := &PrivateKeySigner{
|
||||
formattedKeyFingerprint: displayKeyFingerprint,
|
||||
keyFingerprint: input.KeyID,
|
||||
accountName: input.AccountName,
|
||||
|
||||
hashFunc: crypto.SHA1,
|
||||
privateKey: rsakey,
|
||||
}
|
||||
|
||||
if input.Username != "" {
|
||||
signer.userName = input.Username
|
||||
}
|
||||
|
||||
_, algorithm, err := signer.SignRaw("HelloWorld")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot sign using ssh agent: %s", err)
|
||||
}
|
||||
signer.algorithm = algorithm
|
||||
|
||||
return signer, nil
|
||||
}
|
||||
|
||||
func (s *PrivateKeySigner) Sign(dateHeader string) (string, error) {
|
||||
const headerName = "date"
|
||||
|
||||
hash := s.hashFunc.New()
|
||||
hash.Write([]byte(fmt.Sprintf("%s: %s", headerName, dateHeader)))
|
||||
digest := hash.Sum(nil)
|
||||
|
||||
signed, err := rsa.SignPKCS1v15(rand.Reader, s.privateKey, s.hashFunc, digest)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "unable to sign date header")
|
||||
}
|
||||
signedBase64 := base64.StdEncoding.EncodeToString(signed)
|
||||
|
||||
var keyID string
|
||||
if s.userName != "" {
|
||||
|
||||
keyID = path.Join("/", s.accountName, "users", s.userName, "keys", s.formattedKeyFingerprint)
|
||||
} else {
|
||||
keyID = path.Join("/", s.accountName, "keys", s.formattedKeyFingerprint)
|
||||
}
|
||||
return fmt.Sprintf(authorizationHeaderFormat, keyID, "rsa-sha1", headerName, signedBase64), nil
|
||||
}
|
||||
|
||||
func (s *PrivateKeySigner) SignRaw(toSign string) (string, string, error) {
|
||||
hash := s.hashFunc.New()
|
||||
hash.Write([]byte(toSign))
|
||||
digest := hash.Sum(nil)
|
||||
|
||||
signed, err := rsa.SignPKCS1v15(rand.Reader, s.privateKey, s.hashFunc, digest)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrap(err, "unable to sign date header")
|
||||
}
|
||||
signedBase64 := base64.StdEncoding.EncodeToString(signed)
|
||||
return signedBase64, "rsa-sha1", nil
|
||||
}
|
||||
|
||||
func (s *PrivateKeySigner) KeyFingerprint() string {
|
||||
return s.formattedKeyFingerprint
|
||||
}
|
||||
|
||||
func (s *PrivateKeySigner) DefaultAlgorithm() string {
|
||||
return s.algorithm
|
||||
}
|

vendor/github.com/joyent/triton-go/authentication/rsa_signature.go (generated, vendored): 33 deletions
@ -1,33 +0,0 @@
//
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//

package authentication

import (
	"encoding/base64"
)

type rsaSignature struct {
	hashAlgorithm string
	signature     []byte
}

func (s *rsaSignature) SignatureType() string {
	return s.hashAlgorithm
}

func (s *rsaSignature) String() string {
	return base64.StdEncoding.EncodeToString(s.signature)
}

func newRSASignature(signatureBlob []byte) (*rsaSignature, error) {
	return &rsaSignature{
		hashAlgorithm: "rsa-sha1",
		signature:     signatureBlob,
	}, nil
}

vendor/github.com/joyent/triton-go/authentication/signature.go (generated, vendored): 35 deletions
@ -1,35 +0,0 @@
//
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//

package authentication

import (
	"fmt"
	"regexp"
)

type httpAuthSignature interface {
	SignatureType() string
	String() string
}

func keyFormatToKeyType(keyFormat string) (string, error) {
	if keyFormat == "ssh-rsa" {
		return "rsa", nil
	}

	if keyFormat == "ssh-ed25519" {
		return "ed25519", nil
	}

	if regexp.MustCompile("^ecdsa-sha2-*").Match([]byte(keyFormat)) {
		return "ecdsa", nil
	}

	return "", fmt.Errorf("Unknown key format: %s", keyFormat)
}

vendor/github.com/joyent/triton-go/authentication/signer.go (generated, vendored): 18 deletions
@ -1,18 +0,0 @@
//
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//

package authentication

const authorizationHeaderFormat = `Signature keyId="%s",algorithm="%s",headers="%s",signature="%s"`

type Signer interface {
	DefaultAlgorithm() string
	KeyFingerprint() string
	Sign(dateHeader string) (string, error)
	SignRaw(toSign string) (string, string, error)
}

vendor/github.com/joyent/triton-go/authentication/ssh_agent_signer.go (generated, vendored): 190 deletions
@ -1,190 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
//
|
||||
|
||||
package authentication
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
pkgerrors "github.com/pkg/errors"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/crypto/ssh/agent"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrUnsetEnvVar = pkgerrors.New("environment variable SSH_AUTH_SOCK not set")
|
||||
)
|
||||
|
||||
type SSHAgentSigner struct {
|
||||
formattedKeyFingerprint string
|
||||
keyFingerprint string
|
||||
algorithm string
|
||||
accountName string
|
||||
userName string
|
||||
keyIdentifier string
|
||||
|
||||
agent agent.Agent
|
||||
key ssh.PublicKey
|
||||
}
|
||||
|
||||
type SSHAgentSignerInput struct {
|
||||
KeyID string
|
||||
AccountName string
|
||||
Username string
|
||||
}
|
||||
|
||||
func NewSSHAgentSigner(input SSHAgentSignerInput) (*SSHAgentSigner, error) {
|
||||
sshAgentAddress, agentOk := os.LookupEnv("SSH_AUTH_SOCK")
|
||||
if !agentOk {
|
||||
return nil, ErrUnsetEnvVar
|
||||
}
|
||||
|
||||
conn, err := net.Dial("unix", sshAgentAddress)
|
||||
if err != nil {
|
||||
return nil, pkgerrors.Wrap(err, "unable to dial SSH agent")
|
||||
}
|
||||
|
||||
ag := agent.NewClient(conn)
|
||||
|
||||
signer := &SSHAgentSigner{
|
||||
keyFingerprint: input.KeyID,
|
||||
accountName: input.AccountName,
|
||||
agent: ag,
|
||||
}
|
||||
|
||||
matchingKey, err := signer.MatchKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signer.key = matchingKey
|
||||
signer.formattedKeyFingerprint = formatPublicKeyFingerprint(signer.key, true)
|
||||
if input.Username != "" {
|
||||
signer.userName = input.Username
|
||||
signer.keyIdentifier = path.Join("/", signer.accountName, "users", input.Username, "keys", signer.formattedKeyFingerprint)
|
||||
} else {
|
||||
signer.keyIdentifier = path.Join("/", signer.accountName, "keys", signer.formattedKeyFingerprint)
|
||||
}
|
||||
|
||||
_, algorithm, err := signer.SignRaw("HelloWorld")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot sign using ssh agent: %s", err)
|
||||
}
|
||||
signer.algorithm = algorithm
|
||||
|
||||
return signer, nil
|
||||
}
|
||||
|
||||
func (s *SSHAgentSigner) MatchKey() (ssh.PublicKey, error) {
|
||||
keys, err := s.agent.List()
|
||||
if err != nil {
|
||||
return nil, pkgerrors.Wrap(err, "unable to list keys in SSH Agent")
|
||||
}
|
||||
|
||||
keyFingerprintStripped := strings.TrimPrefix(s.keyFingerprint, "MD5:")
|
||||
keyFingerprintStripped = strings.TrimPrefix(keyFingerprintStripped, "SHA256:")
|
||||
keyFingerprintStripped = strings.Replace(keyFingerprintStripped, ":", "", -1)
|
||||
|
||||
var matchingKey ssh.PublicKey
|
||||
for _, key := range keys {
|
||||
keyMD5 := md5.New()
|
||||
keyMD5.Write(key.Marshal())
|
||||
finalizedMD5 := fmt.Sprintf("%x", keyMD5.Sum(nil))
|
||||
|
||||
keySHA256 := sha256.New()
|
||||
keySHA256.Write(key.Marshal())
|
||||
finalizedSHA256 := base64.RawStdEncoding.EncodeToString(keySHA256.Sum(nil))
|
||||
|
||||
if keyFingerprintStripped == finalizedMD5 || keyFingerprintStripped == finalizedSHA256 {
|
||||
matchingKey = key
|
||||
}
|
||||
}
|
||||
|
||||
if matchingKey == nil {
|
||||
return nil, fmt.Errorf("No key in the SSH Agent matches fingerprint: %s", s.keyFingerprint)
|
||||
}
|
||||
|
||||
return matchingKey, nil
|
||||
}
|
||||
|
||||
func (s *SSHAgentSigner) Sign(dateHeader string) (string, error) {
|
||||
const headerName = "date"
|
||||
|
||||
signature, err := s.agent.Sign(s.key, []byte(fmt.Sprintf("%s: %s", headerName, dateHeader)))
|
||||
if err != nil {
|
||||
return "", pkgerrors.Wrap(err, "unable to sign date header")
|
||||
}
|
||||
|
||||
keyFormat, err := keyFormatToKeyType(signature.Format)
|
||||
if err != nil {
|
||||
return "", pkgerrors.Wrap(err, "unable to format signature")
|
||||
}
|
||||
|
||||
var authSignature httpAuthSignature
|
||||
switch keyFormat {
|
||||
case "rsa":
|
||||
authSignature, err = newRSASignature(signature.Blob)
|
||||
if err != nil {
|
||||
return "", pkgerrors.Wrap(err, "unable to read RSA signature")
|
||||
}
|
||||
case "ecdsa":
|
||||
authSignature, err = newECDSASignature(signature.Blob)
|
||||
if err != nil {
|
||||
return "", pkgerrors.Wrap(err, "unable to read ECDSA signature")
|
||||
}
|
||||
default:
|
||||
return "", fmt.Errorf("Unsupported algorithm from SSH agent: %s", signature.Format)
|
||||
}
|
||||
|
||||
return fmt.Sprintf(authorizationHeaderFormat, s.keyIdentifier,
|
||||
authSignature.SignatureType(), headerName, authSignature.String()), nil
|
||||
}
|
||||
|
||||
func (s *SSHAgentSigner) SignRaw(toSign string) (string, string, error) {
|
||||
signature, err := s.agent.Sign(s.key, []byte(toSign))
|
||||
if err != nil {
|
||||
return "", "", pkgerrors.Wrap(err, "unable to sign string")
|
||||
}
|
||||
|
||||
keyFormat, err := keyFormatToKeyType(signature.Format)
|
||||
if err != nil {
|
||||
return "", "", pkgerrors.Wrap(err, "unable to format key")
|
||||
}
|
||||
|
||||
var authSignature httpAuthSignature
|
||||
switch keyFormat {
|
||||
case "rsa":
|
||||
authSignature, err = newRSASignature(signature.Blob)
|
||||
if err != nil {
|
||||
return "", "", pkgerrors.Wrap(err, "unable to read RSA signature")
|
||||
}
|
||||
case "ecdsa":
|
||||
authSignature, err = newECDSASignature(signature.Blob)
|
||||
if err != nil {
|
||||
return "", "", pkgerrors.Wrap(err, "unable to read ECDSA signature")
|
||||
}
|
||||
default:
|
||||
return "", "", fmt.Errorf("Unsupported algorithm from SSH agent: %s", signature.Format)
|
||||
}
|
||||
|
||||
return authSignature.String(), authSignature.SignatureType(), nil
|
||||
}
|
||||
|
||||
func (s *SSHAgentSigner) KeyFingerprint() string {
|
||||
return s.formattedKeyFingerprint
|
||||
}
|
||||
|
||||
func (s *SSHAgentSigner) DefaultAlgorithm() string {
|
||||
return s.algorithm
|
||||
}
|

vendor/github.com/joyent/triton-go/authentication/test_signer.go (generated, vendored): 35 deletions
@ -1,35 +0,0 @@
//
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//

package authentication

// TestSigner represents an authentication key signer which we can use for
// testing purposes only. This will largely be a stub to send through client
// unit tests.
type TestSigner struct{}

// NewTestSigner constructs a new instance of test signer
func NewTestSigner() (Signer, error) {
	return &TestSigner{}, nil
}

func (s *TestSigner) DefaultAlgorithm() string {
	return ""
}

func (s *TestSigner) KeyFingerprint() string {
	return ""
}

func (s *TestSigner) Sign(dateHeader string) (string, error) {
	return "", nil
}

func (s *TestSigner) SignRaw(toSign string) (string, string, error) {
	return "", "", nil
}

vendor/github.com/joyent/triton-go/authentication/util.go (generated, vendored): 37 deletions
@ -1,37 +0,0 @@
//
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//

package authentication

import (
	"crypto/md5"
	"fmt"
	"strings"

	"golang.org/x/crypto/ssh"
)

// formatPublicKeyFingerprint produces the MD5 fingerprint of the given SSH
// public key. If display is true, the fingerprint is formatted with colons
// between each byte, as per the output of OpenSSL.
func formatPublicKeyFingerprint(key ssh.PublicKey, display bool) string {
	publicKeyFingerprint := md5.New()
	publicKeyFingerprint.Write(key.Marshal())
	publicKeyFingerprintString := fmt.Sprintf("%x", publicKeyFingerprint.Sum(nil))

	if !display {
		return publicKeyFingerprintString
	}

	formatted := ""
	for i := 0; i < len(publicKeyFingerprintString); i = i + 2 {
		formatted = fmt.Sprintf("%s%s:", formatted, publicKeyFingerprintString[i:i+2])
	}

	return strings.TrimSuffix(formatted, ":")
}

vendor/github.com/joyent/triton-go/client/client.go (generated, vendored): 444 deletions
@ -1,444 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
//
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/joyent/triton-go"
|
||||
"github.com/joyent/triton-go/authentication"
|
||||
"github.com/joyent/triton-go/errors"
|
||||
pkgerrors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const nilContext = "nil context"
|
||||
|
||||
var (
|
||||
ErrDefaultAuth = pkgerrors.New("default SSH agent authentication requires SDC_KEY_ID / TRITON_KEY_ID and SSH_AUTH_SOCK")
|
||||
ErrAccountName = pkgerrors.New("missing account name")
|
||||
ErrMissingURL = pkgerrors.New("missing API URL")
|
||||
|
||||
InvalidTritonURL = "invalid format of Triton URL"
|
||||
InvalidMantaURL = "invalid format of Manta URL"
|
||||
)
|
||||
|
||||
// Client represents a connection to the Triton Compute or Object Storage APIs.
|
||||
type Client struct {
|
||||
HTTPClient *http.Client
|
||||
Authorizers []authentication.Signer
|
||||
TritonURL url.URL
|
||||
MantaURL url.URL
|
||||
AccountName string
|
||||
Username string
|
||||
}
|
||||
|
||||
// New is used to construct a Client in order to make API
|
||||
// requests to the Triton API.
|
||||
//
|
||||
// At least one signer must be provided - example signers include
|
||||
// authentication.PrivateKeySigner and authentication.SSHAgentSigner.
|
||||
func New(tritonURL string, mantaURL string, accountName string, signers ...authentication.Signer) (*Client, error) {
|
||||
if accountName == "" {
|
||||
return nil, ErrAccountName
|
||||
}
|
||||
|
||||
if tritonURL == "" && mantaURL == "" {
|
||||
return nil, ErrMissingURL
|
||||
}
|
||||
|
||||
cloudURL, err := url.Parse(tritonURL)
|
||||
if err != nil {
|
||||
return nil, pkgerrors.Wrapf(err, InvalidTritonURL)
|
||||
}
|
||||
|
||||
storageURL, err := url.Parse(mantaURL)
|
||||
if err != nil {
|
||||
return nil, pkgerrors.Wrapf(err, InvalidMantaURL)
|
||||
}
|
||||
|
||||
authorizers := make([]authentication.Signer, 0)
|
||||
for _, key := range signers {
|
||||
if key != nil {
|
||||
authorizers = append(authorizers, key)
|
||||
}
|
||||
}
|
||||
|
||||
newClient := &Client{
|
||||
HTTPClient: &http.Client{
|
||||
Transport: httpTransport(false),
|
||||
CheckRedirect: doNotFollowRedirects,
|
||||
},
|
||||
Authorizers: authorizers,
|
||||
TritonURL: *cloudURL,
|
||||
MantaURL: *storageURL,
|
||||
AccountName: accountName,
|
||||
}
|
||||
|
||||
// Default to constructing an SSHAgentSigner if there are no other signers
|
||||
// passed into NewClient and there's a TRITON_KEY_ID and SSH_AUTH_SOCK
|
||||
// available in the user's environ(7).
|
||||
if len(newClient.Authorizers) == 0 {
|
||||
if err := newClient.DefaultAuth(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return newClient, nil
|
||||
}
|
||||
|
||||
var envPrefixes = []string{"TRITON", "SDC"}
|
||||
|
||||
// GetTritonEnv looks up environment variables using the preferred "TRITON"
|
||||
// prefix, but falls back to the SDC prefix. For example, looking up "USER"
|
||||
// will search for "TRITON_USER" followed by "SDC_USER". If the environment
|
||||
// variable is not set, an empty string is returned. GetTritonEnv() is used to
|
||||
// aid in the transition and deprecation of the SDC_* environment variables.
|
||||
func GetTritonEnv(name string) string {
|
||||
for _, prefix := range envPrefixes {
|
||||
if val, found := os.LookupEnv(prefix + "_" + name); found {
|
||||
return val
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// DefaultAuth provides a default key signer for a client. This should only
|
||||
// be used internally if the client has no other key signer for authenticating
|
||||
// with Triton. We first look for both `SDC_KEY_ID` and `SSH_AUTH_SOCK` in the
|
||||
// user's environ(7). If so we default to the SSH agent key signer.
|
||||
func (c *Client) DefaultAuth() error {
|
||||
tritonKeyId := GetTritonEnv("KEY_ID")
|
||||
if tritonKeyId != "" {
|
||||
input := authentication.SSHAgentSignerInput{
|
||||
KeyID: tritonKeyId,
|
||||
AccountName: c.AccountName,
|
||||
Username: c.Username,
|
||||
}
|
||||
defaultSigner, err := authentication.NewSSHAgentSigner(input)
|
||||
if err != nil {
|
||||
return pkgerrors.Wrapf(err, "unable to initialize NewSSHAgentSigner")
|
||||
}
|
||||
c.Authorizers = append(c.Authorizers, defaultSigner)
|
||||
}
|
||||
|
||||
return ErrDefaultAuth
|
||||
}
|
||||
|
||||
// InsecureSkipTLSVerify turns off TLS verification for the client connection. This
|
||||
// allows connection to an endpoint with a certificate which was signed by a non-
|
||||
// trusted CA, such as self-signed certificates. This can be useful when connecting
|
||||
// to temporary Triton installations such as Triton Cloud-On-A-Laptop.
|
||||
func (c *Client) InsecureSkipTLSVerify() {
|
||||
if c.HTTPClient == nil {
|
||||
return
|
||||
}
|
||||
|
||||
c.HTTPClient.Transport = httpTransport(true)
|
||||
}
|
||||
|
||||
func httpTransport(insecureSkipTLSVerify bool) *http.Transport {
|
||||
return &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
MaxIdleConns: 10,
|
||||
IdleConnTimeout: 15 * time.Second,
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: insecureSkipTLSVerify,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func doNotFollowRedirects(*http.Request, []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
|
||||
func (c *Client) DecodeError(resp *http.Response, requestMethod string) error {
|
||||
err := &errors.APIError{
|
||||
StatusCode: resp.StatusCode,
|
||||
}
|
||||
|
||||
if requestMethod != http.MethodHead && resp.Body != nil {
|
||||
errorDecoder := json.NewDecoder(resp.Body)
|
||||
if err := errorDecoder.Decode(err); err != nil {
|
||||
return pkgerrors.Wrapf(err, "unable to decode error response")
|
||||
}
|
||||
}
|
||||
|
||||
if err.Message == "" {
|
||||
err.Message = fmt.Sprintf("HTTP response returned status code %d", err.StatusCode)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
type RequestInput struct {
|
||||
Method string
|
||||
Path string
|
||||
Query *url.Values
|
||||
Headers *http.Header
|
||||
Body interface{}
|
||||
}
|
||||
|
||||
func (c *Client) ExecuteRequestURIParams(ctx context.Context, inputs RequestInput) (io.ReadCloser, error) {
|
||||
method := inputs.Method
|
||||
path := inputs.Path
|
||||
body := inputs.Body
|
||||
query := inputs.Query
|
||||
|
||||
var requestBody io.Reader
|
||||
if body != nil {
|
||||
marshaled, err := json.MarshalIndent(body, "", " ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
requestBody = bytes.NewReader(marshaled)
|
||||
}
|
||||
|
||||
endpoint := c.TritonURL
|
||||
endpoint.Path = path
|
||||
if query != nil {
|
||||
endpoint.RawQuery = query.Encode()
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(method, endpoint.String(), requestBody)
|
||||
if err != nil {
|
||||
return nil, pkgerrors.Wrapf(err, "unable to construct HTTP request")
|
||||
}
|
||||
|
||||
dateHeader := time.Now().UTC().Format(time.RFC1123)
|
||||
req.Header.Set("date", dateHeader)
|
||||
|
||||
// NewClient ensures there's always an authorizer (unless this is called
|
||||
// outside that constructor).
|
||||
authHeader, err := c.Authorizers[0].Sign(dateHeader)
|
||||
if err != nil {
|
||||
return nil, pkgerrors.Wrapf(err, "unable to sign HTTP request")
|
||||
}
|
||||
req.Header.Set("Authorization", authHeader)
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("Accept-Version", triton.CloudAPIMajorVersion)
|
||||
req.Header.Set("User-Agent", triton.UserAgent())
|
||||
|
||||
if body != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
|
||||
resp, err := c.HTTPClient.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return nil, pkgerrors.Wrapf(err, "unable to execute HTTP request")
|
||||
}
|
||||
|
||||
// We will only return a response from the API if it is in the HTTP StatusCode 2xx range
|
||||
// StatusMultipleChoices is StatusCode 300
|
||||
if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices {
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
return nil, c.DecodeError(resp, req.Method)
|
||||
}
|
||||
|
||||
func (c *Client) ExecuteRequest(ctx context.Context, inputs RequestInput) (io.ReadCloser, error) {
|
||||
return c.ExecuteRequestURIParams(ctx, inputs)
|
||||
}
|
||||
|
||||
func (c *Client) ExecuteRequestRaw(ctx context.Context, inputs RequestInput) (*http.Response, error) {
|
||||
method := inputs.Method
|
||||
path := inputs.Path
|
||||
body := inputs.Body
|
||||
|
||||
var requestBody io.Reader
|
||||
if body != nil {
|
||||
marshaled, err := json.MarshalIndent(body, "", " ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
requestBody = bytes.NewReader(marshaled)
|
||||
}
|
||||
|
||||
endpoint := c.TritonURL
|
||||
endpoint.Path = path
|
||||
|
||||
req, err := http.NewRequest(method, endpoint.String(), requestBody)
|
||||
if err != nil {
|
||||
return nil, pkgerrors.Wrapf(err, "unable to construct HTTP request")
|
||||
}
|
||||
|
||||
dateHeader := time.Now().UTC().Format(time.RFC1123)
|
||||
req.Header.Set("date", dateHeader)
|
||||
|
||||
// NewClient ensures there's always an authorizer (unless this is called
|
||||
// outside that constructor).
|
||||
authHeader, err := c.Authorizers[0].Sign(dateHeader)
|
||||
if err != nil {
|
||||
return nil, pkgerrors.Wrapf(err, "unable to sign HTTP request")
|
||||
}
|
||||
req.Header.Set("Authorization", authHeader)
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("Accept-Version", triton.CloudAPIMajorVersion)
|
||||
req.Header.Set("User-Agent", triton.UserAgent())
|
||||
|
||||
if body != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
|
||||
resp, err := c.HTTPClient.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return nil, pkgerrors.Wrapf(err, "unable to execute HTTP request")
|
||||
}
|
||||
|
||||
// We will only return a response from the API if it is in the HTTP StatusCode 2xx range
|
||||
// StatusMultipleChoices is StatusCode 300
|
||||
if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
return nil, c.DecodeError(resp, req.Method)
|
||||
}
|
||||
|
||||
func (c *Client) ExecuteRequestStorage(ctx context.Context, inputs RequestInput) (io.ReadCloser, http.Header, error) {
|
||||
method := inputs.Method
|
||||
path := inputs.Path
|
||||
query := inputs.Query
|
||||
headers := inputs.Headers
|
||||
body := inputs.Body
|
||||
|
||||
endpoint := c.MantaURL
|
||||
endpoint.Path = path
|
||||
|
||||
var requestBody io.Reader
|
||||
if body != nil {
|
||||
marshaled, err := json.MarshalIndent(body, "", " ")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
requestBody = bytes.NewReader(marshaled)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(method, endpoint.String(), requestBody)
|
||||
if err != nil {
|
||||
return nil, nil, pkgerrors.Wrapf(err, "unable to construct HTTP request")
|
||||
}
|
||||
|
||||
if body != nil && (headers == nil || headers.Get("Content-Type") == "") {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
if headers != nil {
|
||||
for key, values := range *headers {
|
||||
for _, value := range values {
|
||||
req.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dateHeader := time.Now().UTC().Format(time.RFC1123)
|
||||
req.Header.Set("date", dateHeader)
|
||||
|
||||
authHeader, err := c.Authorizers[0].Sign(dateHeader)
|
||||
if err != nil {
|
||||
return nil, nil, pkgerrors.Wrapf(err, "unable to sign HTTP request")
|
||||
}
|
||||
req.Header.Set("Authorization", authHeader)
|
||||
req.Header.Set("Accept", "*/*")
|
||||
req.Header.Set("User-Agent", triton.UserAgent())
|
||||
|
||||
if query != nil {
|
||||
req.URL.RawQuery = query.Encode()
|
||||
}
|
||||
|
||||
resp, err := c.HTTPClient.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return nil, nil, pkgerrors.Wrapf(err, "unable to execute HTTP request")
|
||||
}
|
||||
|
||||
// We will only return a response from the API if it is in the HTTP StatusCode 2xx range
|
||||
// StatusMultipleChoices is StatusCode 300
|
||||
if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices {
|
||||
return resp.Body, resp.Header, nil
|
||||
}
|
||||
|
||||
return nil, nil, c.DecodeError(resp, req.Method)
|
||||
}
|
||||
|
||||
type RequestNoEncodeInput struct {
|
||||
Method string
|
||||
Path string
|
||||
Query *url.Values
|
||||
Headers *http.Header
|
||||
Body io.Reader
|
||||
}
|
||||
|
||||
func (c *Client) ExecuteRequestNoEncode(ctx context.Context, inputs RequestNoEncodeInput) (io.ReadCloser, http.Header, error) {
|
||||
method := inputs.Method
|
||||
path := inputs.Path
|
||||
query := inputs.Query
|
||||
headers := inputs.Headers
|
||||
body := inputs.Body
|
||||
|
||||
endpoint := c.MantaURL
|
||||
endpoint.Path = path
|
||||
|
||||
req, err := http.NewRequest(method, endpoint.String(), body)
|
||||
if err != nil {
|
||||
return nil, nil, pkgerrors.Wrapf(err, "unable to construct HTTP request")
|
||||
}
|
||||
|
||||
if headers != nil {
|
||||
for key, values := range *headers {
|
||||
for _, value := range values {
|
||||
req.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dateHeader := time.Now().UTC().Format(time.RFC1123)
|
||||
req.Header.Set("date", dateHeader)
|
||||
|
||||
authHeader, err := c.Authorizers[0].Sign(dateHeader)
|
||||
if err != nil {
|
||||
return nil, nil, pkgerrors.Wrapf(err, "unable to sign HTTP request")
|
||||
}
|
||||
req.Header.Set("Authorization", authHeader)
|
||||
req.Header.Set("Accept", "*/*")
|
||||
req.Header.Set("Accept-Version", triton.CloudAPIMajorVersion)
|
||||
req.Header.Set("User-Agent", triton.UserAgent())
|
||||
|
||||
if query != nil {
|
||||
req.URL.RawQuery = query.Encode()
|
||||
}
|
||||
|
||||
resp, err := c.HTTPClient.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return nil, nil, pkgerrors.Wrapf(err, "unable to execute HTTP request")
|
||||
}
|
||||
|
||||
// We will only return a response from the API if it is in the HTTP StatusCode 2xx range
|
||||
// StatusMultipleChoices is StatusCode 300
|
||||
if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices {
|
||||
return resp.Body, resp.Header, nil
|
||||
}
|
||||
|
||||
return nil, nil, c.DecodeError(resp, req.Method)
|
||||
}
|

vendor/github.com/joyent/triton-go/errors/errors.go (generated, vendored): 297 deletions
@ -1,297 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
//
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// APIError represents an error code and message along with
|
||||
// the status code of the HTTP request which resulted in the error
|
||||
// message. Error codes used by the Triton API are listed at
|
||||
// https://apidocs.joyent.com/cloudapi/#cloudapi-http-responses
|
||||
// Error codes used by the Manta API are listed at
|
||||
// https://apidocs.joyent.com/manta/api.html#errors
|
||||
type APIError struct {
|
||||
StatusCode int
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// Error implements interface Error on the APIError type.
|
||||
func (e APIError) Error() string {
|
||||
return strings.Trim(fmt.Sprintf("%+q", e.Code), `"`) + ": " + strings.Trim(fmt.Sprintf("%+q", e.Message), `"`)
|
||||
}
|
||||
|
||||
// ClientError represents an error code and message returned
|
||||
// when connecting to the triton-go client
|
||||
type ClientError struct {
|
||||
StatusCode int
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// Error implements interface Error on the ClientError type.
|
||||
func (e ClientError) Error() string {
|
||||
return strings.Trim(fmt.Sprintf("%+q", e.Code), `"`) + ": " + strings.Trim(fmt.Sprintf("%+q", e.Message), `"`)
|
||||
}
|
||||
|
||||
func IsAuthSchemeError(err error) bool {
|
||||
return IsSpecificError(err, "AuthScheme")
|
||||
}
|
||||
|
||||
func IsAuthorizationError(err error) bool {
|
||||
return IsSpecificError(err, "Authorization")
|
||||
}
|
||||
|
||||
func IsBadRequestError(err error) bool {
|
||||
return IsSpecificError(err, "BadRequest")
|
||||
}
|
||||
|
||||
func IsChecksumError(err error) bool {
|
||||
return IsSpecificError(err, "Checksum")
|
||||
}
|
||||
|
||||
func IsConcurrentRequestError(err error) bool {
|
||||
return IsSpecificError(err, "ConcurrentRequest")
|
||||
}
|
||||
|
||||
func IsContentLengthError(err error) bool {
|
||||
return IsSpecificError(err, "ContentLength")
|
||||
}
|
||||
|
||||
func IsContentMD5MismatchError(err error) bool {
|
||||
return IsSpecificError(err, "ContentMD5Mismatch")
|
||||
}
|
||||
|
||||
func IsEntityExistsError(err error) bool {
|
||||
return IsSpecificError(err, "EntityExists")
|
||||
}
|
||||
|
||||
func IsInvalidArgumentError(err error) bool {
|
||||
return IsSpecificError(err, "InvalidArgument")
|
||||
}
|
||||
|
||||
func IsInvalidAuthTokenError(err error) bool {
|
||||
return IsSpecificError(err, "InvalidAuthToken")
|
||||
}
|
||||
|
||||
func IsInvalidCredentialsError(err error) bool {
|
||||
return IsSpecificError(err, "InvalidCredentials")
|
||||
}
|
||||
|
||||
func IsInvalidDurabilityLevelError(err error) bool {
|
||||
return IsSpecificError(err, "InvalidDurabilityLevel")
|
||||
}
|
||||
|
||||
func IsInvalidKeyIdError(err error) bool {
|
||||
return IsSpecificError(err, "InvalidKeyId")
|
||||
}
|
||||
|
||||
func IsInvalidJobError(err error) bool {
|
||||
return IsSpecificError(err, "InvalidJob")
|
||||
}
|
||||
|
||||
func IsInvalidLinkError(err error) bool {
|
||||
return IsSpecificError(err, "InvalidLink")
|
||||
}
|
||||
|
||||
func IsInvalidLimitError(err error) bool {
|
||||
return IsSpecificError(err, "InvalidLimit")
|
||||
}
|
||||
|
||||
func IsInvalidSignatureError(err error) bool {
|
||||
return IsSpecificError(err, "InvalidSignature")
|
||||
}
|
||||
|
||||
func IsInvalidUpdateError(err error) bool {
|
||||
return IsSpecificError(err, "InvalidUpdate")
|
||||
}
|
||||
|
||||
func IsDirectoryDoesNotExistError(err error) bool {
|
||||
return IsSpecificError(err, "DirectoryDoesNotExist")
|
||||
}
|
||||
|
||||
func IsDirectoryExistsError(err error) bool {
|
||||
return IsSpecificError(err, "DirectoryExists")
|
||||
}
|
||||
|
||||
func IsDirectoryNotEmptyError(err error) bool {
|
||||
return IsSpecificError(err, "DirectoryNotEmpty")
|
||||
}
|
||||
|
||||
func IsDirectoryOperationError(err error) bool {
|
||||
return IsSpecificError(err, "DirectoryOperation")
|
||||
}
|
||||
|
||||
func IsInternalError(err error) bool {
|
||||
return IsSpecificError(err, "Internal")
|
||||
}
|
||||
|
||||
func IsJobNotFoundError(err error) bool {
|
||||
return IsSpecificError(err, "JobNotFound")
|
||||
}
|
||||
|
||||
func IsJobStateError(err error) bool {
|
||||
return IsSpecificError(err, "JobState")
|
||||
}
|
||||
|
||||
func IsKeyDoesNotExistError(err error) bool {
|
||||
return IsSpecificError(err, "KeyDoesNotExist")
|
||||
}
|
||||
|
||||
func IsNotAcceptableError(err error) bool {
|
||||
return IsSpecificError(err, "NotAcceptable")
|
||||
}
|
||||
|
||||
func IsNotEnoughSpaceError(err error) bool {
|
||||
return IsSpecificError(err, "NotEnoughSpace")
|
||||
}
|
||||
|
||||
func IsLinkNotFoundError(err error) bool {
|
||||
return IsSpecificError(err, "LinkNotFound")
|
||||
}
|
||||
|
||||
func IsLinkNotObjectError(err error) bool {
|
||||
return IsSpecificError(err, "LinkNotObject")
|
||||
}
|
||||
|
||||
func IsLinkRequiredError(err error) bool {
|
||||
return IsSpecificError(err, "LinkRequired")
|
||||
}
|
||||
|
||||
func IsParentNotDirectoryError(err error) bool {
|
||||
return IsSpecificError(err, "ParentNotDirectory")
|
||||
}
|
||||
|
||||
func IsPreconditionFailedError(err error) bool {
|
||||
return IsSpecificError(err, "PreconditionFailed")
|
||||
}
|
||||
|
||||
func IsPreSignedRequestError(err error) bool {
|
||||
return IsSpecificError(err, "PreSignedRequest")
|
||||
}
|
||||
|
||||
func IsRequestEntityTooLargeError(err error) bool {
|
||||
return IsSpecificError(err, "RequestEntityTooLarge")
|
||||
}
|
||||
|
||||
func IsResourceNotFoundError(err error) bool {
|
||||
return IsSpecificError(err, "ResourceNotFound")
|
||||
}
|
||||
|
||||
func IsRootDirectoryError(err error) bool {
|
||||
return IsSpecificError(err, "RootDirectory")
|
||||
}
|
||||
|
||||
func IsServiceUnavailableError(err error) bool {
|
||||
return IsSpecificError(err, "ServiceUnavailable")
|
||||
}
|
||||
|
||||
func IsSSLRequiredError(err error) bool {
|
||||
return IsSpecificError(err, "SSLRequired")
|
||||
}
|
||||
|
||||
func IsUploadTimeoutError(err error) bool {
|
||||
return IsSpecificError(err, "UploadTimeout")
|
||||
}
|
||||
|
||||
func IsUserDoesNotExistError(err error) bool {
|
||||
return IsSpecificError(err, "UserDoesNotExist")
|
||||
}
|
||||
|
||||
func IsBadRequest(err error) bool {
|
||||
return IsSpecificError(err, "BadRequest")
|
||||
}
|
||||
|
||||
func IsInUseError(err error) bool {
|
||||
return IsSpecificError(err, "InUseError")
|
||||
}
|
||||
|
||||
func IsInvalidArgument(err error) bool {
|
||||
return IsSpecificError(err, "InvalidArgument")
|
||||
}
|
||||
|
||||
func IsInvalidCredentials(err error) bool {
|
||||
return IsSpecificError(err, "InvalidCredentials")
|
||||
}
|
||||
|
||||
func IsInvalidHeader(err error) bool {
|
||||
return IsSpecificError(err, "InvalidHeader")
|
||||
}
|
||||
|
||||
func IsInvalidVersion(err error) bool {
|
||||
return IsSpecificError(err, "InvalidVersion")
|
||||
}
|
||||
|
||||
func IsMissingParameter(err error) bool {
|
||||
return IsSpecificError(err, "MissingParameter")
|
||||
}
|
||||
|
||||
func IsNotAuthorized(err error) bool {
|
||||
return IsSpecificError(err, "NotAuthorized")
|
||||
}
|
||||
|
||||
func IsRequestThrottled(err error) bool {
|
||||
return IsSpecificError(err, "RequestThrottled")
|
||||
}
|
||||
|
||||
func IsRequestTooLarge(err error) bool {
|
||||
return IsSpecificError(err, "RequestTooLarge")
|
||||
}
|
||||
|
||||
func IsRequestMoved(err error) bool {
|
||||
return IsSpecificError(err, "RequestMoved")
|
||||
}
|
||||
|
||||
func IsResourceFound(err error) bool {
|
||||
return IsSpecificError(err, "ResourceFound")
|
||||
}
|
||||
|
||||
func IsResourceNotFound(err error) bool {
|
||||
return IsSpecificError(err, "ResourceNotFound")
|
||||
}
|
||||
|
||||
func IsUnknownError(err error) bool {
|
||||
return IsSpecificError(err, "UnknownError")
|
||||
}
|
||||
|
||||
func IsEmptyResponse(err error) bool {
|
||||
return IsSpecificError(err, "EmptyResponse")
|
||||
}
|
||||
|
||||
func IsStatusNotFoundCode(err error) bool {
|
||||
return IsSpecificStatusCode(err, http.StatusNotFound)
|
||||
}
|
||||
|
||||
func IsSpecificError(myError error, errorCode string) bool {
|
||||
switch err := errors.Cause(myError).(type) {
|
||||
case *APIError:
|
||||
if err.Code == errorCode {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func IsSpecificStatusCode(myError error, statusCode int) bool {
|
||||
switch err := errors.Cause(myError).(type) {
|
||||
case *APIError:
|
||||
if err.StatusCode == statusCode {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
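These predicates are meant to be consumed through `errors.Cause` on wrapped errors rather than by matching error strings. A minimal sketch of the intended call pattern (the `classify` helper below is hypothetical, not part of the removed package):

```Go
package main

import (
	"fmt"

	tterrors "github.com/joyent/triton-go/errors"
	"github.com/pkg/errors"
)

// classify is a hypothetical helper showing how callers are expected to
// branch on the typed predicates instead of inspecting error text.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case tterrors.IsResourceNotFoundError(err):
		return "not found"
	case tterrors.IsServiceUnavailableError(err):
		return "retry later"
	default:
		return "fatal"
	}
}

func main() {
	// Simulate an API error wrapped the way the SDK wraps it: IsSpecificError
	// unwraps with errors.Cause and compares APIError.Code.
	apiErr := &tterrors.APIError{StatusCode: 404, Code: "ResourceNotFound", Message: "no such object"}
	wrapped := errors.Wrap(apiErr, "unable to get object")

	fmt.Println(classify(wrapped)) // not found
}
```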
59  vendor/github.com/joyent/triton-go/storage/client.go  (generated, vendored)
@@ -1,59 +0,0 @@
//
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//

package storage

import (
	triton "github.com/joyent/triton-go"
	"github.com/joyent/triton-go/client"
)

type StorageClient struct {
	Client *client.Client
}

func newStorageClient(client *client.Client) *StorageClient {
	return &StorageClient{
		Client: client,
	}
}

// NewClient returns a new client for working with Storage endpoints and
// resources within CloudAPI
func NewClient(config *triton.ClientConfig) (*StorageClient, error) {
	// TODO: Utilize config interface within the function itself
	client, err := client.New(config.TritonURL, config.MantaURL, config.AccountName, config.Signers...)
	if err != nil {
		return nil, err
	}
	return newStorageClient(client), nil
}

// Dir returns a DirectoryClient used for accessing functions pertaining to
// Directories functionality of the Manta API.
func (c *StorageClient) Dir() *DirectoryClient {
	return &DirectoryClient{c.Client}
}

// Jobs returns a JobClient used for accessing functions pertaining to Jobs
// functionality of the Triton Object Storage API.
func (c *StorageClient) Jobs() *JobClient {
	return &JobClient{c.Client}
}

// Objects returns an ObjectsClient used for accessing functions pertaining to
// Objects functionality of the Triton Object Storage API.
func (c *StorageClient) Objects() *ObjectsClient {
	return &ObjectsClient{c.Client}
}

// SnapLinks returns a SnapLinksClient used for accessing functions pertaining to
// SnapLinks functionality of the Triton Object Storage API.
func (c *StorageClient) SnapLinks() *SnapLinksClient {
	return &SnapLinksClient{c.Client}
}
207  vendor/github.com/joyent/triton-go/storage/directory.go  (generated, vendored)
@@ -1,207 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
//
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/joyent/triton-go/client"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type DirectoryClient struct {
|
||||
client *client.Client
|
||||
}
|
||||
|
||||
// DirectoryEntry represents an object or directory in Manta.
|
||||
type DirectoryEntry struct {
|
||||
ETag string `json:"etag"`
|
||||
ModifiedTime time.Time `json:"mtime"`
|
||||
Name string `json:"name"`
|
||||
Size uint64 `json:"size"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// ListDirectoryInput represents parameters to a List operation.
|
||||
type ListDirectoryInput struct {
|
||||
DirectoryName string
|
||||
Limit uint64
|
||||
Marker string
|
||||
}
|
||||
|
||||
// ListDirectoryOutput contains the outputs of a List operation.
|
||||
type ListDirectoryOutput struct {
|
||||
Entries []*DirectoryEntry
|
||||
ResultSetSize uint64
|
||||
}
|
||||
|
||||
// List lists the contents of a directory on the Triton Object Store service.
|
||||
func (s *DirectoryClient) List(ctx context.Context, input *ListDirectoryInput) (*ListDirectoryOutput, error) {
|
||||
absPath := absFileInput(s.client.AccountName, input.DirectoryName)
|
||||
query := &url.Values{}
|
||||
if input.Limit != 0 {
|
||||
query.Set("limit", strconv.FormatUint(input.Limit, 10))
|
||||
}
|
||||
if input.Marker != "" {
|
||||
query.Set("manta_path", input.Marker)
|
||||
}
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodGet,
|
||||
Path: string(absPath),
|
||||
Query: query,
|
||||
}
|
||||
respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to list directory")
|
||||
}
|
||||
defer respBody.Close()
|
||||
|
||||
var results []*DirectoryEntry
|
||||
scanner := bufio.NewScanner(respBody)
|
||||
for scanner.Scan() {
|
||||
current := &DirectoryEntry{}
|
||||
if err := json.Unmarshal(scanner.Bytes(), current); err != nil {
|
||||
return nil, errors.Wrap(err, "unable to decode list directories response")
|
||||
}
|
||||
|
||||
results = append(results, current)
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, errors.Wrap(err, "unable to decode list directories response")
|
||||
}
|
||||
|
||||
output := &ListDirectoryOutput{
|
||||
Entries: results,
|
||||
}
|
||||
|
||||
resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64)
|
||||
if err == nil {
|
||||
output.ResultSetSize = resultSetSize
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
// PutDirectoryInput represents parameters to a Put operation.
|
||||
type PutDirectoryInput struct {
|
||||
DirectoryName string
|
||||
}
|
||||
|
||||
// Put puts a directory into the Triton Object Storage service. It is an idempotent
|
||||
// create-or-update operation. Your private namespace starts at /:login, and you
|
||||
// can create any nested set of directories or objects within it.
|
||||
func (s *DirectoryClient) Put(ctx context.Context, input *PutDirectoryInput) error {
|
||||
absPath := absFileInput(s.client.AccountName, input.DirectoryName)
|
||||
|
||||
headers := &http.Header{}
|
||||
headers.Set("Content-Type", "application/json; type=directory")
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodPut,
|
||||
Path: string(absPath),
|
||||
Headers: headers,
|
||||
}
|
||||
respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to put directory")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteDirectoryInput represents parameters to a Delete operation.
|
||||
type DeleteDirectoryInput struct {
|
||||
DirectoryName string
|
||||
ForceDelete bool //Will recursively delete all child directories and objects
|
||||
}
|
||||
|
||||
// Delete deletes a directory on the Triton Object Storage. The directory must
|
||||
// be empty.
|
||||
func (s *DirectoryClient) Delete(ctx context.Context, input *DeleteDirectoryInput) error {
|
||||
absPath := absFileInput(s.client.AccountName, input.DirectoryName)
|
||||
|
||||
if input.ForceDelete {
|
||||
err := deleteAll(*s, ctx, absPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err := deleteDirectory(*s, ctx, absPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteAll(c DirectoryClient, ctx context.Context, directoryPath _AbsCleanPath) error {
|
||||
objs, err := c.List(ctx, &ListDirectoryInput{
|
||||
DirectoryName: string(directoryPath),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, obj := range objs.Entries {
|
||||
newPath := absFileInput(c.client.AccountName, path.Join(string(directoryPath), obj.Name))
|
||||
if obj.Type == "directory" {
|
||||
err := deleteDirectory(c, ctx, newPath)
|
||||
if err != nil {
|
||||
return deleteAll(c, ctx, newPath)
|
||||
}
|
||||
} else {
|
||||
return deleteObject(c, ctx, newPath)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteDirectory(c DirectoryClient, ctx context.Context, directoryPath _AbsCleanPath) error {
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodDelete,
|
||||
Path: string(directoryPath),
|
||||
}
|
||||
respBody, _, err := c.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to delete directory")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteObject(c DirectoryClient, ctx context.Context, path _AbsCleanPath) error {
|
||||
objClient := &ObjectsClient{
|
||||
client: c.client,
|
||||
}
|
||||
|
||||
err := objClient.Delete(ctx, &DeleteObjectInput{
|
||||
ObjectPath: string(path),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
448  vendor/github.com/joyent/triton-go/storage/job.go  (generated, vendored)
@@ -1,448 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
//
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/joyent/triton-go/client"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type JobClient struct {
|
||||
client *client.Client
|
||||
}
|
||||
|
||||
const (
|
||||
JobStateDone = "done"
|
||||
JobStateRunning = "running"
|
||||
)
|
||||
|
||||
// JobPhase represents the specification for a map or reduce phase of a Manta
|
||||
// job.
|
||||
type JobPhase struct {
|
||||
// Type is the type of phase. Must be `map` or `reduce`.
|
||||
Type string `json:"type,omitempty"`
|
||||
|
||||
// Assets is an array of objects to be placed in your compute zones.
|
||||
Assets []string `json:"assets,omitempty"`
|
||||
|
||||
// Exec is the shell statement to execute. It may be any valid shell
|
||||
// command, including pipelines and other shell syntax. You can also
|
||||
// execute programs stored in the service by including them in "assets"
|
||||
// and referencing them as /assets/$manta_path.
|
||||
Exec string `json:"exec"`
|
||||
|
||||
// Init is a shell statement to execute in each compute zone before
|
||||
// any tasks are executed. The same constraints apply as to Exec.
|
||||
Init string `json:"init"`
|
||||
|
||||
// ReducerCount is an optional number of reducers for this phase. The
|
||||
// default value if not specified is 1. The maximum value is 1024.
|
||||
ReducerCount uint `json:"count,omitempty"`
|
||||
|
||||
// Memory is the amount of DRAM in MB to be allocated to the compute
|
||||
// zone. Valid values are 256, 512, 1024, 2048, 4096 or 8192.
|
||||
Memory uint64 `json:"memory,omitempty"`
|
||||
|
||||
// Disk is the amount of disk space in GB to be allocated to the compute
|
||||
// zone. Valid values are 2, 4, 8, 16, 32, 64, 128, 256, 512 or 1024.
|
||||
Disk uint64 `json:"disk,omitempty"`
|
||||
}
|
||||
|
||||
// JobSummary represents the summary of a compute job in Manta.
|
||||
type JobSummary struct {
|
||||
ModifiedTime time.Time `json:"mtime"`
|
||||
ID string `json:"name"`
|
||||
}
|
||||
|
||||
// Job represents a compute job in Manta.
|
||||
type Job struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Phases []*JobPhase `json:"phases"`
|
||||
State string `json:"state"`
|
||||
Cancelled bool `json:"cancelled"`
|
||||
InputDone bool `json:"inputDone"`
|
||||
CreatedTime time.Time `json:"timeCreated"`
|
||||
DoneTime time.Time `json:"timeDone"`
|
||||
Transient bool `json:"transient"`
|
||||
Stats *JobStats `json:"stats"`
|
||||
}
|
||||
|
||||
// JobStats represents statistics for a compute job in Manta.
|
||||
type JobStats struct {
|
||||
Errors uint64 `json:"errors"`
|
||||
Outputs uint64 `json:"outputs"`
|
||||
Retries uint64 `json:"retries"`
|
||||
Tasks uint64 `json:"tasks"`
|
||||
TasksDone uint64 `json:"tasksDone"`
|
||||
}
|
||||
|
||||
// CreateJobInput represents parameters to a CreateJob operation.
|
||||
type CreateJobInput struct {
|
||||
Name string `json:"name"`
|
||||
Phases []*JobPhase `json:"phases"`
|
||||
}
|
||||
|
||||
// CreateJobOutput contains the outputs of a CreateJob operation.
|
||||
type CreateJobOutput struct {
|
||||
JobID string
|
||||
}
|
||||
|
||||
// CreateJob submits a new job to be executed. This call is not
|
||||
// idempotent, so calling it twice will create two jobs.
|
||||
func (s *JobClient) Create(ctx context.Context, input *CreateJobInput) (*CreateJobOutput, error) {
|
||||
fullPath := path.Join("/", s.client.AccountName, "jobs")
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodPost,
|
||||
Path: fullPath,
|
||||
Body: input,
|
||||
}
|
||||
respBody, respHeaders, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to create job")
|
||||
}
|
||||
|
||||
jobURI := respHeaders.Get("Location")
|
||||
parts := strings.Split(jobURI, "/")
|
||||
jobID := parts[len(parts)-1]
|
||||
|
||||
response := &CreateJobOutput{
|
||||
JobID: jobID,
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// AddJobInputsInput represents parameters to an AddJobInputs operation.
|
||||
type AddJobInputsInput struct {
|
||||
JobID string
|
||||
ObjectPaths []string
|
||||
}
|
||||
|
||||
// AddJobInputs submits inputs to an already created job.
|
||||
func (s *JobClient) AddInputs(ctx context.Context, input *AddJobInputsInput) error {
|
||||
fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "in")
|
||||
headers := &http.Header{}
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
|
||||
reader := strings.NewReader(strings.Join(input.ObjectPaths, "\n"))
|
||||
|
||||
reqInput := client.RequestNoEncodeInput{
|
||||
Method: http.MethodPost,
|
||||
Path: fullPath,
|
||||
Headers: headers,
|
||||
Body: reader,
|
||||
}
|
||||
respBody, _, err := s.client.ExecuteRequestNoEncode(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to add job inputs")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EndJobInputInput represents parameters to a EndJobInput operation.
|
||||
type EndJobInputInput struct {
|
||||
JobID string
|
||||
}
|
||||
|
||||
// EndInput signals that no further inputs will be submitted to an already created job.
|
||||
func (s *JobClient) EndInput(ctx context.Context, input *EndJobInputInput) error {
|
||||
fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "in", "end")
|
||||
|
||||
reqInput := client.RequestNoEncodeInput{
|
||||
Method: http.MethodPost,
|
||||
Path: fullPath,
|
||||
}
|
||||
respBody, _, err := s.client.ExecuteRequestNoEncode(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to end job inputs")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CancelJobInput represents parameters to a CancelJob operation.
|
||||
type CancelJobInput struct {
|
||||
JobID string
|
||||
}
|
||||
|
||||
// CancelJob cancels a job from doing any further work. Cancellation
|
||||
// is asynchronous and "best effort"; there is no guarantee the job
|
||||
// will actually stop. For example, short jobs where input is already
|
||||
// closed will likely still run to completion.
|
||||
//
|
||||
// This is however useful when:
|
||||
// - input is still open
|
||||
// - you have a long-running job
|
||||
func (s *JobClient) Cancel(ctx context.Context, input *CancelJobInput) error {
|
||||
fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "cancel")
|
||||
|
||||
reqInput := client.RequestNoEncodeInput{
|
||||
Method: http.MethodPost,
|
||||
Path: fullPath,
|
||||
}
|
||||
respBody, _, err := s.client.ExecuteRequestNoEncode(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to cancel job")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListJobsInput represents parameters to a ListJobs operation.
|
||||
type ListJobsInput struct {
|
||||
RunningOnly bool
|
||||
Limit uint64
|
||||
Marker string
|
||||
}
|
||||
|
||||
// ListJobsOutput contains the outputs of a ListJobs operation.
|
||||
type ListJobsOutput struct {
|
||||
Jobs []*JobSummary
|
||||
ResultSetSize uint64
|
||||
}
|
||||
|
||||
// ListJobs returns the list of jobs you currently have.
|
||||
func (s *JobClient) List(ctx context.Context, input *ListJobsInput) (*ListJobsOutput, error) {
|
||||
fullPath := path.Join("/", s.client.AccountName, "jobs")
|
||||
query := &url.Values{}
|
||||
if input.RunningOnly {
|
||||
query.Set("state", "running")
|
||||
}
|
||||
if input.Limit != 0 {
|
||||
query.Set("limit", strconv.FormatUint(input.Limit, 10))
|
||||
}
|
||||
if input.Marker != "" {
|
||||
query.Set("manta_path", input.Marker)
|
||||
}
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodGet,
|
||||
Path: fullPath,
|
||||
Query: query,
|
||||
}
|
||||
respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to list jobs")
|
||||
}
|
||||
|
||||
var results []*JobSummary
|
||||
for {
|
||||
current := &JobSummary{}
|
||||
decoder := json.NewDecoder(respBody)
|
||||
if err = decoder.Decode(&current); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, errors.Wrap(err, "unable to decode list jobs response")
|
||||
}
|
||||
results = append(results, current)
|
||||
}
|
||||
|
||||
output := &ListJobsOutput{
|
||||
Jobs: results,
|
||||
}
|
||||
|
||||
resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64)
|
||||
if err == nil {
|
||||
output.ResultSetSize = resultSetSize
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
// GetJobInput represents parameters to a GetJob operation.
|
||||
type GetJobInput struct {
|
||||
JobID string
|
||||
}
|
||||
|
||||
// GetJobOutput contains the outputs of a GetJob operation.
|
||||
type GetJobOutput struct {
|
||||
Job *Job
|
||||
}
|
||||
|
||||
// Get returns the status of a single job.
|
||||
func (s *JobClient) Get(ctx context.Context, input *GetJobInput) (*GetJobOutput, error) {
|
||||
fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "status")
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodGet,
|
||||
Path: fullPath,
|
||||
}
|
||||
respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get job")
|
||||
}
|
||||
|
||||
job := &Job{}
|
||||
decoder := json.NewDecoder(respBody)
|
||||
if err = decoder.Decode(&job); err != nil {
|
||||
return nil, errors.Wrap(err, "unable to decode get job response")
|
||||
}
|
||||
|
||||
return &GetJobOutput{
|
||||
Job: job,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetJobOutputInput represents parameters to a GetJobOutput operation.
|
||||
type GetJobOutputInput struct {
|
||||
JobID string
|
||||
}
|
||||
|
||||
// GetJobOutputOutput contains the outputs for a GetJobOutput operation. It is your
|
||||
// responsibility to ensure that the io.ReadCloser Items is closed.
|
||||
type GetJobOutputOutput struct {
|
||||
ResultSetSize uint64
|
||||
Items io.ReadCloser
|
||||
}
|
||||
|
||||
// GetJobOutput returns the current "live" set of outputs from a job. Think of
|
||||
// this like `tail -f`. If error is nil (i.e. the operation is successful), it is
|
||||
// your responsibility to close the io.ReadCloser named Items in the output.
|
||||
func (s *JobClient) GetOutput(ctx context.Context, input *GetJobOutputInput) (*GetJobOutputOutput, error) {
|
||||
fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "out")
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodGet,
|
||||
Path: fullPath,
|
||||
}
|
||||
respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get job output")
|
||||
}
|
||||
|
||||
output := &GetJobOutputOutput{
|
||||
Items: respBody,
|
||||
}
|
||||
|
||||
resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64)
|
||||
if err == nil {
|
||||
output.ResultSetSize = resultSetSize
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
// GetJobInputInput represents parameters to a GetJobInput operation.
|
||||
type GetJobInputInput struct {
|
||||
JobID string
|
||||
}
|
||||
|
||||
// GetJobInputOutput contains the outputs for a GetJobInput operation. It is your
|
||||
// responsibility to ensure that the io.ReadCloser Items is closed.
|
||||
type GetJobInputOutput struct {
|
||||
ResultSetSize uint64
|
||||
Items io.ReadCloser
|
||||
}
|
||||
|
||||
// GetJobInput returns the current "live" set of inputs from a job. Think of
|
||||
// this like `tail -f`. If error is nil (i.e. the operation is successful), it is
|
||||
// your responsibility to close the io.ReadCloser named Items in the output.
|
||||
func (s *JobClient) GetInput(ctx context.Context, input *GetJobInputInput) (*GetJobInputOutput, error) {
|
||||
fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "in")
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodGet,
|
||||
Path: fullPath,
|
||||
}
|
||||
respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get job input")
|
||||
}
|
||||
|
||||
output := &GetJobInputOutput{
|
||||
Items: respBody,
|
||||
}
|
||||
|
||||
resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64)
|
||||
if err == nil {
|
||||
output.ResultSetSize = resultSetSize
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
// GetJobFailuresInput represents parameters to a GetJobFailures operation.
|
||||
type GetJobFailuresInput struct {
|
||||
JobID string
|
||||
}
|
||||
|
||||
// GetJobFailuresOutput contains the outputs for a GetJobFailures operation. It is your
|
||||
// responsibility to ensure that the io.ReadCloser Items is closed.
|
||||
type GetJobFailuresOutput struct {
|
||||
ResultSetSize uint64
|
||||
Items io.ReadCloser
|
||||
}
|
||||
|
||||
// GetJobFailures returns the current "live" set of outputs from a job. Think of
|
||||
// this like `tail -f`. If error is nil (i.e. the operation is successful), it is
|
||||
// your responsibility to close the io.ReadCloser named Items in the output.
|
||||
func (s *JobClient) GetFailures(ctx context.Context, input *GetJobFailuresInput) (*GetJobFailuresOutput, error) {
|
||||
fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "fail")
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodGet,
|
||||
Path: fullPath,
|
||||
}
|
||||
respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get job failures")
|
||||
}
|
||||
|
||||
output := &GetJobFailuresOutput{
|
||||
Items: respBody,
|
||||
}
|
||||
|
||||
resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64)
|
||||
if err == nil {
|
||||
output.ResultSetSize = resultSetSize
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
391  vendor/github.com/joyent/triton-go/storage/objects.go  (generated, vendored)
@@ -1,391 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
//
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/joyent/triton-go/client"
|
||||
tt "github.com/joyent/triton-go/errors"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type ObjectsClient struct {
|
||||
client *client.Client
|
||||
}
|
||||
|
||||
// GetInfoInput represents parameters to a GetInfo operation.
|
||||
type GetInfoInput struct {
|
||||
ObjectPath string
|
||||
Headers map[string]string
|
||||
}
|
||||
|
||||
// GetInfoOutput contains the outputs for a GetInfo operation. It is built from
// the response headers of the HEAD request; there is no response body to close.
|
||||
type GetInfoOutput struct {
|
||||
ContentLength uint64
|
||||
ContentType string
|
||||
LastModified time.Time
|
||||
ContentMD5 string
|
||||
ETag string
|
||||
Metadata map[string]string
|
||||
}
|
||||
|
||||
// GetInfo sends a HEAD request to an object in the Manta service. This function
|
||||
// does not return a response body.
|
||||
func (s *ObjectsClient) GetInfo(ctx context.Context, input *GetInfoInput) (*GetInfoOutput, error) {
|
||||
absPath := absFileInput(s.client.AccountName, input.ObjectPath)
|
||||
|
||||
headers := &http.Header{}
|
||||
for key, value := range input.Headers {
|
||||
headers.Set(key, value)
|
||||
}
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodHead,
|
||||
Path: string(absPath),
|
||||
Headers: headers,
|
||||
}
|
||||
_, respHeaders, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get info")
|
||||
}
|
||||
|
||||
response := &GetInfoOutput{
|
||||
ContentType: respHeaders.Get("Content-Type"),
|
||||
ContentMD5: respHeaders.Get("Content-MD5"),
|
||||
ETag: respHeaders.Get("Etag"),
|
||||
}
|
||||
|
||||
lastModified, err := time.Parse(time.RFC1123, respHeaders.Get("Last-Modified"))
|
||||
if err == nil {
|
||||
response.LastModified = lastModified
|
||||
}
|
||||
|
||||
contentLength, err := strconv.ParseUint(respHeaders.Get("Content-Length"), 10, 64)
|
||||
if err == nil {
|
||||
response.ContentLength = contentLength
|
||||
}
|
||||
|
||||
metadata := map[string]string{}
|
||||
for key, values := range respHeaders {
|
||||
if strings.HasPrefix(key, "m-") {
|
||||
metadata[key] = strings.Join(values, ", ")
|
||||
}
|
||||
}
|
||||
response.Metadata = metadata
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// IsDir is a convenience wrapper around the GetInfo function which takes an
|
||||
// ObjectPath and returns a boolean whether or not the object is a directory
|
||||
// type in Manta. Returns an error if GetInfo failed upstream for some reason.
|
||||
func (s *ObjectsClient) IsDir(ctx context.Context, objectPath string) (bool, error) {
|
||||
info, err := s.GetInfo(ctx, &GetInfoInput{
|
||||
ObjectPath: objectPath,
|
||||
})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if info != nil {
|
||||
return strings.HasSuffix(info.ContentType, "type=directory"), nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// GetObjectInput represents parameters to a GetObject operation.
|
||||
type GetObjectInput struct {
|
||||
ObjectPath string
|
||||
Headers map[string]string
|
||||
}
|
||||
|
||||
// GetObjectOutput contains the outputs for a GetObject operation. It is your
|
||||
// responsibility to ensure that the io.ReadCloser ObjectReader is closed.
|
||||
type GetObjectOutput struct {
|
||||
ContentLength uint64
|
||||
ContentType string
|
||||
LastModified time.Time
|
||||
ContentMD5 string
|
||||
ETag string
|
||||
Metadata map[string]string
|
||||
ObjectReader io.ReadCloser
|
||||
}
|
||||
|
||||
// Get retrieves an object from the Manta service. If error is nil (i.e. the
|
||||
// call returns successfully), it is your responsibility to close the
|
||||
// io.ReadCloser named ObjectReader in the operation output.
|
||||
func (s *ObjectsClient) Get(ctx context.Context, input *GetObjectInput) (*GetObjectOutput, error) {
|
||||
absPath := absFileInput(s.client.AccountName, input.ObjectPath)
|
||||
|
||||
headers := &http.Header{}
|
||||
for key, value := range input.Headers {
|
||||
headers.Set(key, value)
|
||||
}
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodGet,
|
||||
Path: string(absPath),
|
||||
Headers: headers,
|
||||
}
|
||||
respBody, respHeaders, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get object")
|
||||
}
|
||||
|
||||
response := &GetObjectOutput{
|
||||
ContentType: respHeaders.Get("Content-Type"),
|
||||
ContentMD5: respHeaders.Get("Content-MD5"),
|
||||
ETag: respHeaders.Get("Etag"),
|
||||
ObjectReader: respBody,
|
||||
}
|
||||
|
||||
lastModified, err := time.Parse(time.RFC1123, respHeaders.Get("Last-Modified"))
|
||||
if err == nil {
|
||||
response.LastModified = lastModified
|
||||
}
|
||||
|
||||
contentLength, err := strconv.ParseUint(respHeaders.Get("Content-Length"), 10, 64)
|
||||
if err == nil {
|
||||
response.ContentLength = contentLength
|
||||
}
|
||||
|
||||
metadata := map[string]string{}
|
||||
for key, values := range respHeaders {
|
||||
if strings.HasPrefix(key, "m-") {
|
||||
metadata[key] = strings.Join(values, ", ")
|
||||
}
|
||||
}
|
||||
response.Metadata = metadata
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// DeleteObjectInput represents parameters to a DeleteObject operation.
|
||||
type DeleteObjectInput struct {
|
||||
ObjectPath string
|
||||
Headers map[string]string
|
||||
}
|
||||
|
||||
// DeleteObject deletes an object.
|
||||
func (s *ObjectsClient) Delete(ctx context.Context, input *DeleteObjectInput) error {
|
||||
absPath := absFileInput(s.client.AccountName, input.ObjectPath)
|
||||
|
||||
headers := &http.Header{}
|
||||
for key, value := range input.Headers {
|
||||
headers.Set(key, value)
|
||||
}
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodDelete,
|
||||
Path: string(absPath),
|
||||
Headers: headers,
|
||||
}
|
||||
respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to delete object")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutObjectMetadataInput represents parameters to a PutObjectMetadata operation.
|
||||
type PutObjectMetadataInput struct {
|
||||
ObjectPath string
|
||||
ContentType string
|
||||
Metadata map[string]string
|
||||
}
|
||||
|
||||
// PutObjectMetadata allows you to overwrite the HTTP headers for an already
|
||||
// existing object, without changing the data. Note this is an idempotent "replace"
|
||||
// operation, so you must specify the complete set of HTTP headers you want
|
||||
// stored on each request.
|
||||
//
|
||||
// You cannot change "critical" headers:
|
||||
// - Content-Length
|
||||
// - Content-MD5
|
||||
// - Durability-Level
|
||||
func (s *ObjectsClient) PutMetadata(ctx context.Context, input *PutObjectMetadataInput) error {
|
||||
absPath := absFileInput(s.client.AccountName, input.ObjectPath)
|
||||
query := &url.Values{}
|
||||
query.Set("metadata", "true")
|
||||
|
||||
headers := &http.Header{}
|
||||
headers.Set("Content-Type", input.ContentType)
|
||||
for key, value := range input.Metadata {
|
||||
headers.Set(key, value)
|
||||
}
|
||||
|
||||
reqInput := client.RequestInput{
|
||||
Method: http.MethodPut,
|
||||
Path: string(absPath),
|
||||
Query: query,
|
||||
Headers: headers,
|
||||
}
|
||||
respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to put metadata")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutObjectInput represents parameters to a PutObject operation.
|
||||
type PutObjectInput struct {
|
||||
ObjectPath string
|
||||
DurabilityLevel uint64
|
||||
ContentType string
|
||||
ContentMD5 string
|
||||
IfMatch string
|
||||
IfModifiedSince *time.Time
|
||||
ContentLength uint64
|
||||
MaxContentLength uint64
|
||||
ObjectReader io.Reader
|
||||
Headers map[string]string
|
||||
ForceInsert bool //Force the creation of the directory tree
|
||||
}
|
||||
|
||||
func (s *ObjectsClient) Put(ctx context.Context, input *PutObjectInput) error {
|
||||
absPath := absFileInput(s.client.AccountName, input.ObjectPath)
|
||||
|
||||
if input.ForceInsert {
|
||||
absDirName := _AbsCleanPath(path.Dir(string(absPath)))
|
||||
exists, err := checkDirectoryTreeExists(*s, ctx, absDirName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
err := createDirectory(*s, ctx, absDirName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return putObject(*s, ctx, input, absPath)
|
||||
}
|
||||
}
|
||||
|
||||
return putObject(*s, ctx, input, absPath)
|
||||
}
|
||||
|
||||
// _AbsCleanPath is an internal type that means the input has been
|
||||
// path.Clean()'ed and is an absolute path.
|
||||
type _AbsCleanPath string
|
||||
|
||||
func absFileInput(accountName, objPath string) _AbsCleanPath {
|
||||
cleanInput := path.Clean(objPath)
|
||||
if strings.HasPrefix(cleanInput, path.Join("/", accountName, "/")) {
|
||||
return _AbsCleanPath(cleanInput)
|
||||
}
|
||||
|
||||
cleanAbs := path.Clean(path.Join("/", accountName, objPath))
|
||||
return _AbsCleanPath(cleanAbs)
|
||||
}
|
||||
|
||||
func putObject(c ObjectsClient, ctx context.Context, input *PutObjectInput, absPath _AbsCleanPath) error {
|
||||
if input.MaxContentLength != 0 && input.ContentLength != 0 {
|
||||
return errors.New("ContentLength and MaxContentLength may not both be set to non-zero values.")
|
||||
}
|
||||
|
||||
headers := &http.Header{}
|
||||
for key, value := range input.Headers {
|
||||
headers.Set(key, value)
|
||||
}
|
||||
if input.DurabilityLevel != 0 {
|
||||
headers.Set("Durability-Level", strconv.FormatUint(input.DurabilityLevel, 10))
|
||||
}
|
||||
if input.ContentType != "" {
|
||||
headers.Set("Content-Type", input.ContentType)
|
||||
}
|
||||
if input.ContentMD5 != "" {
|
||||
headers.Set("Content-MD5", input.ContentMD5)
|
||||
}
|
||||
if input.IfMatch != "" {
|
||||
headers.Set("If-Match", input.IfMatch)
|
||||
}
|
||||
if input.IfModifiedSince != nil {
|
||||
headers.Set("If-Modified-Since", input.IfModifiedSince.Format(time.RFC1123))
|
||||
}
|
||||
if input.ContentLength != 0 {
|
||||
headers.Set("Content-Length", strconv.FormatUint(input.ContentLength, 10))
|
||||
}
|
||||
if input.MaxContentLength != 0 {
|
||||
headers.Set("Max-Content-Length", strconv.FormatUint(input.MaxContentLength, 10))
|
||||
}
|
||||
|
||||
reqInput := client.RequestNoEncodeInput{
|
||||
Method: http.MethodPut,
|
||||
Path: string(absPath),
|
||||
Headers: headers,
|
||||
Body: input.ObjectReader,
|
||||
}
|
||||
respBody, _, err := c.client.ExecuteRequestNoEncode(ctx, reqInput)
|
||||
if respBody != nil {
|
||||
defer respBody.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to put object")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createDirectory(c ObjectsClient, ctx context.Context, absPath _AbsCleanPath) error {
|
||||
dirClient := &DirectoryClient{
|
||||
client: c.client,
|
||||
}
|
||||
|
||||
// An abspath starts w/ a leading "/" which gets added to the slice as an
|
||||
// empty string. Start all array math at 1.
|
||||
parts := strings.Split(string(absPath), "/")
|
||||
if len(parts) < 2 {
|
||||
return errors.New("no path components to create directory")
|
||||
}
|
||||
|
||||
folderPath := parts[1]
|
||||
// Don't attempt to create a manta account as a directory
|
||||
for i := 2; i < len(parts); i++ {
|
||||
part := parts[i]
|
||||
folderPath = path.Clean(path.Join("/", folderPath, part))
|
||||
err := dirClient.Put(ctx, &PutDirectoryInput{
|
||||
DirectoryName: folderPath,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDirectoryTreeExists(c ObjectsClient, ctx context.Context, absPath _AbsCleanPath) (bool, error) {
|
||||
exists, err := c.IsDir(ctx, string(absPath))
|
||||
if err != nil {
|
||||
if tt.IsResourceNotFoundError(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
if exists {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
90  vendor/github.com/joyent/triton-go/storage/signing.go  (generated, vendored)
@@ -1,90 +0,0 @@
//
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//

package storage

import (
	"bytes"
	"fmt"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"
)

// SignURLInput represents parameters to a SignURL operation.
type SignURLInput struct {
	ValidityPeriod time.Duration
	Method         string
	ObjectPath     string
}

// SignURLOutput contains the outputs of a SignURL operation. To simply
// access the signed URL, use the SignedURL method.
type SignURLOutput struct {
	host       string
	objectPath string
	Method     string
	Algorithm  string
	Signature  string
	Expires    string
	KeyID      string
}

// SignedURL returns a signed URL for the given scheme. Valid schemes are
// `http` and `https`.
func (output *SignURLOutput) SignedURL(scheme string) string {
	query := &url.Values{}
	query.Set("algorithm", output.Algorithm)
	query.Set("expires", output.Expires)
	query.Set("keyId", output.KeyID)
	query.Set("signature", output.Signature)

	sUrl := url.URL{}
	sUrl.Scheme = scheme
	sUrl.Host = output.host
	sUrl.Path = output.objectPath
	sUrl.RawQuery = query.Encode()

	return sUrl.String()
}

// SignURL creates a time-expiring URL that can be shared with others.
// This is useful to generate HTML links, for example.
func (s *StorageClient) SignURL(input *SignURLInput) (*SignURLOutput, error) {
	output := &SignURLOutput{
		host:       s.Client.MantaURL.Host,
		objectPath: fmt.Sprintf("/%s%s", s.Client.AccountName, input.ObjectPath),
		Method:     input.Method,
		Algorithm:  strings.ToUpper(s.Client.Authorizers[0].DefaultAlgorithm()),
		Expires:    strconv.FormatInt(time.Now().Add(input.ValidityPeriod).Unix(), 10),
		KeyID:      path.Join("/", s.Client.AccountName, "keys", s.Client.Authorizers[0].KeyFingerprint()),
	}

	toSign := bytes.Buffer{}
	toSign.WriteString(input.Method + "\n")
	toSign.WriteString(s.Client.MantaURL.Host + "\n")
	toSign.WriteString(fmt.Sprintf("/%s%s\n", s.Client.AccountName, input.ObjectPath))

	query := &url.Values{}
	query.Set("algorithm", output.Algorithm)
	query.Set("expires", output.Expires)
	query.Set("keyId", output.KeyID)
	toSign.WriteString(query.Encode())

	signature, _, err := s.Client.Authorizers[0].SignRaw(toSign.String())
	if err != nil {
		return nil, errors.Wrapf(err, "error signing string")
	}

	output.Signature = signature
	return output, nil
}
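SignURL only produces the signature components; turning the result into a shareable link is the job of SignedURL. A minimal sketch of that flow, assuming a *storage.StorageClient has already been constructed elsewhere (NewClient with a Manta URL, account name, and at least one signer):

```Go
package example

import (
	"net/http"
	"time"

	"github.com/joyent/triton-go/storage"
)

// shareLink is a hypothetical helper: it presigns a GET on an object for one
// hour and returns an https URL that can be embedded in an HTML page.
func shareLink(sc *storage.StorageClient, objectPath string) (string, error) {
	out, err := sc.SignURL(&storage.SignURLInput{
		Method:         http.MethodGet,
		ObjectPath:     objectPath, // e.g. "/stor/reports/2018-01.csv"
		ValidityPeriod: time.Hour,
	})
	if err != nil {
		return "", err
	}
	// SignedURL assembles scheme://host/path?algorithm=...&expires=...&keyId=...&signature=...
	return out.SignedURL("https"), nil
}
```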
54  vendor/github.com/joyent/triton-go/storage/snaplink.go  (generated, vendored)
@@ -1,54 +0,0 @@
//
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//

package storage

import (
	"context"
	"fmt"
	"net/http"

	"github.com/joyent/triton-go/client"
	"github.com/pkg/errors"
)

type SnapLinksClient struct {
	client *client.Client
}

// PutSnapLinkInput represents parameters to a PutSnapLink operation.
type PutSnapLinkInput struct {
	LinkPath   string
	SourcePath string
}

// PutSnapLink creates a SnapLink to an object.
func (s *SnapLinksClient) Put(ctx context.Context, input *PutSnapLinkInput) error {
	linkPath := fmt.Sprintf("/%s%s", s.client.AccountName, input.LinkPath)
	sourcePath := fmt.Sprintf("/%s%s", s.client.AccountName, input.SourcePath)
	headers := &http.Header{}
	headers.Set("Content-Type", "application/json; type=link")
	headers.Set("location", sourcePath)
	headers.Set("Accept", "~1.0")
	headers.Set("Accept-Version", "application/json, */*")

	reqInput := client.RequestInput{
		Method:  http.MethodPut,
		Path:    linkPath,
		Headers: headers,
	}
	respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput)
	if respBody != nil {
		defer respBody.Close()
	}
	if err != nil {
		return errors.Wrapf(err, "unable to put snaplink")
	}

	return nil
}
27  vendor/github.com/joyent/triton-go/triton.go  (generated, vendored)
@@ -1,27 +0,0 @@
//
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//

package triton

import (
	"github.com/joyent/triton-go/authentication"
)

// Universal package used for defining configuration used across all client
// constructors.

// ClientConfig is a placeholder/input struct around the behavior of configuring
// a client constructor through the implementation's runtime environment
// (SDC/MANTA env vars).
type ClientConfig struct {
	TritonURL   string
	MantaURL    string
	AccountName string
	Username    string
	Signers     []authentication.Signer
}
32  vendor/github.com/joyent/triton-go/version.go  (generated, vendored)
@@ -1,32 +0,0 @@
//
// Copyright (c) 2018, Joyent, Inc. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//

package triton

import (
	"fmt"
	"runtime"
)

// The main version number of the current released Triton-go SDK.
const Version = "0.9.0"

// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
var Prerelease = ""

func UserAgent() string {
	if Prerelease != "" {
		return fmt.Sprintf("triton-go/%s-%s (%s-%s; %s)", Version, Prerelease, runtime.GOARCH, runtime.GOOS, runtime.Version())
	} else {
		return fmt.Sprintf("triton-go/%s (%s-%s; %s)", Version, runtime.GOARCH, runtime.GOOS, runtime.Version())
	}
}

const CloudAPIMajorVersion = "8"
22  vendor/github.com/klauspost/readahead/LICENSE  (generated, vendored)
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015 Klaus Post

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
59  vendor/github.com/klauspost/readahead/README.md  (generated, vendored)
@@ -1,59 +0,0 @@
# readahead
Asynchronous read-ahead for Go readers

This package will allow you to add read-ahead to any reader. This means a separate goroutine will perform reads from your upstream reader, so you can request from this reader without delay.

This is helpful for splitting an input stream into concurrent processing, and also helps smooth out **bursts** of input or output.

This should be fully transparent, except that once an error has been returned from the Reader, it will not recover. A panic will be caught and returned as an error.

The readahead object also fulfills the [`io.WriterTo`](https://golang.org/pkg/io/#WriterTo) interface, which is likely to speed up `io.Copy` and other code that uses the interface.

See an introduction: [An Async Read-ahead Package for Go](https://blog.klauspost.com/an-async-read-ahead-package-for-go/)

[![GoDoc][1]][2] [![Build Status][3]][4]

[1]: https://godoc.org/github.com/klauspost/readahead?status.svg
[2]: https://godoc.org/github.com/klauspost/readahead
[3]: https://travis-ci.org/klauspost/readahead.svg
[4]: https://travis-ci.org/klauspost/readahead

# usage

To get the package use `go get -u github.com/klauspost/readahead`.

Here is a simple example that does file copy. Error handling has been omitted for brevity.
```Go
input, _ := os.Open("input.txt")
output, _ := os.Create("output.txt")
defer input.Close()
defer output.Close()

// Create a read-ahead Reader with default settings
ra := readahead.NewReader(input)
defer ra.Close()

// Copy the content to our output
_, _ = io.Copy(output, ra)
```

# settings

You can fine-tune the read-ahead for your specific use case, and adjust the number of buffers and the size of each buffer.

By default the size of each buffer is 1MB, and there are 4 buffers. Do not make your buffers too small since there is a small overhead for passing buffers between goroutines. Other than that you are free to experiment with buffer sizes.

# contributions

On this project contributions in terms of new features are limited to:

* Features that are widely usable and
* Features that have extensive tests

This package is meant to be simple and stable, hence these strict requirements.

The only feature I have considered is supporting the `io.Seeker` interface. I currently do not plan to add it myself, but if you can show a clean and well-tested way to implement it, I will consider merging it. If not, I will be happy to link to it.

# license

This package is released under the MIT license. See the supplied LICENSE file for more info.
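Where the defaults do not fit (for example many small concurrent streams), the same pattern works with NewReaderSize, which is defined in reader.go below. A sketch using 16 buffers of 64KB each, with error handling included; the file names are placeholders:

```Go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/readahead"
)

func main() {
	input, err := os.Open("input.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer input.Close()

	output, err := os.Create("output.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer output.Close()

	// 16 buffers of 64KB each instead of the default 4 x 1MB.
	ra, err := readahead.NewReaderSize(input, 16, 64<<10)
	if err != nil {
		log.Fatal(err)
	}
	defer ra.Close()

	if _, err := io.Copy(output, ra); err != nil {
		log.Fatal(err)
	}
}
```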
282  vendor/github.com/klauspost/readahead/reader.go  (generated, vendored)
@@ -1,282 +0,0 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// The readahead package will do asynchronous read-ahead from an input io.Reader
// and make the data available as an io.Reader.
//
// This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover.
//
// The readahead object also fulfills the io.WriterTo interface, which
// is likely to speed up copies.
//
// Package home: https://github.com/klauspost/readahead
//
package readahead

import (
	"fmt"
	"io"
)

type reader struct {
	in      io.Reader     // Input reader
	closer  io.Closer     // Optional closer
	ready   chan *buffer  // Buffers ready to be handed to the reader
	reuse   chan *buffer  // Buffers to reuse for input reading
	exit    chan struct{} // Closes when finished
	buffers int           // Number of buffers
	err     error         // If an error has occurred it is here
	cur     *buffer       // Current buffer being served
	exited  chan struct{} // Channel is closed when the async reader shuts down
}

// NewReader returns a reader that will asynchronously read from
// the supplied reader into 4 buffers of 1MB each.
//
// It will start reading from the input at once, maybe even before this
// function has returned.
//
// The input can be read from the returned reader.
// When done use Close() to release the buffers.
func NewReader(rd io.Reader) io.ReadCloser {
	if rd == nil {
		return nil
	}

	ret, err := NewReaderSize(rd, 4, 1<<20)

	// Should not be possible to trigger from other packages.
	if err != nil {
		panic("unexpected error:" + err.Error())
	}
	return ret
}

// NewReadCloser returns a reader that will asynchronously read from
// the supplied reader into 4 buffers of 1MB each.
//
// It will start reading from the input at once, maybe even before this
// function has returned.
//
// The input can be read from the returned reader.
// When done use Close() to release the buffers,
// which will also close the supplied closer.
func NewReadCloser(rd io.ReadCloser) io.ReadCloser {
	if rd == nil {
		return nil
	}

	ret, err := NewReadCloserSize(rd, 4, 1<<20)

	// Should not be possible to trigger from other packages.
	if err != nil {
		panic("unexpected error:" + err.Error())
	}
	return ret
}

// NewReaderSize returns a reader with a custom number of buffers and size.
// buffers is the number of queued buffers and size is the size of each
// buffer in bytes.
func NewReaderSize(rd io.Reader, buffers, size int) (io.ReadCloser, error) {
	if size <= 0 {
		return nil, fmt.Errorf("buffer size too small")
	}
	if buffers <= 0 {
		return nil, fmt.Errorf("number of buffers too small")
	}
	if rd == nil {
		return nil, fmt.Errorf("nil input reader supplied")
	}
	a := &reader{}
	a.init(rd, buffers, size)
	return a, nil
}

// NewReadCloserSize returns a reader with a custom number of buffers and size.
// buffers is the number of queued buffers and size is the size of each
// buffer in bytes.
func NewReadCloserSize(rc io.ReadCloser, buffers, size int) (io.ReadCloser, error) {
	if size <= 0 {
		return nil, fmt.Errorf("buffer size too small")
	}
	if buffers <= 0 {
		return nil, fmt.Errorf("number of buffers too small")
|
||||
}
|
||||
if rc == nil {
|
||||
return nil, fmt.Errorf("nil input reader supplied")
|
||||
}
|
||||
a := &reader{closer: rc}
|
||||
a.init(rc, buffers, size)
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// initialize the reader
|
||||
func (a *reader) init(rd io.Reader, buffers, size int) {
|
||||
a.in = rd
|
||||
a.ready = make(chan *buffer, buffers)
|
||||
a.reuse = make(chan *buffer, buffers)
|
||||
a.exit = make(chan struct{}, 0)
|
||||
a.exited = make(chan struct{}, 0)
|
||||
a.buffers = buffers
|
||||
a.cur = nil
|
||||
|
||||
// Create buffers
|
||||
for i := 0; i < buffers; i++ {
|
||||
a.reuse <- newBuffer(size)
|
||||
}
|
||||
|
||||
// Start async reader
|
||||
go func() {
|
||||
// Ensure that when we exit this is signalled.
|
||||
defer close(a.exited)
|
||||
for {
|
||||
select {
|
||||
case b := <-a.reuse:
|
||||
err := b.read(a.in)
|
||||
a.ready <- b
|
||||
if err != nil {
|
||||
close(a.ready)
|
||||
return
|
||||
}
|
||||
case <-a.exit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// fill will check if the current buffer is empty and fill it if it is.
|
||||
// If an error was returned at the end of the current buffer it is returned.
|
||||
func (a *reader) fill() (err error) {
|
||||
if a.cur.isEmpty() {
|
||||
if a.cur != nil {
|
||||
a.reuse <- a.cur
|
||||
a.cur = nil
|
||||
}
|
||||
b, ok := <-a.ready
|
||||
if !ok {
|
||||
return a.err
|
||||
}
|
||||
a.cur = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read will return the next available data.
|
||||
func (a *reader) Read(p []byte) (n int, err error) {
|
||||
// Swap buffer and maybe return error
|
||||
err = a.fill()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Copy what we can
|
||||
n = copy(p, a.cur.buffer())
|
||||
a.cur.inc(n)
|
||||
|
||||
// If at end of buffer, return any error, if present
|
||||
if a.cur.isEmpty() {
|
||||
a.err = a.cur.err
|
||||
return n, a.err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// WriteTo writes data to w until there's no more data to write or when an error occurs.
|
||||
// The return value n is the number of bytes written.
|
||||
// Any error encountered during the write is also returned.
|
||||
func (a *reader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
n = 0
|
||||
for {
|
||||
err = a.fill()
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
n2, err := w.Write(a.cur.buffer())
|
||||
a.cur.inc(n2)
|
||||
n += int64(n2)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if a.cur.err != nil {
|
||||
// io.Writer should return nil if we are at EOF.
|
||||
if a.cur.err == io.EOF {
|
||||
a.err = a.cur.err
|
||||
return n, nil
|
||||
}
|
||||
a.err = a.cur.err
|
||||
return n, a.cur.err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close will ensure that the underlying async reader is shut down.
|
||||
// It will also close the input supplied on newAsyncReader.
|
||||
func (a *reader) Close() (err error) {
|
||||
select {
|
||||
case <-a.exited:
|
||||
case a.exit <- struct{}{}:
|
||||
<-a.exited
|
||||
}
|
||||
if a.closer != nil {
|
||||
// Only call once
|
||||
c := a.closer
|
||||
a.closer = nil
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Internal buffer representing a single read.
|
||||
// If an error is present, it must be returned
|
||||
// once all buffer content has been served.
|
||||
type buffer struct {
|
||||
buf []byte
|
||||
err error
|
||||
offset int
|
||||
size int
|
||||
}
|
||||
|
||||
func newBuffer(size int) *buffer {
|
||||
return &buffer{buf: make([]byte, size), err: nil, size: size}
|
||||
}
|
||||
|
||||
// isEmpty returns true is offset is at end of
|
||||
// buffer, or if the buffer is nil
|
||||
func (b *buffer) isEmpty() bool {
|
||||
if b == nil {
|
||||
return true
|
||||
}
|
||||
if len(b.buf)-b.offset <= 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// read into start of the buffer from the supplied reader,
|
||||
// resets the offset and updates the size of the buffer.
|
||||
// Any error encountered during the read is returned.
|
||||
func (b *buffer) read(rd io.Reader) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = fmt.Errorf("panic reading: %v", r)
|
||||
b.err = err
|
||||
}
|
||||
}()
|
||||
var n int
|
||||
n, b.err = rd.Read(b.buf[0:b.size])
|
||||
b.buf = b.buf[0:n]
|
||||
b.offset = 0
|
||||
return b.err
|
||||
}
|
||||
|
||||
// Return the buffer at current offset
|
||||
func (b *buffer) buffer() []byte {
|
||||
return b.buf[b.offset:]
|
||||
}
|
||||
|
||||
// inc will increment the read offset
|
||||
func (b *buffer) inc(n int) {
|
||||
b.offset += n
|
||||
}
|
13 vendor/github.com/minio/blazer/LICENSE generated vendored
@ -1,13 +0,0 @@
Copyright 2016, Google

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

1204 vendor/github.com/minio/blazer/base/base.go generated vendored
File diff suppressed because it is too large
81 vendor/github.com/minio/blazer/base/strings.go generated vendored
@ -1,81 +0,0 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package base

import (
	"bytes"
	"errors"
	"fmt"
)

func noEscape(c byte) bool {
	switch c {
	case '.', '_', '-', '/', '~', '!', '$', '\'', '(', ')', '*', ';', '=', ':', '@':
		return true
	}
	return false
}

func escape(s string) string {
	// cribbed from url.go, kinda
	b := &bytes.Buffer{}
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case c == '/':
			b.WriteByte(c)
		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9':
			b.WriteByte(c)
		case noEscape(c):
			b.WriteByte(c)
		default:
			fmt.Fprintf(b, "%%%X", c)
		}
	}
	return b.String()
}

func unescape(s string) (string, error) {
	b := &bytes.Buffer{}
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch c {
		case '/':
			b.WriteString("/")
		case '+':
			b.WriteString(" ")
		case '%':
			if len(s)-i < 3 {
				return "", errors.New("unescape: bad encoding")
			}
			b.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
			i += 2
		default:
			b.WriteByte(c)
		}
	}
	return b.String(), nil
}

func unhex(c byte) byte {
	switch {
	case '0' <= c && c <= '9':
		return c - '0'
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10
	}
	return 0
}
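Not part of the diff: a hypothetical in-package test sketch illustrating that `escape` and `unescape` above round-trip a typical B2 object name. Since both functions are unexported, such a test would have to live inside `package base` (the file name used here is made up).

```go
// strings_roundtrip_test.go (hypothetical), package base
package base

import "testing"

// TestEscapeRoundTrip is illustrative only and does not exist in the vendored code.
func TestEscapeRoundTrip(t *testing.T) {
	in := "videos/2017 clips/b2 upload+test.mp4"
	enc := escape(in) // spaces and '+' become %XX escapes; '/' is preserved
	out, err := unescape(enc)
	if err != nil {
		t.Fatalf("unescape: %v", err)
	}
	if out != in {
		t.Fatalf("round trip mismatch: got %q, want %q", out, in)
	}
}
```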
255 vendor/github.com/minio/blazer/internal/b2types/b2types.go generated vendored
@ -1,255 +0,0 @@
|
||||
// Copyright 2016, Google
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package b2types implements internal types common to the B2 API.
|
||||
package b2types
|
||||
|
||||
// You know what would be amazing? If I could autogen this from like a JSON
|
||||
// file. Wouldn't that be amazing? That would be amazing.
|
||||
|
||||
const (
|
||||
V1api = "/b2api/v1/"
|
||||
)
|
||||
|
||||
type ErrorMessage struct {
|
||||
Status int `json:"status"`
|
||||
Code string `json:"code"`
|
||||
Msg string `json:"message"`
|
||||
}
|
||||
|
||||
type AuthorizeAccountResponse struct {
|
||||
AccountID string `json:"accountId"`
|
||||
AuthToken string `json:"authorizationToken"`
|
||||
URI string `json:"apiUrl"`
|
||||
DownloadURI string `json:"downloadUrl"`
|
||||
MinPartSize int `json:"minimumPartSize"`
|
||||
}
|
||||
|
||||
type LifecycleRule struct {
|
||||
DaysHiddenUntilDeleted int `json:"daysFromHidingToDeleting,omitempty"`
|
||||
DaysNewUntilHidden int `json:"daysFromUploadingToHiding,omitempty"`
|
||||
Prefix string `json:"fileNamePrefix"`
|
||||
}
|
||||
|
||||
type CreateBucketRequest struct {
|
||||
AccountID string `json:"accountId"`
|
||||
Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType"`
|
||||
Info map[string]string `json:"bucketInfo"`
|
||||
LifecycleRules []LifecycleRule `json:"lifecycleRules"`
|
||||
}
|
||||
|
||||
type CreateBucketResponse struct {
|
||||
BucketID string `json:"bucketId"`
|
||||
Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType"`
|
||||
Info map[string]string `json:"bucketInfo"`
|
||||
LifecycleRules []LifecycleRule `json:"lifecycleRules"`
|
||||
Revision int `json:"revision"`
|
||||
}
|
||||
|
||||
type DeleteBucketRequest struct {
|
||||
AccountID string `json:"accountId"`
|
||||
BucketID string `json:"bucketId"`
|
||||
}
|
||||
|
||||
type ListBucketsRequest struct {
|
||||
AccountID string `json:"accountId"`
|
||||
}
|
||||
|
||||
type ListBucketsResponse struct {
|
||||
Buckets []CreateBucketResponse `json:"buckets"`
|
||||
}
|
||||
|
||||
type UpdateBucketRequest struct {
|
||||
AccountID string `json:"accountId"`
|
||||
BucketID string `json:"bucketId"`
|
||||
// bucketName is a required field according to
|
||||
// https://www.backblaze.com/b2/docs/b2_update_bucket.html.
|
||||
//
|
||||
// However, actually setting it returns 400: unknown field in
|
||||
// com.backblaze.modules.b2.data.UpdateBucketRequest: bucketName
|
||||
//
|
||||
//Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType,omitempty"`
|
||||
Info map[string]string `json:"bucketInfo,omitempty"`
|
||||
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
|
||||
IfRevisionIs int `json:"ifRevisionIs,omitempty"`
|
||||
}
|
||||
|
||||
type UpdateBucketResponse CreateBucketResponse
|
||||
|
||||
type GetUploadURLRequest struct {
|
||||
BucketID string `json:"bucketId"`
|
||||
}
|
||||
|
||||
type GetUploadURLResponse struct {
|
||||
URI string `json:"uploadUrl"`
|
||||
Token string `json:"authorizationToken"`
|
||||
}
|
||||
|
||||
type UploadFileResponse struct {
|
||||
FileID string `json:"fileId"`
|
||||
Timestamp int64 `json:"uploadTimestamp"`
|
||||
Action string `json:"action"`
|
||||
}
|
||||
|
||||
type DeleteFileVersionRequest struct {
|
||||
Name string `json:"fileName"`
|
||||
FileID string `json:"fileId"`
|
||||
}
|
||||
|
||||
type StartLargeFileRequest struct {
|
||||
BucketID string `json:"bucketId"`
|
||||
Name string `json:"fileName"`
|
||||
ContentType string `json:"contentType"`
|
||||
Info map[string]string `json:"fileInfo,omitempty"`
|
||||
}
|
||||
|
||||
type StartLargeFileResponse struct {
|
||||
ID string `json:"fileId"`
|
||||
}
|
||||
|
||||
type CancelLargeFileRequest struct {
|
||||
ID string `json:"fileId"`
|
||||
}
|
||||
|
||||
type ListUnfinishedLargeFilesRequest struct {
|
||||
BucketID string `json:"bucketId"`
|
||||
Continuation string `json:"startFileId,omitempty"`
|
||||
Count int `json:"maxFileCount,omitempty"`
|
||||
}
|
||||
|
||||
type ListUnfinishedLargeFilesResponse struct {
|
||||
NextID string `json:"nextFileId"`
|
||||
Files []struct {
|
||||
AccountID string `json:"accountId"`
|
||||
BucketID string `json:"bucketId"`
|
||||
Name string `json:"fileName"`
|
||||
ID string `json:"fileId"`
|
||||
Timestamp int64 `json:"uploadTimestamp"`
|
||||
ContentType string `json:"contentType"`
|
||||
Info map[string]string `json:"fileInfo,omitempty"`
|
||||
} `json:"files"`
|
||||
}
|
||||
|
||||
type ListPartsRequest struct {
|
||||
ID string `json:"fileId"`
|
||||
Start int `json:"startPartNumber"`
|
||||
Count int `json:"maxPartCount"`
|
||||
}
|
||||
|
||||
type ListPartsResponse struct {
|
||||
Next int `json:"nextPartNumber"`
|
||||
Parts []struct {
|
||||
ID string `json:"fileId"`
|
||||
Number int `json:"partNumber"`
|
||||
SHA1 string `json:"contentSha1"`
|
||||
Size int64 `json:"contentLength"`
|
||||
} `json:"parts"`
|
||||
}
|
||||
|
||||
type getUploadPartURLRequest struct {
|
||||
ID string `json:"fileId"`
|
||||
}
|
||||
|
||||
type getUploadPartURLResponse struct {
|
||||
URL string `json:"uploadUrl"`
|
||||
Token string `json:"authorizationToken"`
|
||||
}
|
||||
|
||||
type UploadPartResponse struct {
|
||||
ID string `json:"fileId"`
|
||||
PartNumber int `json:"partNumber"`
|
||||
Size int64 `json:"contentLength"`
|
||||
SHA1 string `json:"contentSha1"`
|
||||
}
|
||||
|
||||
type FinishLargeFileRequest struct {
|
||||
ID string `json:"fileId"`
|
||||
Hashes []string `json:"partSha1Array"`
|
||||
}
|
||||
|
||||
type FinishLargeFileResponse struct {
|
||||
Name string `json:"fileName"`
|
||||
FileID string `json:"fileId"`
|
||||
Timestamp int64 `json:"uploadTimestamp"`
|
||||
Action string `json:"action"`
|
||||
}
|
||||
|
||||
type ListFileNamesRequest struct {
|
||||
BucketID string `json:"bucketId"`
|
||||
Count int `json:"maxFileCount"`
|
||||
Continuation string `json:"startFileName,omitempty"`
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
Delimiter string `json:"delimiter,omitempty"`
|
||||
}
|
||||
|
||||
type ListFileNamesResponse struct {
|
||||
Continuation string `json:"nextFileName"`
|
||||
Files []GetFileInfoResponse `json:"files"`
|
||||
}
|
||||
|
||||
type ListFileVersionsRequest struct {
|
||||
BucketID string `json:"bucketId"`
|
||||
Count int `json:"maxFileCount"`
|
||||
StartName string `json:"startFileName,omitempty"`
|
||||
StartID string `json:"startFileId,omitempty"`
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
Delimiter string `json:"delimiter,omitempty"`
|
||||
}
|
||||
|
||||
type ListFileVersionsResponse struct {
|
||||
NextName string `json:"nextFileName"`
|
||||
NextID string `json:"nextFileId"`
|
||||
Files []GetFileInfoResponse `json:"files"`
|
||||
}
|
||||
|
||||
type HideFileRequest struct {
|
||||
BucketID string `json:"bucketId"`
|
||||
File string `json:"fileName"`
|
||||
}
|
||||
|
||||
type HideFileResponse struct {
|
||||
ID string `json:"fileId"`
|
||||
Timestamp int64 `json:"uploadTimestamp"`
|
||||
Action string `json:"action"`
|
||||
}
|
||||
|
||||
type GetFileInfoRequest struct {
|
||||
ID string `json:"fileId"`
|
||||
}
|
||||
|
||||
type GetFileInfoResponse struct {
|
||||
FileID string `json:"fileId"`
|
||||
Name string `json:"fileName"`
|
||||
SHA1 string `json:"contentSha1"`
|
||||
Size int64 `json:"contentLength"`
|
||||
ContentType string `json:"contentType"`
|
||||
Info map[string]string `json:"fileInfo"`
|
||||
Action string `json:"action"`
|
||||
Timestamp int64 `json:"uploadTimestamp"`
|
||||
}
|
||||
|
||||
type GetDownloadAuthorizationRequest struct {
|
||||
BucketID string `json:"bucketId"`
|
||||
Prefix string `json:"fileNamePrefix"`
|
||||
Valid int `json:"validDurationInSeconds"`
|
||||
}
|
||||
|
||||
type GetDownloadAuthorizationResponse struct {
|
||||
BucketID string `json:"bucketId"`
|
||||
Prefix string `json:"fileNamePrefix"`
|
||||
Token string `json:"authorizationToken"`
|
||||
}
|
54 vendor/github.com/minio/blazer/internal/blog/blog.go generated vendored
@ -1,54 +0,0 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package blog implements a private logger, in the manner of glog, without
// polluting the flag namespace or leaving files all over /tmp.
//
// It has almost no features, and a bunch of global state.
package blog

import (
	"log"
	"os"
	"strconv"
)

var level int32

type Verbose bool

func init() {
	lvl := os.Getenv("B2_LOG_LEVEL")
	i, err := strconv.ParseInt(lvl, 10, 32)
	if err != nil {
		return
	}
	level = int32(i)
}

func (v Verbose) Info(a ...interface{}) {
	if v {
		log.Print(a...)
	}
}

func (v Verbose) Infof(format string, a ...interface{}) {
	if v {
		log.Printf(format, a...)
	}
}

func V(target int32) Verbose {
	return Verbose(target <= level)
}
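Not part of the diff: a hypothetical sketch of how a caller inside the blazer module would gate verbose output on `B2_LOG_LEVEL` via `blog.V`. Note that the level is read once in `init`, so the environment variable must be set before the process starts.

```go
// Illustrative only: "internal" packages are importable solely from within
// github.com/minio/blazer itself, so this would live inside that module.
package main

import "github.com/minio/blazer/internal/blog"

func main() {
	// Run with B2_LOG_LEVEL=2 (or higher) to see this line; at a lower
	// level blog.V(2) is false and Infof is a no-op.
	blog.V(2).Infof("uploading %d parts", 4)
}
```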
23 vendor/github.com/pkg/errors/LICENSE generated vendored
@ -1,23 +0,0 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 vendor/github.com/pkg/errors/README.md generated vendored
@ -1,52 +0,0 @@
# errors [](https://travis-ci.org/pkg/errors) [](https://ci.appveyor.com/project/davecheney/errors/branch/master) [](http://godoc.org/github.com/pkg/errors) [](https://goreportcard.com/report/github.com/pkg/errors) [](https://sourcegraph.com/github.com/pkg/errors?badge)

Package errors provides simple error handling primitives.

`go get github.com/pkg/errors`

The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
	return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.

## Adding context to an error

The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
	return errors.Wrap(err, "read failed")
}
```

## Retrieving the cause of an error

Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
	Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
	// handle specifically
default:
	// unknown error
}
```

[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).

## Contributing

We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.

Before proposing a change, please discuss your change by raising an issue.

## License

BSD-2-Clause
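Not part of the original README: a self-contained sketch tying the two snippets above together, wrapping a sentinel error and recovering it with `errors.Cause`. The sentinel and function names are made up for the example.

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = errors.New("not found") // hypothetical sentinel for the example

func lookup(key string) error {
	// Wrap annotates the sentinel with context and a stack trace.
	return errors.Wrap(errNotFound, "lookup "+key)
}

func main() {
	err := lookup("bucket-policy")
	fmt.Println(err)                              // lookup bucket-policy: not found
	fmt.Println(errors.Cause(err) == errNotFound) // true
}
```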
32 vendor/github.com/pkg/errors/appveyor.yml generated vendored
@ -1,32 +0,0 @@
version: build-{build}.{branch}

clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed

environment:
  GOPATH: C:\gopath

platform:
  - x64

# http://www.appveyor.com/docs/installed-software
install:
  # some helpful output for debugging builds
  - go version
  - go env
  # pre-installed MinGW at C:\MinGW is 32bit only
  # but MSYS2 at C:\msys64 has mingw64
  - set PATH=C:\msys64\mingw64\bin;%PATH%
  - gcc --version
  - g++ --version

build_script:
  - go install -v ./...

test_script:
  - set PATH=C:\gopath\bin;%PATH%
  - go test -v ./...

#artifacts:
#  - path: '%GOPATH%\bin\*.exe'
deploy: off
269 vendor/github.com/pkg/errors/errors.go generated vendored
@ -1,269 +0,0 @@
|
||||
// Package errors provides simple error handling primitives.
|
||||
//
|
||||
// The traditional error handling idiom in Go is roughly akin to
|
||||
//
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// which applied recursively up the call stack results in error reports
|
||||
// without context or debugging information. The errors package allows
|
||||
// programmers to add context to the failure path in their code in a way
|
||||
// that does not destroy the original value of the error.
|
||||
//
|
||||
// Adding context to an error
|
||||
//
|
||||
// The errors.Wrap function returns a new error that adds context to the
|
||||
// original error by recording a stack trace at the point Wrap is called,
|
||||
// and the supplied message. For example
|
||||
//
|
||||
// _, err := ioutil.ReadAll(r)
|
||||
// if err != nil {
|
||||
// return errors.Wrap(err, "read failed")
|
||||
// }
|
||||
//
|
||||
// If additional control is required the errors.WithStack and errors.WithMessage
|
||||
// functions destructure errors.Wrap into its component operations of annotating
|
||||
// an error with a stack trace and an a message, respectively.
|
||||
//
|
||||
// Retrieving the cause of an error
|
||||
//
|
||||
// Using errors.Wrap constructs a stack of errors, adding context to the
|
||||
// preceding error. Depending on the nature of the error it may be necessary
|
||||
// to reverse the operation of errors.Wrap to retrieve the original error
|
||||
// for inspection. Any error value which implements this interface
|
||||
//
|
||||
// type causer interface {
|
||||
// Cause() error
|
||||
// }
|
||||
//
|
||||
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
|
||||
// the topmost error which does not implement causer, which is assumed to be
|
||||
// the original cause. For example:
|
||||
//
|
||||
// switch err := errors.Cause(err).(type) {
|
||||
// case *MyError:
|
||||
// // handle specifically
|
||||
// default:
|
||||
// // unknown error
|
||||
// }
|
||||
//
|
||||
// causer interface is not exported by this package, but is considered a part
|
||||
// of stable public API.
|
||||
//
|
||||
// Formatted printing of errors
|
||||
//
|
||||
// All error values returned from this package implement fmt.Formatter and can
|
||||
// be formatted by the fmt package. The following verbs are supported
|
||||
//
|
||||
// %s print the error. If the error has a Cause it will be
|
||||
// printed recursively
|
||||
// %v see %s
|
||||
// %+v extended format. Each Frame of the error's StackTrace will
|
||||
// be printed in detail.
|
||||
//
|
||||
// Retrieving the stack trace of an error or wrapper
|
||||
//
|
||||
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
|
||||
// invoked. This information can be retrieved with the following interface.
|
||||
//
|
||||
// type stackTracer interface {
|
||||
// StackTrace() errors.StackTrace
|
||||
// }
|
||||
//
|
||||
// Where errors.StackTrace is defined as
|
||||
//
|
||||
// type StackTrace []Frame
|
||||
//
|
||||
// The Frame type represents a call site in the stack trace. Frame supports
|
||||
// the fmt.Formatter interface that can be used for printing information about
|
||||
// the stack trace of this error. For example:
|
||||
//
|
||||
// if err, ok := err.(stackTracer); ok {
|
||||
// for _, f := range err.StackTrace() {
|
||||
// fmt.Printf("%+s:%d", f)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// stackTracer interface is not exported by this package, but is considered a part
|
||||
// of stable public API.
|
||||
//
|
||||
// See the documentation for Frame.Format for more details.
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// New returns an error with the supplied message.
|
||||
// New also records the stack trace at the point it was called.
|
||||
func New(message string) error {
|
||||
return &fundamental{
|
||||
msg: message,
|
||||
stack: callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// Errorf formats according to a format specifier and returns the string
|
||||
// as a value that satisfies error.
|
||||
// Errorf also records the stack trace at the point it was called.
|
||||
func Errorf(format string, args ...interface{}) error {
|
||||
return &fundamental{
|
||||
msg: fmt.Sprintf(format, args...),
|
||||
stack: callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// fundamental is an error that has a message and a stack, but no caller.
|
||||
type fundamental struct {
|
||||
msg string
|
||||
*stack
|
||||
}
|
||||
|
||||
func (f *fundamental) Error() string { return f.msg }
|
||||
|
||||
func (f *fundamental) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
if s.Flag('+') {
|
||||
io.WriteString(s, f.msg)
|
||||
f.stack.Format(s, verb)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case 's':
|
||||
io.WriteString(s, f.msg)
|
||||
case 'q':
|
||||
fmt.Fprintf(s, "%q", f.msg)
|
||||
}
|
||||
}
|
||||
|
||||
// WithStack annotates err with a stack trace at the point WithStack was called.
|
||||
// If err is nil, WithStack returns nil.
|
||||
func WithStack(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &withStack{
|
||||
err,
|
||||
callers(),
|
||||
}
|
||||
}
|
||||
|
||||
type withStack struct {
|
||||
error
|
||||
*stack
|
||||
}
|
||||
|
||||
func (w *withStack) Cause() error { return w.error }
|
||||
|
||||
func (w *withStack) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
if s.Flag('+') {
|
||||
fmt.Fprintf(s, "%+v", w.Cause())
|
||||
w.stack.Format(s, verb)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case 's':
|
||||
io.WriteString(s, w.Error())
|
||||
case 'q':
|
||||
fmt.Fprintf(s, "%q", w.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap returns an error annotating err with a stack trace
|
||||
// at the point Wrap is called, and the supplied message.
|
||||
// If err is nil, Wrap returns nil.
|
||||
func Wrap(err error, message string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
err = &withMessage{
|
||||
cause: err,
|
||||
msg: message,
|
||||
}
|
||||
return &withStack{
|
||||
err,
|
||||
callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapf returns an error annotating err with a stack trace
|
||||
// at the point Wrapf is call, and the format specifier.
|
||||
// If err is nil, Wrapf returns nil.
|
||||
func Wrapf(err error, format string, args ...interface{}) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
err = &withMessage{
|
||||
cause: err,
|
||||
msg: fmt.Sprintf(format, args...),
|
||||
}
|
||||
return &withStack{
|
||||
err,
|
||||
callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// WithMessage annotates err with a new message.
|
||||
// If err is nil, WithMessage returns nil.
|
||||
func WithMessage(err error, message string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &withMessage{
|
||||
cause: err,
|
||||
msg: message,
|
||||
}
|
||||
}
|
||||
|
||||
type withMessage struct {
|
||||
cause error
|
||||
msg string
|
||||
}
|
||||
|
||||
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
|
||||
func (w *withMessage) Cause() error { return w.cause }
|
||||
|
||||
func (w *withMessage) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
if s.Flag('+') {
|
||||
fmt.Fprintf(s, "%+v\n", w.Cause())
|
||||
io.WriteString(s, w.msg)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case 's', 'q':
|
||||
io.WriteString(s, w.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Cause returns the underlying cause of the error, if possible.
|
||||
// An error value has a cause if it implements the following
|
||||
// interface:
|
||||
//
|
||||
// type causer interface {
|
||||
// Cause() error
|
||||
// }
|
||||
//
|
||||
// If the error does not implement Cause, the original error will
|
||||
// be returned. If the error is nil, nil will be returned without further
|
||||
// investigation.
|
||||
func Cause(err error) error {
|
||||
type causer interface {
|
||||
Cause() error
|
||||
}
|
||||
|
||||
for err != nil {
|
||||
cause, ok := err.(causer)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
err = cause.Cause()
|
||||
}
|
||||
return err
|
||||
}
|
187 vendor/github.com/pkg/errors/stack.go generated vendored
@ -1,187 +0,0 @@
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Frame represents a program counter inside a stack frame.
|
||||
type Frame uintptr
|
||||
|
||||
// pc returns the program counter for this frame;
|
||||
// multiple frames may have the same PC value.
|
||||
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
|
||||
|
||||
// file returns the full path to the file that contains the
|
||||
// function for this Frame's pc.
|
||||
func (f Frame) file() string {
|
||||
fn := runtime.FuncForPC(f.pc())
|
||||
if fn == nil {
|
||||
return "unknown"
|
||||
}
|
||||
file, _ := fn.FileLine(f.pc())
|
||||
return file
|
||||
}
|
||||
|
||||
// line returns the line number of source code of the
|
||||
// function for this Frame's pc.
|
||||
func (f Frame) line() int {
|
||||
fn := runtime.FuncForPC(f.pc())
|
||||
if fn == nil {
|
||||
return 0
|
||||
}
|
||||
_, line := fn.FileLine(f.pc())
|
||||
return line
|
||||
}
|
||||
|
||||
// Format formats the frame according to the fmt.Formatter interface.
|
||||
//
|
||||
// %s source file
|
||||
// %d source line
|
||||
// %n function name
|
||||
// %v equivalent to %s:%d
|
||||
//
|
||||
// Format accepts flags that alter the printing of some verbs, as follows:
|
||||
//
|
||||
// %+s function name and path of source file relative to the compile time
|
||||
// GOPATH separated by \n\t (<funcname>\n\t<path>)
|
||||
// %+v equivalent to %+s:%d
|
||||
func (f Frame) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 's':
|
||||
switch {
|
||||
case s.Flag('+'):
|
||||
pc := f.pc()
|
||||
fn := runtime.FuncForPC(pc)
|
||||
if fn == nil {
|
||||
io.WriteString(s, "unknown")
|
||||
} else {
|
||||
file, _ := fn.FileLine(pc)
|
||||
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
|
||||
}
|
||||
default:
|
||||
io.WriteString(s, path.Base(f.file()))
|
||||
}
|
||||
case 'd':
|
||||
fmt.Fprintf(s, "%d", f.line())
|
||||
case 'n':
|
||||
name := runtime.FuncForPC(f.pc()).Name()
|
||||
io.WriteString(s, funcname(name))
|
||||
case 'v':
|
||||
f.Format(s, 's')
|
||||
io.WriteString(s, ":")
|
||||
f.Format(s, 'd')
|
||||
}
|
||||
}
|
||||
|
||||
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
|
||||
type StackTrace []Frame
|
||||
|
||||
// Format formats the stack of Frames according to the fmt.Formatter interface.
|
||||
//
|
||||
// %s lists source files for each Frame in the stack
|
||||
// %v lists the source file and line number for each Frame in the stack
|
||||
//
|
||||
// Format accepts flags that alter the printing of some verbs, as follows:
|
||||
//
|
||||
// %+v Prints filename, function, and line number for each Frame in the stack.
|
||||
func (st StackTrace) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
switch {
|
||||
case s.Flag('+'):
|
||||
for _, f := range st {
|
||||
fmt.Fprintf(s, "\n%+v", f)
|
||||
}
|
||||
case s.Flag('#'):
|
||||
fmt.Fprintf(s, "%#v", []Frame(st))
|
||||
default:
|
||||
fmt.Fprintf(s, "%v", []Frame(st))
|
||||
}
|
||||
case 's':
|
||||
fmt.Fprintf(s, "%s", []Frame(st))
|
||||
}
|
||||
}
|
||||
|
||||
// stack represents a stack of program counters.
|
||||
type stack []uintptr
|
||||
|
||||
func (s *stack) Format(st fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
switch {
|
||||
case st.Flag('+'):
|
||||
for _, pc := range *s {
|
||||
f := Frame(pc)
|
||||
fmt.Fprintf(st, "\n%+v", f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stack) StackTrace() StackTrace {
|
||||
f := make([]Frame, len(*s))
|
||||
for i := 0; i < len(f); i++ {
|
||||
f[i] = Frame((*s)[i])
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func callers() *stack {
|
||||
const depth = 32
|
||||
var pcs [depth]uintptr
|
||||
n := runtime.Callers(3, pcs[:])
|
||||
var st stack = pcs[0:n]
|
||||
return &st
|
||||
}
|
||||
|
||||
// funcname removes the path prefix component of a function's name reported by func.Name().
|
||||
func funcname(name string) string {
|
||||
i := strings.LastIndex(name, "/")
|
||||
name = name[i+1:]
|
||||
i = strings.Index(name, ".")
|
||||
return name[i+1:]
|
||||
}
|
||||
|
||||
func trimGOPATH(name, file string) string {
|
||||
// Here we want to get the source file path relative to the compile time
|
||||
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
|
||||
// GOPATH at runtime, but we can infer the number of path segments in the
|
||||
// GOPATH. We note that fn.Name() returns the function name qualified by
|
||||
// the import path, which does not include the GOPATH. Thus we can trim
|
||||
// segments from the beginning of the file path until the number of path
|
||||
// separators remaining is one more than the number of path separators in
|
||||
// the function name. For example, given:
|
||||
//
|
||||
// GOPATH /home/user
|
||||
// file /home/user/src/pkg/sub/file.go
|
||||
// fn.Name() pkg/sub.Type.Method
|
||||
//
|
||||
// We want to produce:
|
||||
//
|
||||
// pkg/sub/file.go
|
||||
//
|
||||
// From this we can easily see that fn.Name() has one less path separator
|
||||
// than our desired output. We count separators from the end of the file
|
||||
// path until it finds two more than in the function name and then move
|
||||
// one character forward to preserve the initial path segment without a
|
||||
// leading separator.
|
||||
const sep = "/"
|
||||
goal := strings.Count(name, sep) + 2
|
||||
i := len(file)
|
||||
for n := 0; n < goal; n++ {
|
||||
i = strings.LastIndex(file[:i], sep)
|
||||
if i == -1 {
|
||||
// not enough separators found, set i so that the slice expression
|
||||
// below leaves file unmodified
|
||||
i = -len(sep)
|
||||
break
|
||||
}
|
||||
}
|
||||
// get back to 0 or trim the leading separator
|
||||
file = file[i+len(sep):]
|
||||
return file
|
||||
}
|
8 vendor/golang.org/x/crypto/curve25519/const_amd64.h generated vendored
@ -1,8 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html

#define REDMASK51     0x0007FFFFFFFFFFFF
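An illustrative aside, not in the diff: `REDMASK51` is the low-51-bit mask (2^51 − 1) used to reduce limbs in the assembly's radix-2^51 representation; the tiny check below confirms the constant's value.

```go
package main

import "fmt"

func main() {
	const redmask51 = 0x0007FFFFFFFFFFFF // value of REDMASK51 above
	fmt.Println(redmask51 == 1<<51-1)    // true: the mask keeps the low 51 bits of a limb
}
```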
20 vendor/golang.org/x/crypto/curve25519/const_amd64.s generated vendored
@ -1,20 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html

// +build amd64,!gccgo,!appengine

// These constants cannot be encoded in non-MOVQ immediates.
// We access them directly from memory instead.

DATA ·_121666_213(SB)/8, $996687872
GLOBL ·_121666_213(SB), 8, $8

DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
GLOBL ·_2P0(SB), 8, $8

DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
GLOBL ·_2P1234(SB), 8, $8
65 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s generated vendored
@ -1,65 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64,!gccgo,!appengine

// func cswap(inout *[4][5]uint64, v uint64)
TEXT ·cswap(SB),7,$0
	MOVQ inout+0(FP),DI
	MOVQ v+8(FP),SI

	SUBQ $1, SI
	NOTQ SI
	MOVQ SI, X15
	PSHUFD $0x44, X15, X15

	MOVOU 0(DI), X0
	MOVOU 16(DI), X2
	MOVOU 32(DI), X4
	MOVOU 48(DI), X6
	MOVOU 64(DI), X8
	MOVOU 80(DI), X1
	MOVOU 96(DI), X3
	MOVOU 112(DI), X5
	MOVOU 128(DI), X7
	MOVOU 144(DI), X9

	MOVO X1, X10
	MOVO X3, X11
	MOVO X5, X12
	MOVO X7, X13
	MOVO X9, X14

	PXOR X0, X10
	PXOR X2, X11
	PXOR X4, X12
	PXOR X6, X13
	PXOR X8, X14
	PAND X15, X10
	PAND X15, X11
	PAND X15, X12
	PAND X15, X13
	PAND X15, X14
	PXOR X10, X0
	PXOR X10, X1
	PXOR X11, X2
	PXOR X11, X3
	PXOR X12, X4
	PXOR X12, X5
	PXOR X13, X6
	PXOR X13, X7
	PXOR X14, X8
	PXOR X14, X9

	MOVOU X0, 0(DI)
	MOVOU X2, 16(DI)
	MOVOU X4, 32(DI)
	MOVOU X6, 48(DI)
	MOVOU X8, 64(DI)
	MOVOU X1, 80(DI)
	MOVOU X3, 96(DI)
	MOVOU X5, 112(DI)
	MOVOU X7, 128(DI)
	MOVOU X9, 144(DI)
	RET
834 vendor/golang.org/x/crypto/curve25519/curve25519.go generated vendored
@ -1,834 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// We have an implementation in amd64 assembly so this code is only run on
|
||||
// non-amd64 platforms. The amd64 assembly does not support gccgo.
|
||||
// +build !amd64 gccgo appengine
|
||||
|
||||
package curve25519
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// This code is a port of the public domain, "ref10" implementation of
|
||||
// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
|
||||
|
||||
// fieldElement represents an element of the field GF(2^255 - 19). An element
|
||||
// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
|
||||
// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
|
||||
// context.
|
||||
type fieldElement [10]int32
|
||||
|
||||
func feZero(fe *fieldElement) {
|
||||
for i := range fe {
|
||||
fe[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
func feOne(fe *fieldElement) {
|
||||
feZero(fe)
|
||||
fe[0] = 1
|
||||
}
|
||||
|
||||
func feAdd(dst, a, b *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = a[i] + b[i]
|
||||
}
|
||||
}
|
||||
|
||||
func feSub(dst, a, b *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = a[i] - b[i]
|
||||
}
|
||||
}
|
||||
|
||||
func feCopy(dst, src *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = src[i]
|
||||
}
|
||||
}
|
||||
|
||||
// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
|
||||
//
|
||||
// Preconditions: b in {0,1}.
|
||||
func feCSwap(f, g *fieldElement, b int32) {
|
||||
b = -b
|
||||
for i := range f {
|
||||
t := b & (f[i] ^ g[i])
|
||||
f[i] ^= t
|
||||
g[i] ^= t
|
||||
}
|
||||
}
|
||||
|
||||
// load3 reads a 24-bit, little-endian value from in.
|
||||
func load3(in []byte) int64 {
|
||||
var r int64
|
||||
r = int64(in[0])
|
||||
r |= int64(in[1]) << 8
|
||||
r |= int64(in[2]) << 16
|
||||
return r
|
||||
}
|
||||
|
||||
// load4 reads a 32-bit, little-endian value from in.
|
||||
func load4(in []byte) int64 {
|
||||
return int64(binary.LittleEndian.Uint32(in))
|
||||
}
|
||||
|
||||
func feFromBytes(dst *fieldElement, src *[32]byte) {
|
||||
h0 := load4(src[:])
|
||||
h1 := load3(src[4:]) << 6
|
||||
h2 := load3(src[7:]) << 5
|
||||
h3 := load3(src[10:]) << 3
|
||||
h4 := load3(src[13:]) << 2
|
||||
h5 := load4(src[16:])
|
||||
h6 := load3(src[20:]) << 7
|
||||
h7 := load3(src[23:]) << 5
|
||||
h8 := load3(src[26:]) << 4
|
||||
h9 := load3(src[29:]) << 2
|
||||
|
||||
var carry [10]int64
|
||||
carry[9] = (h9 + 1<<24) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
carry[1] = (h1 + 1<<24) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[3] = (h3 + 1<<24) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[5] = (h5 + 1<<24) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
carry[7] = (h7 + 1<<24) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[0] = (h0 + 1<<25) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[2] = (h2 + 1<<25) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[4] = (h4 + 1<<25) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[6] = (h6 + 1<<25) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
carry[8] = (h8 + 1<<25) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
dst[0] = int32(h0)
|
||||
dst[1] = int32(h1)
|
||||
dst[2] = int32(h2)
|
||||
dst[3] = int32(h3)
|
||||
dst[4] = int32(h4)
|
||||
dst[5] = int32(h5)
|
||||
dst[6] = int32(h6)
|
||||
dst[7] = int32(h7)
|
||||
dst[8] = int32(h8)
|
||||
dst[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feToBytes marshals h to s.
|
||||
// Preconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
//
|
||||
// Write p=2^255-19; q=floor(h/p).
|
||||
// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
|
||||
//
|
||||
// Proof:
|
||||
// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
|
||||
// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
|
||||
//
|
||||
// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
|
||||
// Then 0<y<1.
|
||||
//
|
||||
// Write r=h-pq.
|
||||
// Have 0<=r<=p-1=2^255-20.
|
||||
// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
|
||||
//
|
||||
// Write x=r+19(2^-255)r+y.
|
||||
// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
|
||||
//
|
||||
// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
|
||||
// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
|
||||
func feToBytes(s *[32]byte, h *fieldElement) {
|
||||
var carry [10]int32
|
||||
|
||||
q := (19*h[9] + (1 << 24)) >> 25
|
||||
q = (h[0] + q) >> 26
|
||||
q = (h[1] + q) >> 25
|
||||
q = (h[2] + q) >> 26
|
||||
q = (h[3] + q) >> 25
|
||||
q = (h[4] + q) >> 26
|
||||
q = (h[5] + q) >> 25
|
||||
q = (h[6] + q) >> 26
|
||||
q = (h[7] + q) >> 25
|
||||
q = (h[8] + q) >> 26
|
||||
q = (h[9] + q) >> 25
|
||||
|
||||
// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
|
||||
h[0] += 19 * q
|
||||
// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
|
||||
|
||||
carry[0] = h[0] >> 26
|
||||
h[1] += carry[0]
|
||||
h[0] -= carry[0] << 26
|
||||
carry[1] = h[1] >> 25
|
||||
h[2] += carry[1]
|
||||
h[1] -= carry[1] << 25
|
||||
carry[2] = h[2] >> 26
|
||||
h[3] += carry[2]
|
||||
h[2] -= carry[2] << 26
|
||||
carry[3] = h[3] >> 25
|
||||
h[4] += carry[3]
|
||||
h[3] -= carry[3] << 25
|
||||
carry[4] = h[4] >> 26
|
||||
h[5] += carry[4]
|
||||
h[4] -= carry[4] << 26
|
||||
carry[5] = h[5] >> 25
|
||||
h[6] += carry[5]
|
||||
h[5] -= carry[5] << 25
|
||||
carry[6] = h[6] >> 26
|
||||
h[7] += carry[6]
|
||||
h[6] -= carry[6] << 26
|
||||
carry[7] = h[7] >> 25
|
||||
h[8] += carry[7]
|
||||
h[7] -= carry[7] << 25
|
||||
carry[8] = h[8] >> 26
|
||||
h[9] += carry[8]
|
||||
h[8] -= carry[8] << 26
|
||||
carry[9] = h[9] >> 25
|
||||
h[9] -= carry[9] << 25
|
||||
// h10 = carry9
|
||||
|
||||
// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
|
||||
// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
|
||||
// evidently 2^255 h10-2^255 q = 0.
|
||||
// Goal: Output h[0]+...+2^230 h[9].
|
||||
|
||||
s[0] = byte(h[0] >> 0)
|
||||
s[1] = byte(h[0] >> 8)
|
||||
s[2] = byte(h[0] >> 16)
|
||||
s[3] = byte((h[0] >> 24) | (h[1] << 2))
|
||||
s[4] = byte(h[1] >> 6)
|
||||
s[5] = byte(h[1] >> 14)
|
||||
s[6] = byte((h[1] >> 22) | (h[2] << 3))
|
||||
s[7] = byte(h[2] >> 5)
|
||||
s[8] = byte(h[2] >> 13)
|
||||
s[9] = byte((h[2] >> 21) | (h[3] << 5))
|
||||
s[10] = byte(h[3] >> 3)
|
||||
s[11] = byte(h[3] >> 11)
|
||||
s[12] = byte((h[3] >> 19) | (h[4] << 6))
|
||||
s[13] = byte(h[4] >> 2)
|
||||
s[14] = byte(h[4] >> 10)
|
||||
s[15] = byte(h[4] >> 18)
|
||||
s[16] = byte(h[5] >> 0)
|
||||
s[17] = byte(h[5] >> 8)
|
||||
s[18] = byte(h[5] >> 16)
|
||||
s[19] = byte((h[5] >> 24) | (h[6] << 1))
|
||||
s[20] = byte(h[6] >> 7)
|
||||
s[21] = byte(h[6] >> 15)
|
||||
s[22] = byte((h[6] >> 23) | (h[7] << 3))
|
||||
s[23] = byte(h[7] >> 5)
|
||||
s[24] = byte(h[7] >> 13)
|
||||
s[25] = byte((h[7] >> 21) | (h[8] << 4))
|
||||
s[26] = byte(h[8] >> 4)
|
||||
s[27] = byte(h[8] >> 12)
|
||||
s[28] = byte((h[8] >> 20) | (h[9] << 6))
|
||||
s[29] = byte(h[9] >> 2)
|
||||
s[30] = byte(h[9] >> 10)
|
||||
s[31] = byte(h[9] >> 18)
|
||||
}
|
||||
|
||||
// feMul calculates h = f * g
|
||||
// Can overlap h with f or g.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
//
|
||||
// Notes on implementation strategy:
|
||||
//
|
||||
// Using schoolbook multiplication.
|
||||
// Karatsuba would save a little in some cost models.
|
||||
//
|
||||
// Most multiplications by 2 and 19 are 32-bit precomputations;
|
||||
// cheaper than 64-bit postcomputations.
|
||||
//
|
||||
// There is one remaining multiplication by 19 in the carry chain;
|
||||
// one *19 precomputation can be merged into this,
|
||||
// but the resulting data flow is considerably less clean.
|
||||
//
|
||||
// There are 12 carries below.
|
||||
// 10 of them are 2-way parallelizable and vectorizable.
|
||||
// Can get away with 11 carries, but then data flow is much deeper.
|
||||
//
|
||||
// With tighter constraints on inputs can squeeze carries into int32.
|
||||
func feMul(h, f, g *fieldElement) {
|
||||
f0 := f[0]
|
||||
f1 := f[1]
|
||||
f2 := f[2]
|
||||
f3 := f[3]
|
||||
f4 := f[4]
|
||||
f5 := f[5]
|
||||
f6 := f[6]
|
||||
f7 := f[7]
|
||||
f8 := f[8]
|
||||
f9 := f[9]
|
||||
g0 := g[0]
|
||||
g1 := g[1]
|
||||
g2 := g[2]
|
||||
g3 := g[3]
|
||||
g4 := g[4]
|
||||
g5 := g[5]
|
||||
g6 := g[6]
|
||||
g7 := g[7]
|
||||
g8 := g[8]
|
||||
g9 := g[9]
|
||||
g1_19 := 19 * g1 // 1.4*2^29
|
||||
g2_19 := 19 * g2 // 1.4*2^30; still ok
|
||||
g3_19 := 19 * g3
|
||||
g4_19 := 19 * g4
|
||||
g5_19 := 19 * g5
|
||||
g6_19 := 19 * g6
|
||||
g7_19 := 19 * g7
|
||||
g8_19 := 19 * g8
|
||||
g9_19 := 19 * g9
|
||||
f1_2 := 2 * f1
|
||||
f3_2 := 2 * f3
|
||||
f5_2 := 2 * f5
|
||||
f7_2 := 2 * f7
|
||||
f9_2 := 2 * f9
|
||||
f0g0 := int64(f0) * int64(g0)
|
||||
f0g1 := int64(f0) * int64(g1)
|
||||
f0g2 := int64(f0) * int64(g2)
|
||||
f0g3 := int64(f0) * int64(g3)
|
||||
f0g4 := int64(f0) * int64(g4)
|
||||
f0g5 := int64(f0) * int64(g5)
|
||||
f0g6 := int64(f0) * int64(g6)
|
||||
f0g7 := int64(f0) * int64(g7)
|
||||
f0g8 := int64(f0) * int64(g8)
|
||||
f0g9 := int64(f0) * int64(g9)
|
||||
f1g0 := int64(f1) * int64(g0)
|
||||
f1g1_2 := int64(f1_2) * int64(g1)
|
||||
f1g2 := int64(f1) * int64(g2)
|
||||
f1g3_2 := int64(f1_2) * int64(g3)
|
||||
f1g4 := int64(f1) * int64(g4)
|
||||
f1g5_2 := int64(f1_2) * int64(g5)
|
||||
f1g6 := int64(f1) * int64(g6)
|
||||
f1g7_2 := int64(f1_2) * int64(g7)
|
||||
f1g8 := int64(f1) * int64(g8)
|
||||
f1g9_38 := int64(f1_2) * int64(g9_19)
|
||||
f2g0 := int64(f2) * int64(g0)
|
||||
f2g1 := int64(f2) * int64(g1)
|
||||
f2g2 := int64(f2) * int64(g2)
|
||||
f2g3 := int64(f2) * int64(g3)
|
||||
f2g4 := int64(f2) * int64(g4)
|
||||
f2g5 := int64(f2) * int64(g5)
|
||||
f2g6 := int64(f2) * int64(g6)
|
||||
f2g7 := int64(f2) * int64(g7)
|
||||
f2g8_19 := int64(f2) * int64(g8_19)
|
||||
f2g9_19 := int64(f2) * int64(g9_19)
|
||||
f3g0 := int64(f3) * int64(g0)
|
||||
f3g1_2 := int64(f3_2) * int64(g1)
|
||||
f3g2 := int64(f3) * int64(g2)
|
||||
f3g3_2 := int64(f3_2) * int64(g3)
|
||||
f3g4 := int64(f3) * int64(g4)
|
||||
f3g5_2 := int64(f3_2) * int64(g5)
|
||||
f3g6 := int64(f3) * int64(g6)
|
||||
f3g7_38 := int64(f3_2) * int64(g7_19)
|
||||
f3g8_19 := int64(f3) * int64(g8_19)
|
||||
f3g9_38 := int64(f3_2) * int64(g9_19)
|
||||
f4g0 := int64(f4) * int64(g0)
|
||||
f4g1 := int64(f4) * int64(g1)
|
||||
f4g2 := int64(f4) * int64(g2)
|
||||
f4g3 := int64(f4) * int64(g3)
|
||||
f4g4 := int64(f4) * int64(g4)
|
||||
f4g5 := int64(f4) * int64(g5)
|
||||
f4g6_19 := int64(f4) * int64(g6_19)
|
||||
f4g7_19 := int64(f4) * int64(g7_19)
|
||||
f4g8_19 := int64(f4) * int64(g8_19)
|
||||
f4g9_19 := int64(f4) * int64(g9_19)
|
||||
f5g0 := int64(f5) * int64(g0)
|
||||
f5g1_2 := int64(f5_2) * int64(g1)
|
||||
f5g2 := int64(f5) * int64(g2)
|
||||
f5g3_2 := int64(f5_2) * int64(g3)
|
||||
f5g4 := int64(f5) * int64(g4)
|
||||
f5g5_38 := int64(f5_2) * int64(g5_19)
|
||||
f5g6_19 := int64(f5) * int64(g6_19)
|
||||
f5g7_38 := int64(f5_2) * int64(g7_19)
|
||||
f5g8_19 := int64(f5) * int64(g8_19)
|
||||
f5g9_38 := int64(f5_2) * int64(g9_19)
|
||||
f6g0 := int64(f6) * int64(g0)
|
||||
f6g1 := int64(f6) * int64(g1)
|
||||
f6g2 := int64(f6) * int64(g2)
|
||||
f6g3 := int64(f6) * int64(g3)
|
||||
f6g4_19 := int64(f6) * int64(g4_19)
|
||||
f6g5_19 := int64(f6) * int64(g5_19)
|
||||
f6g6_19 := int64(f6) * int64(g6_19)
|
||||
f6g7_19 := int64(f6) * int64(g7_19)
|
||||
f6g8_19 := int64(f6) * int64(g8_19)
|
||||
f6g9_19 := int64(f6) * int64(g9_19)
|
||||
f7g0 := int64(f7) * int64(g0)
|
||||
f7g1_2 := int64(f7_2) * int64(g1)
|
||||
f7g2 := int64(f7) * int64(g2)
|
||||
f7g3_38 := int64(f7_2) * int64(g3_19)
|
||||
f7g4_19 := int64(f7) * int64(g4_19)
|
||||
f7g5_38 := int64(f7_2) * int64(g5_19)
|
||||
f7g6_19 := int64(f7) * int64(g6_19)
|
||||
f7g7_38 := int64(f7_2) * int64(g7_19)
|
||||
f7g8_19 := int64(f7) * int64(g8_19)
|
||||
f7g9_38 := int64(f7_2) * int64(g9_19)
|
||||
f8g0 := int64(f8) * int64(g0)
|
||||
f8g1 := int64(f8) * int64(g1)
|
||||
f8g2_19 := int64(f8) * int64(g2_19)
|
||||
f8g3_19 := int64(f8) * int64(g3_19)
|
||||
f8g4_19 := int64(f8) * int64(g4_19)
|
||||
f8g5_19 := int64(f8) * int64(g5_19)
|
||||
f8g6_19 := int64(f8) * int64(g6_19)
|
||||
f8g7_19 := int64(f8) * int64(g7_19)
|
||||
f8g8_19 := int64(f8) * int64(g8_19)
|
||||
f8g9_19 := int64(f8) * int64(g9_19)
|
||||
f9g0 := int64(f9) * int64(g0)
|
||||
f9g1_38 := int64(f9_2) * int64(g1_19)
|
||||
f9g2_19 := int64(f9) * int64(g2_19)
|
||||
f9g3_38 := int64(f9_2) * int64(g3_19)
|
||||
f9g4_19 := int64(f9) * int64(g4_19)
|
||||
f9g5_38 := int64(f9_2) * int64(g5_19)
|
||||
f9g6_19 := int64(f9) * int64(g6_19)
|
||||
f9g7_38 := int64(f9_2) * int64(g7_19)
|
||||
f9g8_19 := int64(f9) * int64(g8_19)
|
||||
f9g9_38 := int64(f9_2) * int64(g9_19)
|
||||
h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
|
||||
h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
|
||||
h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
|
||||
h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
|
||||
h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
|
||||
h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
|
||||
h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
|
||||
h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
|
||||
h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
|
||||
h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
|
||||
var carry [10]int64
|
||||
|
||||
// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
|
||||
// i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
|
||||
// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
|
||||
// i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
// |h0| <= 2^25
|
||||
// |h4| <= 2^25
|
||||
// |h1| <= 1.51*2^58
|
||||
// |h5| <= 1.51*2^58
|
||||
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
// |h1| <= 2^24; from now on fits into int32
|
||||
// |h5| <= 2^24; from now on fits into int32
|
||||
// |h2| <= 1.21*2^59
|
||||
// |h6| <= 1.21*2^59
|
||||
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
// |h2| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h6| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h3| <= 1.51*2^58
|
||||
// |h7| <= 1.51*2^58
|
||||
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
// |h3| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h7| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h4| <= 1.52*2^33
|
||||
// |h8| <= 1.52*2^33
|
||||
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
// |h4| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h8| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h5| <= 1.01*2^24
|
||||
// |h9| <= 1.51*2^58
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
// |h9| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h0| <= 1.8*2^37
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
// |h0| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h1| <= 1.01*2^24
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
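The ten-limb layout that feMul relies on is easiest to see with an integer cross-check. The helper below is a hedged sketch, not part of the vendored file: it assumes it compiles in the same package (so it can see fieldElement) and converts the alternating 26/25-bit limbs back into a math/big integer modulo p = 2^255 - 19. That modulus is also why the g1_19 ... g9_19 terms above carry a factor of 19: 2^255 ≡ 19 (mod p), so anything that spills past the top limb folds back in multiplied by 19.

package curve25519

import "math/big"

// feToBig interprets a fieldElement as an integer modulo p = 2^255 - 19.
// Limb i sits at bit offset ceil(25.5*i): even limbs are 26 bits wide,
// odd limbs 25 bits wide, and limbs may be negative (big.Int absorbs that).
func feToBig(f *fieldElement) *big.Int {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	v := new(big.Int)
	shift := uint(0)
	for i := 0; i < 10; i++ {
		limb := big.NewInt(int64(f[i]))
		v.Add(v, limb.Lsh(limb, shift))
		if i%2 == 0 {
			shift += 26 // even limbs are 26 bits wide
		} else {
			shift += 25 // odd limbs are 25 bits wide
		}
	}
	return v.Mod(v, p)
}

With this helper, feMul(h, f, g) can be cross-checked against feToBig(f) * feToBig(g) mod p in a throwaway test.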
|
||||
// feSquare calculates h = f*f. Can overlap h with f.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
func feSquare(h, f *fieldElement) {
|
||||
f0 := f[0]
|
||||
f1 := f[1]
|
||||
f2 := f[2]
|
||||
f3 := f[3]
|
||||
f4 := f[4]
|
||||
f5 := f[5]
|
||||
f6 := f[6]
|
||||
f7 := f[7]
|
||||
f8 := f[8]
|
||||
f9 := f[9]
|
||||
f0_2 := 2 * f0
|
||||
f1_2 := 2 * f1
|
||||
f2_2 := 2 * f2
|
||||
f3_2 := 2 * f3
|
||||
f4_2 := 2 * f4
|
||||
f5_2 := 2 * f5
|
||||
f6_2 := 2 * f6
|
||||
f7_2 := 2 * f7
|
||||
f5_38 := 38 * f5 // 1.31*2^30
|
||||
f6_19 := 19 * f6 // 1.31*2^30
|
||||
f7_38 := 38 * f7 // 1.31*2^30
|
||||
f8_19 := 19 * f8 // 1.31*2^30
|
||||
f9_38 := 38 * f9 // 1.31*2^30
|
||||
f0f0 := int64(f0) * int64(f0)
|
||||
f0f1_2 := int64(f0_2) * int64(f1)
|
||||
f0f2_2 := int64(f0_2) * int64(f2)
|
||||
f0f3_2 := int64(f0_2) * int64(f3)
|
||||
f0f4_2 := int64(f0_2) * int64(f4)
|
||||
f0f5_2 := int64(f0_2) * int64(f5)
|
||||
f0f6_2 := int64(f0_2) * int64(f6)
|
||||
f0f7_2 := int64(f0_2) * int64(f7)
|
||||
f0f8_2 := int64(f0_2) * int64(f8)
|
||||
f0f9_2 := int64(f0_2) * int64(f9)
|
||||
f1f1_2 := int64(f1_2) * int64(f1)
|
||||
f1f2_2 := int64(f1_2) * int64(f2)
|
||||
f1f3_4 := int64(f1_2) * int64(f3_2)
|
||||
f1f4_2 := int64(f1_2) * int64(f4)
|
||||
f1f5_4 := int64(f1_2) * int64(f5_2)
|
||||
f1f6_2 := int64(f1_2) * int64(f6)
|
||||
f1f7_4 := int64(f1_2) * int64(f7_2)
|
||||
f1f8_2 := int64(f1_2) * int64(f8)
|
||||
f1f9_76 := int64(f1_2) * int64(f9_38)
|
||||
f2f2 := int64(f2) * int64(f2)
|
||||
f2f3_2 := int64(f2_2) * int64(f3)
|
||||
f2f4_2 := int64(f2_2) * int64(f4)
|
||||
f2f5_2 := int64(f2_2) * int64(f5)
|
||||
f2f6_2 := int64(f2_2) * int64(f6)
|
||||
f2f7_2 := int64(f2_2) * int64(f7)
|
||||
f2f8_38 := int64(f2_2) * int64(f8_19)
|
||||
f2f9_38 := int64(f2) * int64(f9_38)
|
||||
f3f3_2 := int64(f3_2) * int64(f3)
|
||||
f3f4_2 := int64(f3_2) * int64(f4)
|
||||
f3f5_4 := int64(f3_2) * int64(f5_2)
|
||||
f3f6_2 := int64(f3_2) * int64(f6)
|
||||
f3f7_76 := int64(f3_2) * int64(f7_38)
|
||||
f3f8_38 := int64(f3_2) * int64(f8_19)
|
||||
f3f9_76 := int64(f3_2) * int64(f9_38)
|
||||
f4f4 := int64(f4) * int64(f4)
|
||||
f4f5_2 := int64(f4_2) * int64(f5)
|
||||
f4f6_38 := int64(f4_2) * int64(f6_19)
|
||||
f4f7_38 := int64(f4) * int64(f7_38)
|
||||
f4f8_38 := int64(f4_2) * int64(f8_19)
|
||||
f4f9_38 := int64(f4) * int64(f9_38)
|
||||
f5f5_38 := int64(f5) * int64(f5_38)
|
||||
f5f6_38 := int64(f5_2) * int64(f6_19)
|
||||
f5f7_76 := int64(f5_2) * int64(f7_38)
|
||||
f5f8_38 := int64(f5_2) * int64(f8_19)
|
||||
f5f9_76 := int64(f5_2) * int64(f9_38)
|
||||
f6f6_19 := int64(f6) * int64(f6_19)
|
||||
f6f7_38 := int64(f6) * int64(f7_38)
|
||||
f6f8_38 := int64(f6_2) * int64(f8_19)
|
||||
f6f9_38 := int64(f6) * int64(f9_38)
|
||||
f7f7_38 := int64(f7) * int64(f7_38)
|
||||
f7f8_38 := int64(f7_2) * int64(f8_19)
|
||||
f7f9_76 := int64(f7_2) * int64(f9_38)
|
||||
f8f8_19 := int64(f8) * int64(f8_19)
|
||||
f8f9_38 := int64(f8) * int64(f9_38)
|
||||
f9f9_38 := int64(f9) * int64(f9_38)
|
||||
h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
|
||||
h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
|
||||
h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
|
||||
h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
|
||||
h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
|
||||
h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
|
||||
h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
|
||||
h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
|
||||
h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
|
||||
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
|
||||
var carry [10]int64
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feMul121666 calculates h = f * 121666. Can overlap h with f.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
func feMul121666(h, f *fieldElement) {
|
||||
h0 := int64(f[0]) * 121666
|
||||
h1 := int64(f[1]) * 121666
|
||||
h2 := int64(f[2]) * 121666
|
||||
h3 := int64(f[3]) * 121666
|
||||
h4 := int64(f[4]) * 121666
|
||||
h5 := int64(f[5]) * 121666
|
||||
h6 := int64(f[6]) * 121666
|
||||
h7 := int64(f[7]) * 121666
|
||||
h8 := int64(f[8]) * 121666
|
||||
h9 := int64(f[9]) * 121666
|
||||
var carry [10]int64
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feInvert sets out = z^-1.
|
||||
func feInvert(out, z *fieldElement) {
|
||||
var t0, t1, t2, t3 fieldElement
|
||||
var i int
|
||||
|
||||
feSquare(&t0, z)
|
||||
for i = 1; i < 1; i++ {
|
||||
feSquare(&t0, &t0)
|
||||
}
|
||||
feSquare(&t1, &t0)
|
||||
for i = 1; i < 2; i++ {
|
||||
feSquare(&t1, &t1)
|
||||
}
|
||||
feMul(&t1, z, &t1)
|
||||
feMul(&t0, &t0, &t1)
|
||||
feSquare(&t2, &t0)
|
||||
for i = 1; i < 1; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t1, &t2)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 5; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 10; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t2, &t2, &t1)
|
||||
feSquare(&t3, &t2)
|
||||
for i = 1; i < 20; i++ {
|
||||
feSquare(&t3, &t3)
|
||||
}
|
||||
feMul(&t2, &t3, &t2)
|
||||
feSquare(&t2, &t2)
|
||||
for i = 1; i < 10; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 50; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t2, &t2, &t1)
|
||||
feSquare(&t3, &t2)
|
||||
for i = 1; i < 100; i++ {
|
||||
feSquare(&t3, &t3)
|
||||
}
|
||||
feMul(&t2, &t3, &t2)
|
||||
feSquare(&t2, &t2)
|
||||
for i = 1; i < 50; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t1, &t1)
|
||||
for i = 1; i < 5; i++ {
|
||||
feSquare(&t1, &t1)
|
||||
}
|
||||
feMul(out, &t1, &t0)
|
||||
}
|
||||
|
||||
func scalarMult(out, in, base *[32]byte) {
|
||||
var e [32]byte
|
||||
|
||||
copy(e[:], in[:])
|
||||
e[0] &= 248
|
||||
e[31] &= 127
|
||||
e[31] |= 64
|
||||
|
||||
var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
|
||||
feFromBytes(&x1, base)
|
||||
feOne(&x2)
|
||||
feCopy(&x3, &x1)
|
||||
feOne(&z3)
|
||||
|
||||
swap := int32(0)
|
||||
for pos := 254; pos >= 0; pos-- {
|
||||
b := e[pos/8] >> uint(pos&7)
|
||||
b &= 1
|
||||
swap ^= int32(b)
|
||||
feCSwap(&x2, &x3, swap)
|
||||
feCSwap(&z2, &z3, swap)
|
||||
swap = int32(b)
|
||||
|
||||
feSub(&tmp0, &x3, &z3)
|
||||
feSub(&tmp1, &x2, &z2)
|
||||
feAdd(&x2, &x2, &z2)
|
||||
feAdd(&z2, &x3, &z3)
|
||||
feMul(&z3, &tmp0, &x2)
|
||||
feMul(&z2, &z2, &tmp1)
|
||||
feSquare(&tmp0, &tmp1)
|
||||
feSquare(&tmp1, &x2)
|
||||
feAdd(&x3, &z3, &z2)
|
||||
feSub(&z2, &z3, &z2)
|
||||
feMul(&x2, &tmp1, &tmp0)
|
||||
feSub(&tmp1, &tmp1, &tmp0)
|
||||
feSquare(&z2, &z2)
|
||||
feMul121666(&z3, &tmp1)
|
||||
feSquare(&x3, &x3)
|
||||
feAdd(&tmp0, &tmp0, &z3)
|
||||
feMul(&z3, &x1, &z2)
|
||||
feMul(&z2, &tmp1, &tmp0)
|
||||
}
|
||||
|
||||
feCSwap(&x2, &x3, swap)
|
||||
feCSwap(&z2, &z3, swap)
|
||||
|
||||
feInvert(&z2, &z2)
|
||||
feMul(&x2, &x2, &z2)
|
||||
feToBytes(out, &x2)
|
||||
}
|
23 vendor/golang.org/x/crypto/curve25519/doc.go generated vendored
@ -1,23 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package curve25519 provides an implementation of scalar multiplication on
// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html
package curve25519 // import "golang.org/x/crypto/curve25519"

// basePoint is the x coordinate of the generator of the curve.
var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

// ScalarMult sets dst to the product in*base where dst and base are the x
// coordinates of group points and all values are in little-endian form.
func ScalarMult(dst, in, base *[32]byte) {
	scalarMult(dst, in, base)
}

// ScalarBaseMult sets dst to the product in*base where dst and base are the x
// coordinates of group points, base is the standard generator and all values
// are in little-endian form.
func ScalarBaseMult(dst, in *[32]byte) {
	ScalarMult(dst, in, &basePoint)
}
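As a usage illustration only (not part of this commit), the sketch below runs an X25519-style key agreement with the two exported functions above; both sides derive the same 32-byte shared secret.

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var aPriv, bPriv [32]byte
	if _, err := rand.Read(aPriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bPriv[:]); err != nil {
		panic(err)
	}

	// Public keys: each scalar times the standard generator.
	var aPub, bPub [32]byte
	curve25519.ScalarBaseMult(&aPub, &aPriv)
	curve25519.ScalarBaseMult(&bPub, &bPriv)

	// Each side multiplies its own scalar by the peer's public key.
	var s1, s2 [32]byte
	curve25519.ScalarMult(&s1, &aPriv, &bPub)
	curve25519.ScalarMult(&s2, &bPriv, &aPub)

	fmt.Println("shared secrets match:", bytes.Equal(s1[:], s2[:]))
}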
73 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s generated vendored
@ -1,73 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "const_amd64.h"
|
||||
|
||||
// func freeze(inout *[5]uint64)
|
||||
TEXT ·freeze(SB),7,$0-8
|
||||
MOVQ inout+0(FP), DI
|
||||
|
||||
MOVQ 0(DI),SI
|
||||
MOVQ 8(DI),DX
|
||||
MOVQ 16(DI),CX
|
||||
MOVQ 24(DI),R8
|
||||
MOVQ 32(DI),R9
|
||||
MOVQ $REDMASK51,AX
|
||||
MOVQ AX,R10
|
||||
SUBQ $18,R10
|
||||
MOVQ $3,R11
|
||||
REDUCELOOP:
|
||||
MOVQ SI,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,SI
|
||||
ADDQ R12,DX
|
||||
MOVQ DX,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,DX
|
||||
ADDQ R12,CX
|
||||
MOVQ CX,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,CX
|
||||
ADDQ R12,R8
|
||||
MOVQ R8,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,R8
|
||||
ADDQ R12,R9
|
||||
MOVQ R9,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,R9
|
||||
IMUL3Q $19,R12,R12
|
||||
ADDQ R12,SI
|
||||
SUBQ $1,R11
|
||||
JA REDUCELOOP
|
||||
MOVQ $1,R12
|
||||
CMPQ R10,SI
|
||||
CMOVQLT R11,R12
|
||||
CMPQ AX,DX
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,CX
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,R8
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,R9
|
||||
CMOVQNE R11,R12
|
||||
NEGQ R12
|
||||
ANDQ R12,AX
|
||||
ANDQ R12,R10
|
||||
SUBQ R10,SI
|
||||
SUBQ AX,DX
|
||||
SUBQ AX,CX
|
||||
SUBQ AX,R8
|
||||
SUBQ AX,R9
|
||||
MOVQ SI,0(DI)
|
||||
MOVQ DX,8(DI)
|
||||
MOVQ CX,16(DI)
|
||||
MOVQ R8,24(DI)
|
||||
MOVQ R9,32(DI)
|
||||
RET
|
1377 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s generated vendored
File diff suppressed because it is too large
240 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go generated vendored
@ -1,240 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
package curve25519
|
||||
|
||||
// These functions are implemented in the .s files. The names of the functions
|
||||
// in the rest of the file are also taken from the SUPERCOP sources to help
|
||||
// people following along.
|
||||
|
||||
//go:noescape
|
||||
|
||||
func cswap(inout *[5]uint64, v uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func ladderstep(inout *[5][5]uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func freeze(inout *[5]uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func mul(dest, a, b *[5]uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func square(out, in *[5]uint64)
|
||||
|
||||
// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
|
||||
func mladder(xr, zr *[5]uint64, s *[32]byte) {
|
||||
var work [5][5]uint64
|
||||
|
||||
work[0] = *xr
|
||||
setint(&work[1], 1)
|
||||
setint(&work[2], 0)
|
||||
work[3] = *xr
|
||||
setint(&work[4], 1)
|
||||
|
||||
j := uint(6)
|
||||
var prevbit byte
|
||||
|
||||
for i := 31; i >= 0; i-- {
|
||||
for j < 8 {
|
||||
bit := ((*s)[i] >> j) & 1
|
||||
swap := bit ^ prevbit
|
||||
prevbit = bit
|
||||
cswap(&work[1], uint64(swap))
|
||||
ladderstep(&work)
|
||||
j--
|
||||
}
|
||||
j = 7
|
||||
}
|
||||
|
||||
*xr = work[1]
|
||||
*zr = work[2]
|
||||
}
|
||||
|
||||
func scalarMult(out, in, base *[32]byte) {
|
||||
var e [32]byte
|
||||
copy(e[:], (*in)[:])
|
||||
e[0] &= 248
|
||||
e[31] &= 127
|
||||
e[31] |= 64
|
||||
|
||||
var t, z [5]uint64
|
||||
unpack(&t, base)
|
||||
mladder(&t, &z, &e)
|
||||
invert(&z, &z)
|
||||
mul(&t, &t, &z)
|
||||
pack(out, &t)
|
||||
}
|
||||
|
||||
func setint(r *[5]uint64, v uint64) {
|
||||
r[0] = v
|
||||
r[1] = 0
|
||||
r[2] = 0
|
||||
r[3] = 0
|
||||
r[4] = 0
|
||||
}
|
||||
|
||||
// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
|
||||
// order.
|
||||
func unpack(r *[5]uint64, x *[32]byte) {
|
||||
r[0] = uint64(x[0]) |
|
||||
uint64(x[1])<<8 |
|
||||
uint64(x[2])<<16 |
|
||||
uint64(x[3])<<24 |
|
||||
uint64(x[4])<<32 |
|
||||
uint64(x[5])<<40 |
|
||||
uint64(x[6]&7)<<48
|
||||
|
||||
r[1] = uint64(x[6])>>3 |
|
||||
uint64(x[7])<<5 |
|
||||
uint64(x[8])<<13 |
|
||||
uint64(x[9])<<21 |
|
||||
uint64(x[10])<<29 |
|
||||
uint64(x[11])<<37 |
|
||||
uint64(x[12]&63)<<45
|
||||
|
||||
r[2] = uint64(x[12])>>6 |
|
||||
uint64(x[13])<<2 |
|
||||
uint64(x[14])<<10 |
|
||||
uint64(x[15])<<18 |
|
||||
uint64(x[16])<<26 |
|
||||
uint64(x[17])<<34 |
|
||||
uint64(x[18])<<42 |
|
||||
uint64(x[19]&1)<<50
|
||||
|
||||
r[3] = uint64(x[19])>>1 |
|
||||
uint64(x[20])<<7 |
|
||||
uint64(x[21])<<15 |
|
||||
uint64(x[22])<<23 |
|
||||
uint64(x[23])<<31 |
|
||||
uint64(x[24])<<39 |
|
||||
uint64(x[25]&15)<<47
|
||||
|
||||
r[4] = uint64(x[25])>>4 |
|
||||
uint64(x[26])<<4 |
|
||||
uint64(x[27])<<12 |
|
||||
uint64(x[28])<<20 |
|
||||
uint64(x[29])<<28 |
|
||||
uint64(x[30])<<36 |
|
||||
uint64(x[31]&127)<<44
|
||||
}
|
||||
|
||||
// pack sets out = x where out is the usual, little-endian form of the 5,
|
||||
// 51-bit limbs in x.
|
||||
func pack(out *[32]byte, x *[5]uint64) {
|
||||
t := *x
|
||||
freeze(&t)
|
||||
|
||||
out[0] = byte(t[0])
|
||||
out[1] = byte(t[0] >> 8)
|
||||
out[2] = byte(t[0] >> 16)
|
||||
out[3] = byte(t[0] >> 24)
|
||||
out[4] = byte(t[0] >> 32)
|
||||
out[5] = byte(t[0] >> 40)
|
||||
out[6] = byte(t[0] >> 48)
|
||||
|
||||
out[6] ^= byte(t[1]<<3) & 0xf8
|
||||
out[7] = byte(t[1] >> 5)
|
||||
out[8] = byte(t[1] >> 13)
|
||||
out[9] = byte(t[1] >> 21)
|
||||
out[10] = byte(t[1] >> 29)
|
||||
out[11] = byte(t[1] >> 37)
|
||||
out[12] = byte(t[1] >> 45)
|
||||
|
||||
out[12] ^= byte(t[2]<<6) & 0xc0
|
||||
out[13] = byte(t[2] >> 2)
|
||||
out[14] = byte(t[2] >> 10)
|
||||
out[15] = byte(t[2] >> 18)
|
||||
out[16] = byte(t[2] >> 26)
|
||||
out[17] = byte(t[2] >> 34)
|
||||
out[18] = byte(t[2] >> 42)
|
||||
out[19] = byte(t[2] >> 50)
|
||||
|
||||
out[19] ^= byte(t[3]<<1) & 0xfe
|
||||
out[20] = byte(t[3] >> 7)
|
||||
out[21] = byte(t[3] >> 15)
|
||||
out[22] = byte(t[3] >> 23)
|
||||
out[23] = byte(t[3] >> 31)
|
||||
out[24] = byte(t[3] >> 39)
|
||||
out[25] = byte(t[3] >> 47)
|
||||
|
||||
out[25] ^= byte(t[4]<<4) & 0xf0
|
||||
out[26] = byte(t[4] >> 4)
|
||||
out[27] = byte(t[4] >> 12)
|
||||
out[28] = byte(t[4] >> 20)
|
||||
out[29] = byte(t[4] >> 28)
|
||||
out[30] = byte(t[4] >> 36)
|
||||
out[31] = byte(t[4] >> 44)
|
||||
}
|
||||
|
||||
// invert calculates r = x^-1 mod p using Fermat's little theorem.
|
||||
func invert(r *[5]uint64, x *[5]uint64) {
|
||||
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
|
||||
|
||||
square(&z2, x) /* 2 */
|
||||
square(&t, &z2) /* 4 */
|
||||
square(&t, &t) /* 8 */
|
||||
mul(&z9, &t, x) /* 9 */
|
||||
mul(&z11, &z9, &z2) /* 11 */
|
||||
square(&t, &z11) /* 22 */
|
||||
mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
|
||||
|
||||
square(&t, &z2_5_0) /* 2^6 - 2^1 */
|
||||
for i := 1; i < 5; i++ { /* 2^10 - 2^5 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
|
||||
|
||||
square(&t, &z2_10_0) /* 2^11 - 2^1 */
|
||||
for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
|
||||
|
||||
square(&t, &z2_20_0) /* 2^21 - 2^1 */
|
||||
for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
|
||||
|
||||
square(&t, &t) /* 2^41 - 2^1 */
|
||||
for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
|
||||
|
||||
square(&t, &z2_50_0) /* 2^51 - 2^1 */
|
||||
for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
|
||||
|
||||
square(&t, &z2_100_0) /* 2^101 - 2^1 */
|
||||
for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
|
||||
|
||||
square(&t, &t) /* 2^201 - 2^1 */
|
||||
for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
|
||||
|
||||
square(&t, &t) /* 2^251 - 2^1 */
|
||||
square(&t, &t) /* 2^252 - 2^2 */
|
||||
square(&t, &t) /* 2^253 - 2^3 */
|
||||
|
||||
square(&t, &t) /* 2^254 - 2^4 */
|
||||
|
||||
square(&t, &t) /* 2^255 - 2^5 */
|
||||
mul(r, &t, &z11) /* 2^255 - 21 */
|
||||
}
|
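The exponent used by invert is easy to sanity-check with math/big. The snippet below is an illustration, not vendored code: Fermat's little theorem gives x^(p-2) ≡ x^-1 (mod p), and for p = 2^255 - 19 the exponent p - 2 is 2^255 - 21, the value named in invert's final comment.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19)) // 2^255 - 19

	x := big.NewInt(123456789)
	inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p) // x^(p-2) mod p

	check := new(big.Int).Mul(x, inv)
	check.Mod(check, p)
	fmt.Println("x * x^(p-2) mod p =", check) // prints 1
}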
169 vendor/golang.org/x/crypto/curve25519/mul_amd64.s generated vendored
@ -1,169 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "const_amd64.h"
|
||||
|
||||
// func mul(dest, a, b *[5]uint64)
|
||||
TEXT ·mul(SB),0,$16-24
|
||||
MOVQ dest+0(FP), DI
|
||||
MOVQ a+8(FP), SI
|
||||
MOVQ b+16(FP), DX
|
||||
|
||||
MOVQ DX,CX
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MOVQ AX,0(SP)
|
||||
MULQ 16(CX)
|
||||
MOVQ AX,R8
|
||||
MOVQ DX,R9
|
||||
MOVQ 32(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MOVQ AX,8(SP)
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 8(CX)
|
||||
MOVQ AX,R10
|
||||
MOVQ DX,R11
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 16(CX)
|
||||
MOVQ AX,R12
|
||||
MOVQ DX,R13
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 24(CX)
|
||||
MOVQ AX,R14
|
||||
MOVQ DX,R15
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 32(CX)
|
||||
MOVQ AX,BX
|
||||
MOVQ DX,BP
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 8(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 24(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 24(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 0(SP),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 0(SP),AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 32(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ $REDMASK51,SI
|
||||
SHLQ $13,R9:R8
|
||||
ANDQ SI,R8
|
||||
SHLQ $13,R11:R10
|
||||
ANDQ SI,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
ANDQ SI,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
ANDQ SI,R14
|
||||
ADDQ R13,R14
|
||||
SHLQ $13,BP:BX
|
||||
ANDQ SI,BX
|
||||
ADDQ R15,BX
|
||||
IMUL3Q $19,BP,DX
|
||||
ADDQ DX,R8
|
||||
MOVQ R8,DX
|
||||
SHRQ $51,DX
|
||||
ADDQ R10,DX
|
||||
MOVQ DX,CX
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,R8
|
||||
ADDQ R12,DX
|
||||
MOVQ DX,R9
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,CX
|
||||
ADDQ R14,DX
|
||||
MOVQ DX,AX
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,R9
|
||||
ADDQ BX,DX
|
||||
MOVQ DX,R10
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,AX
|
||||
IMUL3Q $19,DX,DX
|
||||
ADDQ DX,R8
|
||||
ANDQ SI,R10
|
||||
MOVQ R8,0(DI)
|
||||
MOVQ CX,8(DI)
|
||||
MOVQ R9,16(DI)
|
||||
MOVQ AX,24(DI)
|
||||
MOVQ R10,32(DI)
|
||||
RET
|
132 vendor/golang.org/x/crypto/curve25519/square_amd64.s generated vendored
@ -1,132 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "const_amd64.h"
|
||||
|
||||
// func square(out, in *[5]uint64)
|
||||
TEXT ·square(SB),7,$0-16
|
||||
MOVQ out+0(FP), DI
|
||||
MOVQ in+8(FP), SI
|
||||
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 0(SI)
|
||||
MOVQ AX,CX
|
||||
MOVQ DX,R8
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 8(SI)
|
||||
MOVQ AX,R9
|
||||
MOVQ DX,R10
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 16(SI)
|
||||
MOVQ AX,R11
|
||||
MOVQ DX,R12
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 24(SI)
|
||||
MOVQ AX,R13
|
||||
MOVQ DX,R14
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 32(SI)
|
||||
MOVQ AX,R15
|
||||
MOVQ DX,BX
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 8(SI)
|
||||
ADDQ AX,R11
|
||||
ADCQ DX,R12
|
||||
MOVQ 8(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 16(SI)
|
||||
ADDQ AX,R13
|
||||
ADCQ DX,R14
|
||||
MOVQ 8(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,R15
|
||||
ADCQ DX,BX
|
||||
MOVQ 8(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,CX
|
||||
ADCQ DX,R8
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 16(SI)
|
||||
ADDQ AX,R15
|
||||
ADCQ DX,BX
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,CX
|
||||
ADCQ DX,R8
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R9
|
||||
ADCQ DX,R10
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,R9
|
||||
ADCQ DX,R10
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R11
|
||||
ADCQ DX,R12
|
||||
MOVQ 32(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R13
|
||||
ADCQ DX,R14
|
||||
MOVQ $REDMASK51,SI
|
||||
SHLQ $13,R8:CX
|
||||
ANDQ SI,CX
|
||||
SHLQ $13,R10:R9
|
||||
ANDQ SI,R9
|
||||
ADDQ R8,R9
|
||||
SHLQ $13,R12:R11
|
||||
ANDQ SI,R11
|
||||
ADDQ R10,R11
|
||||
SHLQ $13,R14:R13
|
||||
ANDQ SI,R13
|
||||
ADDQ R12,R13
|
||||
SHLQ $13,BX:R15
|
||||
ANDQ SI,R15
|
||||
ADDQ R14,R15
|
||||
IMUL3Q $19,BX,DX
|
||||
ADDQ DX,CX
|
||||
MOVQ CX,DX
|
||||
SHRQ $51,DX
|
||||
ADDQ R9,DX
|
||||
ANDQ SI,CX
|
||||
MOVQ DX,R8
|
||||
SHRQ $51,DX
|
||||
ADDQ R11,DX
|
||||
ANDQ SI,R8
|
||||
MOVQ DX,R9
|
||||
SHRQ $51,DX
|
||||
ADDQ R13,DX
|
||||
ANDQ SI,R9
|
||||
MOVQ DX,AX
|
||||
SHRQ $51,DX
|
||||
ADDQ R15,DX
|
||||
ANDQ SI,AX
|
||||
MOVQ DX,R10
|
||||
SHRQ $51,DX
|
||||
IMUL3Q $19,DX,DX
|
||||
ADDQ DX,CX
|
||||
ANDQ SI,R10
|
||||
MOVQ CX,0(DI)
|
||||
MOVQ R8,8(DI)
|
||||
MOVQ R9,16(DI)
|
||||
MOVQ AX,24(DI)
|
||||
MOVQ R10,32(DI)
|
||||
RET
|
181 vendor/golang.org/x/crypto/ed25519/ed25519.go generated vendored
@ -1,181 +0,0 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ed25519 implements the Ed25519 signature algorithm. See
|
||||
// https://ed25519.cr.yp.to/.
|
||||
//
|
||||
// These functions are also compatible with the “Ed25519” function defined in
|
||||
// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05.
|
||||
package ed25519
|
||||
|
||||
// This code is a port of the public domain, “ref10” implementation of ed25519
|
||||
// from SUPERCOP.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/sha512"
|
||||
"errors"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/crypto/ed25519/internal/edwards25519"
|
||||
)
|
||||
|
||||
const (
|
||||
// PublicKeySize is the size, in bytes, of public keys as used in this package.
|
||||
PublicKeySize = 32
|
||||
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
|
||||
PrivateKeySize = 64
|
||||
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
|
||||
SignatureSize = 64
|
||||
)
|
||||
|
||||
// PublicKey is the type of Ed25519 public keys.
|
||||
type PublicKey []byte
|
||||
|
||||
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
|
||||
type PrivateKey []byte
|
||||
|
||||
// Public returns the PublicKey corresponding to priv.
|
||||
func (priv PrivateKey) Public() crypto.PublicKey {
|
||||
publicKey := make([]byte, PublicKeySize)
|
||||
copy(publicKey, priv[32:])
|
||||
return PublicKey(publicKey)
|
||||
}
|
||||
|
||||
// Sign signs the given message with priv.
|
||||
// Ed25519 performs two passes over messages to be signed and therefore cannot
|
||||
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
|
||||
// indicate the message hasn't been hashed. This can be achieved by passing
|
||||
// crypto.Hash(0) as the value for opts.
|
||||
func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
|
||||
if opts.HashFunc() != crypto.Hash(0) {
|
||||
return nil, errors.New("ed25519: cannot sign hashed message")
|
||||
}
|
||||
|
||||
return Sign(priv, message), nil
|
||||
}
|
||||
|
||||
// GenerateKey generates a public/private key pair using entropy from rand.
|
||||
// If rand is nil, crypto/rand.Reader will be used.
|
||||
func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {
|
||||
if rand == nil {
|
||||
rand = cryptorand.Reader
|
||||
}
|
||||
|
||||
privateKey = make([]byte, PrivateKeySize)
|
||||
publicKey = make([]byte, PublicKeySize)
|
||||
_, err = io.ReadFull(rand, privateKey[:32])
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
digest := sha512.Sum512(privateKey[:32])
|
||||
digest[0] &= 248
|
||||
digest[31] &= 127
|
||||
digest[31] |= 64
|
||||
|
||||
var A edwards25519.ExtendedGroupElement
|
||||
var hBytes [32]byte
|
||||
copy(hBytes[:], digest[:])
|
||||
edwards25519.GeScalarMultBase(&A, &hBytes)
|
||||
var publicKeyBytes [32]byte
|
||||
A.ToBytes(&publicKeyBytes)
|
||||
|
||||
copy(privateKey[32:], publicKeyBytes[:])
|
||||
copy(publicKey, publicKeyBytes[:])
|
||||
|
||||
return publicKey, privateKey, nil
|
||||
}
|
||||
|
||||
// Sign signs the message with privateKey and returns a signature. It will
|
||||
// panic if len(privateKey) is not PrivateKeySize.
|
||||
func Sign(privateKey PrivateKey, message []byte) []byte {
|
||||
if l := len(privateKey); l != PrivateKeySize {
|
||||
panic("ed25519: bad private key length: " + strconv.Itoa(l))
|
||||
}
|
||||
|
||||
h := sha512.New()
|
||||
h.Write(privateKey[:32])
|
||||
|
||||
var digest1, messageDigest, hramDigest [64]byte
|
||||
var expandedSecretKey [32]byte
|
||||
h.Sum(digest1[:0])
|
||||
copy(expandedSecretKey[:], digest1[:])
|
||||
expandedSecretKey[0] &= 248
|
||||
expandedSecretKey[31] &= 63
|
||||
expandedSecretKey[31] |= 64
|
||||
|
||||
h.Reset()
|
||||
h.Write(digest1[32:])
|
||||
h.Write(message)
|
||||
h.Sum(messageDigest[:0])
|
||||
|
||||
var messageDigestReduced [32]byte
|
||||
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
|
||||
var R edwards25519.ExtendedGroupElement
|
||||
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
|
||||
|
||||
var encodedR [32]byte
|
||||
R.ToBytes(&encodedR)
|
||||
|
||||
h.Reset()
|
||||
h.Write(encodedR[:])
|
||||
h.Write(privateKey[32:])
|
||||
h.Write(message)
|
||||
h.Sum(hramDigest[:0])
|
||||
var hramDigestReduced [32]byte
|
||||
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
|
||||
|
||||
var s [32]byte
|
||||
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
|
||||
|
||||
signature := make([]byte, SignatureSize)
|
||||
copy(signature[:], encodedR[:])
|
||||
copy(signature[32:], s[:])
|
||||
|
||||
return signature
|
||||
}
|
||||
|
||||
// Verify reports whether sig is a valid signature of message by publicKey. It
|
||||
// will panic if len(publicKey) is not PublicKeySize.
|
||||
func Verify(publicKey PublicKey, message, sig []byte) bool {
|
||||
if l := len(publicKey); l != PublicKeySize {
|
||||
panic("ed25519: bad public key length: " + strconv.Itoa(l))
|
||||
}
|
||||
|
||||
if len(sig) != SignatureSize || sig[63]&224 != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var A edwards25519.ExtendedGroupElement
|
||||
var publicKeyBytes [32]byte
|
||||
copy(publicKeyBytes[:], publicKey)
|
||||
if !A.FromBytes(&publicKeyBytes) {
|
||||
return false
|
||||
}
|
||||
edwards25519.FeNeg(&A.X, &A.X)
|
||||
edwards25519.FeNeg(&A.T, &A.T)
|
||||
|
||||
h := sha512.New()
|
||||
h.Write(sig[:32])
|
||||
h.Write(publicKey[:])
|
||||
h.Write(message)
|
||||
var digest [64]byte
|
||||
h.Sum(digest[:0])
|
||||
|
||||
var hReduced [32]byte
|
||||
edwards25519.ScReduce(&hReduced, &digest)
|
||||
|
||||
var R edwards25519.ProjectiveGroupElement
|
||||
var b [32]byte
|
||||
copy(b[:], sig[32:])
|
||||
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
|
||||
|
||||
var checkR [32]byte
|
||||
R.ToBytes(&checkR)
|
||||
return bytes.Equal(sig[:32], checkR[:])
|
||||
}
|
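For context only (not part of the commit), a minimal round trip through GenerateKey, Sign and Verify as declared above looks like this:

package main

import (
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(nil) // nil falls back to crypto/rand.Reader
	if err != nil {
		panic(err)
	}

	msg := []byte("hello, ed25519")
	sig := ed25519.Sign(priv, msg)

	fmt.Println("signature valid:", ed25519.Verify(pub, msg, sig))
}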
1422 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go generated vendored
File diff suppressed because it is too large
1771 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go generated vendored
File diff suppressed because it is too large
683 vendor/golang.org/x/crypto/ssh/agent/client.go generated vendored
@ -1,683 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package agent implements the ssh-agent protocol, and provides both
|
||||
// a client and a server. The client can talk to a standard ssh-agent
|
||||
// that uses UNIX sockets, and one could implement an alternative
|
||||
// ssh-agent process using the sample server.
|
||||
//
|
||||
// References:
|
||||
// [PROTOCOL.agent]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD
|
||||
package agent // import "golang.org/x/crypto/ssh/agent"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// Agent represents the capabilities of an ssh-agent.
|
||||
type Agent interface {
|
||||
// List returns the identities known to the agent.
|
||||
List() ([]*Key, error)
|
||||
|
||||
// Sign has the agent sign the data using a protocol 2 key as defined
|
||||
// in [PROTOCOL.agent] section 2.6.2.
|
||||
Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error)
|
||||
|
||||
// Add adds a private key to the agent.
|
||||
Add(key AddedKey) error
|
||||
|
||||
// Remove removes all identities with the given public key.
|
||||
Remove(key ssh.PublicKey) error
|
||||
|
||||
// RemoveAll removes all identities.
|
||||
RemoveAll() error
|
||||
|
||||
// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
|
||||
Lock(passphrase []byte) error
|
||||
|
||||
// Unlock undoes the effect of Lock
|
||||
Unlock(passphrase []byte) error
|
||||
|
||||
// Signers returns signers for all the known keys.
|
||||
Signers() ([]ssh.Signer, error)
|
||||
}
|
||||
|
||||
// ConstraintExtension describes an optional constraint defined by users.
|
||||
type ConstraintExtension struct {
|
||||
// ExtensionName consists of a UTF-8 string suffixed by the
|
||||
// implementation domain following the naming scheme defined
|
||||
// in Section 4.2 of [RFC4251], e.g. "foo@example.com".
|
||||
ExtensionName string
|
||||
// ExtensionDetails contains the actual content of the extended
|
||||
// constraint.
|
||||
ExtensionDetails []byte
|
||||
}
|
||||
|
||||
// AddedKey describes an SSH key to be added to an Agent.
|
||||
type AddedKey struct {
|
||||
// PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or
|
||||
// *ecdsa.PrivateKey, which will be inserted into the agent.
|
||||
PrivateKey interface{}
|
||||
// Certificate, if not nil, is communicated to the agent and will be
|
||||
// stored with the key.
|
||||
Certificate *ssh.Certificate
|
||||
// Comment is an optional, free-form string.
|
||||
Comment string
|
||||
// LifetimeSecs, if not zero, is the number of seconds that the
|
||||
// agent will store the key for.
|
||||
LifetimeSecs uint32
|
||||
// ConfirmBeforeUse, if true, requests that the agent confirm with the
|
||||
// user before each use of this key.
|
||||
ConfirmBeforeUse bool
|
||||
// ConstraintExtensions are the experimental or private-use constraints
|
||||
// defined by users.
|
||||
ConstraintExtensions []ConstraintExtension
|
||||
}
|
||||
|
||||
// See [PROTOCOL.agent], section 3.
|
||||
const (
|
||||
agentRequestV1Identities = 1
|
||||
agentRemoveAllV1Identities = 9
|
||||
|
||||
// 3.2 Requests from client to agent for protocol 2 key operations
|
||||
agentAddIdentity = 17
|
||||
agentRemoveIdentity = 18
|
||||
agentRemoveAllIdentities = 19
|
||||
agentAddIdConstrained = 25
|
||||
|
||||
// 3.3 Key-type independent requests from client to agent
|
||||
agentAddSmartcardKey = 20
|
||||
agentRemoveSmartcardKey = 21
|
||||
agentLock = 22
|
||||
agentUnlock = 23
|
||||
agentAddSmartcardKeyConstrained = 26
|
||||
|
||||
// 3.7 Key constraint identifiers
|
||||
agentConstrainLifetime = 1
|
||||
agentConstrainConfirm = 2
|
||||
agentConstrainExtension = 3
|
||||
)
|
||||
|
||||
// maxAgentResponseBytes is the maximum agent reply size that is accepted. This
|
||||
// is a sanity check, not a limit in the spec.
|
||||
const maxAgentResponseBytes = 16 << 20
|
||||
|
||||
// Agent messages:
|
||||
// These structures mirror the wire format of the corresponding ssh agent
|
||||
// messages found in [PROTOCOL.agent].
|
||||
|
||||
// 3.4 Generic replies from agent to client
|
||||
const agentFailure = 5
|
||||
|
||||
type failureAgentMsg struct{}
|
||||
|
||||
const agentSuccess = 6
|
||||
|
||||
type successAgentMsg struct{}
|
||||
|
||||
// See [PROTOCOL.agent], section 2.5.2.
|
||||
const agentRequestIdentities = 11
|
||||
|
||||
type requestIdentitiesAgentMsg struct{}
|
||||
|
||||
// See [PROTOCOL.agent], section 2.5.2.
|
||||
const agentIdentitiesAnswer = 12
|
||||
|
||||
type identitiesAnswerAgentMsg struct {
|
||||
NumKeys uint32 `sshtype:"12"`
|
||||
Keys []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// See [PROTOCOL.agent], section 2.6.2.
|
||||
const agentSignRequest = 13
|
||||
|
||||
type signRequestAgentMsg struct {
|
||||
KeyBlob []byte `sshtype:"13"`
|
||||
Data []byte
|
||||
Flags uint32
|
||||
}
|
||||
|
||||
// See [PROTOCOL.agent], section 2.6.2.
|
||||
|
||||
// 3.6 Replies from agent to client for protocol 2 key operations
|
||||
const agentSignResponse = 14
|
||||
|
||||
type signResponseAgentMsg struct {
|
||||
SigBlob []byte `sshtype:"14"`
|
||||
}
|
||||
|
||||
type publicKey struct {
|
||||
Format string
|
||||
Rest []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// 3.7 Key constraint identifiers
|
||||
type constrainLifetimeAgentMsg struct {
|
||||
LifetimeSecs uint32 `sshtype:"1"`
|
||||
}
|
||||
|
||||
type constrainExtensionAgentMsg struct {
|
||||
ExtensionName string `sshtype:"3"`
|
||||
ExtensionDetails []byte
|
||||
|
||||
// Rest is a field used for parsing, not part of the message
|
||||
Rest []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// Key represents a protocol 2 public key as defined in
|
||||
// [PROTOCOL.agent], section 2.5.2.
|
||||
type Key struct {
|
||||
Format string
|
||||
Blob []byte
|
||||
Comment string
|
||||
}
|
||||
|
||||
func clientErr(err error) error {
|
||||
return fmt.Errorf("agent: client error: %v", err)
|
||||
}
|
||||
|
||||
// String returns the storage form of an agent key with the format, base64
|
||||
// encoded serialized key, and the comment if it is not empty.
|
||||
func (k *Key) String() string {
|
||||
s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob)
|
||||
|
||||
if k.Comment != "" {
|
||||
s += " " + k.Comment
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Type returns the public key type.
|
||||
func (k *Key) Type() string {
|
||||
return k.Format
|
||||
}
|
||||
|
||||
// Marshal returns key blob to satisfy the ssh.PublicKey interface.
|
||||
func (k *Key) Marshal() []byte {
|
||||
return k.Blob
|
||||
}
|
||||
|
||||
// Verify satisfies the ssh.PublicKey interface.
|
||||
func (k *Key) Verify(data []byte, sig *ssh.Signature) error {
|
||||
pubKey, err := ssh.ParsePublicKey(k.Blob)
|
||||
if err != nil {
|
||||
return fmt.Errorf("agent: bad public key: %v", err)
|
||||
}
|
||||
return pubKey.Verify(data, sig)
|
||||
}
|
||||
|
||||
type wireKey struct {
|
||||
Format string
|
||||
Rest []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
func parseKey(in []byte) (out *Key, rest []byte, err error) {
|
||||
var record struct {
|
||||
Blob []byte
|
||||
Comment string
|
||||
Rest []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
if err := ssh.Unmarshal(in, &record); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var wk wireKey
|
||||
if err := ssh.Unmarshal(record.Blob, &wk); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &Key{
|
||||
Format: wk.Format,
|
||||
Blob: record.Blob,
|
||||
Comment: record.Comment,
|
||||
}, record.Rest, nil
|
||||
}
|
||||
|
||||
// client is a client for an ssh-agent process.
|
||||
type client struct {
|
||||
// conn is typically a *net.UnixConn
|
||||
conn io.ReadWriter
|
||||
// mu is used to prevent concurrent access to the agent
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// NewClient returns an Agent that talks to an ssh-agent process over
|
||||
// the given connection.
|
||||
func NewClient(rw io.ReadWriter) Agent {
|
||||
return &client{conn: rw}
|
||||
}
|
||||
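A hedged usage sketch, not taken from the commit: it dials the agent socket named by SSH_AUTH_SOCK (an assumption about the environment), wraps the connection with NewClient, and lists the identities the agent holds.

package main

import (
	"fmt"
	"net"
	"os"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	ag := agent.NewClient(conn)
	keys, err := ag.List()
	if err != nil {
		panic(err)
	}
	for _, k := range keys {
		fmt.Println(k.Format, k.Comment)
	}
}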
|
||||
// call sends an RPC to the agent. On success, the reply is unmarshaled
// and returned; its concrete type corresponds to the first byte of the
// raw reply, which identifies the agent message.
|
||||
func (c *client) call(req []byte) (reply interface{}, err error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
msg := make([]byte, 4+len(req))
|
||||
binary.BigEndian.PutUint32(msg, uint32(len(req)))
|
||||
copy(msg[4:], req)
|
||||
if _, err = c.conn.Write(msg); err != nil {
|
||||
return nil, clientErr(err)
|
||||
}
|
||||
|
||||
var respSizeBuf [4]byte
|
||||
if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil {
|
||||
return nil, clientErr(err)
|
||||
}
|
||||
respSize := binary.BigEndian.Uint32(respSizeBuf[:])
|
||||
if respSize > maxAgentResponseBytes {
return nil, clientErr(errors.New("agent: response too large"))
}
|
||||
|
||||
buf := make([]byte, respSize)
|
||||
if _, err = io.ReadFull(c.conn, buf); err != nil {
|
||||
return nil, clientErr(err)
|
||||
}
|
||||
reply, err = unmarshal(buf)
|
||||
if err != nil {
|
||||
return nil, clientErr(err)
|
||||
}
|
||||
return reply, err
|
||||
}
|
||||
|
||||
func (c *client) simpleCall(req []byte) error {
|
||||
resp, err := c.call(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := resp.(*successAgentMsg); ok {
|
||||
return nil
|
||||
}
|
||||
return errors.New("agent: failure")
|
||||
}
|
||||
|
||||
func (c *client) RemoveAll() error {
|
||||
return c.simpleCall([]byte{agentRemoveAllIdentities})
|
||||
}
|
||||
|
||||
func (c *client) Remove(key ssh.PublicKey) error {
|
||||
req := ssh.Marshal(&agentRemoveIdentityMsg{
|
||||
KeyBlob: key.Marshal(),
|
||||
})
|
||||
return c.simpleCall(req)
|
||||
}
|
||||
|
||||
func (c *client) Lock(passphrase []byte) error {
|
||||
req := ssh.Marshal(&agentLockMsg{
|
||||
Passphrase: passphrase,
|
||||
})
|
||||
return c.simpleCall(req)
|
||||
}
|
||||
|
||||
func (c *client) Unlock(passphrase []byte) error {
|
||||
req := ssh.Marshal(&agentUnlockMsg{
|
||||
Passphrase: passphrase,
|
||||
})
|
||||
return c.simpleCall(req)
|
||||
}
|
||||
|
||||
// List returns the identities known to the agent.
|
||||
func (c *client) List() ([]*Key, error) {
|
||||
// see [PROTOCOL.agent] section 2.5.2.
|
||||
req := []byte{agentRequestIdentities}
|
||||
|
||||
msg, err := c.call(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *identitiesAnswerAgentMsg:
|
||||
if msg.NumKeys > maxAgentResponseBytes/8 {
|
||||
return nil, errors.New("agent: too many keys in agent reply")
|
||||
}
|
||||
keys := make([]*Key, msg.NumKeys)
|
||||
data := msg.Keys
|
||||
for i := uint32(0); i < msg.NumKeys; i++ {
|
||||
var key *Key
|
||||
var err error
|
||||
if key, data, err = parseKey(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keys[i] = key
|
||||
}
|
||||
return keys, nil
|
||||
case *failureAgentMsg:
|
||||
return nil, errors.New("agent: failed to list keys")
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Sign has the agent sign the data using a protocol 2 key as defined
|
||||
// in [PROTOCOL.agent] section 2.6.2.
|
||||
func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
|
||||
req := ssh.Marshal(signRequestAgentMsg{
|
||||
KeyBlob: key.Marshal(),
|
||||
Data: data,
|
||||
})
|
||||
|
||||
msg, err := c.call(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *signResponseAgentMsg:
|
||||
var sig ssh.Signature
|
||||
if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &sig, nil
|
||||
case *failureAgentMsg:
|
||||
return nil, errors.New("agent: failed to sign challenge")
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// unmarshal parses an agent message in packet, returning the parsed
|
||||
// form and the message type of packet.
|
||||
func unmarshal(packet []byte) (interface{}, error) {
|
||||
if len(packet) < 1 {
|
||||
return nil, errors.New("agent: empty packet")
|
||||
}
|
||||
var msg interface{}
|
||||
switch packet[0] {
|
||||
case agentFailure:
|
||||
return new(failureAgentMsg), nil
|
||||
case agentSuccess:
|
||||
return new(successAgentMsg), nil
|
||||
case agentIdentitiesAnswer:
|
||||
msg = new(identitiesAnswerAgentMsg)
|
||||
case agentSignResponse:
|
||||
msg = new(signResponseAgentMsg)
|
||||
case agentV1IdentitiesAnswer:
|
||||
msg = new(agentV1IdentityMsg)
|
||||
default:
|
||||
return nil, fmt.Errorf("agent: unknown type tag %d", packet[0])
|
||||
}
|
||||
if err := ssh.Unmarshal(packet, msg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
type rsaKeyMsg struct {
|
||||
Type string `sshtype:"17|25"`
|
||||
N *big.Int
|
||||
E *big.Int
|
||||
D *big.Int
|
||||
Iqmp *big.Int // IQMP = Inverse Q Mod P
|
||||
P *big.Int
|
||||
Q *big.Int
|
||||
Comments string
|
||||
Constraints []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
type dsaKeyMsg struct {
|
||||
Type string `sshtype:"17|25"`
|
||||
P *big.Int
|
||||
Q *big.Int
|
||||
G *big.Int
|
||||
Y *big.Int
|
||||
X *big.Int
|
||||
Comments string
|
||||
Constraints []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
type ecdsaKeyMsg struct {
|
||||
Type string `sshtype:"17|25"`
|
||||
Curve string
|
||||
KeyBytes []byte
|
||||
D *big.Int
|
||||
Comments string
|
||||
Constraints []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
type ed25519KeyMsg struct {
|
||||
Type string `sshtype:"17|25"`
|
||||
Pub []byte
|
||||
Priv []byte
|
||||
Comments string
|
||||
Constraints []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// insertKey adds a private key to the agent.
|
||||
func (c *client) insertKey(s interface{}, comment string, constraints []byte) error {
|
||||
var req []byte
|
||||
switch k := s.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
if len(k.Primes) != 2 {
|
||||
return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
|
||||
}
|
||||
k.Precompute()
|
||||
req = ssh.Marshal(rsaKeyMsg{
|
||||
Type: ssh.KeyAlgoRSA,
|
||||
N: k.N,
|
||||
E: big.NewInt(int64(k.E)),
|
||||
D: k.D,
|
||||
Iqmp: k.Precomputed.Qinv,
|
||||
P: k.Primes[0],
|
||||
Q: k.Primes[1],
|
||||
Comments: comment,
|
||||
Constraints: constraints,
|
||||
})
|
||||
case *dsa.PrivateKey:
|
||||
req = ssh.Marshal(dsaKeyMsg{
|
||||
Type: ssh.KeyAlgoDSA,
|
||||
P: k.P,
|
||||
Q: k.Q,
|
||||
G: k.G,
|
||||
Y: k.Y,
|
||||
X: k.X,
|
||||
Comments: comment,
|
||||
Constraints: constraints,
|
||||
})
|
||||
case *ecdsa.PrivateKey:
|
||||
nistID := fmt.Sprintf("nistp%d", k.Params().BitSize)
|
||||
req = ssh.Marshal(ecdsaKeyMsg{
|
||||
Type: "ecdsa-sha2-" + nistID,
|
||||
Curve: nistID,
|
||||
KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y),
|
||||
D: k.D,
|
||||
Comments: comment,
|
||||
Constraints: constraints,
|
||||
})
|
||||
case *ed25519.PrivateKey:
|
||||
req = ssh.Marshal(ed25519KeyMsg{
|
||||
Type: ssh.KeyAlgoED25519,
|
||||
Pub: []byte(*k)[32:],
|
||||
Priv: []byte(*k),
|
||||
Comments: comment,
|
||||
Constraints: constraints,
|
||||
})
|
||||
default:
|
||||
return fmt.Errorf("agent: unsupported key type %T", s)
|
||||
}
|
||||
|
||||
// if constraints are present then the message type needs to be changed.
|
||||
if len(constraints) != 0 {
|
||||
req[0] = agentAddIdConstrained
|
||||
}
|
||||
|
||||
resp, err := c.call(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := resp.(*successAgentMsg); ok {
|
||||
return nil
|
||||
}
|
||||
return errors.New("agent: failure")
|
||||
}
|
||||
|
||||
type rsaCertMsg struct {
|
||||
Type string `sshtype:"17|25"`
|
||||
CertBytes []byte
|
||||
D *big.Int
|
||||
Iqmp *big.Int // IQMP = Inverse Q Mod P
|
||||
P *big.Int
|
||||
Q *big.Int
|
||||
Comments string
|
||||
Constraints []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
type dsaCertMsg struct {
|
||||
Type string `sshtype:"17|25"`
|
||||
CertBytes []byte
|
||||
X *big.Int
|
||||
Comments string
|
||||
Constraints []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
type ecdsaCertMsg struct {
|
||||
Type string `sshtype:"17|25"`
|
||||
CertBytes []byte
|
||||
D *big.Int
|
||||
Comments string
|
||||
Constraints []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
type ed25519CertMsg struct {
|
||||
Type string `sshtype:"17|25"`
|
||||
CertBytes []byte
|
||||
Pub []byte
|
||||
Priv []byte
|
||||
Comments string
|
||||
Constraints []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// Add adds a private key to the agent. If a certificate is given,
|
||||
// that certificate is added as the public key instead.
|
||||
func (c *client) Add(key AddedKey) error {
|
||||
var constraints []byte
|
||||
|
||||
if secs := key.LifetimeSecs; secs != 0 {
|
||||
constraints = append(constraints, ssh.Marshal(constrainLifetimeAgentMsg{secs})...)
|
||||
}
|
||||
|
||||
if key.ConfirmBeforeUse {
|
||||
constraints = append(constraints, agentConstrainConfirm)
|
||||
}
|
||||
|
||||
if cert := key.Certificate; cert == nil {
|
||||
return c.insertKey(key.PrivateKey, key.Comment, constraints)
|
||||
} else {
|
||||
return c.insertCert(key.PrivateKey, cert, key.Comment, constraints)
|
||||
}
|
||||
}
|
||||
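Another hedged sketch, not from the commit, showing how the AddedKey fields above are used to hand the agent a key with a lifetime constraint; the RSA key generation here is purely illustrative.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"net"
	"os"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	ag := agent.NewClient(conn)
	err = ag.Add(agent.AddedKey{
		PrivateKey:   priv,
		Comment:      "example key",
		LifetimeSecs: 3600, // the agent forgets the key after an hour
	})
	if err != nil {
		panic(err)
	}
}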
|
||||
func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error {
|
||||
var req []byte
|
||||
switch k := s.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
if len(k.Primes) != 2 {
|
||||
return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
|
||||
}
|
||||
k.Precompute()
|
||||
req = ssh.Marshal(rsaCertMsg{
|
||||
Type: cert.Type(),
|
||||
CertBytes: cert.Marshal(),
|
||||
D: k.D,
|
||||
Iqmp: k.Precomputed.Qinv,
|
||||
P: k.Primes[0],
|
||||
Q: k.Primes[1],
|
||||
Comments: comment,
|
||||
Constraints: constraints,
|
||||
})
|
||||
case *dsa.PrivateKey:
|
||||
req = ssh.Marshal(dsaCertMsg{
|
||||
Type: cert.Type(),
|
||||
CertBytes: cert.Marshal(),
|
||||
X: k.X,
|
||||
Comments: comment,
|
||||
Constraints: constraints,
|
||||
})
|
||||
case *ecdsa.PrivateKey:
|
||||
req = ssh.Marshal(ecdsaCertMsg{
|
||||
Type: cert.Type(),
|
||||
CertBytes: cert.Marshal(),
|
||||
D: k.D,
|
||||
Comments: comment,
|
||||
Constraints: constraints,
|
||||
})
|
||||
case *ed25519.PrivateKey:
|
||||
req = ssh.Marshal(ed25519CertMsg{
|
||||
Type: cert.Type(),
|
||||
CertBytes: cert.Marshal(),
|
||||
Pub: []byte(*k)[32:],
|
||||
Priv: []byte(*k),
|
||||
Comments: comment,
|
||||
Constraints: constraints,
|
||||
})
|
||||
default:
|
||||
return fmt.Errorf("agent: unsupported key type %T", s)
|
||||
}
|
||||
|
||||
// if constraints are present then the message type needs to be changed.
|
||||
if len(constraints) != 0 {
|
||||
req[0] = agentAddIdConstrained
|
||||
}
|
||||
|
||||
signer, err := ssh.NewSignerFromKey(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 {
|
||||
return errors.New("agent: signer and cert have different public key")
|
||||
}
|
||||
|
||||
resp, err := c.call(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := resp.(*successAgentMsg); ok {
|
||||
return nil
|
||||
}
|
||||
return errors.New("agent: failure")
|
||||
}
|
||||
|
||||
// Signers provides a callback for client authentication.
func (c *client) Signers() ([]ssh.Signer, error) {
	keys, err := c.List()
	if err != nil {
		return nil, err
	}

	var result []ssh.Signer
	for _, k := range keys {
		result = append(result, &agentKeyringSigner{c, k})
	}
	return result, nil
}

type agentKeyringSigner struct {
	agent *client
	pub   ssh.PublicKey
}

func (s *agentKeyringSigner) PublicKey() ssh.PublicKey {
	return s.pub
}

func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) {
	// The agent has its own entropy source, so the rand argument is ignored.
	return s.agent.Sign(s.pub, data)
}
103 vendor/golang.org/x/crypto/ssh/agent/forward.go generated vendored
@@ -1,103 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package agent

import (
	"errors"
	"io"
	"net"
	"sync"

	"golang.org/x/crypto/ssh"
)

// RequestAgentForwarding sets up agent forwarding for the session.
// ForwardToAgent or ForwardToRemote should be called to route
// the authentication requests.
func RequestAgentForwarding(session *ssh.Session) error {
	ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil)
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("forwarding request denied")
	}
	return nil
}
|
||||
|
||||
// ForwardToAgent routes authentication requests to the given keyring.
func ForwardToAgent(client *ssh.Client, keyring Agent) error {
	channels := client.HandleChannelOpen(channelType)
	if channels == nil {
		return errors.New("agent: already have handler for " + channelType)
	}

	go func() {
		for ch := range channels {
			channel, reqs, err := ch.Accept()
			if err != nil {
				continue
			}
			go ssh.DiscardRequests(reqs)
			go func() {
				ServeAgent(keyring, channel)
				channel.Close()
			}()
		}
	}()
	return nil
}
|
||||
|
||||
const channelType = "auth-agent@openssh.com"
|
||||
|
||||
// ForwardToRemote routes authentication requests to the ssh-agent
|
||||
// process serving on the given unix socket.
|
||||
func ForwardToRemote(client *ssh.Client, addr string) error {
|
||||
channels := client.HandleChannelOpen(channelType)
|
||||
if channels == nil {
|
||||
return errors.New("agent: already have handler for " + channelType)
|
||||
}
|
||||
conn, err := net.Dial("unix", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
go func() {
|
||||
for ch := range channels {
|
||||
channel, reqs, err := ch.Accept()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
go ssh.DiscardRequests(reqs)
|
||||
go forwardUnixSocket(channel, addr)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func forwardUnixSocket(channel ssh.Channel, addr string) {
|
||||
conn, err := net.Dial("unix", addr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
io.Copy(conn, channel)
|
||||
conn.(*net.UnixConn).CloseWrite()
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
io.Copy(channel, conn)
|
||||
channel.CloseWrite()
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
conn.Close()
|
||||
channel.Close()
|
||||
}
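An illustrative sketch, assuming a reachable server and a running local ssh-agent, of how RequestAgentForwarding, ForwardToAgent and the channel handlers above fit together; the user, host and host-key policy are placeholders:

package main

import (
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

func main() {
	sock, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatal(err)
	}
	keyring := agent.NewClient(sock)

	signers, err := keyring.Signers()
	if err != nil {
		log.Fatal(err)
	}
	config := &ssh.ClientConfig{
		User:            "example-user", // placeholder
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signers...)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // placeholder; verify host keys in real use
	}
	client, err := ssh.Dial("tcp", "example.com:22", config) // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Route the remote side's auth-agent@openssh.com channels back to our keyring.
	if err := agent.ForwardToAgent(client, keyring); err != nil {
		log.Fatal(err)
	}

	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Ask the server to enable agent forwarding for this session.
	if err := agent.RequestAgentForwarding(session); err != nil {
		log.Fatal(err)
	}
	if err := session.Run("ssh-add -l"); err != nil {
		log.Fatal(err)
	}
}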
215 vendor/golang.org/x/crypto/ssh/agent/keyring.go generated vendored
@@ -1,215 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
type privKey struct {
|
||||
signer ssh.Signer
|
||||
comment string
|
||||
expire *time.Time
|
||||
}
|
||||
|
||||
type keyring struct {
|
||||
mu sync.Mutex
|
||||
keys []privKey
|
||||
|
||||
locked bool
|
||||
passphrase []byte
|
||||
}
|
||||
|
||||
var errLocked = errors.New("agent: locked")
|
||||
|
||||
// NewKeyring returns an Agent that holds keys in memory. It is safe
// for concurrent use by multiple goroutines.
func NewKeyring() Agent {
	return &keyring{}
}

// RemoveAll removes all identities.
func (r *keyring) RemoveAll() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.locked {
		return errLocked
	}

	r.keys = nil
	return nil
}
|
||||
|
||||
// removeLocked does the actual key removal. The caller must already be holding the
|
||||
// keyring mutex.
|
||||
func (r *keyring) removeLocked(want []byte) error {
|
||||
found := false
|
||||
for i := 0; i < len(r.keys); {
|
||||
if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) {
|
||||
found = true
|
||||
r.keys[i] = r.keys[len(r.keys)-1]
|
||||
r.keys = r.keys[:len(r.keys)-1]
|
||||
continue
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return errors.New("agent: key not found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove removes all identities with the given public key.
|
||||
func (r *keyring) Remove(key ssh.PublicKey) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if r.locked {
|
||||
return errLocked
|
||||
}
|
||||
|
||||
return r.removeLocked(key.Marshal())
|
||||
}
|
||||
|
||||
// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
|
||||
func (r *keyring) Lock(passphrase []byte) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if r.locked {
|
||||
return errLocked
|
||||
}
|
||||
|
||||
r.locked = true
|
||||
r.passphrase = passphrase
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unlock undoes the effect of Lock
|
||||
func (r *keyring) Unlock(passphrase []byte) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if !r.locked {
|
||||
return errors.New("agent: not locked")
|
||||
}
|
||||
if len(passphrase) != len(r.passphrase) || 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) {
|
||||
return fmt.Errorf("agent: incorrect passphrase")
|
||||
}
|
||||
|
||||
r.locked = false
|
||||
r.passphrase = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// expireKeysLocked removes expired keys from the keyring. If a key was added
// with a lifetimesecs constraint and at least lifetimesecs seconds have
// elapsed, it is removed. The caller *must* be holding the keyring mutex.
func (r *keyring) expireKeysLocked() {
	for _, k := range r.keys {
		if k.expire != nil && time.Now().After(*k.expire) {
			r.removeLocked(k.signer.PublicKey().Marshal())
		}
	}
}
|
||||
|
||||
// List returns the identities known to the agent.
|
||||
func (r *keyring) List() ([]*Key, error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if r.locked {
|
||||
// section 2.7: locked agents return empty.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
r.expireKeysLocked()
|
||||
var ids []*Key
|
||||
for _, k := range r.keys {
|
||||
pub := k.signer.PublicKey()
|
||||
ids = append(ids, &Key{
|
||||
Format: pub.Type(),
|
||||
Blob: pub.Marshal(),
|
||||
Comment: k.comment})
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// Add adds a private key to the keyring. If a certificate
// is given, that certificate is added as the public key. Note that
// any constraints given are ignored.
func (r *keyring) Add(key AddedKey) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if r.locked {
|
||||
return errLocked
|
||||
}
|
||||
signer, err := ssh.NewSignerFromKey(key.PrivateKey)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cert := key.Certificate; cert != nil {
|
||||
signer, err = ssh.NewCertSigner(cert, signer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
p := privKey{
|
||||
signer: signer,
|
||||
comment: key.Comment,
|
||||
}
|
||||
|
||||
if key.LifetimeSecs > 0 {
|
||||
t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second)
|
||||
p.expire = &t
|
||||
}
|
||||
|
||||
r.keys = append(r.keys, p)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sign returns a signature for the data.
|
||||
func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if r.locked {
|
||||
return nil, errLocked
|
||||
}
|
||||
|
||||
r.expireKeysLocked()
|
||||
wanted := key.Marshal()
|
||||
for _, k := range r.keys {
|
||||
if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) {
|
||||
return k.signer.Sign(rand.Reader, data)
|
||||
}
|
||||
}
|
||||
return nil, errors.New("not found")
|
||||
}
|
||||
|
||||
// Signers returns signers for all the known keys.
|
||||
func (r *keyring) Signers() ([]ssh.Signer, error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if r.locked {
|
||||
return nil, errLocked
|
||||
}
|
||||
|
||||
r.expireKeysLocked()
|
||||
s := make([]ssh.Signer, 0, len(r.keys))
|
||||
for _, k := range r.keys {
|
||||
s = append(s, k.signer)
|
||||
}
|
||||
return s, nil
|
||||
}
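A hedged usage sketch of the in-memory keyring defined in this file; the ed25519 key and comment are placeholders:

package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ed25519"
	"golang.org/x/crypto/ssh/agent"
)

func main() {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	kr := agent.NewKeyring()
	if err := kr.Add(agent.AddedKey{PrivateKey: priv, Comment: "demo key"}); err != nil {
		log.Fatal(err)
	}

	keys, err := kr.List()
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Println(k.Format, k.Comment)
	}

	sig, err := kr.Sign(keys[0], []byte("payload"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature format:", sig.Format)
}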
523 vendor/golang.org/x/crypto/ssh/agent/server.go generated vendored
@@ -1,523 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// server wraps an Agent and uses it to implement the agent side of
// the SSH agent wire protocol.
type server struct {
	agent Agent
}
|
||||
|
||||
func (s *server) processRequestBytes(reqData []byte) []byte {
|
||||
rep, err := s.processRequest(reqData)
|
||||
if err != nil {
|
||||
if err != errLocked {
|
||||
// TODO(hanwen): provide better logging interface?
|
||||
log.Printf("agent %d: %v", reqData[0], err)
|
||||
}
|
||||
return []byte{agentFailure}
|
||||
}
|
||||
|
||||
if err == nil && rep == nil {
|
||||
return []byte{agentSuccess}
|
||||
}
|
||||
|
||||
return ssh.Marshal(rep)
|
||||
}
|
||||
|
||||
func marshalKey(k *Key) []byte {
|
||||
var record struct {
|
||||
Blob []byte
|
||||
Comment string
|
||||
}
|
||||
record.Blob = k.Marshal()
|
||||
record.Comment = k.Comment
|
||||
|
||||
return ssh.Marshal(&record)
|
||||
}
|
||||
|
||||
// See [PROTOCOL.agent], section 2.5.1.
|
||||
const agentV1IdentitiesAnswer = 2
|
||||
|
||||
type agentV1IdentityMsg struct {
|
||||
Numkeys uint32 `sshtype:"2"`
|
||||
}
|
||||
|
||||
type agentRemoveIdentityMsg struct {
|
||||
KeyBlob []byte `sshtype:"18"`
|
||||
}
|
||||
|
||||
type agentLockMsg struct {
|
||||
Passphrase []byte `sshtype:"22"`
|
||||
}
|
||||
|
||||
type agentUnlockMsg struct {
|
||||
Passphrase []byte `sshtype:"23"`
|
||||
}
|
||||
|
||||
func (s *server) processRequest(data []byte) (interface{}, error) {
|
||||
switch data[0] {
|
||||
case agentRequestV1Identities:
|
||||
return &agentV1IdentityMsg{0}, nil
|
||||
|
||||
case agentRemoveAllV1Identities:
|
||||
return nil, nil
|
||||
|
||||
case agentRemoveIdentity:
|
||||
var req agentRemoveIdentityMsg
|
||||
if err := ssh.Unmarshal(data, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var wk wireKey
|
||||
if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob})
|
||||
|
||||
case agentRemoveAllIdentities:
|
||||
return nil, s.agent.RemoveAll()
|
||||
|
||||
case agentLock:
|
||||
var req agentLockMsg
|
||||
if err := ssh.Unmarshal(data, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, s.agent.Lock(req.Passphrase)
|
||||
|
||||
case agentUnlock:
|
||||
var req agentUnlockMsg
|
||||
if err := ssh.Unmarshal(data, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, s.agent.Unlock(req.Passphrase)
|
||||
|
||||
case agentSignRequest:
|
||||
var req signRequestAgentMsg
|
||||
if err := ssh.Unmarshal(data, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var wk wireKey
|
||||
if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k := &Key{
|
||||
Format: wk.Format,
|
||||
Blob: req.KeyBlob,
|
||||
}
|
||||
|
||||
sig, err := s.agent.Sign(k, req.Data) // TODO(hanwen): flags.
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil
|
||||
|
||||
case agentRequestIdentities:
|
||||
keys, err := s.agent.List()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rep := identitiesAnswerAgentMsg{
|
||||
NumKeys: uint32(len(keys)),
|
||||
}
|
||||
for _, k := range keys {
|
||||
rep.Keys = append(rep.Keys, marshalKey(k)...)
|
||||
}
|
||||
return rep, nil
|
||||
|
||||
case agentAddIdConstrained, agentAddIdentity:
|
||||
return nil, s.insertIdentity(data)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown opcode %d", data[0])
|
||||
}
|
||||
|
||||
func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse bool, extensions []ConstraintExtension, err error) {
|
||||
for len(constraints) != 0 {
|
||||
switch constraints[0] {
|
||||
case agentConstrainLifetime:
|
||||
lifetimeSecs = binary.BigEndian.Uint32(constraints[1:5])
|
||||
constraints = constraints[5:]
|
||||
case agentConstrainConfirm:
|
||||
confirmBeforeUse = true
|
||||
constraints = constraints[1:]
|
||||
case agentConstrainExtension:
|
||||
var msg constrainExtensionAgentMsg
|
||||
if err = ssh.Unmarshal(constraints, &msg); err != nil {
|
||||
return 0, false, nil, err
|
||||
}
|
||||
extensions = append(extensions, ConstraintExtension{
|
||||
ExtensionName: msg.ExtensionName,
|
||||
ExtensionDetails: msg.ExtensionDetails,
|
||||
})
|
||||
constraints = msg.Rest
|
||||
default:
|
||||
return 0, false, nil, fmt.Errorf("unknown constraint type: %d", constraints[0])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setConstraints(key *AddedKey, constraintBytes []byte) error {
|
||||
lifetimeSecs, confirmBeforeUse, constraintExtensions, err := parseConstraints(constraintBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
key.LifetimeSecs = lifetimeSecs
|
||||
key.ConfirmBeforeUse = confirmBeforeUse
|
||||
key.ConstraintExtensions = constraintExtensions
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseRSAKey(req []byte) (*AddedKey, error) {
|
||||
var k rsaKeyMsg
|
||||
if err := ssh.Unmarshal(req, &k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if k.E.BitLen() > 30 {
|
||||
return nil, errors.New("agent: RSA public exponent too large")
|
||||
}
|
||||
priv := &rsa.PrivateKey{
|
||||
PublicKey: rsa.PublicKey{
|
||||
E: int(k.E.Int64()),
|
||||
N: k.N,
|
||||
},
|
||||
D: k.D,
|
||||
Primes: []*big.Int{k.P, k.Q},
|
||||
}
|
||||
priv.Precompute()
|
||||
|
||||
addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments}
|
||||
if err := setConstraints(addedKey, k.Constraints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return addedKey, nil
|
||||
}
|
||||
|
||||
func parseEd25519Key(req []byte) (*AddedKey, error) {
|
||||
var k ed25519KeyMsg
|
||||
if err := ssh.Unmarshal(req, &k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
priv := ed25519.PrivateKey(k.Priv)
|
||||
|
||||
addedKey := &AddedKey{PrivateKey: &priv, Comment: k.Comments}
|
||||
if err := setConstraints(addedKey, k.Constraints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return addedKey, nil
|
||||
}
|
||||
|
||||
func parseDSAKey(req []byte) (*AddedKey, error) {
|
||||
var k dsaKeyMsg
|
||||
if err := ssh.Unmarshal(req, &k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
priv := &dsa.PrivateKey{
|
||||
PublicKey: dsa.PublicKey{
|
||||
Parameters: dsa.Parameters{
|
||||
P: k.P,
|
||||
Q: k.Q,
|
||||
G: k.G,
|
||||
},
|
||||
Y: k.Y,
|
||||
},
|
||||
X: k.X,
|
||||
}
|
||||
|
||||
addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments}
|
||||
if err := setConstraints(addedKey, k.Constraints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return addedKey, nil
|
||||
}
|
||||
|
||||
func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) {
|
||||
priv = &ecdsa.PrivateKey{
|
||||
D: privScalar,
|
||||
}
|
||||
|
||||
switch curveName {
|
||||
case "nistp256":
|
||||
priv.Curve = elliptic.P256()
|
||||
case "nistp384":
|
||||
priv.Curve = elliptic.P384()
|
||||
case "nistp521":
|
||||
priv.Curve = elliptic.P521()
|
||||
default:
|
||||
return nil, fmt.Errorf("agent: unknown curve %q", curveName)
|
||||
}
|
||||
|
||||
priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes)
|
||||
if priv.X == nil || priv.Y == nil {
|
||||
return nil, errors.New("agent: point not on curve")
|
||||
}
|
||||
|
||||
return priv, nil
|
||||
}
|
||||
|
||||
func parseEd25519Cert(req []byte) (*AddedKey, error) {
|
||||
var k ed25519CertMsg
|
||||
if err := ssh.Unmarshal(req, &k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pubKey, err := ssh.ParsePublicKey(k.CertBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
priv := ed25519.PrivateKey(k.Priv)
|
||||
cert, ok := pubKey.(*ssh.Certificate)
|
||||
if !ok {
|
||||
return nil, errors.New("agent: bad ED25519 certificate")
|
||||
}
|
||||
|
||||
addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}
|
||||
if err := setConstraints(addedKey, k.Constraints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return addedKey, nil
|
||||
}
|
||||
|
||||
func parseECDSAKey(req []byte) (*AddedKey, error) {
|
||||
var k ecdsaKeyMsg
|
||||
if err := ssh.Unmarshal(req, &k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments}
|
||||
if err := setConstraints(addedKey, k.Constraints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return addedKey, nil
|
||||
}
|
||||
|
||||
func parseRSACert(req []byte) (*AddedKey, error) {
|
||||
var k rsaCertMsg
|
||||
if err := ssh.Unmarshal(req, &k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubKey, err := ssh.ParsePublicKey(k.CertBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cert, ok := pubKey.(*ssh.Certificate)
|
||||
if !ok {
|
||||
return nil, errors.New("agent: bad RSA certificate")
|
||||
}
|
||||
|
||||
// An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go
|
||||
var rsaPub struct {
|
||||
Name string
|
||||
E *big.Int
|
||||
N *big.Int
|
||||
}
|
||||
if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil {
|
||||
return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
|
||||
}
|
||||
|
||||
if rsaPub.E.BitLen() > 30 {
|
||||
return nil, errors.New("agent: RSA public exponent too large")
|
||||
}
|
||||
|
||||
priv := rsa.PrivateKey{
|
||||
PublicKey: rsa.PublicKey{
|
||||
E: int(rsaPub.E.Int64()),
|
||||
N: rsaPub.N,
|
||||
},
|
||||
D: k.D,
|
||||
Primes: []*big.Int{k.Q, k.P},
|
||||
}
|
||||
priv.Precompute()
|
||||
|
||||
addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}
|
||||
if err := setConstraints(addedKey, k.Constraints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return addedKey, nil
|
||||
}
|
||||
|
||||
func parseDSACert(req []byte) (*AddedKey, error) {
|
||||
var k dsaCertMsg
|
||||
if err := ssh.Unmarshal(req, &k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pubKey, err := ssh.ParsePublicKey(k.CertBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, ok := pubKey.(*ssh.Certificate)
|
||||
if !ok {
|
||||
return nil, errors.New("agent: bad DSA certificate")
|
||||
}
|
||||
|
||||
// A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go
|
||||
var w struct {
|
||||
Name string
|
||||
P, Q, G, Y *big.Int
|
||||
}
|
||||
if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil {
|
||||
return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
|
||||
}
|
||||
|
||||
priv := &dsa.PrivateKey{
|
||||
PublicKey: dsa.PublicKey{
|
||||
Parameters: dsa.Parameters{
|
||||
P: w.P,
|
||||
Q: w.Q,
|
||||
G: w.G,
|
||||
},
|
||||
Y: w.Y,
|
||||
},
|
||||
X: k.X,
|
||||
}
|
||||
|
||||
addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}
|
||||
if err := setConstraints(addedKey, k.Constraints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return addedKey, nil
|
||||
}
|
||||
|
||||
func parseECDSACert(req []byte) (*AddedKey, error) {
|
||||
var k ecdsaCertMsg
|
||||
if err := ssh.Unmarshal(req, &k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubKey, err := ssh.ParsePublicKey(k.CertBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, ok := pubKey.(*ssh.Certificate)
|
||||
if !ok {
|
||||
return nil, errors.New("agent: bad ECDSA certificate")
|
||||
}
|
||||
|
||||
// An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go
|
||||
var ecdsaPub struct {
|
||||
Name string
|
||||
ID string
|
||||
Key []byte
|
||||
}
|
||||
if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}
|
||||
if err := setConstraints(addedKey, k.Constraints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return addedKey, nil
|
||||
}
|
||||
|
||||
func (s *server) insertIdentity(req []byte) error {
|
||||
var record struct {
|
||||
Type string `sshtype:"17|25"`
|
||||
Rest []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
if err := ssh.Unmarshal(req, &record); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var addedKey *AddedKey
|
||||
var err error
|
||||
|
||||
switch record.Type {
|
||||
case ssh.KeyAlgoRSA:
|
||||
addedKey, err = parseRSAKey(req)
|
||||
case ssh.KeyAlgoDSA:
|
||||
addedKey, err = parseDSAKey(req)
|
||||
case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521:
|
||||
addedKey, err = parseECDSAKey(req)
|
||||
case ssh.KeyAlgoED25519:
|
||||
addedKey, err = parseEd25519Key(req)
|
||||
case ssh.CertAlgoRSAv01:
|
||||
addedKey, err = parseRSACert(req)
|
||||
case ssh.CertAlgoDSAv01:
|
||||
addedKey, err = parseDSACert(req)
|
||||
case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01:
|
||||
addedKey, err = parseECDSACert(req)
|
||||
case ssh.CertAlgoED25519v01:
|
||||
addedKey, err = parseEd25519Cert(req)
|
||||
default:
|
||||
return fmt.Errorf("agent: not implemented: %q", record.Type)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.agent.Add(*addedKey)
|
||||
}
|
||||
|
||||
// ServeAgent serves the agent protocol on the given connection. It
// returns when an I/O error occurs.
func ServeAgent(agent Agent, c io.ReadWriter) error {
	s := &server{agent}

	var length [4]byte
	for {
		if _, err := io.ReadFull(c, length[:]); err != nil {
			return err
		}
		l := binary.BigEndian.Uint32(length[:])
		if l > maxAgentResponseBytes {
			// We also cap requests.
			return fmt.Errorf("agent: request too large: %d", l)
		}

		req := make([]byte, l)
		if _, err := io.ReadFull(c, req); err != nil {
			return err
		}

		repData := s.processRequestBytes(req)
		if len(repData) > maxAgentResponseBytes {
			return fmt.Errorf("agent: reply too large: %d bytes", len(repData))
		}

		binary.BigEndian.PutUint32(length[:], uint32(len(repData)))
		if _, err := c.Write(length[:]); err != nil {
			return err
		}
		if _, err := c.Write(repData); err != nil {
			return err
		}
	}
}
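A short sketch (not from this diff) of serving the wire protocol implemented above on a unix socket; the socket path is a placeholder:

package main

import (
	"log"
	"net"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	keyring := agent.NewKeyring()

	l, err := net.Listen("unix", "/tmp/demo-agent.sock") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			defer c.Close()
			// ServeAgent reads length-prefixed requests until an I/O error occurs.
			if err := agent.ServeAgent(keyring, c); err != nil {
				log.Printf("agent: %v", err)
			}
		}(conn)
	}
}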
98 vendor/golang.org/x/crypto/ssh/buffer.go generated vendored
@@ -1,98 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// buffer provides a linked list buffer for data exchange
// between producer and consumer. Theoretically the buffer is
// of unlimited capacity as it does no allocation of its own.
type buffer struct {
	// protects concurrent access to head, tail and closed
	*sync.Cond

	head *element // the buffer that will be read first
	tail *element // the buffer that will be read last

	closed bool
}
|
||||
|
||||
// An element represents a single link in a linked list.
|
||||
type element struct {
|
||||
buf []byte
|
||||
next *element
|
||||
}
|
||||
|
||||
// newBuffer returns an empty buffer that is not closed.
|
||||
func newBuffer() *buffer {
|
||||
e := new(element)
|
||||
b := &buffer{
|
||||
Cond: newCond(),
|
||||
head: e,
|
||||
tail: e,
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// write makes buf available for Read to receive.
|
||||
// buf must not be modified after the call to write.
|
||||
func (b *buffer) write(buf []byte) {
|
||||
b.Cond.L.Lock()
|
||||
e := &element{buf: buf}
|
||||
b.tail.next = e
|
||||
b.tail = e
|
||||
b.Cond.Signal()
|
||||
b.Cond.L.Unlock()
|
||||
}
|
||||
|
||||
// eof closes the buffer. Reads from the buffer once all
|
||||
// the data has been consumed will receive os.EOF.
|
||||
func (b *buffer) eof() error {
|
||||
b.Cond.L.Lock()
|
||||
b.closed = true
|
||||
b.Cond.Signal()
|
||||
b.Cond.L.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read reads data from the internal buffer in buf. Reads will block
|
||||
// if no data is available, or until the buffer is closed.
|
||||
func (b *buffer) Read(buf []byte) (n int, err error) {
|
||||
b.Cond.L.Lock()
|
||||
defer b.Cond.L.Unlock()
|
||||
|
||||
for len(buf) > 0 {
|
||||
// if there is data in b.head, copy it
|
||||
if len(b.head.buf) > 0 {
|
||||
r := copy(buf, b.head.buf)
|
||||
buf, b.head.buf = buf[r:], b.head.buf[r:]
|
||||
n += r
|
||||
continue
|
||||
}
|
||||
// if there is a next buffer, make it the head
|
||||
if len(b.head.buf) == 0 && b.head != b.tail {
|
||||
b.head = b.head.next
|
||||
continue
|
||||
}
|
||||
|
||||
// if at least one byte has been copied, return
|
||||
if n > 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// if nothing was read, and there is nothing outstanding
|
||||
// check to see if the buffer is closed.
|
||||
if b.closed {
|
||||
err = io.EOF
|
||||
break
|
||||
}
|
||||
// out of buffers, wait for producer
|
||||
b.Cond.Wait()
|
||||
}
|
||||
return
|
||||
}
519 vendor/golang.org/x/crypto/ssh/certs.go generated vendored
@@ -1,519 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// These constants from [PROTOCOL.certkeys] represent the algorithm names
|
||||
// for certificate types supported by this package.
|
||||
const (
|
||||
CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
|
||||
CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
|
||||
CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
|
||||
CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
|
||||
CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
|
||||
CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com"
|
||||
)
|
||||
|
||||
// Certificate types distinguish between host and user
|
||||
// certificates. The values can be set in the CertType field of
|
||||
// Certificate.
|
||||
const (
|
||||
UserCert = 1
|
||||
HostCert = 2
|
||||
)
|
||||
|
||||
// Signature represents a cryptographic signature.
|
||||
type Signature struct {
|
||||
Format string
|
||||
Blob []byte
|
||||
}
|
||||
|
||||
// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that
|
||||
// a certificate does not expire.
|
||||
const CertTimeInfinity = 1<<64 - 1
|
||||
|
||||
// A Certificate represents an OpenSSH certificate as defined in
// [PROTOCOL.certkeys]?rev=1.8.
type Certificate struct {
	Nonce           []byte
	Key             PublicKey
	Serial          uint64
	CertType        uint32
	KeyId           string
	ValidPrincipals []string
	ValidAfter      uint64
	ValidBefore     uint64
	Permissions
	Reserved     []byte
	SignatureKey PublicKey
	Signature    *Signature
}
|
||||
|
||||
// genericCertData holds the key-independent part of the certificate data.
// Overall, certificates contain a nonce, public key fields and
// key-independent fields.
type genericCertData struct {
	Serial          uint64
	CertType        uint32
	KeyId           string
	ValidPrincipals []byte
	ValidAfter      uint64
	ValidBefore     uint64
	CriticalOptions []byte
	Extensions      []byte
	Reserved        []byte
	SignatureKey    []byte
	Signature       []byte
}
|
||||
|
||||
func marshalStringList(namelist []string) []byte {
|
||||
var to []byte
|
||||
for _, name := range namelist {
|
||||
s := struct{ N string }{name}
|
||||
to = append(to, Marshal(&s)...)
|
||||
}
|
||||
return to
|
||||
}
|
||||
|
||||
type optionsTuple struct {
|
||||
Key string
|
||||
Value []byte
|
||||
}
|
||||
|
||||
type optionsTupleValue struct {
|
||||
Value string
|
||||
}
|
||||
|
||||
// serialize a map of critical options or extensions
|
||||
// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
|
||||
// we need two length prefixes for a non-empty string value
|
||||
func marshalTuples(tups map[string]string) []byte {
|
||||
keys := make([]string, 0, len(tups))
|
||||
for key := range tups {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
var ret []byte
|
||||
for _, key := range keys {
|
||||
s := optionsTuple{Key: key}
|
||||
if value := tups[key]; len(value) > 0 {
|
||||
s.Value = Marshal(&optionsTupleValue{value})
|
||||
}
|
||||
ret = append(ret, Marshal(&s)...)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
|
||||
// we need two length prefixes for a non-empty option value
|
||||
func parseTuples(in []byte) (map[string]string, error) {
|
||||
tups := map[string]string{}
|
||||
var lastKey string
|
||||
var haveLastKey bool
|
||||
|
||||
for len(in) > 0 {
|
||||
var key, val, extra []byte
|
||||
var ok bool
|
||||
|
||||
if key, in, ok = parseString(in); !ok {
|
||||
return nil, errShortRead
|
||||
}
|
||||
keyStr := string(key)
|
||||
// according to [PROTOCOL.certkeys], the names must be in
|
||||
// lexical order.
|
||||
if haveLastKey && keyStr <= lastKey {
|
||||
return nil, fmt.Errorf("ssh: certificate options are not in lexical order")
|
||||
}
|
||||
lastKey, haveLastKey = keyStr, true
|
||||
// the next field is a data field, which if non-empty has a string embedded
|
||||
if val, in, ok = parseString(in); !ok {
|
||||
return nil, errShortRead
|
||||
}
|
||||
if len(val) > 0 {
|
||||
val, extra, ok = parseString(val)
|
||||
if !ok {
|
||||
return nil, errShortRead
|
||||
}
|
||||
if len(extra) > 0 {
|
||||
return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value")
|
||||
}
|
||||
tups[keyStr] = string(val)
|
||||
} else {
|
||||
tups[keyStr] = ""
|
||||
}
|
||||
}
|
||||
return tups, nil
|
||||
}
|
||||
|
||||
func parseCert(in []byte, privAlgo string) (*Certificate, error) {
|
||||
nonce, rest, ok := parseString(in)
|
||||
if !ok {
|
||||
return nil, errShortRead
|
||||
}
|
||||
|
||||
key, rest, err := parsePubKey(rest, privAlgo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var g genericCertData
|
||||
if err := Unmarshal(rest, &g); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &Certificate{
|
||||
Nonce: nonce,
|
||||
Key: key,
|
||||
Serial: g.Serial,
|
||||
CertType: g.CertType,
|
||||
KeyId: g.KeyId,
|
||||
ValidAfter: g.ValidAfter,
|
||||
ValidBefore: g.ValidBefore,
|
||||
}
|
||||
|
||||
for principals := g.ValidPrincipals; len(principals) > 0; {
|
||||
principal, rest, ok := parseString(principals)
|
||||
if !ok {
|
||||
return nil, errShortRead
|
||||
}
|
||||
c.ValidPrincipals = append(c.ValidPrincipals, string(principal))
|
||||
principals = rest
|
||||
}
|
||||
|
||||
c.CriticalOptions, err = parseTuples(g.CriticalOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.Extensions, err = parseTuples(g.Extensions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.Reserved = g.Reserved
|
||||
k, err := ParsePublicKey(g.SignatureKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.SignatureKey = k
|
||||
c.Signature, rest, ok = parseSignatureBody(g.Signature)
|
||||
if !ok || len(rest) > 0 {
|
||||
return nil, errors.New("ssh: signature parse error")
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
type openSSHCertSigner struct {
|
||||
pub *Certificate
|
||||
signer Signer
|
||||
}
|
||||
|
||||
// NewCertSigner returns a Signer that signs with the given Certificate, whose
// private key is held by signer. It returns an error if the public key in cert
// doesn't match the key used by signer.
func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
	if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) {
		return nil, errors.New("ssh: signer and cert have different public key")
	}

	return &openSSHCertSigner{cert, signer}, nil
}
|
||||
|
||||
func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
|
||||
return s.signer.Sign(rand, data)
|
||||
}
|
||||
|
||||
func (s *openSSHCertSigner) PublicKey() PublicKey {
|
||||
return s.pub
|
||||
}
|
||||
|
||||
const sourceAddressCriticalOption = "source-address"
|
||||
|
||||
// CertChecker does the work of verifying a certificate. Its methods
|
||||
// can be plugged into ClientConfig.HostKeyCallback and
|
||||
// ServerConfig.PublicKeyCallback. For the CertChecker to work,
|
||||
// minimally, the IsAuthority callback should be set.
|
||||
type CertChecker struct {
|
||||
// SupportedCriticalOptions lists the CriticalOptions that the
|
||||
// server application layer understands. These are only used
|
||||
// for user certificates.
|
||||
SupportedCriticalOptions []string
|
||||
|
||||
// IsUserAuthority should return true if the key is recognized as an
|
||||
// authority for the given user certificate. This allows for
|
||||
// certificates to be signed by other certificates. This must be set
|
||||
// if this CertChecker will be checking user certificates.
|
||||
IsUserAuthority func(auth PublicKey) bool
|
||||
|
||||
// IsHostAuthority should report whether the key is recognized as
|
||||
// an authority for this host. This allows for certificates to be
|
||||
// signed by other keys, and for those other keys to only be valid
|
||||
// signers for particular hostnames. This must be set if this
|
||||
// CertChecker will be checking host certificates.
|
||||
IsHostAuthority func(auth PublicKey, address string) bool
|
||||
|
||||
// Clock is used for verifying time stamps. If nil, time.Now
|
||||
// is used.
|
||||
Clock func() time.Time
|
||||
|
||||
// UserKeyFallback is called when CertChecker.Authenticate encounters a
|
||||
// public key that is not a certificate. It must implement validation
|
||||
// of user keys or else, if nil, all such keys are rejected.
|
||||
UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
|
||||
|
||||
// HostKeyFallback is called when CertChecker.CheckHostKey encounters a
|
||||
// public key that is not a certificate. It must implement host key
|
||||
// validation or else, if nil, all such keys are rejected.
|
||||
HostKeyFallback HostKeyCallback
|
||||
|
||||
// IsRevoked is called for each certificate so that revocation checking
|
||||
// can be implemented. It should return true if the given certificate
|
||||
// is revoked and false otherwise. If nil, no certificates are
|
||||
// considered to have been revoked.
|
||||
IsRevoked func(cert *Certificate) bool
|
||||
}
|
||||
|
||||
// CheckHostKey checks a host key certificate. This method can be
// plugged into ClientConfig.HostKeyCallback.
func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error {
	cert, ok := key.(*Certificate)
	if !ok {
		if c.HostKeyFallback != nil {
			return c.HostKeyFallback(addr, remote, key)
		}
		return errors.New("ssh: non-certificate host key")
	}
	if cert.CertType != HostCert {
		return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType)
	}
	if !c.IsHostAuthority(cert.SignatureKey, addr) {
		return fmt.Errorf("ssh: no authorities for hostname: %v", addr)
	}

	hostname, _, err := net.SplitHostPort(addr)
	if err != nil {
		return err
	}

	// Pass hostname only as principal for host certificates (consistent with OpenSSH)
	return c.CheckCert(hostname, cert)
}
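A sketch of plugging CheckHostKey into a client configuration so that host certificates signed by a known CA are accepted; caPub is assumed to be parsed elsewhere and the helper name is hypothetical:

package demo

import (
	"golang.org/x/crypto/ssh"
)

// clientConfigWithHostCA trusts host certificates signed by caPub.
func clientConfigWithHostCA(user string, auth []ssh.AuthMethod, caPub ssh.PublicKey) *ssh.ClientConfig {
	checker := &ssh.CertChecker{
		IsHostAuthority: func(authority ssh.PublicKey, address string) bool {
			return string(authority.Marshal()) == string(caPub.Marshal())
		},
	}
	return &ssh.ClientConfig{
		User:            user,
		Auth:            auth,
		HostKeyCallback: checker.CheckHostKey,
	}
}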
|
||||
|
||||
// Authenticate checks a user certificate. Authenticate can be used as
|
||||
// a value for ServerConfig.PublicKeyCallback.
|
||||
func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) {
|
||||
cert, ok := pubKey.(*Certificate)
|
||||
if !ok {
|
||||
if c.UserKeyFallback != nil {
|
||||
return c.UserKeyFallback(conn, pubKey)
|
||||
}
|
||||
return nil, errors.New("ssh: normal key pairs not accepted")
|
||||
}
|
||||
|
||||
if cert.CertType != UserCert {
|
||||
return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType)
|
||||
}
|
||||
if !c.IsUserAuthority(cert.SignatureKey) {
|
||||
return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority")
|
||||
}
|
||||
|
||||
if err := c.CheckCert(conn.User(), cert); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cert.Permissions, nil
|
||||
}
|
||||
|
||||
// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and
|
||||
// the signature of the certificate.
|
||||
func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
|
||||
	if c.IsRevoked != nil && c.IsRevoked(cert) {
		return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial)
	}
|
||||
|
||||
for opt, _ := range cert.CriticalOptions {
|
||||
// sourceAddressCriticalOption will be enforced by
|
||||
// serverAuthenticate
|
||||
if opt == sourceAddressCriticalOption {
|
||||
continue
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, supp := range c.SupportedCriticalOptions {
|
||||
if supp == opt {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cert.ValidPrincipals) > 0 {
|
||||
// By default, certs are valid for all users/hosts.
|
||||
found := false
|
||||
for _, p := range cert.ValidPrincipals {
|
||||
if p == principal {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals)
|
||||
}
|
||||
}
|
||||
|
||||
clock := c.Clock
|
||||
if clock == nil {
|
||||
clock = time.Now
|
||||
}
|
||||
|
||||
unixNow := clock().Unix()
|
||||
if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) {
|
||||
return fmt.Errorf("ssh: cert is not yet valid")
|
||||
}
|
||||
if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {
|
||||
return fmt.Errorf("ssh: cert has expired")
|
||||
}
|
||||
if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {
|
||||
return fmt.Errorf("ssh: certificate signature does not verify")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignCert sets c.SignatureKey to the authority's public key and stores a
// Signature, by authority, in the certificate.
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
	c.Nonce = make([]byte, 32)
	if _, err := io.ReadFull(rand, c.Nonce); err != nil {
		return err
	}
	c.SignatureKey = authority.PublicKey()

	sig, err := authority.Sign(rand, c.bytesForSigning())
	if err != nil {
		return err
	}
	c.Signature = sig
	return nil
}
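A hedged end-to-end sketch: mint a user certificate with SignCert and validate it with CheckCert. Keys, key ID and principal are placeholders; IsUserAuthority is shown only because Authenticate and CheckHostKey consult it, while CheckCert itself checks principals, validity window and the signature:

package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ed25519"
	"golang.org/x/crypto/ssh"
)

func mustSigner() ssh.Signer {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	s, err := ssh.NewSignerFromKey(priv)
	if err != nil {
		log.Fatal(err)
	}
	return s
}

func main() {
	ca := mustSigner()   // certificate authority
	user := mustSigner() // key being certified

	cert := &ssh.Certificate{
		Key:             user.PublicKey(),
		CertType:        ssh.UserCert,
		KeyId:           "demo",
		ValidPrincipals: []string{"example-user"}, // placeholder
		ValidAfter:      0,
		ValidBefore:     ssh.CertTimeInfinity,
	}
	if err := cert.SignCert(rand.Reader, ca); err != nil {
		log.Fatal(err)
	}

	checker := &ssh.CertChecker{
		IsUserAuthority: func(auth ssh.PublicKey) bool {
			return string(auth.Marshal()) == string(ca.PublicKey().Marshal())
		},
	}
	if err := checker.CheckCert("example-user", cert); err != nil {
		log.Fatal(err)
	}
	fmt.Println("certificate verified")
}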
|
||||
|
||||
var certAlgoNames = map[string]string{
|
||||
KeyAlgoRSA: CertAlgoRSAv01,
|
||||
KeyAlgoDSA: CertAlgoDSAv01,
|
||||
KeyAlgoECDSA256: CertAlgoECDSA256v01,
|
||||
KeyAlgoECDSA384: CertAlgoECDSA384v01,
|
||||
KeyAlgoECDSA521: CertAlgoECDSA521v01,
|
||||
KeyAlgoED25519: CertAlgoED25519v01,
|
||||
}
|
||||
|
||||
// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
|
||||
// Panics if a non-certificate algorithm is passed.
|
||||
func certToPrivAlgo(algo string) string {
|
||||
for privAlgo, pubAlgo := range certAlgoNames {
|
||||
if pubAlgo == algo {
|
||||
return privAlgo
|
||||
}
|
||||
}
|
||||
panic("unknown cert algorithm")
|
||||
}
|
||||
|
||||
func (cert *Certificate) bytesForSigning() []byte {
|
||||
c2 := *cert
|
||||
c2.Signature = nil
|
||||
out := c2.Marshal()
|
||||
// Drop trailing signature length.
|
||||
return out[:len(out)-4]
|
||||
}
|
||||
|
||||
// Marshal serializes c into OpenSSH's wire format. It is part of the
|
||||
// PublicKey interface.
|
||||
func (c *Certificate) Marshal() []byte {
|
||||
generic := genericCertData{
|
||||
Serial: c.Serial,
|
||||
CertType: c.CertType,
|
||||
KeyId: c.KeyId,
|
||||
ValidPrincipals: marshalStringList(c.ValidPrincipals),
|
||||
ValidAfter: uint64(c.ValidAfter),
|
||||
ValidBefore: uint64(c.ValidBefore),
|
||||
CriticalOptions: marshalTuples(c.CriticalOptions),
|
||||
Extensions: marshalTuples(c.Extensions),
|
||||
Reserved: c.Reserved,
|
||||
SignatureKey: c.SignatureKey.Marshal(),
|
||||
}
|
||||
if c.Signature != nil {
|
||||
generic.Signature = Marshal(c.Signature)
|
||||
}
|
||||
genericBytes := Marshal(&generic)
|
||||
keyBytes := c.Key.Marshal()
|
||||
_, keyBytes, _ = parseString(keyBytes)
|
||||
prefix := Marshal(&struct {
|
||||
Name string
|
||||
Nonce []byte
|
||||
Key []byte `ssh:"rest"`
|
||||
}{c.Type(), c.Nonce, keyBytes})
|
||||
|
||||
result := make([]byte, 0, len(prefix)+len(genericBytes))
|
||||
result = append(result, prefix...)
|
||||
result = append(result, genericBytes...)
|
||||
return result
|
||||
}
|
||||
|
||||
// Type returns the key name. It is part of the PublicKey interface.
|
||||
func (c *Certificate) Type() string {
|
||||
algo, ok := certAlgoNames[c.Key.Type()]
|
||||
if !ok {
|
||||
panic("unknown cert key type " + c.Key.Type())
|
||||
}
|
||||
return algo
|
||||
}
|
||||
|
||||
// Verify verifies a signature against the certificate's public
|
||||
// key. It is part of the PublicKey interface.
|
||||
func (c *Certificate) Verify(data []byte, sig *Signature) error {
|
||||
return c.Key.Verify(data, sig)
|
||||
}
|
||||
|
||||
func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
|
||||
format, in, ok := parseString(in)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
out = &Signature{
|
||||
Format: string(format),
|
||||
}
|
||||
|
||||
if out.Blob, in, ok = parseString(in); !ok {
|
||||
return
|
||||
}
|
||||
|
||||
return out, in, ok
|
||||
}
|
||||
|
||||
func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) {
|
||||
sigBytes, rest, ok := parseString(in)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
out, trailing, ok := parseSignatureBody(sigBytes)
|
||||
if !ok || len(trailing) > 0 {
|
||||
return nil, nil, false
|
||||
}
|
||||
return
|
||||
}
|
633 vendor/golang.org/x/crypto/ssh/channel.go generated vendored
@@ -1,633 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
	minPacketLength = 9
	// channelMaxPacket contains the maximum number of bytes that will be
	// sent in a single packet. As per RFC 4253, section 6.1, 32k is also
	// the minimum.
	channelMaxPacket = 1 << 15
	// We follow OpenSSH here.
	channelWindowSize = 64 * channelMaxPacket
)
|
||||
|
||||
// NewChannel represents an incoming request to a channel. It must either be
// accepted for use by calling Accept, or rejected by calling Reject.
type NewChannel interface {
	// Accept accepts the channel creation request. It returns the Channel
	// and a Go channel containing SSH requests. The Go channel must be
	// serviced otherwise the Channel will hang.
	Accept() (Channel, <-chan *Request, error)

	// Reject rejects the channel creation request. After calling
	// this, no other methods on the Channel may be called.
	Reject(reason RejectionReason, message string) error

	// ChannelType returns the type of the channel, as supplied by the
	// client.
	ChannelType() string

	// ExtraData returns the arbitrary payload for this channel, as supplied
	// by the client. This data is specific to the channel type.
	ExtraData() []byte
}
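A sketch of how a server-side consumer typically drains these NewChannel values (accept "session" channels, reject everything else); the echo handler and function name are illustrative only:

package demo

import (
	"io"

	"golang.org/x/crypto/ssh"
)

// handleChannels drains the channel stream of an established server connection.
func handleChannels(chans <-chan ssh.NewChannel) {
	for newChannel := range chans {
		if newChannel.ChannelType() != "session" {
			newChannel.Reject(ssh.UnknownChannelType, "only session channels are supported")
			continue
		}
		channel, requests, err := newChannel.Accept()
		if err != nil {
			continue
		}
		// Requests must be serviced or the channel will stall.
		go ssh.DiscardRequests(requests)
		go func(ch ssh.Channel) {
			defer ch.Close()
			io.Copy(ch, ch) // echo whatever the client sends
		}(channel)
	}
}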
|
||||
|
||||
// A Channel is an ordered, reliable, flow-controlled, duplex stream
|
||||
// that is multiplexed over an SSH connection.
|
||||
type Channel interface {
|
||||
// Read reads up to len(data) bytes from the channel.
|
||||
Read(data []byte) (int, error)
|
||||
|
||||
// Write writes len(data) bytes to the channel.
|
||||
Write(data []byte) (int, error)
|
||||
|
||||
// Close signals end of channel use. No data may be sent after this
|
||||
// call.
|
||||
Close() error
|
||||
|
||||
// CloseWrite signals the end of sending in-band
|
||||
// data. Requests may still be sent, and the other side may
|
||||
// still send data
|
||||
CloseWrite() error
|
||||
|
||||
// SendRequest sends a channel request. If wantReply is true,
|
||||
// it will wait for a reply and return the result as a
|
||||
// boolean, otherwise the return value will be false. Channel
|
||||
// requests are out-of-band messages so they may be sent even
|
||||
// if the data stream is closed or blocked by flow control.
|
||||
// If the channel is closed before a reply is returned, io.EOF
|
||||
// is returned.
|
||||
SendRequest(name string, wantReply bool, payload []byte) (bool, error)
|
||||
|
||||
// Stderr returns an io.ReadWriter that writes to this channel
|
||||
// with the extended data type set to stderr. Stderr may
|
||||
// safely be read and written from a different goroutine than
|
||||
// Read and Write respectively.
|
||||
Stderr() io.ReadWriter
|
||||
}
|
||||
|
||||
// Request is a request sent outside of the normal stream of
|
||||
// data. Requests can either be specific to an SSH channel, or they
|
||||
// can be global.
|
||||
type Request struct {
|
||||
Type string
|
||||
WantReply bool
|
||||
Payload []byte
|
||||
|
||||
ch *channel
|
||||
mux *mux
|
||||
}
|
||||
|
||||
// Reply sends a response to a request. It must be called for all requests
|
||||
// where WantReply is true and is a no-op otherwise. The payload argument is
|
||||
// ignored for replies to channel-specific requests.
|
||||
func (r *Request) Reply(ok bool, payload []byte) error {
|
||||
if !r.WantReply {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.ch == nil {
|
||||
return r.mux.ackRequest(ok, payload)
|
||||
}
|
||||
|
||||
return r.ch.ackRequest(ok)
|
||||
}
|
||||
|
||||
// RejectionReason is an enumeration used when rejecting channel creation
|
||||
// requests. See RFC 4254, section 5.1.
|
||||
type RejectionReason uint32
|
||||
|
||||
const (
|
||||
Prohibited RejectionReason = iota + 1
|
||||
ConnectionFailed
|
||||
UnknownChannelType
|
||||
ResourceShortage
|
||||
)
|
||||
|
||||
// String converts the rejection reason to human readable form.
|
||||
func (r RejectionReason) String() string {
|
||||
switch r {
|
||||
case Prohibited:
|
||||
return "administratively prohibited"
|
||||
case ConnectionFailed:
|
||||
return "connect failed"
|
||||
case UnknownChannelType:
|
||||
return "unknown channel type"
|
||||
case ResourceShortage:
|
||||
return "resource shortage"
|
||||
}
|
||||
return fmt.Sprintf("unknown reason %d", int(r))
|
||||
}
|
||||
|
||||
func min(a uint32, b int) uint32 {
|
||||
if a < uint32(b) {
|
||||
return a
|
||||
}
|
||||
return uint32(b)
|
||||
}
|
||||
|
||||
type channelDirection uint8
|
||||
|
||||
const (
|
||||
channelInbound channelDirection = iota
|
||||
channelOutbound
|
||||
)
|
||||
|
||||
// channel is an implementation of the Channel interface that works
|
||||
// with the mux class.
|
||||
type channel struct {
|
||||
// R/O after creation
|
||||
chanType string
|
||||
extraData []byte
|
||||
localId, remoteId uint32
|
||||
|
||||
// maxIncomingPayload and maxRemotePayload are the maximum
|
||||
// payload sizes of normal and extended data packets for
|
||||
// receiving and sending, respectively. The wire packet will
|
||||
// be 9 or 13 bytes larger (excluding encryption overhead).
|
||||
maxIncomingPayload uint32
|
||||
maxRemotePayload uint32
|
||||
|
||||
mux *mux
|
||||
|
||||
// decided is set to true if an accept or reject message has been sent
|
||||
// (for outbound channels) or received (for inbound channels).
|
||||
decided bool
|
||||
|
||||
// direction contains either channelOutbound, for channels created
|
||||
// locally, or channelInbound, for channels created by the peer.
|
||||
direction channelDirection
|
||||
|
||||
// Pending internal channel messages.
|
||||
msg chan interface{}
|
||||
|
||||
// Since requests have no ID, there can be only one request
|
||||
// with WantReply=true outstanding. This lock is held by a
|
||||
// goroutine that has such an outgoing request pending.
|
||||
sentRequestMu sync.Mutex
|
||||
|
||||
incomingRequests chan *Request
|
||||
|
||||
sentEOF bool
|
||||
|
||||
// thread-safe data
|
||||
remoteWin window
|
||||
pending *buffer
|
||||
extPending *buffer
|
||||
|
||||
// windowMu protects myWindow, the flow-control window.
|
||||
windowMu sync.Mutex
|
||||
myWindow uint32
|
||||
|
||||
// writeMu serializes calls to mux.conn.writePacket() and
|
||||
// protects sentClose and packetPool. This mutex must be
|
||||
// different from windowMu, as writePacket can block if there
|
||||
// is a key exchange pending.
|
||||
writeMu sync.Mutex
|
||||
sentClose bool
|
||||
|
||||
// packetPool has a buffer for each extended channel ID to
|
||||
// save allocations during writes.
|
||||
packetPool map[uint32][]byte
|
||||
}
|
||||
|
||||
// writePacket sends a packet. If the packet is a channel close, it updates
|
||||
// sentClose. This method takes the lock c.writeMu.
|
||||
func (c *channel) writePacket(packet []byte) error {
|
||||
c.writeMu.Lock()
|
||||
if c.sentClose {
|
||||
c.writeMu.Unlock()
|
||||
return io.EOF
|
||||
}
|
||||
c.sentClose = (packet[0] == msgChannelClose)
|
||||
err := c.mux.conn.writePacket(packet)
|
||||
c.writeMu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *channel) sendMessage(msg interface{}) error {
|
||||
if debugMux {
|
||||
log.Printf("send(%d): %#v", c.mux.chanList.offset, msg)
|
||||
}
|
||||
|
||||
p := Marshal(msg)
|
||||
binary.BigEndian.PutUint32(p[1:], c.remoteId)
|
||||
return c.writePacket(p)
|
||||
}
|
||||
|
||||
// WriteExtended writes data to a specific extended stream. These streams are
|
||||
// used, for example, for stderr.
|
||||
func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
|
||||
if c.sentEOF {
|
||||
return 0, io.EOF
|
||||
}
|
||||
// 1 byte message type, 4 bytes remoteId, 4 bytes data length
|
||||
opCode := byte(msgChannelData)
|
||||
headerLength := uint32(9)
|
||||
if extendedCode > 0 {
|
||||
headerLength += 4
|
||||
opCode = msgChannelExtendedData
|
||||
}
|
||||
|
||||
c.writeMu.Lock()
|
||||
packet := c.packetPool[extendedCode]
|
||||
// We don't remove the buffer from packetPool, so
|
||||
// WriteExtended calls from different goroutines will be
|
||||
// flagged as errors by the race detector.
|
||||
c.writeMu.Unlock()
|
||||
|
||||
for len(data) > 0 {
|
||||
space := min(c.maxRemotePayload, len(data))
|
||||
if space, err = c.remoteWin.reserve(space); err != nil {
|
||||
return n, err
|
||||
}
|
||||
if want := headerLength + space; uint32(cap(packet)) < want {
|
||||
packet = make([]byte, want)
|
||||
} else {
|
||||
packet = packet[:want]
|
||||
}
|
||||
|
||||
todo := data[:space]
|
||||
|
||||
packet[0] = opCode
|
||||
binary.BigEndian.PutUint32(packet[1:], c.remoteId)
|
||||
if extendedCode > 0 {
|
||||
binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
|
||||
}
|
||||
binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
|
||||
copy(packet[headerLength:], todo)
|
||||
if err = c.writePacket(packet); err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
n += len(todo)
|
||||
data = data[len(todo):]
|
||||
}
|
||||
|
||||
c.writeMu.Lock()
|
||||
c.packetPool[extendedCode] = packet
|
||||
c.writeMu.Unlock()
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *channel) handleData(packet []byte) error {
|
||||
headerLen := 9
|
||||
isExtendedData := packet[0] == msgChannelExtendedData
|
||||
if isExtendedData {
|
||||
headerLen = 13
|
||||
}
|
||||
if len(packet) < headerLen {
|
||||
// malformed data packet
|
||||
return parseError(packet[0])
|
||||
}
|
||||
|
||||
var extended uint32
|
||||
if isExtendedData {
|
||||
extended = binary.BigEndian.Uint32(packet[5:])
|
||||
}
|
||||
|
||||
length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen])
|
||||
if length == 0 {
|
||||
return nil
|
||||
}
|
||||
if length > c.maxIncomingPayload {
|
||||
// TODO(hanwen): should send Disconnect?
|
||||
return errors.New("ssh: incoming packet exceeds maximum payload size")
|
||||
}
|
||||
|
||||
data := packet[headerLen:]
|
||||
if length != uint32(len(data)) {
|
||||
return errors.New("ssh: wrong packet length")
|
||||
}
|
||||
|
||||
c.windowMu.Lock()
|
||||
if c.myWindow < length {
|
||||
c.windowMu.Unlock()
|
||||
// TODO(hanwen): should send Disconnect with reason?
|
||||
return errors.New("ssh: remote side wrote too much")
|
||||
}
|
||||
c.myWindow -= length
|
||||
c.windowMu.Unlock()
|
||||
|
||||
if extended == 1 {
|
||||
c.extPending.write(data)
|
||||
} else if extended > 0 {
|
||||
// discard other extended data.
|
||||
} else {
|
||||
c.pending.write(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *channel) adjustWindow(n uint32) error {
|
||||
c.windowMu.Lock()
|
||||
// Since myWindow is managed on our side, and can never exceed
|
||||
// the initial window setting, we don't worry about overflow.
|
||||
c.myWindow += uint32(n)
|
||||
c.windowMu.Unlock()
|
||||
return c.sendMessage(windowAdjustMsg{
|
||||
AdditionalBytes: uint32(n),
|
||||
})
|
||||
}
|
||||
|
||||
func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) {
|
||||
switch extended {
|
||||
case 1:
|
||||
n, err = c.extPending.Read(data)
|
||||
case 0:
|
||||
n, err = c.pending.Read(data)
|
||||
default:
|
||||
return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended)
|
||||
}
|
||||
|
||||
if n > 0 {
|
||||
err = c.adjustWindow(uint32(n))
|
||||
		// adjustWindow can return io.EOF if the remote
|
||||
		// peer has closed the connection; however, we want to
|
||||
// defer forwarding io.EOF to the caller of Read until
|
||||
// the buffer has been drained.
|
||||
if n > 0 && err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *channel) close() {
|
||||
c.pending.eof()
|
||||
c.extPending.eof()
|
||||
close(c.msg)
|
||||
close(c.incomingRequests)
|
||||
c.writeMu.Lock()
|
||||
// This is not necessary for a normal channel teardown, but if
|
||||
// there was another error, it is.
|
||||
c.sentClose = true
|
||||
c.writeMu.Unlock()
|
||||
// Unblock writers.
|
||||
c.remoteWin.close()
|
||||
}
|
||||
|
||||
// responseMessageReceived is called when a success or failure message is
|
||||
// received on a channel to check that such a message is reasonable for the
|
||||
// given channel.
|
||||
func (c *channel) responseMessageReceived() error {
|
||||
if c.direction == channelInbound {
|
||||
return errors.New("ssh: channel response message received on inbound channel")
|
||||
}
|
||||
if c.decided {
|
||||
return errors.New("ssh: duplicate response received for channel")
|
||||
}
|
||||
c.decided = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *channel) handlePacket(packet []byte) error {
|
||||
switch packet[0] {
|
||||
case msgChannelData, msgChannelExtendedData:
|
||||
return c.handleData(packet)
|
||||
case msgChannelClose:
|
||||
c.sendMessage(channelCloseMsg{PeersId: c.remoteId})
|
||||
c.mux.chanList.remove(c.localId)
|
||||
c.close()
|
||||
return nil
|
||||
case msgChannelEOF:
|
||||
		// RFC 4254 is silent on how EOF affects dataExt messages, but
|
||||
// it is logical to signal EOF at the same time.
|
||||
c.extPending.eof()
|
||||
c.pending.eof()
|
||||
return nil
|
||||
}
|
||||
|
||||
decoded, err := decode(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch msg := decoded.(type) {
|
||||
case *channelOpenFailureMsg:
|
||||
if err := c.responseMessageReceived(); err != nil {
|
||||
return err
|
||||
}
|
||||
c.mux.chanList.remove(msg.PeersId)
|
||||
c.msg <- msg
|
||||
case *channelOpenConfirmMsg:
|
||||
if err := c.responseMessageReceived(); err != nil {
|
||||
return err
|
||||
}
|
||||
if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
|
||||
return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize)
|
||||
}
|
||||
c.remoteId = msg.MyId
|
||||
c.maxRemotePayload = msg.MaxPacketSize
|
||||
c.remoteWin.add(msg.MyWindow)
|
||||
c.msg <- msg
|
||||
case *windowAdjustMsg:
|
||||
if !c.remoteWin.add(msg.AdditionalBytes) {
|
||||
return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes)
|
||||
}
|
||||
case *channelRequestMsg:
|
||||
req := Request{
|
||||
Type: msg.Request,
|
||||
WantReply: msg.WantReply,
|
||||
Payload: msg.RequestSpecificData,
|
||||
ch: c,
|
||||
}
|
||||
|
||||
c.incomingRequests <- &req
|
||||
default:
|
||||
c.msg <- msg
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel {
|
||||
ch := &channel{
|
||||
remoteWin: window{Cond: newCond()},
|
||||
myWindow: channelWindowSize,
|
||||
pending: newBuffer(),
|
||||
extPending: newBuffer(),
|
||||
direction: direction,
|
||||
incomingRequests: make(chan *Request, chanSize),
|
||||
msg: make(chan interface{}, chanSize),
|
||||
chanType: chanType,
|
||||
extraData: extraData,
|
||||
mux: m,
|
||||
packetPool: make(map[uint32][]byte),
|
||||
}
|
||||
ch.localId = m.chanList.add(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
var errUndecided = errors.New("ssh: must Accept or Reject channel")
|
||||
var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once")
|
||||
|
||||
type extChannel struct {
|
||||
code uint32
|
||||
ch *channel
|
||||
}
|
||||
|
||||
func (e *extChannel) Write(data []byte) (n int, err error) {
|
||||
return e.ch.WriteExtended(data, e.code)
|
||||
}
|
||||
|
||||
func (e *extChannel) Read(data []byte) (n int, err error) {
|
||||
return e.ch.ReadExtended(data, e.code)
|
||||
}
|
||||
|
||||
func (c *channel) Accept() (Channel, <-chan *Request, error) {
|
||||
if c.decided {
|
||||
return nil, nil, errDecidedAlready
|
||||
}
|
||||
c.maxIncomingPayload = channelMaxPacket
|
||||
confirm := channelOpenConfirmMsg{
|
||||
PeersId: c.remoteId,
|
||||
MyId: c.localId,
|
||||
MyWindow: c.myWindow,
|
||||
MaxPacketSize: c.maxIncomingPayload,
|
||||
}
|
||||
c.decided = true
|
||||
if err := c.sendMessage(confirm); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return c, c.incomingRequests, nil
|
||||
}
|
||||
|
||||
func (ch *channel) Reject(reason RejectionReason, message string) error {
|
||||
if ch.decided {
|
||||
return errDecidedAlready
|
||||
}
|
||||
reject := channelOpenFailureMsg{
|
||||
PeersId: ch.remoteId,
|
||||
Reason: reason,
|
||||
Message: message,
|
||||
Language: "en",
|
||||
}
|
||||
ch.decided = true
|
||||
return ch.sendMessage(reject)
|
||||
}
|
||||
|
||||
func (ch *channel) Read(data []byte) (int, error) {
|
||||
if !ch.decided {
|
||||
return 0, errUndecided
|
||||
}
|
||||
return ch.ReadExtended(data, 0)
|
||||
}
|
||||
|
||||
func (ch *channel) Write(data []byte) (int, error) {
|
||||
if !ch.decided {
|
||||
return 0, errUndecided
|
||||
}
|
||||
return ch.WriteExtended(data, 0)
|
||||
}
|
||||
|
||||
func (ch *channel) CloseWrite() error {
|
||||
if !ch.decided {
|
||||
return errUndecided
|
||||
}
|
||||
ch.sentEOF = true
|
||||
return ch.sendMessage(channelEOFMsg{
|
||||
PeersId: ch.remoteId})
|
||||
}
|
||||
|
||||
func (ch *channel) Close() error {
|
||||
if !ch.decided {
|
||||
return errUndecided
|
||||
}
|
||||
|
||||
return ch.sendMessage(channelCloseMsg{
|
||||
PeersId: ch.remoteId})
|
||||
}
|
||||
|
||||
// Extended returns an io.ReadWriter that sends and receives data on the given
|
||||
// SSH extended stream. Such streams are used, for example, for stderr.
|
||||
func (ch *channel) Extended(code uint32) io.ReadWriter {
|
||||
if !ch.decided {
|
||||
return nil
|
||||
}
|
||||
return &extChannel{code, ch}
|
||||
}
|
||||
|
||||
func (ch *channel) Stderr() io.ReadWriter {
|
||||
return ch.Extended(1)
|
||||
}
|
||||
|
||||
func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
|
||||
if !ch.decided {
|
||||
return false, errUndecided
|
||||
}
|
||||
|
||||
if wantReply {
|
||||
ch.sentRequestMu.Lock()
|
||||
defer ch.sentRequestMu.Unlock()
|
||||
}
|
||||
|
||||
msg := channelRequestMsg{
|
||||
PeersId: ch.remoteId,
|
||||
Request: name,
|
||||
WantReply: wantReply,
|
||||
RequestSpecificData: payload,
|
||||
}
|
||||
|
||||
if err := ch.sendMessage(msg); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if wantReply {
|
||||
m, ok := (<-ch.msg)
|
||||
if !ok {
|
||||
return false, io.EOF
|
||||
}
|
||||
switch m.(type) {
|
||||
case *channelRequestFailureMsg:
|
||||
return false, nil
|
||||
case *channelRequestSuccessMsg:
|
||||
return true, nil
|
||||
default:
|
||||
return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m)
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ackRequest either sends an ack or nack to the channel request.
|
||||
func (ch *channel) ackRequest(ok bool) error {
|
||||
if !ch.decided {
|
||||
return errUndecided
|
||||
}
|
||||
|
||||
var msg interface{}
|
||||
if !ok {
|
||||
msg = channelRequestFailureMsg{
|
||||
PeersId: ch.remoteId,
|
||||
}
|
||||
} else {
|
||||
msg = channelRequestSuccessMsg{
|
||||
PeersId: ch.remoteId,
|
||||
}
|
||||
}
|
||||
return ch.sendMessage(msg)
|
||||
}
|
||||
|
||||
func (ch *channel) ChannelType() string {
|
||||
return ch.chanType
|
||||
}
|
||||
|
||||
func (ch *channel) ExtraData() []byte {
|
||||
return ch.extraData
|
||||
}
|
629 vendor/golang.org/x/crypto/ssh/cipher.go (generated, vendored)
@ -1,629 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/des"
|
||||
"crypto/rc4"
|
||||
"crypto/subtle"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
const (
|
||||
packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher.
|
||||
|
||||
// RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations
|
||||
// MUST be able to process (plus a few more kilobytes for padding and mac). The RFC
|
||||
// indicates implementations SHOULD be able to handle larger packet sizes, but then
|
||||
// waffles on about reasonable limits.
|
||||
//
|
||||
// OpenSSH caps their maxPacket at 256kB so we choose to do
|
||||
// the same. maxPacket is also used to ensure that uint32
|
||||
// length fields do not overflow, so it should remain well
|
||||
// below 4G.
|
||||
maxPacket = 256 * 1024
|
||||
)
|
||||
|
||||
// noneCipher implements cipher.Stream and provides no encryption. It is used
|
||||
// by the transport before the first key-exchange.
|
||||
type noneCipher struct{}
|
||||
|
||||
func (c noneCipher) XORKeyStream(dst, src []byte) {
|
||||
copy(dst, src)
|
||||
}
|
||||
|
||||
func newAESCTR(key, iv []byte) (cipher.Stream, error) {
|
||||
c, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cipher.NewCTR(c, iv), nil
|
||||
}
|
||||
|
||||
func newRC4(key, iv []byte) (cipher.Stream, error) {
|
||||
return rc4.NewCipher(key)
|
||||
}
|
||||
|
||||
type streamCipherMode struct {
|
||||
keySize int
|
||||
ivSize int
|
||||
skip int
|
||||
createFunc func(key, iv []byte) (cipher.Stream, error)
|
||||
}
|
||||
|
||||
func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
|
||||
if len(key) < c.keySize {
|
||||
panic("ssh: key length too small for cipher")
|
||||
}
|
||||
if len(iv) < c.ivSize {
|
||||
panic("ssh: iv too small for cipher")
|
||||
}
|
||||
|
||||
stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var streamDump []byte
|
||||
if c.skip > 0 {
|
||||
streamDump = make([]byte, 512)
|
||||
}
|
||||
|
||||
for remainingToDump := c.skip; remainingToDump > 0; {
|
||||
dumpThisTime := remainingToDump
|
||||
if dumpThisTime > len(streamDump) {
|
||||
dumpThisTime = len(streamDump)
|
||||
}
|
||||
stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
|
||||
remainingToDump -= dumpThisTime
|
||||
}
|
||||
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
// cipherModes documents properties of supported ciphers. Ciphers not included
|
||||
// are not supported and will not be negotiated, even if explicitly requested in
|
||||
// ClientConfig.Crypto.Ciphers.
|
||||
var cipherModes = map[string]*streamCipherMode{
|
||||
// Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
|
||||
// are defined in the order specified in the RFC.
|
||||
"aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
|
||||
"aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
|
||||
"aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
|
||||
|
||||
// Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
|
||||
// They are defined in the order specified in the RFC.
|
||||
"arcfour128": {16, 0, 1536, newRC4},
|
||||
"arcfour256": {32, 0, 1536, newRC4},
|
||||
|
||||
// Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
|
||||
// Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
|
||||
// RC4) has problems with weak keys, and should be used with caution."
|
||||
// RFC4345 introduces improved versions of Arcfour.
|
||||
"arcfour": {16, 0, 0, newRC4},
|
||||
|
||||
// AES-GCM is not a stream cipher, so it is constructed with a
|
||||
// special case. If we add any more non-stream ciphers, we
|
||||
	// should invest in a cleaner way to do this.
|
||||
gcmCipherID: {16, 12, 0, nil},
|
||||
|
||||
// CBC mode is insecure and so is not included in the default config.
|
||||
// (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
|
||||
// needed, it's possible to specify a custom Config to enable it.
|
||||
// You should expect that an active attacker can recover plaintext if
|
||||
// you do.
|
||||
aes128cbcID: {16, aes.BlockSize, 0, nil},
|
||||
|
||||
// 3des-cbc is insecure and is disabled by default.
|
||||
tripledescbcID: {24, des.BlockSize, 0, nil},
|
||||
}
|
||||
|
||||
// prefixLen is the length of the packet prefix that contains the packet length
|
||||
// and number of padding bytes.
|
||||
const prefixLen = 5
|
||||
|
||||
// streamPacketCipher is a packetCipher using a stream cipher.
|
||||
type streamPacketCipher struct {
|
||||
mac hash.Hash
|
||||
cipher cipher.Stream
|
||||
etm bool
|
||||
|
||||
// The following members are to avoid per-packet allocations.
|
||||
prefix [prefixLen]byte
|
||||
seqNumBytes [4]byte
|
||||
padding [2 * packetSizeMultiple]byte
|
||||
packetData []byte
|
||||
macResult []byte
|
||||
}
|
||||
|
||||
// readPacket reads and decrypts a single packet from the reader argument.
|
||||
func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
|
||||
if _, err := io.ReadFull(r, s.prefix[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var encryptedPaddingLength [1]byte
|
||||
if s.mac != nil && s.etm {
|
||||
copy(encryptedPaddingLength[:], s.prefix[4:5])
|
||||
s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
|
||||
} else {
|
||||
s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
|
||||
}
|
||||
|
||||
length := binary.BigEndian.Uint32(s.prefix[0:4])
|
||||
paddingLength := uint32(s.prefix[4])
|
||||
|
||||
var macSize uint32
|
||||
if s.mac != nil {
|
||||
s.mac.Reset()
|
||||
binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
|
||||
s.mac.Write(s.seqNumBytes[:])
|
||||
if s.etm {
|
||||
s.mac.Write(s.prefix[:4])
|
||||
s.mac.Write(encryptedPaddingLength[:])
|
||||
} else {
|
||||
s.mac.Write(s.prefix[:])
|
||||
}
|
||||
macSize = uint32(s.mac.Size())
|
||||
}
|
||||
|
||||
if length <= paddingLength+1 {
|
||||
return nil, errors.New("ssh: invalid packet length, packet too small")
|
||||
}
|
||||
|
||||
if length > maxPacket {
|
||||
return nil, errors.New("ssh: invalid packet length, packet too large")
|
||||
}
|
||||
|
||||
// the maxPacket check above ensures that length-1+macSize
|
||||
// does not overflow.
|
||||
if uint32(cap(s.packetData)) < length-1+macSize {
|
||||
s.packetData = make([]byte, length-1+macSize)
|
||||
} else {
|
||||
s.packetData = s.packetData[:length-1+macSize]
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(r, s.packetData); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mac := s.packetData[length-1:]
|
||||
data := s.packetData[:length-1]
|
||||
|
||||
if s.mac != nil && s.etm {
|
||||
s.mac.Write(data)
|
||||
}
|
||||
|
||||
s.cipher.XORKeyStream(data, data)
|
||||
|
||||
if s.mac != nil {
|
||||
if !s.etm {
|
||||
s.mac.Write(data)
|
||||
}
|
||||
s.macResult = s.mac.Sum(s.macResult[:0])
|
||||
if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
|
||||
return nil, errors.New("ssh: MAC failure")
|
||||
}
|
||||
}
|
||||
|
||||
return s.packetData[:length-paddingLength-1], nil
|
||||
}
|
||||
|
||||
// writePacket encrypts and sends a packet of data to the writer argument
|
||||
func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
|
||||
if len(packet) > maxPacket {
|
||||
return errors.New("ssh: packet too large")
|
||||
}
|
||||
|
||||
aadlen := 0
|
||||
if s.mac != nil && s.etm {
|
||||
// packet length is not encrypted for EtM modes
|
||||
aadlen = 4
|
||||
}
|
||||
|
||||
paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple
|
||||
if paddingLength < 4 {
|
||||
paddingLength += packetSizeMultiple
|
||||
}
|
||||
|
||||
length := len(packet) + 1 + paddingLength
|
||||
binary.BigEndian.PutUint32(s.prefix[:], uint32(length))
|
||||
s.prefix[4] = byte(paddingLength)
|
||||
padding := s.padding[:paddingLength]
|
||||
if _, err := io.ReadFull(rand, padding); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.mac != nil {
|
||||
s.mac.Reset()
|
||||
binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
|
||||
s.mac.Write(s.seqNumBytes[:])
|
||||
|
||||
if s.etm {
|
||||
// For EtM algorithms, the packet length must stay unencrypted,
|
||||
// but the following data (padding length) must be encrypted
|
||||
s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
|
||||
}
|
||||
|
||||
s.mac.Write(s.prefix[:])
|
||||
|
||||
if !s.etm {
|
||||
			// For non-EtM algorithms, the MAC is computed over the unencrypted data
|
||||
s.mac.Write(packet)
|
||||
s.mac.Write(padding)
|
||||
}
|
||||
}
|
||||
|
||||
if !(s.mac != nil && s.etm) {
|
||||
// For EtM algorithms, the padding length has already been encrypted
|
||||
// and the packet length must remain unencrypted
|
||||
s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
|
||||
}
|
||||
|
||||
s.cipher.XORKeyStream(packet, packet)
|
||||
s.cipher.XORKeyStream(padding, padding)
|
||||
|
||||
if s.mac != nil && s.etm {
|
||||
// For EtM algorithms, packet and padding must be encrypted
|
||||
s.mac.Write(packet)
|
||||
s.mac.Write(padding)
|
||||
}
|
||||
|
||||
if _, err := w.Write(s.prefix[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(packet); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(padding); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.mac != nil {
|
||||
s.macResult = s.mac.Sum(s.macResult[:0])
|
||||
if _, err := w.Write(s.macResult); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type gcmCipher struct {
|
||||
aead cipher.AEAD
|
||||
prefix [4]byte
|
||||
iv []byte
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) {
|
||||
c, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
aead, err := cipher.NewGCM(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &gcmCipher{
|
||||
aead: aead,
|
||||
iv: iv,
|
||||
}, nil
|
||||
}
|
||||
|
||||
const gcmTagSize = 16
|
||||
|
||||
func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
|
||||
// Pad out to multiple of 16 bytes. This is different from the
|
||||
// stream cipher because that encrypts the length too.
|
||||
padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple)
|
||||
if padding < 4 {
|
||||
padding += packetSizeMultiple
|
||||
}
|
||||
|
||||
length := uint32(len(packet) + int(padding) + 1)
|
||||
binary.BigEndian.PutUint32(c.prefix[:], length)
|
||||
if _, err := w.Write(c.prefix[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cap(c.buf) < int(length) {
|
||||
c.buf = make([]byte, length)
|
||||
} else {
|
||||
c.buf = c.buf[:length]
|
||||
}
|
||||
|
||||
c.buf[0] = padding
|
||||
copy(c.buf[1:], packet)
|
||||
if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil {
|
||||
return err
|
||||
}
|
||||
c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:])
|
||||
if _, err := w.Write(c.buf); err != nil {
|
||||
return err
|
||||
}
|
||||
c.incIV()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *gcmCipher) incIV() {
|
||||
for i := 4 + 7; i >= 4; i-- {
|
||||
c.iv[i]++
|
||||
if c.iv[i] != 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
|
||||
if _, err := io.ReadFull(r, c.prefix[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
length := binary.BigEndian.Uint32(c.prefix[:])
|
||||
if length > maxPacket {
|
||||
return nil, errors.New("ssh: max packet length exceeded.")
|
||||
}
|
||||
|
||||
if cap(c.buf) < int(length+gcmTagSize) {
|
||||
c.buf = make([]byte, length+gcmTagSize)
|
||||
} else {
|
||||
c.buf = c.buf[:length+gcmTagSize]
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(r, c.buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.incIV()
|
||||
|
||||
padding := plain[0]
|
||||
if padding < 4 {
|
||||
// padding is a byte, so it automatically satisfies
|
||||
// the maximum size, which is 255.
|
||||
return nil, fmt.Errorf("ssh: illegal padding %d", padding)
|
||||
}
|
||||
|
||||
if int(padding+1) >= len(plain) {
|
||||
return nil, fmt.Errorf("ssh: padding %d too large", padding)
|
||||
}
|
||||
plain = plain[1 : length-uint32(padding)]
|
||||
return plain, nil
|
||||
}
|
||||
|
||||
// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1
|
||||
type cbcCipher struct {
|
||||
mac hash.Hash
|
||||
macSize uint32
|
||||
decrypter cipher.BlockMode
|
||||
encrypter cipher.BlockMode
|
||||
|
||||
// The following members are to avoid per-packet allocations.
|
||||
seqNumBytes [4]byte
|
||||
packetData []byte
|
||||
macResult []byte
|
||||
|
||||
// Amount of data we should still read to hide which
|
||||
// verification error triggered.
|
||||
oracleCamouflage uint32
|
||||
}
|
||||
|
||||
func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
|
||||
cbc := &cbcCipher{
|
||||
mac: macModes[algs.MAC].new(macKey),
|
||||
decrypter: cipher.NewCBCDecrypter(c, iv),
|
||||
encrypter: cipher.NewCBCEncrypter(c, iv),
|
||||
packetData: make([]byte, 1024),
|
||||
}
|
||||
if cbc.mac != nil {
|
||||
cbc.macSize = uint32(cbc.mac.Size())
|
||||
}
|
||||
|
||||
return cbc, nil
|
||||
}
|
||||
|
||||
func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
|
||||
c, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cbc, err := newCBCCipher(c, iv, key, macKey, algs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cbc, nil
|
||||
}
|
||||
|
||||
func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
|
||||
c, err := des.NewTripleDESCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cbc, err := newCBCCipher(c, iv, key, macKey, algs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cbc, nil
|
||||
}
|
||||
|
||||
func maxUInt32(a, b int) uint32 {
|
||||
if a > b {
|
||||
return uint32(a)
|
||||
}
|
||||
return uint32(b)
|
||||
}
|
||||
|
||||
const (
|
||||
cbcMinPacketSizeMultiple = 8
|
||||
cbcMinPacketSize = 16
|
||||
cbcMinPaddingSize = 4
|
||||
)
|
||||
|
||||
// cbcError represents a verification error that may leak information.
|
||||
type cbcError string
|
||||
|
||||
func (e cbcError) Error() string { return string(e) }
|
||||
|
||||
func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
|
||||
p, err := c.readPacketLeaky(seqNum, r)
|
||||
if err != nil {
|
||||
if _, ok := err.(cbcError); ok {
|
||||
// Verification error: read a fixed amount of
|
||||
// data, to make distinguishing between
|
||||
// failing MAC and failing length check more
|
||||
// difficult.
|
||||
io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage))
|
||||
}
|
||||
}
|
||||
return p, err
|
||||
}
|
||||
|
||||
func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) {
|
||||
blockSize := c.decrypter.BlockSize()
|
||||
|
||||
// Read the header, which will include some of the subsequent data in the
|
||||
// case of block ciphers - this is copied back to the payload later.
|
||||
// How many bytes of payload/padding will be read with this first read.
|
||||
firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize)
|
||||
firstBlock := c.packetData[:firstBlockLength]
|
||||
if _, err := io.ReadFull(r, firstBlock); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength
|
||||
|
||||
c.decrypter.CryptBlocks(firstBlock, firstBlock)
|
||||
length := binary.BigEndian.Uint32(firstBlock[:4])
|
||||
if length > maxPacket {
|
||||
return nil, cbcError("ssh: packet too large")
|
||||
}
|
||||
if length+4 < maxUInt32(cbcMinPacketSize, blockSize) {
|
||||
// The minimum size of a packet is 16 (or the cipher block size, whichever
|
||||
// is larger) bytes.
|
||||
return nil, cbcError("ssh: packet too small")
|
||||
}
|
||||
// The length of the packet (including the length field but not the MAC) must
|
||||
// be a multiple of the block size or 8, whichever is larger.
|
||||
if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 {
|
||||
return nil, cbcError("ssh: invalid packet length multiple")
|
||||
}
|
||||
|
||||
paddingLength := uint32(firstBlock[4])
|
||||
if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 {
|
||||
return nil, cbcError("ssh: invalid packet length")
|
||||
}
|
||||
|
||||
// Positions within the c.packetData buffer:
|
||||
macStart := 4 + length
|
||||
paddingStart := macStart - paddingLength
|
||||
|
||||
// Entire packet size, starting before length, ending at end of mac.
|
||||
entirePacketSize := macStart + c.macSize
|
||||
|
||||
// Ensure c.packetData is large enough for the entire packet data.
|
||||
if uint32(cap(c.packetData)) < entirePacketSize {
|
||||
// Still need to upsize and copy, but this should be rare at runtime, only
|
||||
// on upsizing the packetData buffer.
|
||||
c.packetData = make([]byte, entirePacketSize)
|
||||
copy(c.packetData, firstBlock)
|
||||
} else {
|
||||
c.packetData = c.packetData[:entirePacketSize]
|
||||
}
|
||||
|
||||
if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
c.oracleCamouflage -= uint32(n)
|
||||
}
|
||||
|
||||
remainingCrypted := c.packetData[firstBlockLength:macStart]
|
||||
c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted)
|
||||
|
||||
mac := c.packetData[macStart:]
|
||||
if c.mac != nil {
|
||||
c.mac.Reset()
|
||||
binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
|
||||
c.mac.Write(c.seqNumBytes[:])
|
||||
c.mac.Write(c.packetData[:macStart])
|
||||
c.macResult = c.mac.Sum(c.macResult[:0])
|
||||
if subtle.ConstantTimeCompare(c.macResult, mac) != 1 {
|
||||
return nil, cbcError("ssh: MAC failure")
|
||||
}
|
||||
}
|
||||
|
||||
return c.packetData[prefixLen:paddingStart], nil
|
||||
}
|
||||
|
||||
func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
|
||||
effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize())
|
||||
|
||||
// Length of encrypted portion of the packet (header, payload, padding).
|
||||
// Enforce minimum padding and packet size.
|
||||
encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize)
|
||||
// Enforce block size.
|
||||
encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize
|
||||
|
||||
length := encLength - 4
|
||||
paddingLength := int(length) - (1 + len(packet))
|
||||
|
||||
// Overall buffer contains: header, payload, padding, mac.
|
||||
// Space for the MAC is reserved in the capacity but not the slice length.
|
||||
bufferSize := encLength + c.macSize
|
||||
if uint32(cap(c.packetData)) < bufferSize {
|
||||
c.packetData = make([]byte, encLength, bufferSize)
|
||||
} else {
|
||||
c.packetData = c.packetData[:encLength]
|
||||
}
|
||||
|
||||
p := c.packetData
|
||||
|
||||
// Packet header.
|
||||
binary.BigEndian.PutUint32(p, length)
|
||||
p = p[4:]
|
||||
p[0] = byte(paddingLength)
|
||||
|
||||
// Payload.
|
||||
p = p[1:]
|
||||
copy(p, packet)
|
||||
|
||||
// Padding.
|
||||
p = p[len(packet):]
|
||||
if _, err := io.ReadFull(rand, p); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.mac != nil {
|
||||
c.mac.Reset()
|
||||
binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
|
||||
c.mac.Write(c.seqNumBytes[:])
|
||||
c.mac.Write(c.packetData)
|
||||
// The MAC is now appended into the capacity reserved for it earlier.
|
||||
c.packetData = c.mac.Sum(c.packetData)
|
||||
}
|
||||
|
||||
c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength])
|
||||
|
||||
if _, err := w.Write(c.packetData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
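The stream and CBC writePacket paths above both build the RFC 4253 binary packet: a 4-byte length, a 1-byte padding length, the payload, and at least 4 bytes of random padding, sized so that the encrypted portion is a multiple of the block multiple (the length field is excluded when an encrypt-then-MAC algorithm carries it as additional data). Below is a small sketch of just that arithmetic, with hypothetical helper and constant names; it is illustrative only, not part of this diff.

package main

import "fmt"

// prefixLen mirrors the constant above: 4-byte packet length + 1-byte padding length.
const prefixLen = 5

// paddingLength reproduces the padding rule used by writePacket above:
// pad the encrypted portion up to a multiple of blockMultiple, but never
// use fewer than 4 padding bytes. aadlen is 4 for EtM MACs, 0 otherwise.
func paddingLength(payloadLen, blockMultiple, aadlen int) int {
	pad := blockMultiple - (prefixLen+payloadLen-aadlen)%blockMultiple
	if pad < 4 {
		pad += blockMultiple
	}
	return pad
}

func main() {
	// A 100-byte payload, 16-byte multiple, no EtM: 5+100 = 105, 105%16 = 9,
	// so 7 bytes of padding bring the encrypted portion to 112 bytes.
	fmt.Println(paddingLength(100, 16, 0)) // 7
}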
|
257 vendor/golang.org/x/crypto/ssh/client.go (generated, vendored)
@ -1,257 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Client implements a traditional SSH client that supports shells,
|
||||
// subprocesses, TCP port/streamlocal forwarding and tunneled dialing.
|
||||
type Client struct {
|
||||
Conn
|
||||
|
||||
forwards forwardList // forwarded tcpip connections from the remote side
|
||||
mu sync.Mutex
|
||||
channelHandlers map[string]chan NewChannel
|
||||
}
|
||||
|
||||
// HandleChannelOpen returns a channel on which NewChannel requests
|
||||
// for the given type are sent. If the type already is being handled,
|
||||
// nil is returned. The channel is closed when the connection is closed.
|
||||
func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.channelHandlers == nil {
|
||||
// The SSH channel has been closed.
|
||||
c := make(chan NewChannel)
|
||||
close(c)
|
||||
return c
|
||||
}
|
||||
|
||||
ch := c.channelHandlers[channelType]
|
||||
if ch != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ch = make(chan NewChannel, chanSize)
|
||||
c.channelHandlers[channelType] = ch
|
||||
return ch
|
||||
}
|
||||
|
||||
// NewClient creates a Client on top of the given connection.
|
||||
func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
|
||||
conn := &Client{
|
||||
Conn: c,
|
||||
channelHandlers: make(map[string]chan NewChannel, 1),
|
||||
}
|
||||
|
||||
go conn.handleGlobalRequests(reqs)
|
||||
go conn.handleChannelOpens(chans)
|
||||
go func() {
|
||||
conn.Wait()
|
||||
conn.forwards.closeAll()
|
||||
}()
|
||||
go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
|
||||
go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-streamlocal@openssh.com"))
|
||||
return conn
|
||||
}
|
||||
|
||||
// NewClientConn establishes an authenticated SSH connection using c
|
||||
// as the underlying transport. The Request and NewChannel channels
|
||||
// must be serviced or the connection will hang.
|
||||
func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) {
|
||||
fullConf := *config
|
||||
fullConf.SetDefaults()
|
||||
if fullConf.HostKeyCallback == nil {
|
||||
c.Close()
|
||||
return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback")
|
||||
}
|
||||
|
||||
conn := &connection{
|
||||
sshConn: sshConn{conn: c},
|
||||
}
|
||||
|
||||
if err := conn.clientHandshake(addr, &fullConf); err != nil {
|
||||
c.Close()
|
||||
return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err)
|
||||
}
|
||||
conn.mux = newMux(conn.transport)
|
||||
return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil
|
||||
}
|
||||
|
||||
// clientHandshake performs the client side key exchange. See RFC 4253 Section
|
||||
// 7.
|
||||
func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error {
|
||||
if config.ClientVersion != "" {
|
||||
c.clientVersion = []byte(config.ClientVersion)
|
||||
} else {
|
||||
c.clientVersion = []byte(packageVersion)
|
||||
}
|
||||
var err error
|
||||
c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.transport = newClientTransport(
|
||||
newTransport(c.sshConn.conn, config.Rand, true /* is client */),
|
||||
c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
|
||||
if err := c.transport.waitSession(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.sessionID = c.transport.getSessionID()
|
||||
return c.clientAuthenticate(config)
|
||||
}
|
||||
|
||||
// verifyHostKeySignature verifies the host key obtained in the key
|
||||
// exchange.
|
||||
func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
|
||||
sig, rest, ok := parseSignatureBody(result.Signature)
|
||||
if len(rest) > 0 || !ok {
|
||||
return errors.New("ssh: signature parse error")
|
||||
}
|
||||
|
||||
return hostKey.Verify(result.H, sig)
|
||||
}
|
||||
|
||||
// NewSession opens a new Session for this client. (A session is a remote
|
||||
// execution of a program.)
|
||||
func (c *Client) NewSession() (*Session, error) {
|
||||
ch, in, err := c.OpenChannel("session", nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newSession(ch, in)
|
||||
}
|
||||
|
||||
func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
|
||||
for r := range incoming {
|
||||
// This handles keepalive messages and matches
|
||||
// the behaviour of OpenSSH.
|
||||
r.Reply(false, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// handleChannelOpens handles channel open messages from the remote side.
|
||||
func (c *Client) handleChannelOpens(in <-chan NewChannel) {
|
||||
for ch := range in {
|
||||
c.mu.Lock()
|
||||
handler := c.channelHandlers[ch.ChannelType()]
|
||||
c.mu.Unlock()
|
||||
|
||||
if handler != nil {
|
||||
handler <- ch
|
||||
} else {
|
||||
ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
|
||||
}
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
for _, ch := range c.channelHandlers {
|
||||
close(ch)
|
||||
}
|
||||
c.channelHandlers = nil
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Dial starts a client connection to the given SSH server. It is a
|
||||
// convenience function that connects to the given network address,
|
||||
// initiates the SSH handshake, and then sets up a Client. For access
|
||||
// to incoming channels and requests, use net.Dial with NewClientConn
|
||||
// instead.
|
||||
func Dial(network, addr string, config *ClientConfig) (*Client, error) {
|
||||
conn, err := net.DialTimeout(network, addr, config.Timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, chans, reqs, err := NewClientConn(conn, addr, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewClient(c, chans, reqs), nil
|
||||
}
|
||||
|
||||
// HostKeyCallback is the function type used for verifying server
|
||||
// keys. A HostKeyCallback must return nil if the host key is OK, or
|
||||
// an error to reject it. It receives the hostname as passed to Dial
|
||||
// or NewClientConn. The remote address is the RemoteAddr of the
|
||||
// net.Conn underlying the SSH connection.
|
||||
type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
|
||||
|
||||
// A ClientConfig structure is used to configure a Client. It must not be
|
||||
// modified after having been passed to an SSH function.
|
||||
type ClientConfig struct {
|
||||
// Config contains configuration that is shared between clients and
|
||||
// servers.
|
||||
Config
|
||||
|
||||
// User contains the username to authenticate as.
|
||||
User string
|
||||
|
||||
// Auth contains possible authentication methods to use with the
|
||||
// server. Only the first instance of a particular RFC 4252 method will
|
||||
// be used during authentication.
|
||||
Auth []AuthMethod
|
||||
|
||||
// HostKeyCallback is called during the cryptographic
|
||||
// handshake to validate the server's host key. The client
|
||||
// configuration must supply this callback for the connection
|
||||
// to succeed. The functions InsecureIgnoreHostKey or
|
||||
// FixedHostKey can be used for simplistic host key checks.
|
||||
HostKeyCallback HostKeyCallback
|
||||
|
||||
// ClientVersion contains the version identification string that will
|
||||
// be used for the connection. If empty, a reasonable default is used.
|
||||
ClientVersion string
|
||||
|
||||
// HostKeyAlgorithms lists the key types that the client will
|
||||
// accept from the server as host key, in order of
|
||||
// preference. If empty, a reasonable default is used. Any
|
||||
// string returned from PublicKey.Type method may be used, or
|
||||
// any of the CertAlgoXxxx and KeyAlgoXxxx constants.
|
||||
HostKeyAlgorithms []string
|
||||
|
||||
// Timeout is the maximum amount of time for the TCP connection to establish.
|
||||
//
|
||||
// A Timeout of zero means no timeout.
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// InsecureIgnoreHostKey returns a function that can be used for
|
||||
// ClientConfig.HostKeyCallback to accept any host key. It should
|
||||
// not be used for production code.
|
||||
func InsecureIgnoreHostKey() HostKeyCallback {
|
||||
return func(hostname string, remote net.Addr, key PublicKey) error {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type fixedHostKey struct {
|
||||
key PublicKey
|
||||
}
|
||||
|
||||
func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error {
|
||||
if f.key == nil {
|
||||
return fmt.Errorf("ssh: required host key was nil")
|
||||
}
|
||||
if !bytes.Equal(key.Marshal(), f.key.Marshal()) {
|
||||
return fmt.Errorf("ssh: host key mismatch")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FixedHostKey returns a function for use in
|
||||
// ClientConfig.HostKeyCallback to accept only a specific host key.
|
||||
func FixedHostKey(key PublicKey) HostKeyCallback {
|
||||
hk := &fixedHostKey{key}
|
||||
return hk.check
|
||||
}
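The client entry points removed above (Dial, NewClientConn, ClientConfig, and the host key helpers) are typically combined as in the hypothetical sketch below. The address, user, password and remote command are placeholders, and a real deployment should pin host keys with FixedHostKey or a known_hosts callback rather than InsecureIgnoreHostKey.

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User: "demo",
		Auth: []ssh.AuthMethod{ssh.Password("secret")},
		// HostKeyCallback is mandatory; InsecureIgnoreHostKey is only for
		// experiments. A real client should pin keys with FixedHostKey or
		// a known_hosts based callback.
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}

	// Placeholder address; Dial performs the TCP connect, handshake and auth.
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// A session is a remote execution of a program, per NewSession above.
	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	out, err := session.CombinedOutput("uname -a")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", out)
}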
|
486 vendor/golang.org/x/crypto/ssh/client_auth.go (generated, vendored)
@ -1,486 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// clientAuthenticate authenticates with the remote server. See RFC 4252.
|
||||
func (c *connection) clientAuthenticate(config *ClientConfig) error {
|
||||
// initiate user auth session
|
||||
if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil {
|
||||
return err
|
||||
}
|
||||
packet, err := c.transport.readPacket()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var serviceAccept serviceAcceptMsg
|
||||
if err := Unmarshal(packet, &serviceAccept); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// during the authentication phase the client first attempts the "none" method
|
||||
// then any untried methods suggested by the server.
|
||||
tried := make(map[string]bool)
|
||||
var lastMethods []string
|
||||
|
||||
sessionID := c.transport.getSessionID()
|
||||
for auth := AuthMethod(new(noneAuth)); auth != nil; {
|
||||
ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
// success
|
||||
return nil
|
||||
}
|
||||
tried[auth.method()] = true
|
||||
if methods == nil {
|
||||
methods = lastMethods
|
||||
}
|
||||
lastMethods = methods
|
||||
|
||||
auth = nil
|
||||
|
||||
findNext:
|
||||
for _, a := range config.Auth {
|
||||
candidateMethod := a.method()
|
||||
if tried[candidateMethod] {
|
||||
continue
|
||||
}
|
||||
for _, meth := range methods {
|
||||
if meth == candidateMethod {
|
||||
auth = a
|
||||
break findNext
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
|
||||
}
|
||||
|
||||
func keys(m map[string]bool) []string {
|
||||
s := make([]string, 0, len(m))
|
||||
|
||||
for key := range m {
|
||||
s = append(s, key)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// An AuthMethod represents an instance of an RFC 4252 authentication method.
|
||||
type AuthMethod interface {
|
||||
// auth authenticates user over transport t.
|
||||
// Returns true if authentication is successful.
|
||||
// If authentication is not successful, a []string of alternative
|
||||
// method names is returned. If the slice is nil, it will be ignored
|
||||
// and the previous set of possible methods will be reused.
|
||||
auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error)
|
||||
|
||||
// method returns the RFC 4252 method name.
|
||||
method() string
|
||||
}
|
||||
|
||||
// "none" authentication, RFC 4252 section 5.2.
|
||||
type noneAuth int
|
||||
|
||||
func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
|
||||
if err := c.writePacket(Marshal(&userAuthRequestMsg{
|
||||
User: user,
|
||||
Service: serviceSSH,
|
||||
Method: "none",
|
||||
})); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
return handleAuthResponse(c)
|
||||
}
|
||||
|
||||
func (n *noneAuth) method() string {
|
||||
return "none"
|
||||
}
|
||||
|
||||
// passwordCallback is an AuthMethod that fetches the password through
|
||||
// a function call, e.g. by prompting the user.
|
||||
type passwordCallback func() (password string, err error)
|
||||
|
||||
func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
|
||||
type passwordAuthMsg struct {
|
||||
User string `sshtype:"50"`
|
||||
Service string
|
||||
Method string
|
||||
Reply bool
|
||||
Password string
|
||||
}
|
||||
|
||||
pw, err := cb()
|
||||
// REVIEW NOTE: is there a need to support skipping a password attempt?
|
||||
// The program may only find out that the user doesn't have a password
|
||||
// when prompting.
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
if err := c.writePacket(Marshal(&passwordAuthMsg{
|
||||
User: user,
|
||||
Service: serviceSSH,
|
||||
Method: cb.method(),
|
||||
Reply: false,
|
||||
Password: pw,
|
||||
})); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
return handleAuthResponse(c)
|
||||
}
|
||||
|
||||
func (cb passwordCallback) method() string {
|
||||
return "password"
|
||||
}
|
||||
|
||||
// Password returns an AuthMethod using the given password.
|
||||
func Password(secret string) AuthMethod {
|
||||
return passwordCallback(func() (string, error) { return secret, nil })
|
||||
}
|
||||
|
||||
// PasswordCallback returns an AuthMethod that uses a callback for
|
||||
// fetching a password.
|
||||
func PasswordCallback(prompt func() (secret string, err error)) AuthMethod {
|
||||
return passwordCallback(prompt)
|
||||
}
|
||||
|
||||
type publickeyAuthMsg struct {
|
||||
User string `sshtype:"50"`
|
||||
Service string
|
||||
Method string
|
||||
// HasSig indicates to the receiver packet that the auth request is signed and
|
||||
// should be used for authentication of the request.
|
||||
HasSig bool
|
||||
Algoname string
|
||||
PubKey []byte
|
||||
// Sig is tagged with "rest" so Marshal will exclude it during
|
||||
// validateKey
|
||||
Sig []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// publicKeyCallback is an AuthMethod that uses a set of key
|
||||
// pairs for authentication.
|
||||
type publicKeyCallback func() ([]Signer, error)
|
||||
|
||||
func (cb publicKeyCallback) method() string {
|
||||
return "publickey"
|
||||
}
|
||||
|
||||
func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
|
||||
// Authentication is performed by sending an enquiry to test if a key is
|
||||
// acceptable to the remote. If the key is acceptable, the client will
|
||||
	// attempt to authenticate with the valid key. If not, the client will repeat
|
||||
// the process with the remaining keys.
|
||||
|
||||
signers, err := cb()
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
var methods []string
|
||||
for _, signer := range signers {
|
||||
ok, err := validateKey(signer.PublicKey(), user, c)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
pub := signer.PublicKey()
|
||||
pubKey := pub.Marshal()
|
||||
sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
|
||||
User: user,
|
||||
Service: serviceSSH,
|
||||
Method: cb.method(),
|
||||
}, []byte(pub.Type()), pubKey))
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
// manually wrap the serialized signature in a string
|
||||
s := Marshal(sign)
|
||||
sig := make([]byte, stringLength(len(s)))
|
||||
marshalString(sig, s)
|
||||
msg := publickeyAuthMsg{
|
||||
User: user,
|
||||
Service: serviceSSH,
|
||||
Method: cb.method(),
|
||||
HasSig: true,
|
||||
Algoname: pub.Type(),
|
||||
PubKey: pubKey,
|
||||
Sig: sig,
|
||||
}
|
||||
p := Marshal(&msg)
|
||||
if err := c.writePacket(p); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
var success bool
|
||||
success, methods, err = handleAuthResponse(c)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
// If authentication succeeds or the list of available methods does not
|
||||
// contain the "publickey" method, do not attempt to authenticate with any
|
||||
// other keys. According to RFC 4252 Section 7, the latter can occur when
|
||||
// additional authentication methods are required.
|
||||
if success || !containsMethod(methods, cb.method()) {
|
||||
return success, methods, err
|
||||
}
|
||||
}
|
||||
|
||||
return false, methods, nil
|
||||
}
|
||||
|
||||
func containsMethod(methods []string, method string) bool {
|
||||
for _, m := range methods {
|
||||
if m == method {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// validateKey validates the key provided is acceptable to the server.
|
||||
func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
|
||||
pubKey := key.Marshal()
|
||||
msg := publickeyAuthMsg{
|
||||
User: user,
|
||||
Service: serviceSSH,
|
||||
Method: "publickey",
|
||||
HasSig: false,
|
||||
Algoname: key.Type(),
|
||||
PubKey: pubKey,
|
||||
}
|
||||
if err := c.writePacket(Marshal(&msg)); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return confirmKeyAck(key, c)
|
||||
}
|
||||
|
||||
func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
|
||||
pubKey := key.Marshal()
|
||||
algoname := key.Type()
|
||||
|
||||
for {
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
switch packet[0] {
|
||||
case msgUserAuthBanner:
|
||||
// TODO(gpaul): add callback to present the banner to the user
|
||||
case msgUserAuthPubKeyOk:
|
||||
var msg userAuthPubKeyOkMsg
|
||||
if err := Unmarshal(packet, &msg); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
case msgUserAuthFailure:
|
||||
return false, nil
|
||||
default:
|
||||
return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PublicKeys returns an AuthMethod that uses the given key
|
||||
// pairs.
|
||||
func PublicKeys(signers ...Signer) AuthMethod {
|
||||
return publicKeyCallback(func() ([]Signer, error) { return signers, nil })
|
||||
}
|
||||
|
||||
// PublicKeysCallback returns an AuthMethod that runs the given
|
||||
// function to obtain a list of key pairs.
|
||||
func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod {
|
||||
return publicKeyCallback(getSigners)
|
||||
}
|
||||
|
||||
// handleAuthResponse returns whether the preceding authentication request succeeded
|
||||
// along with a list of remaining authentication methods to try next and
|
||||
// an error if an unexpected response was received.
|
||||
func handleAuthResponse(c packetConn) (bool, []string, error) {
|
||||
for {
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
switch packet[0] {
|
||||
case msgUserAuthBanner:
|
||||
// TODO: add callback to present the banner to the user
|
||||
case msgUserAuthFailure:
|
||||
var msg userAuthFailureMsg
|
||||
if err := Unmarshal(packet, &msg); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
return false, msg.Methods, nil
|
||||
case msgUserAuthSuccess:
|
||||
return true, nil, nil
|
||||
default:
|
||||
return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// KeyboardInteractiveChallenge should print questions, optionally
|
||||
// disabling echoing (e.g. for passwords), and return all the answers.
|
||||
// Challenge may be called multiple times in a single session. After
|
||||
// successful authentication, the server may send a challenge with no
|
||||
// questions, for which the user and instruction messages should be
|
||||
// printed. RFC 4256 section 3.3 details how the UI should behave for
|
||||
// both CLI and GUI environments.
|
||||
type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
|
||||
|
||||
// KeyboardInteractive returns an AuthMethod using a prompt/response
|
||||
// sequence controlled by the server.
|
||||
func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
|
||||
return challenge
|
||||
}
|
||||
|
||||
func (cb KeyboardInteractiveChallenge) method() string {
|
||||
return "keyboard-interactive"
|
||||
}
|
||||
|
||||
func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
|
||||
type initiateMsg struct {
|
||||
User string `sshtype:"50"`
|
||||
Service string
|
||||
Method string
|
||||
Language string
|
||||
Submethods string
|
||||
}
|
||||
|
||||
if err := c.writePacket(Marshal(&initiateMsg{
|
||||
User: user,
|
||||
Service: serviceSSH,
|
||||
Method: "keyboard-interactive",
|
||||
})); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
for {
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
		// Like handleAuthResponse, but with fewer options.
|
||||
switch packet[0] {
|
||||
case msgUserAuthBanner:
|
||||
// TODO: Print banners during userauth.
|
||||
continue
|
||||
case msgUserAuthInfoRequest:
|
||||
// OK
|
||||
case msgUserAuthFailure:
|
||||
var msg userAuthFailureMsg
|
||||
if err := Unmarshal(packet, &msg); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
return false, msg.Methods, nil
|
||||
case msgUserAuthSuccess:
|
||||
return true, nil, nil
|
||||
default:
|
||||
return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
|
||||
}
|
||||
|
||||
var msg userAuthInfoRequestMsg
|
||||
if err := Unmarshal(packet, &msg); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
// Manually unpack the prompt/echo pairs.
|
||||
rest := msg.Prompts
|
||||
var prompts []string
|
||||
var echos []bool
|
||||
for i := 0; i < int(msg.NumPrompts); i++ {
|
||||
prompt, r, ok := parseString(rest)
|
||||
if !ok || len(r) == 0 {
|
||||
return false, nil, errors.New("ssh: prompt format error")
|
||||
}
|
||||
prompts = append(prompts, string(prompt))
|
||||
echos = append(echos, r[0] != 0)
|
||||
rest = r[1:]
|
||||
}
|
||||
|
||||
if len(rest) != 0 {
|
||||
return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
|
||||
}
|
||||
|
||||
answers, err := cb(msg.User, msg.Instruction, prompts, echos)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
if len(answers) != len(prompts) {
|
||||
return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
|
||||
}
|
||||
responseLength := 1 + 4
|
||||
for _, a := range answers {
|
||||
responseLength += stringLength(len(a))
|
||||
}
|
||||
serialized := make([]byte, responseLength)
|
||||
p := serialized
|
||||
p[0] = msgUserAuthInfoResponse
|
||||
p = p[1:]
|
||||
p = marshalUint32(p, uint32(len(answers)))
|
||||
for _, a := range answers {
|
||||
p = marshalString(p, []byte(a))
|
||||
}
|
||||
|
||||
if err := c.writePacket(serialized); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type retryableAuthMethod struct {
|
||||
authMethod AuthMethod
|
||||
maxTries int
|
||||
}
|
||||
|
||||
func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) {
|
||||
for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
|
||||
ok, methods, err = r.authMethod.auth(session, user, c, rand)
|
||||
if ok || err != nil { // either success or error terminate
|
||||
return ok, methods, err
|
||||
}
|
||||
}
|
||||
return ok, methods, err
|
||||
}
|
||||
|
||||
func (r *retryableAuthMethod) method() string {
|
||||
return r.authMethod.method()
|
||||
}
|
||||
|
||||
// RetryableAuthMethod is a decorator for other auth methods enabling them to
|
||||
// be retried up to maxTries before considering that AuthMethod itself failed.
|
||||
// If maxTries is <= 0, it will retry indefinitely.
|
||||
//
|
||||
// This is useful for interactive clients using challenge/response type
|
||||
// authentication (e.g. Keyboard-Interactive, Password, etc) where the user
|
||||
// could mistype their response resulting in the server issuing a
|
||||
// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4
|
||||
// [keyboard-interactive]); Without this decorator, the non-retryable
|
||||
// AuthMethod would be removed from future consideration, and never tried again
|
||||
// (and so the user would never be able to retry their entry).
|
||||
func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod {
|
||||
return &retryableAuthMethod{authMethod: auth, maxTries: maxTries}
|
||||
}
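The AuthMethod constructors above (Password/PasswordCallback, PublicKeys, KeyboardInteractive, RetryableAuthMethod) compose into the Auth slice of a ClientConfig. The following is a hypothetical sketch; the key path, prompt handling and retry count are placeholders, not values from this repository.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

// authMethods builds a hypothetical Auth slice from the constructors above.
func authMethods() []ssh.AuthMethod {
	// Placeholder key path; ParsePrivateKey accepts an unencrypted PEM key.
	pem, err := ioutil.ReadFile("/path/to/id_ed25519")
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(pem)
	if err != nil {
		log.Fatal(err)
	}

	// A real client would prompt the user per question; this stub returns
	// empty answers, which a server will normally reject.
	ki := func(user, instruction string, questions []string, echos []bool) ([]string, error) {
		return make([]string, len(questions)), nil
	}

	return []ssh.AuthMethod{
		// Methods are tried in order; only the first instance of a given
		// RFC 4252 method name is used during authentication.
		ssh.PublicKeys(signer),
		ssh.RetryableAuthMethod(ssh.PasswordCallback(func() (string, error) {
			fmt.Print("password: ")
			var pw string
			fmt.Scanln(&pw)
			return pw, nil
		}), 3),
		ssh.KeyboardInteractive(ki),
	}
}

func main() {
	cfg := &ssh.ClientConfig{
		User:            "demo",
		Auth:            authMethods(),
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // pin host keys in real use
	}
	_ = cfg
}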
|
373 vendor/golang.org/x/crypto/ssh/common.go (generated, vendored)
@ -1,373 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
_ "crypto/sha1"
|
||||
_ "crypto/sha256"
|
||||
_ "crypto/sha512"
|
||||
)
|
||||
|
||||
// These are string constants in the SSH protocol.
|
||||
const (
|
||||
compressionNone = "none"
|
||||
serviceUserAuth = "ssh-userauth"
|
||||
serviceSSH = "ssh-connection"
|
||||
)
|
||||
|
||||
// supportedCiphers specifies the supported ciphers in preference order.
|
||||
var supportedCiphers = []string{
|
||||
"aes128-ctr", "aes192-ctr", "aes256-ctr",
|
||||
"aes128-gcm@openssh.com",
|
||||
"arcfour256", "arcfour128",
|
||||
}
|
||||
|
||||
// supportedKexAlgos specifies the supported key-exchange algorithms in
|
||||
// preference order.
|
||||
var supportedKexAlgos = []string{
|
||||
kexAlgoCurve25519SHA256,
|
||||
// P384 and P521 are not constant-time yet, but since we don't
|
||||
// reuse ephemeral keys, using them for ECDH should be OK.
|
||||
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
|
||||
kexAlgoDH14SHA1, kexAlgoDH1SHA1,
|
||||
}
|
||||
|
||||
// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
|
||||
// of authenticating servers) in preference order.
|
||||
var supportedHostKeyAlgos = []string{
|
||||
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
|
||||
CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
|
||||
|
||||
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
|
||||
KeyAlgoRSA, KeyAlgoDSA,
|
||||
|
||||
KeyAlgoED25519,
|
||||
}
|
||||
|
||||
// supportedMACs specifies a default set of MAC algorithms in preference order.
|
||||
// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
|
||||
// because they have reached the end of their useful life.
|
||||
var supportedMACs = []string{
|
||||
"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
|
||||
}
|
||||
|
||||
var supportedCompressions = []string{compressionNone}
|
||||
|
||||
// hashFuncs keeps the mapping of supported algorithms to their respective
|
||||
// hashes needed for signature verification.
|
||||
var hashFuncs = map[string]crypto.Hash{
|
||||
KeyAlgoRSA: crypto.SHA1,
|
||||
KeyAlgoDSA: crypto.SHA1,
|
||||
KeyAlgoECDSA256: crypto.SHA256,
|
||||
KeyAlgoECDSA384: crypto.SHA384,
|
||||
KeyAlgoECDSA521: crypto.SHA512,
|
||||
CertAlgoRSAv01: crypto.SHA1,
|
||||
CertAlgoDSAv01: crypto.SHA1,
|
||||
CertAlgoECDSA256v01: crypto.SHA256,
|
||||
CertAlgoECDSA384v01: crypto.SHA384,
|
||||
CertAlgoECDSA521v01: crypto.SHA512,
|
||||
}
|
||||
|
||||
// unexpectedMessageError results when the SSH message that we received didn't
|
||||
// match what we wanted.
|
||||
func unexpectedMessageError(expected, got uint8) error {
|
||||
return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
|
||||
}
|
||||
|
||||
// parseError results from a malformed SSH message.
|
||||
func parseError(tag uint8) error {
|
||||
return fmt.Errorf("ssh: parse error in message type %d", tag)
|
||||
}
|
||||
|
||||
func findCommon(what string, client []string, server []string) (common string, err error) {
|
||||
for _, c := range client {
|
||||
for _, s := range server {
|
||||
if c == s {
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
|
||||
}
|
||||
|
||||
type directionAlgorithms struct {
|
||||
Cipher string
|
||||
MAC string
|
||||
Compression string
|
||||
}
|
||||
|
||||
// rekeyBytes returns the rekeying interval in bytes.
|
||||
func (a *directionAlgorithms) rekeyBytes() int64 {
|
||||
// According to RFC4344 block ciphers should rekey after
|
||||
// 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
|
||||
// 128.
|
||||
switch a.Cipher {
|
||||
case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
|
||||
return 16 * (1 << 32)
|
||||
|
||||
}
|
||||
|
||||
// For others, stick with RFC4253 recommendation to rekey after 1 Gb of data.
|
||||
return 1 << 30
|
||||
}
|
||||
|
||||
type algorithms struct {
|
||||
kex string
|
||||
hostKey string
|
||||
w directionAlgorithms
|
||||
r directionAlgorithms
|
||||
}
|
||||
|
||||
func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
|
||||
result := &algorithms{}
|
||||
|
||||
result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// If rekeythreshold is too small, we can't make any progress sending
|
||||
// stuff.
|
||||
const minRekeyThreshold uint64 = 256
|
||||
|
||||
// Config contains configuration data common to both ServerConfig and
|
||||
// ClientConfig.
|
||||
type Config struct {
|
||||
// Rand provides the source of entropy for cryptographic
|
||||
// primitives. If Rand is nil, the cryptographic random reader
|
||||
// in package crypto/rand will be used.
|
||||
Rand io.Reader
|
||||
|
||||
// The maximum number of bytes sent or received after which a
|
||||
// new key is negotiated. It must be at least 256. If
|
||||
// unspecified, a size suitable for the chosen cipher is used.
|
||||
RekeyThreshold uint64
|
||||
|
||||
// The allowed key exchanges algorithms. If unspecified then a
|
||||
// default set of algorithms is used.
|
||||
KeyExchanges []string
|
||||
|
||||
// The allowed cipher algorithms. If unspecified then a sensible
|
||||
// default is used.
|
||||
Ciphers []string
|
||||
|
||||
// The allowed MAC algorithms. If unspecified then a sensible default
|
||||
// is used.
|
||||
MACs []string
|
||||
}
|
||||
|
||||
// SetDefaults sets sensible values for unset fields in config. This is
|
||||
// exported for testing: Configs passed to SSH functions are copied and have
|
||||
// default values set automatically.
|
||||
func (c *Config) SetDefaults() {
|
||||
if c.Rand == nil {
|
||||
c.Rand = rand.Reader
|
||||
}
|
||||
if c.Ciphers == nil {
|
||||
c.Ciphers = supportedCiphers
|
||||
}
|
||||
var ciphers []string
|
||||
for _, c := range c.Ciphers {
|
||||
if cipherModes[c] != nil {
|
||||
// reject the cipher if we have no cipherModes definition
|
||||
ciphers = append(ciphers, c)
|
||||
}
|
||||
}
|
||||
c.Ciphers = ciphers
|
||||
|
||||
if c.KeyExchanges == nil {
|
||||
c.KeyExchanges = supportedKexAlgos
|
||||
}
|
||||
|
||||
if c.MACs == nil {
|
||||
c.MACs = supportedMACs
|
||||
}
|
||||
|
||||
if c.RekeyThreshold == 0 {
|
||||
// cipher specific default
|
||||
} else if c.RekeyThreshold < minRekeyThreshold {
|
||||
c.RekeyThreshold = minRekeyThreshold
|
||||
} else if c.RekeyThreshold >= math.MaxInt64 {
|
||||
// Avoid weirdness if somebody uses -1 as a threshold.
|
||||
c.RekeyThreshold = math.MaxInt64
|
||||
}
|
||||
}
|
||||
|
||||
// buildDataSignedForAuth returns the data that is signed in order to prove
|
||||
// possession of a private key. See RFC 4252, section 7.
|
||||
func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
|
||||
data := struct {
|
||||
Session []byte
|
||||
Type byte
|
||||
User string
|
||||
Service string
|
||||
Method string
|
||||
Sign bool
|
||||
Algo []byte
|
||||
PubKey []byte
|
||||
}{
|
||||
sessionId,
|
||||
msgUserAuthRequest,
|
||||
req.User,
|
||||
req.Service,
|
||||
req.Method,
|
||||
true,
|
||||
algo,
|
||||
pubKey,
|
||||
}
|
||||
return Marshal(data)
|
||||
}
|
||||
|
||||
func appendU16(buf []byte, n uint16) []byte {
|
||||
return append(buf, byte(n>>8), byte(n))
|
||||
}
|
||||
|
||||
func appendU32(buf []byte, n uint32) []byte {
|
||||
return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
}
|
||||
|
||||
func appendU64(buf []byte, n uint64) []byte {
|
||||
return append(buf,
|
||||
byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32),
|
||||
byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
}
|
||||
|
||||
func appendInt(buf []byte, n int) []byte {
|
||||
return appendU32(buf, uint32(n))
|
||||
}
|
||||
|
||||
func appendString(buf []byte, s string) []byte {
|
||||
buf = appendU32(buf, uint32(len(s)))
|
||||
buf = append(buf, s...)
|
||||
return buf
|
||||
}
|
||||
|
||||
func appendBool(buf []byte, b bool) []byte {
|
||||
if b {
|
||||
return append(buf, 1)
|
||||
}
|
||||
return append(buf, 0)
|
||||
}
|
||||
|
||||
// newCond is a helper to hide the fact that there is no usable zero
|
||||
// value for sync.Cond.
|
||||
func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) }
|
||||
|
||||
// window represents the buffer available to clients
|
||||
// wishing to write to a channel.
|
||||
type window struct {
|
||||
*sync.Cond
|
||||
win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1
|
||||
writeWaiters int
|
||||
closed bool
|
||||
}
|
||||
|
||||
// add adds win to the amount of window available
|
||||
// for consumers.
|
||||
func (w *window) add(win uint32) bool {
|
||||
// a zero sized window adjust is a noop.
|
||||
if win == 0 {
|
||||
return true
|
||||
}
|
||||
w.L.Lock()
|
||||
if w.win+win < win {
|
||||
w.L.Unlock()
|
||||
return false
|
||||
}
|
||||
w.win += win
|
||||
// It is unusual that multiple goroutines would be attempting to reserve
|
||||
// window space, but not guaranteed. Use broadcast to notify all waiters
|
||||
// that additional window is available.
|
||||
w.Broadcast()
|
||||
w.L.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
// close sets the window to closed, so all reservations fail
|
||||
// immediately.
|
||||
func (w *window) close() {
|
||||
w.L.Lock()
|
||||
w.closed = true
|
||||
w.Broadcast()
|
||||
w.L.Unlock()
|
||||
}
|
||||
|
||||
// reserve reserves win from the available window capacity.
|
||||
// If no capacity remains, reserve will block. reserve may
|
||||
// return less than requested.
|
||||
func (w *window) reserve(win uint32) (uint32, error) {
|
||||
var err error
|
||||
w.L.Lock()
|
||||
w.writeWaiters++
|
||||
w.Broadcast()
|
||||
for w.win == 0 && !w.closed {
|
||||
w.Wait()
|
||||
}
|
||||
w.writeWaiters--
|
||||
if w.win < win {
|
||||
win = w.win
|
||||
}
|
||||
w.win -= win
|
||||
if w.closed {
|
||||
err = io.EOF
|
||||
}
|
||||
w.L.Unlock()
|
||||
return win, err
|
||||
}
|
||||
|
||||
// waitWriterBlocked waits until some goroutine is blocked for further
|
||||
// writes. It is used in tests only.
|
||||
func (w *window) waitWriterBlocked() {
|
||||
w.Cond.L.Lock()
|
||||
for w.writeWaiters == 0 {
|
||||
w.Cond.Wait()
|
||||
}
|
||||
w.Cond.L.Unlock()
|
||||
}
|
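The defaults deleted above are normally overridden through the Config struct embedded in ClientConfig or ServerConfig; a hedged sketch, with the algorithm names drawn from the preference lists in this file and the user and host-key handling reduced to placeholders.

package sshconfig

import "golang.org/x/crypto/ssh"

// newRestrictiveConfig narrows negotiation to a subset of the defaults and
// forces a rekey every 256 MiB of traffic.
func newRestrictiveConfig() *ssh.ClientConfig {
	return &ssh.ClientConfig{
		Config: ssh.Config{
			KeyExchanges:   []string{"curve25519-sha256@libssh.org", "ecdh-sha2-nistp256"},
			Ciphers:        []string{"aes128-gcm@openssh.com", "aes256-ctr"},
			MACs:           []string{"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256"},
			RekeyThreshold: 256 << 20,
		},
		User:            "demo",                      // placeholder
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // placeholder; verify host keys in real code
	}
}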
143 vendor/golang.org/x/crypto/ssh/connection.go (generated, vendored)
@@ -1,143 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// OpenChannelError is returned if the other side rejects an
|
||||
// OpenChannel request.
|
||||
type OpenChannelError struct {
|
||||
Reason RejectionReason
|
||||
Message string
|
||||
}
|
||||
|
||||
func (e *OpenChannelError) Error() string {
|
||||
return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
|
||||
}
|
||||
|
||||
// ConnMetadata holds metadata for the connection.
|
||||
type ConnMetadata interface {
|
||||
// User returns the user ID for this connection.
|
||||
User() string
|
||||
|
||||
// SessionID returns the session hash, also denoted by H.
|
||||
SessionID() []byte
|
||||
|
||||
// ClientVersion returns the client's version string as hashed
|
||||
// into the session ID.
|
||||
ClientVersion() []byte
|
||||
|
||||
// ServerVersion returns the server's version string as hashed
|
||||
// into the session ID.
|
||||
ServerVersion() []byte
|
||||
|
||||
// RemoteAddr returns the remote address for this connection.
|
||||
RemoteAddr() net.Addr
|
||||
|
||||
// LocalAddr returns the local address for this connection.
|
||||
LocalAddr() net.Addr
|
||||
}
|
||||
|
||||
// Conn represents an SSH connection for both server and client roles.
|
||||
// Conn is the basis for implementing an application layer, such
|
||||
// as ClientConn, which implements the traditional shell access for
|
||||
// clients.
|
||||
type Conn interface {
|
||||
ConnMetadata
|
||||
|
||||
// SendRequest sends a global request, and returns the
|
||||
// reply. If wantReply is true, it returns the response status
|
||||
// and payload. See also RFC4254, section 4.
|
||||
SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
|
||||
|
||||
// OpenChannel tries to open an channel. If the request is
|
||||
// rejected, it returns *OpenChannelError. On success it returns
|
||||
// the SSH Channel and a Go channel for incoming, out-of-band
|
||||
// requests. The Go channel must be serviced, or the
|
||||
// connection will hang.
|
||||
OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
|
||||
|
||||
// Close closes the underlying network connection
|
||||
Close() error
|
||||
|
||||
// Wait blocks until the connection has shut down, and returns the
|
||||
// error causing the shutdown.
|
||||
Wait() error
|
||||
|
||||
// TODO(hanwen): consider exposing:
|
||||
// RequestKeyChange
|
||||
// Disconnect
|
||||
}
|
||||
|
||||
// DiscardRequests consumes and rejects all requests from the
|
||||
// passed-in channel.
|
||||
func DiscardRequests(in <-chan *Request) {
|
||||
for req := range in {
|
||||
if req.WantReply {
|
||||
req.Reply(false, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A connection represents an incoming connection.
|
||||
type connection struct {
|
||||
transport *handshakeTransport
|
||||
sshConn
|
||||
|
||||
// The connection protocol.
|
||||
*mux
|
||||
}
|
||||
|
||||
func (c *connection) Close() error {
|
||||
return c.sshConn.conn.Close()
|
||||
}
|
||||
|
||||
// sshconn provides net.Conn metadata, but disallows direct reads and
|
||||
// writes.
|
||||
type sshConn struct {
|
||||
conn net.Conn
|
||||
|
||||
user string
|
||||
sessionID []byte
|
||||
clientVersion []byte
|
||||
serverVersion []byte
|
||||
}
|
||||
|
||||
func dup(src []byte) []byte {
|
||||
dst := make([]byte, len(src))
|
||||
copy(dst, src)
|
||||
return dst
|
||||
}
|
||||
|
||||
func (c *sshConn) User() string {
|
||||
return c.user
|
||||
}
|
||||
|
||||
func (c *sshConn) RemoteAddr() net.Addr {
|
||||
return c.conn.RemoteAddr()
|
||||
}
|
||||
|
||||
func (c *sshConn) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
func (c *sshConn) LocalAddr() net.Addr {
|
||||
return c.conn.LocalAddr()
|
||||
}
|
||||
|
||||
func (c *sshConn) SessionID() []byte {
|
||||
return dup(c.sessionID)
|
||||
}
|
||||
|
||||
func (c *sshConn) ClientVersion() []byte {
|
||||
return dup(c.clientVersion)
|
||||
}
|
||||
|
||||
func (c *sshConn) ServerVersion() []byte {
|
||||
return dup(c.serverVersion)
|
||||
}
|
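The Conn and DiscardRequests API deleted here is what callers use when opening raw channels; a minimal sketch over an already-established *ssh.Client, where the channel name and nil payload are illustrative rather than taken from this commit.

package sshutil

import "golang.org/x/crypto/ssh"

// openAndDrain opens a raw channel on an existing connection and drains its
// out-of-band requests, as the Conn documentation above requires.
func openAndDrain(client *ssh.Client) (ssh.Channel, error) {
	ch, reqs, err := client.OpenChannel("session", nil)
	if err != nil {
		return nil, err
	}
	go ssh.DiscardRequests(reqs) // must be serviced or the connection hangs
	return ch, nil
}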
21 vendor/golang.org/x/crypto/ssh/doc.go (generated, vendored)
@@ -1,21 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package ssh implements an SSH client and server.
|
||||
|
||||
SSH is a transport security protocol, an authentication protocol and a
|
||||
family of application protocols. The most typical application level
|
||||
protocol is a remote shell and this is specifically implemented. However,
|
||||
the multiplexed nature of SSH is exposed to users that wish to support
|
||||
others.
|
||||
|
||||
References:
|
||||
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
|
||||
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
|
||||
|
||||
This package does not fall under the stability promise of the Go language itself,
|
||||
so its API may be changed when pressing needs arise.
|
||||
*/
|
||||
package ssh // import "golang.org/x/crypto/ssh"
|
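As the package comment notes, the usual application-level protocol is a remote shell; a hedged sketch of running a single command over a session, with the address, credentials and command all placeholders.

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User:            "demo",                                   // placeholder
		Auth:            []ssh.AuthMethod{ssh.Password("secret")}, // placeholder
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),              // placeholder; verify host keys in real code
	}
	client, err := ssh.Dial("tcp", "example.com:22", config) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	out, err := session.Output("uname -a") // placeholder command
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}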
640 vendor/golang.org/x/crypto/ssh/handshake.go (generated, vendored)
@@ -1,640 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// debugHandshake, if set, prints messages sent and received. Key
|
||||
// exchange messages are printed as if DH were used, so the debug
|
||||
// messages are wrong when using ECDH.
|
||||
const debugHandshake = false
|
||||
|
||||
// chanSize sets the amount of buffering for SSH connections. This is
|
||||
// primarily for testing: setting chanSize=0 uncovers deadlocks more
|
||||
// quickly.
|
||||
const chanSize = 16
|
||||
|
||||
// keyingTransport is a packet based transport that supports key
|
||||
// changes. It need not be thread-safe. It should pass through
|
||||
// msgNewKeys in both directions.
|
||||
type keyingTransport interface {
|
||||
packetConn
|
||||
|
||||
// prepareKeyChange sets up a key change. The key change for a
|
||||
// direction will be effected if a msgNewKeys message is sent
|
||||
// or received.
|
||||
prepareKeyChange(*algorithms, *kexResult) error
|
||||
}
|
||||
|
||||
// handshakeTransport implements rekeying on top of a keyingTransport
|
||||
// and offers a thread-safe writePacket() interface.
|
||||
type handshakeTransport struct {
|
||||
conn keyingTransport
|
||||
config *Config
|
||||
|
||||
serverVersion []byte
|
||||
clientVersion []byte
|
||||
|
||||
// hostKeys is non-empty if we are the server. In that case,
|
||||
// it contains all host keys that can be used to sign the
|
||||
// connection.
|
||||
hostKeys []Signer
|
||||
|
||||
// hostKeyAlgorithms is non-empty if we are the client. In that case,
|
||||
// we accept these key types from the server as host key.
|
||||
hostKeyAlgorithms []string
|
||||
|
||||
// On read error, incoming is closed, and readError is set.
|
||||
incoming chan []byte
|
||||
readError error
|
||||
|
||||
mu sync.Mutex
|
||||
writeError error
|
||||
sentInitPacket []byte
|
||||
sentInitMsg *kexInitMsg
|
||||
pendingPackets [][]byte // Used when a key exchange is in progress.
|
||||
|
||||
// If the read loop wants to schedule a kex, it pings this
|
||||
// channel, and the write loop will send out a kex
|
||||
// message.
|
||||
requestKex chan struct{}
|
||||
|
||||
// If the other side requests or confirms a kex, its kexInit
|
||||
// packet is sent here for the write loop to find it.
|
||||
startKex chan *pendingKex
|
||||
|
||||
// data for host key checking
|
||||
hostKeyCallback HostKeyCallback
|
||||
dialAddress string
|
||||
remoteAddr net.Addr
|
||||
|
||||
// Algorithms agreed in the last key exchange.
|
||||
algorithms *algorithms
|
||||
|
||||
readPacketsLeft uint32
|
||||
readBytesLeft int64
|
||||
|
||||
writePacketsLeft uint32
|
||||
writeBytesLeft int64
|
||||
|
||||
// The session ID or nil if first kex did not complete yet.
|
||||
sessionID []byte
|
||||
}
|
||||
|
||||
type pendingKex struct {
|
||||
otherInit []byte
|
||||
done chan error
|
||||
}
|
||||
|
||||
func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport {
|
||||
t := &handshakeTransport{
|
||||
conn: conn,
|
||||
serverVersion: serverVersion,
|
||||
clientVersion: clientVersion,
|
||||
incoming: make(chan []byte, chanSize),
|
||||
requestKex: make(chan struct{}, 1),
|
||||
startKex: make(chan *pendingKex, 1),
|
||||
|
||||
config: config,
|
||||
}
|
||||
t.resetReadThresholds()
|
||||
t.resetWriteThresholds()
|
||||
|
||||
// We always start with a mandatory key exchange.
|
||||
t.requestKex <- struct{}{}
|
||||
return t
|
||||
}
|
||||
|
||||
func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport {
|
||||
t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
|
||||
t.dialAddress = dialAddr
|
||||
t.remoteAddr = addr
|
||||
t.hostKeyCallback = config.HostKeyCallback
|
||||
if config.HostKeyAlgorithms != nil {
|
||||
t.hostKeyAlgorithms = config.HostKeyAlgorithms
|
||||
} else {
|
||||
t.hostKeyAlgorithms = supportedHostKeyAlgos
|
||||
}
|
||||
go t.readLoop()
|
||||
go t.kexLoop()
|
||||
return t
|
||||
}
|
||||
|
||||
func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport {
|
||||
t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
|
||||
t.hostKeys = config.hostKeys
|
||||
go t.readLoop()
|
||||
go t.kexLoop()
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) getSessionID() []byte {
|
||||
return t.sessionID
|
||||
}
|
||||
|
||||
// waitSession waits for the session to be established. This should be
|
||||
// the first thing to call after instantiating handshakeTransport.
|
||||
func (t *handshakeTransport) waitSession() error {
|
||||
p, err := t.readPacket()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if p[0] != msgNewKeys {
|
||||
return fmt.Errorf("ssh: first packet should be msgNewKeys")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) id() string {
|
||||
if len(t.hostKeys) > 0 {
|
||||
return "server"
|
||||
}
|
||||
return "client"
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) printPacket(p []byte, write bool) {
|
||||
action := "got"
|
||||
if write {
|
||||
action = "sent"
|
||||
}
|
||||
|
||||
if p[0] == msgChannelData || p[0] == msgChannelExtendedData {
|
||||
log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p))
|
||||
} else {
|
||||
msg, err := decode(p)
|
||||
log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) readPacket() ([]byte, error) {
|
||||
p, ok := <-t.incoming
|
||||
if !ok {
|
||||
return nil, t.readError
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) readLoop() {
|
||||
first := true
|
||||
for {
|
||||
p, err := t.readOnePacket(first)
|
||||
first = false
|
||||
if err != nil {
|
||||
t.readError = err
|
||||
close(t.incoming)
|
||||
break
|
||||
}
|
||||
if p[0] == msgIgnore || p[0] == msgDebug {
|
||||
continue
|
||||
}
|
||||
t.incoming <- p
|
||||
}
|
||||
|
||||
// Stop writers too.
|
||||
t.recordWriteError(t.readError)
|
||||
|
||||
// Unblock the writer should it wait for this.
|
||||
close(t.startKex)
|
||||
|
||||
// Don't close t.requestKex; it's also written to from writePacket.
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) pushPacket(p []byte) error {
|
||||
if debugHandshake {
|
||||
t.printPacket(p, true)
|
||||
}
|
||||
return t.conn.writePacket(p)
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) getWriteError() error {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
return t.writeError
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) recordWriteError(err error) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.writeError == nil && err != nil {
|
||||
t.writeError = err
|
||||
}
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) requestKeyExchange() {
|
||||
select {
|
||||
case t.requestKex <- struct{}{}:
|
||||
default:
|
||||
// something already requested a kex, so do nothing.
|
||||
}
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) resetWriteThresholds() {
|
||||
t.writePacketsLeft = packetRekeyThreshold
|
||||
if t.config.RekeyThreshold > 0 {
|
||||
t.writeBytesLeft = int64(t.config.RekeyThreshold)
|
||||
} else if t.algorithms != nil {
|
||||
t.writeBytesLeft = t.algorithms.w.rekeyBytes()
|
||||
} else {
|
||||
t.writeBytesLeft = 1 << 30
|
||||
}
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) kexLoop() {
|
||||
|
||||
write:
|
||||
for t.getWriteError() == nil {
|
||||
var request *pendingKex
|
||||
var sent bool
|
||||
|
||||
for request == nil || !sent {
|
||||
var ok bool
|
||||
select {
|
||||
case request, ok = <-t.startKex:
|
||||
if !ok {
|
||||
break write
|
||||
}
|
||||
case <-t.requestKex:
|
||||
break
|
||||
}
|
||||
|
||||
if !sent {
|
||||
if err := t.sendKexInit(); err != nil {
|
||||
t.recordWriteError(err)
|
||||
break
|
||||
}
|
||||
sent = true
|
||||
}
|
||||
}
|
||||
|
||||
if err := t.getWriteError(); err != nil {
|
||||
if request != nil {
|
||||
request.done <- err
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// We're not servicing t.requestKex, but that is OK:
|
||||
// we never block on sending to t.requestKex.
|
||||
|
||||
// We're not servicing t.startKex, but the remote end
|
||||
// has just sent us a kexInitMsg, so it can't send
|
||||
// another key change request, until we close the done
|
||||
// channel on the pendingKex request.
|
||||
|
||||
err := t.enterKeyExchange(request.otherInit)
|
||||
|
||||
t.mu.Lock()
|
||||
t.writeError = err
|
||||
t.sentInitPacket = nil
|
||||
t.sentInitMsg = nil
|
||||
|
||||
t.resetWriteThresholds()
|
||||
|
||||
// we have completed the key exchange. Since the
|
||||
// reader is still blocked, it is safe to clear out
|
||||
// the requestKex channel. This avoids the situation
|
||||
// where: 1) we consumed our own request for the
|
||||
// initial kex, and 2) the kex from the remote side
|
||||
// caused another send on the requestKex channel,
|
||||
clear:
|
||||
for {
|
||||
select {
|
||||
case <-t.requestKex:
|
||||
//
|
||||
default:
|
||||
break clear
|
||||
}
|
||||
}
|
||||
|
||||
request.done <- t.writeError
|
||||
|
||||
// kex finished. Push packets that we received while
|
||||
// the kex was in progress. Don't look at t.startKex
|
||||
// and don't increment writtenSinceKex: if we trigger
|
||||
// another kex while we are still busy with the last
|
||||
// one, things will become very confusing.
|
||||
for _, p := range t.pendingPackets {
|
||||
t.writeError = t.pushPacket(p)
|
||||
if t.writeError != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
t.pendingPackets = t.pendingPackets[:0]
|
||||
t.mu.Unlock()
|
||||
}
|
||||
|
||||
// drain startKex channel. We don't service t.requestKex
|
||||
// because nobody does blocking sends there.
|
||||
go func() {
|
||||
for init := range t.startKex {
|
||||
init.done <- t.writeError
|
||||
}
|
||||
}()
|
||||
|
||||
// Unblock reader.
|
||||
t.conn.Close()
|
||||
}
|
||||
|
||||
// The protocol uses uint32 for packet counters, so we can't let them
|
||||
// reach 1<<32. We will actually read and write more packets than
|
||||
// this, though: the other side may send more packets, and after we
|
||||
// hit this limit on writing we will send a few more packets for the
|
||||
// key exchange itself.
|
||||
const packetRekeyThreshold = (1 << 31)
|
||||
|
||||
func (t *handshakeTransport) resetReadThresholds() {
|
||||
t.readPacketsLeft = packetRekeyThreshold
|
||||
if t.config.RekeyThreshold > 0 {
|
||||
t.readBytesLeft = int64(t.config.RekeyThreshold)
|
||||
} else if t.algorithms != nil {
|
||||
t.readBytesLeft = t.algorithms.r.rekeyBytes()
|
||||
} else {
|
||||
t.readBytesLeft = 1 << 30
|
||||
}
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) {
|
||||
p, err := t.conn.readPacket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if t.readPacketsLeft > 0 {
|
||||
t.readPacketsLeft--
|
||||
} else {
|
||||
t.requestKeyExchange()
|
||||
}
|
||||
|
||||
if t.readBytesLeft > 0 {
|
||||
t.readBytesLeft -= int64(len(p))
|
||||
} else {
|
||||
t.requestKeyExchange()
|
||||
}
|
||||
|
||||
if debugHandshake {
|
||||
t.printPacket(p, false)
|
||||
}
|
||||
|
||||
if first && p[0] != msgKexInit {
|
||||
return nil, fmt.Errorf("ssh: first packet should be msgKexInit")
|
||||
}
|
||||
|
||||
if p[0] != msgKexInit {
|
||||
return p, nil
|
||||
}
|
||||
|
||||
firstKex := t.sessionID == nil
|
||||
|
||||
kex := pendingKex{
|
||||
done: make(chan error, 1),
|
||||
otherInit: p,
|
||||
}
|
||||
t.startKex <- &kex
|
||||
err = <-kex.done
|
||||
|
||||
if debugHandshake {
|
||||
log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t.resetReadThresholds()
|
||||
|
||||
// By default, a key exchange is hidden from higher layers by
|
||||
// translating it into msgIgnore.
|
||||
successPacket := []byte{msgIgnore}
|
||||
if firstKex {
|
||||
// sendKexInit() for the first kex waits for
|
||||
// msgNewKeys so the authentication process is
|
||||
// guaranteed to happen over an encrypted transport.
|
||||
successPacket = []byte{msgNewKeys}
|
||||
}
|
||||
|
||||
return successPacket, nil
|
||||
}
|
||||
|
||||
// sendKexInit sends a key change message.
|
||||
func (t *handshakeTransport) sendKexInit() error {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.sentInitMsg != nil {
|
||||
// kexInits may be sent either in response to the other side,
|
||||
// or because our side wants to initiate a key change, so we
|
||||
// may have already sent a kexInit. In that case, don't send a
|
||||
// second kexInit.
|
||||
return nil
|
||||
}
|
||||
|
||||
msg := &kexInitMsg{
|
||||
KexAlgos: t.config.KeyExchanges,
|
||||
CiphersClientServer: t.config.Ciphers,
|
||||
CiphersServerClient: t.config.Ciphers,
|
||||
MACsClientServer: t.config.MACs,
|
||||
MACsServerClient: t.config.MACs,
|
||||
CompressionClientServer: supportedCompressions,
|
||||
CompressionServerClient: supportedCompressions,
|
||||
}
|
||||
io.ReadFull(rand.Reader, msg.Cookie[:])
|
||||
|
||||
if len(t.hostKeys) > 0 {
|
||||
for _, k := range t.hostKeys {
|
||||
msg.ServerHostKeyAlgos = append(
|
||||
msg.ServerHostKeyAlgos, k.PublicKey().Type())
|
||||
}
|
||||
} else {
|
||||
msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
|
||||
}
|
||||
packet := Marshal(msg)
|
||||
|
||||
// writePacket destroys the contents, so save a copy.
|
||||
packetCopy := make([]byte, len(packet))
|
||||
copy(packetCopy, packet)
|
||||
|
||||
if err := t.pushPacket(packetCopy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.sentInitMsg = msg
|
||||
t.sentInitPacket = packet
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) writePacket(p []byte) error {
|
||||
switch p[0] {
|
||||
case msgKexInit:
|
||||
return errors.New("ssh: only handshakeTransport can send kexInit")
|
||||
case msgNewKeys:
|
||||
return errors.New("ssh: only handshakeTransport can send newKeys")
|
||||
}
|
||||
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.writeError != nil {
|
||||
return t.writeError
|
||||
}
|
||||
|
||||
if t.sentInitMsg != nil {
|
||||
// Copy the packet so the writer can reuse the buffer.
|
||||
cp := make([]byte, len(p))
|
||||
copy(cp, p)
|
||||
t.pendingPackets = append(t.pendingPackets, cp)
|
||||
return nil
|
||||
}
|
||||
|
||||
if t.writeBytesLeft > 0 {
|
||||
t.writeBytesLeft -= int64(len(p))
|
||||
} else {
|
||||
t.requestKeyExchange()
|
||||
}
|
||||
|
||||
if t.writePacketsLeft > 0 {
|
||||
t.writePacketsLeft--
|
||||
} else {
|
||||
t.requestKeyExchange()
|
||||
}
|
||||
|
||||
if err := t.pushPacket(p); err != nil {
|
||||
t.writeError = err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) Close() error {
|
||||
return t.conn.Close()
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
|
||||
if debugHandshake {
|
||||
log.Printf("%s entered key exchange", t.id())
|
||||
}
|
||||
|
||||
otherInit := &kexInitMsg{}
|
||||
if err := Unmarshal(otherInitPacket, otherInit); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
magics := handshakeMagics{
|
||||
clientVersion: t.clientVersion,
|
||||
serverVersion: t.serverVersion,
|
||||
clientKexInit: otherInitPacket,
|
||||
serverKexInit: t.sentInitPacket,
|
||||
}
|
||||
|
||||
clientInit := otherInit
|
||||
serverInit := t.sentInitMsg
|
||||
if len(t.hostKeys) == 0 {
|
||||
clientInit, serverInit = serverInit, clientInit
|
||||
|
||||
magics.clientKexInit = t.sentInitPacket
|
||||
magics.serverKexInit = otherInitPacket
|
||||
}
|
||||
|
||||
var err error
|
||||
t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We don't send FirstKexFollows, but we handle receiving it.
|
||||
//
|
||||
// RFC 4253 section 7 defines the kex and the agreement method for
|
||||
// first_kex_packet_follows. It states that the guessed packet
|
||||
// should be ignored if the "kex algorithm and/or the host
|
||||
// key algorithm is guessed wrong (server and client have
|
||||
// different preferred algorithm), or if any of the other
|
||||
// algorithms cannot be agreed upon". The other algorithms have
|
||||
// already been checked above so the kex algorithm and host key
|
||||
// algorithm are checked here.
|
||||
if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) {
|
||||
// other side sent a kex message for the wrong algorithm,
|
||||
// which we have to ignore.
|
||||
if _, err := t.conn.readPacket(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
kex, ok := kexAlgoMap[t.algorithms.kex]
|
||||
if !ok {
|
||||
return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex)
|
||||
}
|
||||
|
||||
var result *kexResult
|
||||
if len(t.hostKeys) > 0 {
|
||||
result, err = t.server(kex, t.algorithms, &magics)
|
||||
} else {
|
||||
result, err = t.client(kex, t.algorithms, &magics)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.sessionID == nil {
|
||||
t.sessionID = result.H
|
||||
}
|
||||
result.SessionID = t.sessionID
|
||||
|
||||
if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
|
||||
return err
|
||||
}
|
||||
if packet, err := t.conn.readPacket(); err != nil {
|
||||
return err
|
||||
} else if packet[0] != msgNewKeys {
|
||||
return unexpectedMessageError(msgNewKeys, packet[0])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
|
||||
var hostKey Signer
|
||||
for _, k := range t.hostKeys {
|
||||
if algs.hostKey == k.PublicKey().Type() {
|
||||
hostKey = k
|
||||
}
|
||||
}
|
||||
|
||||
r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
|
||||
return r, err
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
|
||||
result, err := kex.Client(t.conn, t.config.Rand, magics)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostKey, err := ParsePublicKey(result.HostKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := verifyHostKeySignature(hostKey, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
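The handshake code above leaves host-key acceptance to the HostKeyCallback supplied by the caller; a minimal sketch of pinning one known key with FixedHostKey, where the key file path is a placeholder.

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

// hostKeyCallbackFromFile builds a callback that accepts exactly one
// authorized_keys-style public key read from disk.
func hostKeyCallbackFromFile(path string) (ssh.HostKeyCallback, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	pub, _, _, _, err := ssh.ParseAuthorizedKey(raw)
	if err != nil {
		return nil, err
	}
	return ssh.FixedHostKey(pub), nil
}

func main() {
	cb, err := hostKeyCallbackFromFile("/etc/ssh/ssh_host_ed25519_key.pub") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	_ = cb // pass as ClientConfig.HostKeyCallback when dialing
}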
540 vendor/golang.org/x/crypto/ssh/kex.go (generated, vendored)
@@ -1,540 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/curve25519"
|
||||
)
|
||||
|
||||
const (
|
||||
kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
|
||||
kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
|
||||
kexAlgoECDH256 = "ecdh-sha2-nistp256"
|
||||
kexAlgoECDH384 = "ecdh-sha2-nistp384"
|
||||
kexAlgoECDH521 = "ecdh-sha2-nistp521"
|
||||
kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
|
||||
)
|
||||
|
||||
// kexResult captures the outcome of a key exchange.
|
||||
type kexResult struct {
|
||||
// Session hash. See also RFC 4253, section 8.
|
||||
H []byte
|
||||
|
||||
// Shared secret. See also RFC 4253, section 8.
|
||||
K []byte
|
||||
|
||||
// Host key as hashed into H.
|
||||
HostKey []byte
|
||||
|
||||
// Signature of H.
|
||||
Signature []byte
|
||||
|
||||
// A cryptographic hash function that matches the security
|
||||
// level of the key exchange algorithm. It is used for
|
||||
// calculating H, and for deriving keys from H and K.
|
||||
Hash crypto.Hash
|
||||
|
||||
// The session ID, which is the first H computed. This is used
|
||||
// to derive key material inside the transport.
|
||||
SessionID []byte
|
||||
}
|
||||
|
||||
// handshakeMagics contains data that is always included in the
|
||||
// session hash.
|
||||
type handshakeMagics struct {
|
||||
clientVersion, serverVersion []byte
|
||||
clientKexInit, serverKexInit []byte
|
||||
}
|
||||
|
||||
func (m *handshakeMagics) write(w io.Writer) {
|
||||
writeString(w, m.clientVersion)
|
||||
writeString(w, m.serverVersion)
|
||||
writeString(w, m.clientKexInit)
|
||||
writeString(w, m.serverKexInit)
|
||||
}
|
||||
|
||||
// kexAlgorithm abstracts different key exchange algorithms.
|
||||
type kexAlgorithm interface {
|
||||
// Server runs server-side key agreement, signing the result
|
||||
// with a hostkey.
|
||||
Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
|
||||
|
||||
// Client runs the client-side key agreement. Caller is
|
||||
// responsible for verifying the host key signature.
|
||||
Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error)
|
||||
}
|
||||
|
||||
// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
|
||||
type dhGroup struct {
|
||||
g, p, pMinus1 *big.Int
|
||||
}
|
||||
|
||||
func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
|
||||
if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 {
|
||||
return nil, errors.New("ssh: DH parameter out of bounds")
|
||||
}
|
||||
return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
|
||||
}
|
||||
|
||||
func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
|
||||
hashFunc := crypto.SHA1
|
||||
|
||||
var x *big.Int
|
||||
for {
|
||||
var err error
|
||||
if x, err = rand.Int(randSource, group.pMinus1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if x.Sign() > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
X := new(big.Int).Exp(group.g, x, group.p)
|
||||
kexDHInit := kexDHInitMsg{
|
||||
X: X,
|
||||
}
|
||||
if err := c.writePacket(Marshal(&kexDHInit)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var kexDHReply kexDHReplyMsg
|
||||
if err = Unmarshal(packet, &kexDHReply); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kInt, err := group.diffieHellman(kexDHReply.Y, x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
h := hashFunc.New()
|
||||
magics.write(h)
|
||||
writeString(h, kexDHReply.HostKey)
|
||||
writeInt(h, X)
|
||||
writeInt(h, kexDHReply.Y)
|
||||
K := make([]byte, intLength(kInt))
|
||||
marshalInt(K, kInt)
|
||||
h.Write(K)
|
||||
|
||||
return &kexResult{
|
||||
H: h.Sum(nil),
|
||||
K: K,
|
||||
HostKey: kexDHReply.HostKey,
|
||||
Signature: kexDHReply.Signature,
|
||||
Hash: crypto.SHA1,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
|
||||
hashFunc := crypto.SHA1
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var kexDHInit kexDHInitMsg
|
||||
if err = Unmarshal(packet, &kexDHInit); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var y *big.Int
|
||||
for {
|
||||
if y, err = rand.Int(randSource, group.pMinus1); err != nil {
|
||||
return
|
||||
}
|
||||
if y.Sign() > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
Y := new(big.Int).Exp(group.g, y, group.p)
|
||||
kInt, err := group.diffieHellman(kexDHInit.X, y)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostKeyBytes := priv.PublicKey().Marshal()
|
||||
|
||||
h := hashFunc.New()
|
||||
magics.write(h)
|
||||
writeString(h, hostKeyBytes)
|
||||
writeInt(h, kexDHInit.X)
|
||||
writeInt(h, Y)
|
||||
|
||||
K := make([]byte, intLength(kInt))
|
||||
marshalInt(K, kInt)
|
||||
h.Write(K)
|
||||
|
||||
H := h.Sum(nil)
|
||||
|
||||
// H is already a hash, but the hostkey signing will apply its
|
||||
// own key-specific hash algorithm.
|
||||
sig, err := signAndMarshal(priv, randSource, H)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kexDHReply := kexDHReplyMsg{
|
||||
HostKey: hostKeyBytes,
|
||||
Y: Y,
|
||||
Signature: sig,
|
||||
}
|
||||
packet = Marshal(&kexDHReply)
|
||||
|
||||
err = c.writePacket(packet)
|
||||
return &kexResult{
|
||||
H: H,
|
||||
K: K,
|
||||
HostKey: hostKeyBytes,
|
||||
Signature: sig,
|
||||
Hash: crypto.SHA1,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ecdh performs Elliptic Curve Diffie-Hellman key exchange as
|
||||
// described in RFC 5656, section 4.
|
||||
type ecdh struct {
|
||||
curve elliptic.Curve
|
||||
}
|
||||
|
||||
func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
|
||||
ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kexInit := kexECDHInitMsg{
|
||||
ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y),
|
||||
}
|
||||
|
||||
serialized := Marshal(&kexInit)
|
||||
if err := c.writePacket(serialized); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var reply kexECDHReplyMsg
|
||||
if err = Unmarshal(packet, &reply); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// generate shared secret
|
||||
secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes())
|
||||
|
||||
h := ecHash(kex.curve).New()
|
||||
magics.write(h)
|
||||
writeString(h, reply.HostKey)
|
||||
writeString(h, kexInit.ClientPubKey)
|
||||
writeString(h, reply.EphemeralPubKey)
|
||||
K := make([]byte, intLength(secret))
|
||||
marshalInt(K, secret)
|
||||
h.Write(K)
|
||||
|
||||
return &kexResult{
|
||||
H: h.Sum(nil),
|
||||
K: K,
|
||||
HostKey: reply.HostKey,
|
||||
Signature: reply.Signature,
|
||||
Hash: ecHash(kex.curve),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// unmarshalECKey parses and checks an EC key.
|
||||
func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) {
|
||||
x, y = elliptic.Unmarshal(curve, pubkey)
|
||||
if x == nil {
|
||||
return nil, nil, errors.New("ssh: elliptic.Unmarshal failure")
|
||||
}
|
||||
if !validateECPublicKey(curve, x, y) {
|
||||
return nil, nil, errors.New("ssh: public key not on curve")
|
||||
}
|
||||
return x, y, nil
|
||||
}
|
||||
|
||||
// validateECPublicKey checks that the point is a valid public key for
|
||||
// the given curve. See [SEC1], 3.2.2
|
||||
func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
|
||||
if x.Sign() == 0 && y.Sign() == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if x.Cmp(curve.Params().P) >= 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if y.Cmp(curve.Params().P) >= 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if !curve.IsOnCurve(x, y) {
|
||||
return false
|
||||
}
|
||||
|
||||
// We don't check if N * PubKey == 0, since
|
||||
//
|
||||
// - the NIST curves have cofactor = 1, so this is implicit.
|
||||
// (We don't foresee an implementation that supports non NIST
|
||||
// curves)
|
||||
//
|
||||
// - for ephemeral keys, we don't need to worry about small
|
||||
// subgroup attacks.
|
||||
return true
|
||||
}
|
||||
|
||||
func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var kexECDHInit kexECDHInitMsg
|
||||
if err = Unmarshal(packet, &kexECDHInit); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We could cache this key across multiple users/multiple
|
||||
// connection attempts, but the benefit is small. OpenSSH
|
||||
// generates a new key for each incoming connection.
|
||||
ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostKeyBytes := priv.PublicKey().Marshal()
|
||||
|
||||
serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y)
|
||||
|
||||
// generate shared secret
|
||||
secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes())
|
||||
|
||||
h := ecHash(kex.curve).New()
|
||||
magics.write(h)
|
||||
writeString(h, hostKeyBytes)
|
||||
writeString(h, kexECDHInit.ClientPubKey)
|
||||
writeString(h, serializedEphKey)
|
||||
|
||||
K := make([]byte, intLength(secret))
|
||||
marshalInt(K, secret)
|
||||
h.Write(K)
|
||||
|
||||
H := h.Sum(nil)
|
||||
|
||||
// H is already a hash, but the hostkey signing will apply its
|
||||
// own key-specific hash algorithm.
|
||||
sig, err := signAndMarshal(priv, rand, H)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reply := kexECDHReplyMsg{
|
||||
EphemeralPubKey: serializedEphKey,
|
||||
HostKey: hostKeyBytes,
|
||||
Signature: sig,
|
||||
}
|
||||
|
||||
serialized := Marshal(&reply)
|
||||
if err := c.writePacket(serialized); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &kexResult{
|
||||
H: H,
|
||||
K: K,
|
||||
HostKey: reply.HostKey,
|
||||
Signature: sig,
|
||||
Hash: ecHash(kex.curve),
|
||||
}, nil
|
||||
}
|
||||
|
||||
var kexAlgoMap = map[string]kexAlgorithm{}
|
||||
|
||||
func init() {
|
||||
// This is the group called diffie-hellman-group1-sha1 in RFC
|
||||
// 4253 and Oakley Group 2 in RFC 2409.
|
||||
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
|
||||
kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
|
||||
g: new(big.Int).SetInt64(2),
|
||||
p: p,
|
||||
pMinus1: new(big.Int).Sub(p, bigOne),
|
||||
}
|
||||
|
||||
// This is the group called diffie-hellman-group14-sha1 in RFC
|
||||
// 4253 and Oakley Group 14 in RFC 3526.
|
||||
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
|
||||
|
||||
kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
|
||||
g: new(big.Int).SetInt64(2),
|
||||
p: p,
|
||||
pMinus1: new(big.Int).Sub(p, bigOne),
|
||||
}
|
||||
|
||||
kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
|
||||
kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
|
||||
kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
|
||||
kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
|
||||
}
|
||||
|
||||
// curve25519sha256 implements the curve25519-sha256@libssh.org key
|
||||
// agreement protocol, as described in
|
||||
// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
|
||||
type curve25519sha256 struct{}
|
||||
|
||||
type curve25519KeyPair struct {
|
||||
priv [32]byte
|
||||
pub [32]byte
|
||||
}
|
||||
|
||||
func (kp *curve25519KeyPair) generate(rand io.Reader) error {
|
||||
if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
|
||||
return nil
|
||||
}
|
||||
|
||||
// curve25519Zeros is just an array of 32 zero bytes so that we have something
|
||||
// convenient to compare against in order to reject curve25519 points with the
|
||||
// wrong order.
|
||||
var curve25519Zeros [32]byte
|
||||
|
||||
func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
|
||||
var kp curve25519KeyPair
|
||||
if err := kp.generate(rand); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var reply kexECDHReplyMsg
|
||||
if err = Unmarshal(packet, &reply); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(reply.EphemeralPubKey) != 32 {
|
||||
return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
|
||||
}
|
||||
|
||||
var servPub, secret [32]byte
|
||||
copy(servPub[:], reply.EphemeralPubKey)
|
||||
curve25519.ScalarMult(&secret, &kp.priv, &servPub)
|
||||
if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
|
||||
return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
|
||||
}
|
||||
|
||||
h := crypto.SHA256.New()
|
||||
magics.write(h)
|
||||
writeString(h, reply.HostKey)
|
||||
writeString(h, kp.pub[:])
|
||||
writeString(h, reply.EphemeralPubKey)
|
||||
|
||||
kInt := new(big.Int).SetBytes(secret[:])
|
||||
K := make([]byte, intLength(kInt))
|
||||
marshalInt(K, kInt)
|
||||
h.Write(K)
|
||||
|
||||
return &kexResult{
|
||||
H: h.Sum(nil),
|
||||
K: K,
|
||||
HostKey: reply.HostKey,
|
||||
Signature: reply.Signature,
|
||||
Hash: crypto.SHA256,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var kexInit kexECDHInitMsg
|
||||
if err = Unmarshal(packet, &kexInit); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(kexInit.ClientPubKey) != 32 {
|
||||
return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
|
||||
}
|
||||
|
||||
var kp curve25519KeyPair
|
||||
if err := kp.generate(rand); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var clientPub, secret [32]byte
|
||||
copy(clientPub[:], kexInit.ClientPubKey)
|
||||
curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
|
||||
if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
|
||||
return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
|
||||
}
|
||||
|
||||
hostKeyBytes := priv.PublicKey().Marshal()
|
||||
|
||||
h := crypto.SHA256.New()
|
||||
magics.write(h)
|
||||
writeString(h, hostKeyBytes)
|
||||
writeString(h, kexInit.ClientPubKey)
|
||||
writeString(h, kp.pub[:])
|
||||
|
||||
kInt := new(big.Int).SetBytes(secret[:])
|
||||
K := make([]byte, intLength(kInt))
|
||||
marshalInt(K, kInt)
|
||||
h.Write(K)
|
||||
|
||||
H := h.Sum(nil)
|
||||
|
||||
sig, err := signAndMarshal(priv, rand, H)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reply := kexECDHReplyMsg{
|
||||
EphemeralPubKey: kp.pub[:],
|
||||
HostKey: hostKeyBytes,
|
||||
Signature: sig,
|
||||
}
|
||||
if err := c.writePacket(Marshal(&reply)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &kexResult{
|
||||
H: H,
|
||||
K: K,
|
||||
HostKey: hostKeyBytes,
|
||||
Signature: sig,
|
||||
Hash: crypto.SHA256,
|
||||
}, nil
|
||||
}
|
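The dhGroup exchange above boils down to two modular exponentiations; a self-contained sketch of that arithmetic with math/big, where g = 2 follows the groups registered in init() but the prime is a tiny placeholder rather than the real RFC 3526 modulus.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	g := big.NewInt(2)
	p := big.NewInt(2903) // toy prime for illustration only

	x := big.NewInt(17) // client private exponent
	y := big.NewInt(23) // server private exponent

	X := new(big.Int).Exp(g, x, p) // client public value, sent in kexDHInitMsg
	Y := new(big.Int).Exp(g, y, p) // server public value, sent in kexDHReplyMsg

	// Both sides derive the same shared secret K = g^(x*y) mod p.
	kClient := new(big.Int).Exp(Y, x, p)
	kServer := new(big.Int).Exp(X, y, p)
	fmt.Println(kClient.Cmp(kServer) == 0) // prints true
}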
1031 vendor/golang.org/x/crypto/ssh/keys.go (generated, vendored)
File diff suppressed because it is too large.
61 vendor/golang.org/x/crypto/ssh/mac.go (generated, vendored)
@@ -1,61 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
// Message authentication support
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"hash"
|
||||
)
|
||||
|
||||
type macMode struct {
|
||||
keySize int
|
||||
etm bool
|
||||
new func(key []byte) hash.Hash
|
||||
}
|
||||
|
||||
// truncatingMAC wraps around a hash.Hash and truncates the output digest to
|
||||
// a given size.
|
||||
type truncatingMAC struct {
|
||||
length int
|
||||
hmac hash.Hash
|
||||
}
|
||||
|
||||
func (t truncatingMAC) Write(data []byte) (int, error) {
|
||||
return t.hmac.Write(data)
|
||||
}
|
||||
|
||||
func (t truncatingMAC) Sum(in []byte) []byte {
|
||||
out := t.hmac.Sum(in)
|
||||
return out[:len(in)+t.length]
|
||||
}
|
||||
|
||||
func (t truncatingMAC) Reset() {
|
||||
t.hmac.Reset()
|
||||
}
|
||||
|
||||
func (t truncatingMAC) Size() int {
|
||||
return t.length
|
||||
}
|
||||
|
||||
func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
|
||||
|
||||
var macModes = map[string]*macMode{
|
||||
"hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash {
|
||||
return hmac.New(sha256.New, key)
|
||||
}},
|
||||
"hmac-sha2-256": {32, false, func(key []byte) hash.Hash {
|
||||
return hmac.New(sha256.New, key)
|
||||
}},
|
||||
"hmac-sha1": {20, false, func(key []byte) hash.Hash {
|
||||
return hmac.New(sha1.New, key)
|
||||
}},
|
||||
"hmac-sha1-96": {20, false, func(key []byte) hash.Hash {
|
||||
return truncatingMAC{12, hmac.New(sha1.New, key)}
|
||||
}},
|
||||
}
|
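The hmac-sha1-96 mode above is ordinary HMAC-SHA-1 truncated to 12 bytes, which is what truncatingMAC implements; a standalone sketch with the standard library, using a placeholder key and message.

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"fmt"
)

func main() {
	key := []byte("placeholder-key")
	msg := []byte("placeholder message")

	mac := hmac.New(sha1.New, key)
	mac.Write(msg)
	full := mac.Sum(nil) // 20-byte HMAC-SHA-1 tag

	// hmac-sha1-96 keeps only the first 96 bits (12 bytes) of the tag.
	fmt.Printf("%x\n", full[:12])
}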
758 vendor/golang.org/x/crypto/ssh/messages.go (generated, vendored)
@@ -1,758 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// These are SSH message type numbers. They are scattered around several
|
||||
// documents but many were taken from [SSH-PARAMETERS].
|
||||
const (
|
||||
msgIgnore = 2
|
||||
msgUnimplemented = 3
|
||||
msgDebug = 4
|
||||
msgNewKeys = 21
|
||||
|
||||
// Standard authentication messages
|
||||
msgUserAuthSuccess = 52
|
||||
msgUserAuthBanner = 53
|
||||
)
|
||||
|
||||
// SSH messages:
|
||||
//
|
||||
// These structures mirror the wire format of the corresponding SSH messages.
|
||||
// They are marshaled using reflection with the marshal and unmarshal functions
|
||||
// in this file. The only wrinkle is that a final member of type []byte with a
|
||||
// ssh tag of "rest" receives the remainder of a packet when unmarshaling.
|
||||
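A minimal sketch of the tag convention just described, using the package's exported Marshal and Unmarshal; the struct and the type byte chosen here are illustrative, not part of the package.

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// exampleMsg follows the rules above: the first field's "sshtype" tag supplies
// the leading message-type byte, and a final []byte tagged ssh:"rest" receives
// whatever remains of the packet when unmarshaling.
type exampleMsg struct {
	Service string `sshtype:"5"`
	Rest    []byte `ssh:"rest"`
}

func main() {
	packet := ssh.Marshal(&exampleMsg{Service: "ssh-userauth", Rest: []byte{0x01}})
	fmt.Printf("wire: % x\n", packet) // 05, uint32 length, "ssh-userauth", 01

	var decoded exampleMsg
	if err := ssh.Unmarshal(packet, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Service, decoded.Rest)
}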
|
||||
// See RFC 4253, section 11.1.
|
||||
const msgDisconnect = 1
|
||||
|
||||
// disconnectMsg is the message that signals a disconnect. It is also
|
||||
// the error type returned from mux.Wait()
|
||||
type disconnectMsg struct {
|
||||
Reason uint32 `sshtype:"1"`
|
||||
Message string
|
||||
Language string
|
||||
}
|
||||
|
||||
func (d *disconnectMsg) Error() string {
|
||||
return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message)
|
||||
}
|
||||
|
||||
// See RFC 4253, section 7.1.
|
||||
const msgKexInit = 20
|
||||
|
||||
type kexInitMsg struct {
|
||||
Cookie [16]byte `sshtype:"20"`
|
||||
KexAlgos []string
|
||||
ServerHostKeyAlgos []string
|
||||
CiphersClientServer []string
|
||||
CiphersServerClient []string
|
||||
MACsClientServer []string
|
||||
MACsServerClient []string
|
||||
CompressionClientServer []string
|
||||
CompressionServerClient []string
|
||||
LanguagesClientServer []string
|
||||
LanguagesServerClient []string
|
||||
FirstKexFollows bool
|
||||
Reserved uint32
|
||||
}
|
||||
|
||||
// See RFC 4253, section 8.
|
||||
|
||||
// Diffie-Hellman
|
||||
const msgKexDHInit = 30
|
||||
|
||||
type kexDHInitMsg struct {
|
||||
X *big.Int `sshtype:"30"`
|
||||
}
|
||||
|
||||
const msgKexECDHInit = 30
|
||||
|
||||
type kexECDHInitMsg struct {
|
||||
ClientPubKey []byte `sshtype:"30"`
|
||||
}
|
||||
|
||||
const msgKexECDHReply = 31
|
||||
|
||||
type kexECDHReplyMsg struct {
|
||||
HostKey []byte `sshtype:"31"`
|
||||
EphemeralPubKey []byte
|
||||
Signature []byte
|
||||
}
|
||||
|
||||
const msgKexDHReply = 31
|
||||
|
||||
type kexDHReplyMsg struct {
|
||||
HostKey []byte `sshtype:"31"`
|
||||
Y *big.Int
|
||||
Signature []byte
|
||||
}
|
||||
|
||||
// See RFC 4253, section 10.
|
||||
const msgServiceRequest = 5
|
||||
|
||||
type serviceRequestMsg struct {
|
||||
Service string `sshtype:"5"`
|
||||
}
|
||||
|
||||
// See RFC 4253, section 10.
|
||||
const msgServiceAccept = 6
|
||||
|
||||
type serviceAcceptMsg struct {
|
||||
Service string `sshtype:"6"`
|
||||
}
|
||||
|
||||
// See RFC 4252, section 5.
|
||||
const msgUserAuthRequest = 50
|
||||
|
||||
type userAuthRequestMsg struct {
|
||||
User string `sshtype:"50"`
|
||||
Service string
|
||||
Method string
|
||||
Payload []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// Used for debug printouts of packets.
|
||||
type userAuthSuccessMsg struct {
|
||||
}
|
||||
|
||||
// See RFC 4252, section 5.1
|
||||
const msgUserAuthFailure = 51
|
||||
|
||||
type userAuthFailureMsg struct {
|
||||
Methods []string `sshtype:"51"`
|
||||
PartialSuccess bool
|
||||
}
|
||||
|
||||
// See RFC 4256, section 3.2
|
||||
const msgUserAuthInfoRequest = 60
|
||||
const msgUserAuthInfoResponse = 61
|
||||
|
||||
type userAuthInfoRequestMsg struct {
|
||||
User string `sshtype:"60"`
|
||||
Instruction string
|
||||
DeprecatedLanguage string
|
||||
NumPrompts uint32
|
||||
Prompts []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 5.1.
|
||||
const msgChannelOpen = 90
|
||||
|
||||
type channelOpenMsg struct {
|
||||
ChanType string `sshtype:"90"`
|
||||
PeersId uint32
|
||||
PeersWindow uint32
|
||||
MaxPacketSize uint32
|
||||
TypeSpecificData []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
const msgChannelExtendedData = 95
|
||||
const msgChannelData = 94
|
||||
|
||||
// Used for debug print outs of packets.
|
||||
type channelDataMsg struct {
|
||||
PeersId uint32 `sshtype:"94"`
|
||||
Length uint32
|
||||
Rest []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 5.1.
|
||||
const msgChannelOpenConfirm = 91
|
||||
|
||||
type channelOpenConfirmMsg struct {
|
||||
PeersId uint32 `sshtype:"91"`
|
||||
MyId uint32
|
||||
MyWindow uint32
|
||||
MaxPacketSize uint32
|
||||
TypeSpecificData []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 5.1.
|
||||
const msgChannelOpenFailure = 92
|
||||
|
||||
type channelOpenFailureMsg struct {
|
||||
PeersId uint32 `sshtype:"92"`
|
||||
Reason RejectionReason
|
||||
Message string
|
||||
Language string
|
||||
}
|
||||
|
||||
const msgChannelRequest = 98
|
||||
|
||||
type channelRequestMsg struct {
|
||||
PeersId uint32 `sshtype:"98"`
|
||||
Request string
|
||||
WantReply bool
|
||||
RequestSpecificData []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 5.4.
|
||||
const msgChannelSuccess = 99
|
||||
|
||||
type channelRequestSuccessMsg struct {
|
||||
PeersId uint32 `sshtype:"99"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 5.4.
|
||||
const msgChannelFailure = 100
|
||||
|
||||
type channelRequestFailureMsg struct {
|
||||
PeersId uint32 `sshtype:"100"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 5.3
|
||||
const msgChannelClose = 97
|
||||
|
||||
type channelCloseMsg struct {
|
||||
PeersId uint32 `sshtype:"97"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 5.3
|
||||
const msgChannelEOF = 96
|
||||
|
||||
type channelEOFMsg struct {
|
||||
PeersId uint32 `sshtype:"96"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 4
|
||||
const msgGlobalRequest = 80
|
||||
|
||||
type globalRequestMsg struct {
|
||||
Type string `sshtype:"80"`
|
||||
WantReply bool
|
||||
Data []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 4
|
||||
const msgRequestSuccess = 81
|
||||
|
||||
type globalRequestSuccessMsg struct {
|
||||
Data []byte `ssh:"rest" sshtype:"81"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 4
|
||||
const msgRequestFailure = 82
|
||||
|
||||
type globalRequestFailureMsg struct {
|
||||
Data []byte `ssh:"rest" sshtype:"82"`
|
||||
}
|
||||
|
||||
// See RFC 4254, section 5.2
|
||||
const msgChannelWindowAdjust = 93
|
||||
|
||||
type windowAdjustMsg struct {
|
||||
PeersId uint32 `sshtype:"93"`
|
||||
AdditionalBytes uint32
|
||||
}
|
||||
|
||||
// See RFC 4252, section 7
|
||||
const msgUserAuthPubKeyOk = 60
|
||||
|
||||
type userAuthPubKeyOkMsg struct {
|
||||
Algo string `sshtype:"60"`
|
||||
PubKey []byte
|
||||
}
|
||||
|
||||
// typeTags returns the possible type bytes for the given reflect.Type, which
|
||||
// should be a struct. The possible values are separated by a '|' character.
|
||||
func typeTags(structType reflect.Type) (tags []byte) {
|
||||
tagStr := structType.Field(0).Tag.Get("sshtype")
|
||||
|
||||
for _, tag := range strings.Split(tagStr, "|") {
|
||||
i, err := strconv.Atoi(tag)
|
||||
if err == nil {
|
||||
tags = append(tags, byte(i))
|
||||
}
|
||||
}
|
||||
|
||||
return tags
|
||||
}
|
||||
|
||||
func fieldError(t reflect.Type, field int, problem string) error {
|
||||
if problem != "" {
|
||||
problem = ": " + problem
|
||||
}
|
||||
return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem)
|
||||
}
|
||||
|
||||
var errShortRead = errors.New("ssh: short read")
|
||||
|
||||
// Unmarshal parses data in SSH wire format into a structure. The out
|
||||
// argument should be a pointer to struct. If the first member of the
|
||||
// struct has the "sshtype" tag set to a '|'-separated set of numbers
|
||||
// in decimal, the packet must start with one of those numbers. In
|
||||
// case of error, Unmarshal returns a ParseError or
|
||||
// UnexpectedMessageError.
|
||||
func Unmarshal(data []byte, out interface{}) error {
|
||||
v := reflect.ValueOf(out).Elem()
|
||||
structType := v.Type()
|
||||
expectedTypes := typeTags(structType)
|
||||
|
||||
var expectedType byte
|
||||
if len(expectedTypes) > 0 {
|
||||
expectedType = expectedTypes[0]
|
||||
}
|
||||
|
||||
if len(data) == 0 {
|
||||
return parseError(expectedType)
|
||||
}
|
||||
|
||||
if len(expectedTypes) > 0 {
|
||||
goodType := false
|
||||
for _, e := range expectedTypes {
|
||||
if e > 0 && data[0] == e {
|
||||
goodType = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !goodType {
|
||||
return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes)
|
||||
}
|
||||
data = data[1:]
|
||||
}
|
||||
|
||||
var ok bool
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
t := field.Type()
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
if len(data) < 1 {
|
||||
return errShortRead
|
||||
}
|
||||
field.SetBool(data[0] != 0)
|
||||
data = data[1:]
|
||||
case reflect.Array:
|
||||
if t.Elem().Kind() != reflect.Uint8 {
|
||||
return fieldError(structType, i, "array of unsupported type")
|
||||
}
|
||||
if len(data) < t.Len() {
|
||||
return errShortRead
|
||||
}
|
||||
for j, n := 0, t.Len(); j < n; j++ {
|
||||
field.Index(j).Set(reflect.ValueOf(data[j]))
|
||||
}
|
||||
data = data[t.Len():]
|
||||
case reflect.Uint64:
|
||||
var u64 uint64
|
||||
if u64, data, ok = parseUint64(data); !ok {
|
||||
return errShortRead
|
||||
}
|
||||
field.SetUint(u64)
|
||||
case reflect.Uint32:
|
||||
var u32 uint32
|
||||
if u32, data, ok = parseUint32(data); !ok {
|
||||
return errShortRead
|
||||
}
|
||||
field.SetUint(uint64(u32))
|
||||
case reflect.Uint8:
|
||||
if len(data) < 1 {
|
||||
return errShortRead
|
||||
}
|
||||
field.SetUint(uint64(data[0]))
|
||||
data = data[1:]
|
||||
case reflect.String:
|
||||
var s []byte
|
||||
if s, data, ok = parseString(data); !ok {
|
||||
return fieldError(structType, i, "")
|
||||
}
|
||||
field.SetString(string(s))
|
||||
case reflect.Slice:
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Uint8:
|
||||
if structType.Field(i).Tag.Get("ssh") == "rest" {
|
||||
field.Set(reflect.ValueOf(data))
|
||||
data = nil
|
||||
} else {
|
||||
var s []byte
|
||||
if s, data, ok = parseString(data); !ok {
|
||||
return errShortRead
|
||||
}
|
||||
field.Set(reflect.ValueOf(s))
|
||||
}
|
||||
case reflect.String:
|
||||
var nl []string
|
||||
if nl, data, ok = parseNameList(data); !ok {
|
||||
return errShortRead
|
||||
}
|
||||
field.Set(reflect.ValueOf(nl))
|
||||
default:
|
||||
return fieldError(structType, i, "slice of unsupported type")
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if t == bigIntType {
|
||||
var n *big.Int
|
||||
if n, data, ok = parseInt(data); !ok {
|
||||
return errShortRead
|
||||
}
|
||||
field.Set(reflect.ValueOf(n))
|
||||
} else {
|
||||
return fieldError(structType, i, "pointer to unsupported type")
|
||||
}
|
||||
default:
|
||||
return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t))
|
||||
}
|
||||
}
|
||||
|
||||
if len(data) != 0 {
|
||||
return parseError(expectedType)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Marshal serializes the message in msg to SSH wire format. The msg
|
||||
// argument should be a struct or pointer to struct. If the first
|
||||
// member has the "sshtype" tag set to a number in decimal, that
|
||||
// number is prepended to the result. If the last of member has the
|
||||
// "ssh" tag set to "rest", its contents are appended to the output.
|
||||
func Marshal(msg interface{}) []byte {
|
||||
out := make([]byte, 0, 64)
|
||||
return marshalStruct(out, msg)
|
||||
}
|
||||
|
||||
func marshalStruct(out []byte, msg interface{}) []byte {
|
||||
v := reflect.Indirect(reflect.ValueOf(msg))
|
||||
msgTypes := typeTags(v.Type())
|
||||
if len(msgTypes) > 0 {
|
||||
out = append(out, msgTypes[0])
|
||||
}
|
||||
|
||||
for i, n := 0, v.NumField(); i < n; i++ {
|
||||
field := v.Field(i)
|
||||
switch t := field.Type(); t.Kind() {
|
||||
case reflect.Bool:
|
||||
var v uint8
|
||||
if field.Bool() {
|
||||
v = 1
|
||||
}
|
||||
out = append(out, v)
|
||||
case reflect.Array:
|
||||
if t.Elem().Kind() != reflect.Uint8 {
|
||||
panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface()))
|
||||
}
|
||||
for j, l := 0, t.Len(); j < l; j++ {
|
||||
out = append(out, uint8(field.Index(j).Uint()))
|
||||
}
|
||||
case reflect.Uint32:
|
||||
out = appendU32(out, uint32(field.Uint()))
|
||||
case reflect.Uint64:
|
||||
out = appendU64(out, uint64(field.Uint()))
|
||||
case reflect.Uint8:
|
||||
out = append(out, uint8(field.Uint()))
|
||||
case reflect.String:
|
||||
s := field.String()
|
||||
out = appendInt(out, len(s))
|
||||
out = append(out, s...)
|
||||
case reflect.Slice:
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Uint8:
|
||||
if v.Type().Field(i).Tag.Get("ssh") != "rest" {
|
||||
out = appendInt(out, field.Len())
|
||||
}
|
||||
out = append(out, field.Bytes()...)
|
||||
case reflect.String:
|
||||
offset := len(out)
|
||||
out = appendU32(out, 0)
|
||||
if n := field.Len(); n > 0 {
|
||||
for j := 0; j < n; j++ {
|
||||
f := field.Index(j)
|
||||
if j != 0 {
|
||||
out = append(out, ',')
|
||||
}
|
||||
out = append(out, f.String()...)
|
||||
}
|
||||
// overwrite length value
|
||||
binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4))
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface()))
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if t == bigIntType {
|
||||
var n *big.Int
|
||||
nValue := reflect.ValueOf(&n)
|
||||
nValue.Elem().Set(field)
|
||||
needed := intLength(n)
|
||||
oldLength := len(out)
|
||||
|
||||
if cap(out)-len(out) < needed {
|
||||
newOut := make([]byte, len(out), 2*(len(out)+needed))
|
||||
copy(newOut, out)
|
||||
out = newOut
|
||||
}
|
||||
out = out[:oldLength+needed]
|
||||
marshalInt(out[oldLength:], n)
|
||||
} else {
|
||||
panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
var bigOne = big.NewInt(1)
|
||||
|
||||
func parseString(in []byte) (out, rest []byte, ok bool) {
|
||||
if len(in) < 4 {
|
||||
return
|
||||
}
|
||||
length := binary.BigEndian.Uint32(in)
|
||||
in = in[4:]
|
||||
if uint32(len(in)) < length {
|
||||
return
|
||||
}
|
||||
out = in[:length]
|
||||
rest = in[length:]
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
comma = []byte{','}
|
||||
emptyNameList = []string{}
|
||||
)
|
||||
|
||||
func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
|
||||
contents, rest, ok := parseString(in)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if len(contents) == 0 {
|
||||
out = emptyNameList
|
||||
return
|
||||
}
|
||||
parts := bytes.Split(contents, comma)
|
||||
out = make([]string, len(parts))
|
||||
for i, part := range parts {
|
||||
out[i] = string(part)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
|
||||
contents, rest, ok := parseString(in)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
out = new(big.Int)
|
||||
|
||||
if len(contents) > 0 && contents[0]&0x80 == 0x80 {
|
||||
// This is a negative number
|
||||
notBytes := make([]byte, len(contents))
|
||||
for i := range notBytes {
|
||||
notBytes[i] = ^contents[i]
|
||||
}
|
||||
out.SetBytes(notBytes)
|
||||
out.Add(out, bigOne)
|
||||
out.Neg(out)
|
||||
} else {
|
||||
// Positive number
|
||||
out.SetBytes(contents)
|
||||
}
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
func parseUint32(in []byte) (uint32, []byte, bool) {
|
||||
if len(in) < 4 {
|
||||
return 0, nil, false
|
||||
}
|
||||
return binary.BigEndian.Uint32(in), in[4:], true
|
||||
}
|
||||
|
||||
func parseUint64(in []byte) (uint64, []byte, bool) {
|
||||
if len(in) < 8 {
|
||||
return 0, nil, false
|
||||
}
|
||||
return binary.BigEndian.Uint64(in), in[8:], true
|
||||
}
|
||||
|
||||
func intLength(n *big.Int) int {
|
||||
length := 4 /* length bytes */
|
||||
if n.Sign() < 0 {
|
||||
nMinus1 := new(big.Int).Neg(n)
|
||||
nMinus1.Sub(nMinus1, bigOne)
|
||||
bitLen := nMinus1.BitLen()
|
||||
if bitLen%8 == 0 {
|
||||
// The number will need 0xff padding
|
||||
length++
|
||||
}
|
||||
length += (bitLen + 7) / 8
|
||||
} else if n.Sign() == 0 {
|
||||
// A zero is the zero length string
|
||||
} else {
|
||||
bitLen := n.BitLen()
|
||||
if bitLen%8 == 0 {
|
||||
// The number will need 0x00 padding
|
||||
length++
|
||||
}
|
||||
length += (bitLen + 7) / 8
|
||||
}
|
||||
|
||||
return length
|
||||
}
|
||||
|
||||
func marshalUint32(to []byte, n uint32) []byte {
|
||||
binary.BigEndian.PutUint32(to, n)
|
||||
return to[4:]
|
||||
}
|
||||
|
||||
func marshalUint64(to []byte, n uint64) []byte {
|
||||
binary.BigEndian.PutUint64(to, n)
|
||||
return to[8:]
|
||||
}
|
||||
|
||||
func marshalInt(to []byte, n *big.Int) []byte {
|
||||
lengthBytes := to
|
||||
to = to[4:]
|
||||
length := 0
|
||||
|
||||
if n.Sign() < 0 {
|
||||
// A negative number has to be converted to two's-complement
|
||||
// form. So we'll subtract 1 and invert. If the
|
||||
// most-significant-bit isn't set then we'll need to pad the
|
||||
// beginning with 0xff in order to keep the number negative.
|
||||
nMinus1 := new(big.Int).Neg(n)
|
||||
nMinus1.Sub(nMinus1, bigOne)
|
||||
bytes := nMinus1.Bytes()
|
||||
for i := range bytes {
|
||||
bytes[i] ^= 0xff
|
||||
}
|
||||
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
|
||||
to[0] = 0xff
|
||||
to = to[1:]
|
||||
length++
|
||||
}
|
||||
nBytes := copy(to, bytes)
|
||||
to = to[nBytes:]
|
||||
length += nBytes
|
||||
} else if n.Sign() == 0 {
|
||||
// A zero is the zero length string
|
||||
} else {
|
||||
bytes := n.Bytes()
|
||||
if len(bytes) > 0 && bytes[0]&0x80 != 0 {
|
||||
// We'll have to pad this with a 0x00 in order to
|
||||
// stop it looking like a negative number.
|
||||
to[0] = 0
|
||||
to = to[1:]
|
||||
length++
|
||||
}
|
||||
nBytes := copy(to, bytes)
|
||||
to = to[nBytes:]
|
||||
length += nBytes
|
||||
}
|
||||
|
||||
lengthBytes[0] = byte(length >> 24)
|
||||
lengthBytes[1] = byte(length >> 16)
|
||||
lengthBytes[2] = byte(length >> 8)
|
||||
lengthBytes[3] = byte(length)
|
||||
return to
|
||||
}
|
||||
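To make the mpint rules above concrete, here is a small standalone sketch that encodes a few big.Int values the way marshalInt does; it reimplements the length and padding logic rather than calling the unexported helpers, and negative values are left out for brevity.

package main

import (
	"fmt"
	"math/big"
)

// mpint encodes n as an SSH mpint: a uint32 length followed by the minimal
// big-endian bytes, with a 0x00 pad when the high bit would otherwise make
// the value look negative, and an empty body for zero.
func mpint(n *big.Int) []byte {
	body := n.Bytes()
	if len(body) > 0 && body[0]&0x80 != 0 {
		body = append([]byte{0x00}, body...)
	}
	out := []byte{byte(len(body) >> 24), byte(len(body) >> 16), byte(len(body) >> 8), byte(len(body))}
	return append(out, body...)
}

func main() {
	fmt.Printf("%x\n", mpint(big.NewInt(0)))    // 00000000
	fmt.Printf("%x\n", mpint(big.NewInt(0x7f))) // 000000017f
	fmt.Printf("%x\n", mpint(big.NewInt(0x80))) // 000000020080 (0x00 padding)
}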
|
||||
func writeInt(w io.Writer, n *big.Int) {
|
||||
length := intLength(n)
|
||||
buf := make([]byte, length)
|
||||
marshalInt(buf, n)
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
func writeString(w io.Writer, s []byte) {
|
||||
var lengthBytes [4]byte
|
||||
lengthBytes[0] = byte(len(s) >> 24)
|
||||
lengthBytes[1] = byte(len(s) >> 16)
|
||||
lengthBytes[2] = byte(len(s) >> 8)
|
||||
lengthBytes[3] = byte(len(s))
|
||||
w.Write(lengthBytes[:])
|
||||
w.Write(s)
|
||||
}
|
||||
|
||||
func stringLength(n int) int {
|
||||
return 4 + n
|
||||
}
|
||||
|
||||
func marshalString(to []byte, s []byte) []byte {
|
||||
to[0] = byte(len(s) >> 24)
|
||||
to[1] = byte(len(s) >> 16)
|
||||
to[2] = byte(len(s) >> 8)
|
||||
to[3] = byte(len(s))
|
||||
to = to[4:]
|
||||
copy(to, s)
|
||||
return to[len(s):]
|
||||
}
|
||||
|
||||
var bigIntType = reflect.TypeOf((*big.Int)(nil))
|
||||
|
||||
// Decode a packet into its corresponding message.
|
||||
func decode(packet []byte) (interface{}, error) {
|
||||
var msg interface{}
|
||||
switch packet[0] {
|
||||
case msgDisconnect:
|
||||
msg = new(disconnectMsg)
|
||||
case msgServiceRequest:
|
||||
msg = new(serviceRequestMsg)
|
||||
case msgServiceAccept:
|
||||
msg = new(serviceAcceptMsg)
|
||||
case msgKexInit:
|
||||
msg = new(kexInitMsg)
|
||||
case msgKexDHInit:
|
||||
msg = new(kexDHInitMsg)
|
||||
case msgKexDHReply:
|
||||
msg = new(kexDHReplyMsg)
|
||||
case msgUserAuthRequest:
|
||||
msg = new(userAuthRequestMsg)
|
||||
case msgUserAuthSuccess:
|
||||
return new(userAuthSuccessMsg), nil
|
||||
case msgUserAuthFailure:
|
||||
msg = new(userAuthFailureMsg)
|
||||
case msgUserAuthPubKeyOk:
|
||||
msg = new(userAuthPubKeyOkMsg)
|
||||
case msgGlobalRequest:
|
||||
msg = new(globalRequestMsg)
|
||||
case msgRequestSuccess:
|
||||
msg = new(globalRequestSuccessMsg)
|
||||
case msgRequestFailure:
|
||||
msg = new(globalRequestFailureMsg)
|
||||
case msgChannelOpen:
|
||||
msg = new(channelOpenMsg)
|
||||
case msgChannelData:
|
||||
msg = new(channelDataMsg)
|
||||
case msgChannelOpenConfirm:
|
||||
msg = new(channelOpenConfirmMsg)
|
||||
case msgChannelOpenFailure:
|
||||
msg = new(channelOpenFailureMsg)
|
||||
case msgChannelWindowAdjust:
|
||||
msg = new(windowAdjustMsg)
|
||||
case msgChannelEOF:
|
||||
msg = new(channelEOFMsg)
|
||||
case msgChannelClose:
|
||||
msg = new(channelCloseMsg)
|
||||
case msgChannelRequest:
|
||||
msg = new(channelRequestMsg)
|
||||
case msgChannelSuccess:
|
||||
msg = new(channelRequestSuccessMsg)
|
||||
case msgChannelFailure:
|
||||
msg = new(channelRequestFailureMsg)
|
||||
default:
|
||||
return nil, unexpectedMessageError(0, packet[0])
|
||||
}
|
||||
if err := Unmarshal(packet, msg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return msg, nil
|
||||
}
|
330 vendor/golang.org/x/crypto/ssh/mux.go (generated, vendored)
@ -1,330 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// debugMux, if set, causes messages in the connection protocol to be
|
||||
// logged.
|
||||
const debugMux = false
|
||||
|
||||
// chanList is a thread safe channel list.
|
||||
type chanList struct {
|
||||
// protects concurrent access to chans
|
||||
sync.Mutex
|
||||
|
||||
// chans are indexed by the local id of the channel, which the
|
||||
// other side should send in the PeersId field.
|
||||
chans []*channel
|
||||
|
||||
// This is a debugging aid: it offsets all IDs by this
|
||||
// amount. This helps distinguish otherwise identical
|
||||
// server/client muxes
|
||||
offset uint32
|
||||
}
|
||||
|
||||
// Assigns a channel ID to the given channel.
|
||||
func (c *chanList) add(ch *channel) uint32 {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
for i := range c.chans {
|
||||
if c.chans[i] == nil {
|
||||
c.chans[i] = ch
|
||||
return uint32(i) + c.offset
|
||||
}
|
||||
}
|
||||
c.chans = append(c.chans, ch)
|
||||
return uint32(len(c.chans)-1) + c.offset
|
||||
}
|
||||
|
||||
// getChan returns the channel for the given ID.
|
||||
func (c *chanList) getChan(id uint32) *channel {
|
||||
id -= c.offset
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if id < uint32(len(c.chans)) {
|
||||
return c.chans[id]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *chanList) remove(id uint32) {
|
||||
id -= c.offset
|
||||
c.Lock()
|
||||
if id < uint32(len(c.chans)) {
|
||||
c.chans[id] = nil
|
||||
}
|
||||
c.Unlock()
|
||||
}
|
||||
|
||||
// dropAll forgets all channels it knows, returning them in a slice.
|
||||
func (c *chanList) dropAll() []*channel {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
var r []*channel
|
||||
|
||||
for _, ch := range c.chans {
|
||||
if ch == nil {
|
||||
continue
|
||||
}
|
||||
r = append(r, ch)
|
||||
}
|
||||
c.chans = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// mux represents the state for the SSH connection protocol, which
|
||||
// multiplexes many channels onto a single packet transport.
|
||||
type mux struct {
|
||||
conn packetConn
|
||||
chanList chanList
|
||||
|
||||
incomingChannels chan NewChannel
|
||||
|
||||
globalSentMu sync.Mutex
|
||||
globalResponses chan interface{}
|
||||
incomingRequests chan *Request
|
||||
|
||||
errCond *sync.Cond
|
||||
err error
|
||||
}
|
||||
|
||||
// When debugging, each new chanList instantiation has a different
|
||||
// offset.
|
||||
var globalOff uint32
|
||||
|
||||
func (m *mux) Wait() error {
|
||||
m.errCond.L.Lock()
|
||||
defer m.errCond.L.Unlock()
|
||||
for m.err == nil {
|
||||
m.errCond.Wait()
|
||||
}
|
||||
return m.err
|
||||
}
|
||||
|
||||
// newMux returns a mux that runs over the given connection.
|
||||
func newMux(p packetConn) *mux {
|
||||
m := &mux{
|
||||
conn: p,
|
||||
incomingChannels: make(chan NewChannel, chanSize),
|
||||
globalResponses: make(chan interface{}, 1),
|
||||
incomingRequests: make(chan *Request, chanSize),
|
||||
errCond: newCond(),
|
||||
}
|
||||
if debugMux {
|
||||
m.chanList.offset = atomic.AddUint32(&globalOff, 1)
|
||||
}
|
||||
|
||||
go m.loop()
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mux) sendMessage(msg interface{}) error {
|
||||
p := Marshal(msg)
|
||||
if debugMux {
|
||||
log.Printf("send global(%d): %#v", m.chanList.offset, msg)
|
||||
}
|
||||
return m.conn.writePacket(p)
|
||||
}
|
||||
|
||||
func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) {
|
||||
if wantReply {
|
||||
m.globalSentMu.Lock()
|
||||
defer m.globalSentMu.Unlock()
|
||||
}
|
||||
|
||||
if err := m.sendMessage(globalRequestMsg{
|
||||
Type: name,
|
||||
WantReply: wantReply,
|
||||
Data: payload,
|
||||
}); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
if !wantReply {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
msg, ok := <-m.globalResponses
|
||||
if !ok {
|
||||
return false, nil, io.EOF
|
||||
}
|
||||
switch msg := msg.(type) {
|
||||
case *globalRequestFailureMsg:
|
||||
return false, msg.Data, nil
|
||||
case *globalRequestSuccessMsg:
|
||||
return true, msg.Data, nil
|
||||
default:
|
||||
return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg)
|
||||
}
|
||||
}
|
||||
|
||||
// ackRequest must be called after processing a global request that
|
||||
// has WantReply set.
|
||||
func (m *mux) ackRequest(ok bool, data []byte) error {
|
||||
if ok {
|
||||
return m.sendMessage(globalRequestSuccessMsg{Data: data})
|
||||
}
|
||||
return m.sendMessage(globalRequestFailureMsg{Data: data})
|
||||
}
|
||||
|
||||
func (m *mux) Close() error {
|
||||
return m.conn.Close()
|
||||
}
|
||||
|
||||
// loop runs the connection machine. It will process packets until an
|
||||
// error is encountered. To synchronize on loop exit, use mux.Wait.
|
||||
func (m *mux) loop() {
|
||||
var err error
|
||||
for err == nil {
|
||||
err = m.onePacket()
|
||||
}
|
||||
|
||||
for _, ch := range m.chanList.dropAll() {
|
||||
ch.close()
|
||||
}
|
||||
|
||||
close(m.incomingChannels)
|
||||
close(m.incomingRequests)
|
||||
close(m.globalResponses)
|
||||
|
||||
m.conn.Close()
|
||||
|
||||
m.errCond.L.Lock()
|
||||
m.err = err
|
||||
m.errCond.Broadcast()
|
||||
m.errCond.L.Unlock()
|
||||
|
||||
if debugMux {
|
||||
log.Println("loop exit", err)
|
||||
}
|
||||
}
|
||||
|
||||
// onePacket reads and processes one packet.
|
||||
func (m *mux) onePacket() error {
|
||||
packet, err := m.conn.readPacket()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if debugMux {
|
||||
if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData {
|
||||
log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet))
|
||||
} else {
|
||||
p, _ := decode(packet)
|
||||
log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet))
|
||||
}
|
||||
}
|
||||
|
||||
switch packet[0] {
|
||||
case msgChannelOpen:
|
||||
return m.handleChannelOpen(packet)
|
||||
case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
|
||||
return m.handleGlobalPacket(packet)
|
||||
}
|
||||
|
||||
// assume a channel packet.
|
||||
if len(packet) < 5 {
|
||||
return parseError(packet[0])
|
||||
}
|
||||
id := binary.BigEndian.Uint32(packet[1:])
|
||||
ch := m.chanList.getChan(id)
|
||||
if ch == nil {
|
||||
return fmt.Errorf("ssh: invalid channel %d", id)
|
||||
}
|
||||
|
||||
return ch.handlePacket(packet)
|
||||
}
|
||||
|
||||
func (m *mux) handleGlobalPacket(packet []byte) error {
|
||||
msg, err := decode(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *globalRequestMsg:
|
||||
m.incomingRequests <- &Request{
|
||||
Type: msg.Type,
|
||||
WantReply: msg.WantReply,
|
||||
Payload: msg.Data,
|
||||
mux: m,
|
||||
}
|
||||
case *globalRequestSuccessMsg, *globalRequestFailureMsg:
|
||||
m.globalResponses <- msg
|
||||
default:
|
||||
panic(fmt.Sprintf("not a global message %#v", msg))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleChannelOpen schedules a channel to be Accept()ed.
|
||||
func (m *mux) handleChannelOpen(packet []byte) error {
|
||||
var msg channelOpenMsg
|
||||
if err := Unmarshal(packet, &msg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
|
||||
failMsg := channelOpenFailureMsg{
|
||||
PeersId: msg.PeersId,
|
||||
Reason: ConnectionFailed,
|
||||
Message: "invalid request",
|
||||
Language: "en_US.UTF-8",
|
||||
}
|
||||
return m.sendMessage(failMsg)
|
||||
}
|
||||
|
||||
c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData)
|
||||
c.remoteId = msg.PeersId
|
||||
c.maxRemotePayload = msg.MaxPacketSize
|
||||
c.remoteWin.add(msg.PeersWindow)
|
||||
m.incomingChannels <- c
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) {
|
||||
ch, err := m.openChannel(chanType, extra)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return ch, ch.incomingRequests, nil
|
||||
}
|
||||
|
||||
func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
|
||||
ch := m.newChannel(chanType, channelOutbound, extra)
|
||||
|
||||
ch.maxIncomingPayload = channelMaxPacket
|
||||
|
||||
open := channelOpenMsg{
|
||||
ChanType: chanType,
|
||||
PeersWindow: ch.myWindow,
|
||||
MaxPacketSize: ch.maxIncomingPayload,
|
||||
TypeSpecificData: extra,
|
||||
PeersId: ch.localId,
|
||||
}
|
||||
if err := m.sendMessage(open); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch msg := (<-ch.msg).(type) {
|
||||
case *channelOpenConfirmMsg:
|
||||
return ch, nil
|
||||
case *channelOpenFailureMsg:
|
||||
return nil, &OpenChannelError{msg.Reason, msg.Message}
|
||||
default:
|
||||
return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
|
||||
}
|
||||
}
|
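The mux above backs the exported Conn.SendRequest and OpenChannel API. A brief usage sketch follows; the address and credentials are placeholders, and it sends a global keepalive request over an established client connection, which travels through mux.SendRequest as a globalRequestMsg and returns via the globalResponses channel.

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User:            "user",                                   // placeholder
		Auth:            []ssh.AuthMethod{ssh.Password("secret")}, // placeholder
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),              // do not use outside examples
	}
	client, err := ssh.Dial("tcp", "example.com:22", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// A global request with WantReply set; ok reports whether the peer
	// answered with a success or a failure message.
	ok, _, err := client.SendRequest("keepalive@openssh.com", true, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("keepalive acknowledged:", ok)
}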
563 vendor/golang.org/x/crypto/ssh/server.go (generated, vendored)
@ -1,563 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// The Permissions type holds fine-grained permissions that are
|
||||
// specific to a user or a specific authentication method for a user.
|
||||
// The Permissions value for a successful authentication attempt is
|
||||
// available in ServerConn, so it can be used to pass information from
|
||||
// the user-authentication phase to the application layer.
|
||||
type Permissions struct {
|
||||
// CriticalOptions indicate restrictions to the default
|
||||
// permissions, and are typically used in conjunction with
|
||||
// user certificates. The standard for SSH certificates
|
||||
// defines "force-command" (only allow the given command to
|
||||
// execute) and "source-address" (only allow connections from
|
||||
// the given address). The SSH package currently only enforces
|
||||
// the "source-address" critical option. It is up to server
|
||||
// implementations to enforce other critical options, such as
|
||||
// "force-command", by checking them after the SSH handshake
|
||||
// is successful. In general, SSH servers should reject
|
||||
// connections that specify critical options that are unknown
|
||||
// or not supported.
|
||||
CriticalOptions map[string]string
|
||||
|
||||
// Extensions are extra functionality that the server may
|
||||
// offer on authenticated connections. Lack of support for an
|
||||
// extension does not preclude authenticating a user. Common
|
||||
// extensions are "permit-agent-forwarding",
|
||||
// "permit-X11-forwarding". The Go SSH library currently does
|
||||
// not act on any extension, and it is up to server
|
||||
// implementations to honor them. Extensions can be used to
|
||||
// pass data from the authentication callbacks to the server
|
||||
// application layer.
|
||||
Extensions map[string]string
|
||||
}
|
||||
|
||||
// ServerConfig holds server specific configuration data.
|
||||
type ServerConfig struct {
|
||||
// Config contains configuration shared between client and server.
|
||||
Config
|
||||
|
||||
hostKeys []Signer
|
||||
|
||||
// NoClientAuth is true if clients are allowed to connect without
|
||||
// authenticating.
|
||||
NoClientAuth bool
|
||||
|
||||
// MaxAuthTries specifies the maximum number of authentication attempts
|
||||
// permitted per connection. If set to a negative number, the number of
|
||||
// attempts are unlimited. If set to zero, the number of attempts are limited
|
||||
// to 6.
|
||||
MaxAuthTries int
|
||||
|
||||
// PasswordCallback, if non-nil, is called when a user
|
||||
// attempts to authenticate using a password.
|
||||
PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
|
||||
|
||||
// PublicKeyCallback, if non-nil, is called when a client
|
||||
// offers a public key for authentication. It must return a nil error
|
||||
// if the given public key can be used to authenticate the
|
||||
// given user. For example, see CertChecker.Authenticate. A
|
||||
// call to this function does not guarantee that the key
|
||||
// offered is in fact used to authenticate. To record any data
|
||||
// depending on the public key, store it inside a
|
||||
// Permissions.Extensions entry.
|
||||
PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
|
||||
|
||||
// KeyboardInteractiveCallback, if non-nil, is called when
|
||||
// keyboard-interactive authentication is selected (RFC
|
||||
// 4256). The client object's Challenge function should be
|
||||
// used to query the user. The callback may offer multiple
|
||||
// Challenge rounds. To avoid information leaks, the client
|
||||
// should be presented a challenge even if the user is
|
||||
// unknown.
|
||||
KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
|
||||
|
||||
// AuthLogCallback, if non-nil, is called to log all authentication
|
||||
// attempts.
|
||||
AuthLogCallback func(conn ConnMetadata, method string, err error)
|
||||
|
||||
// ServerVersion is the version identification string to announce in
|
||||
// the public handshake.
|
||||
// If empty, a reasonable default is used.
|
||||
// Note that RFC 4253 section 4.2 requires that this string start with
|
||||
// "SSH-2.0-".
|
||||
ServerVersion string
|
||||
}
|
||||
|
||||
// AddHostKey adds a private key as a host key. If an existing host
|
||||
// key exists with the same algorithm, it is overwritten. Each server
|
||||
// config must have at least one host key.
|
||||
func (s *ServerConfig) AddHostKey(key Signer) {
|
||||
for i, k := range s.hostKeys {
|
||||
if k.PublicKey().Type() == key.PublicKey().Type() {
|
||||
s.hostKeys[i] = key
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
s.hostKeys = append(s.hostKeys, key)
|
||||
}
|
||||
|
||||
// cachedPubKey contains the results of querying whether a public key is
|
||||
// acceptable for a user.
|
||||
type cachedPubKey struct {
|
||||
user string
|
||||
pubKeyData []byte
|
||||
result error
|
||||
perms *Permissions
|
||||
}
|
||||
|
||||
const maxCachedPubKeys = 16
|
||||
|
||||
// pubKeyCache caches tests for public keys. Since SSH clients
|
||||
// will query whether a public key is acceptable before attempting to
|
||||
// authenticate with it, we end up with duplicate queries for public
|
||||
// key validity. The cache only applies to a single ServerConn.
|
||||
type pubKeyCache struct {
|
||||
keys []cachedPubKey
|
||||
}
|
||||
|
||||
// get returns the result for a given user/algo/key tuple.
|
||||
func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
|
||||
for _, k := range c.keys {
|
||||
if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) {
|
||||
return k, true
|
||||
}
|
||||
}
|
||||
return cachedPubKey{}, false
|
||||
}
|
||||
|
||||
// add adds the given tuple to the cache.
|
||||
func (c *pubKeyCache) add(candidate cachedPubKey) {
|
||||
if len(c.keys) < maxCachedPubKeys {
|
||||
c.keys = append(c.keys, candidate)
|
||||
}
|
||||
}
|
||||
|
||||
// ServerConn is an authenticated SSH connection, as seen from the
|
||||
// server
|
||||
type ServerConn struct {
|
||||
Conn
|
||||
|
||||
// If the succeeding authentication callback returned a
|
||||
// non-nil Permissions pointer, it is stored here.
|
||||
Permissions *Permissions
|
||||
}
|
||||
|
||||
// NewServerConn starts a new SSH server with c as the underlying
|
||||
// transport. It starts with a handshake and, if the handshake is
|
||||
// unsuccessful, it closes the connection and returns an error. The
|
||||
// Request and NewChannel channels must be serviced, or the connection
|
||||
// will hang.
|
||||
func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) {
|
||||
fullConf := *config
|
||||
fullConf.SetDefaults()
|
||||
if fullConf.MaxAuthTries == 0 {
|
||||
fullConf.MaxAuthTries = 6
|
||||
}
|
||||
|
||||
s := &connection{
|
||||
sshConn: sshConn{conn: c},
|
||||
}
|
||||
perms, err := s.serverHandshake(&fullConf)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil
|
||||
}
|
||||
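For orientation, a minimal sketch of wiring NewServerConn together. The host key path, listen address and password check are placeholders, and as the doc comment above says, a real server must keep servicing the channel and request channels.

package main

import (
	"io/ioutil"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	keyBytes, err := ioutil.ReadFile("/path/to/host_key") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	hostKey, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}

	config := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			// Placeholder: a real callback must verify pass. Extensions is the
			// place to hand data from the auth phase to the application layer.
			return &ssh.Permissions{Extensions: map[string]string{"user": c.User()}}, nil
		},
	}
	config.AddHostKey(hostKey)

	ln, err := net.Listen("tcp", ":2022")
	if err != nil {
		log.Fatal(err)
	}
	nConn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}

	conn, chans, reqs, err := ssh.NewServerConn(nConn, config)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("authenticated user:", conn.Permissions.Extensions["user"])
	go ssh.DiscardRequests(reqs)
	for newCh := range chans {
		newCh.Reject(ssh.Prohibited, "channels not supported in this sketch")
	}
}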
|
||||
// signAndMarshal signs the data with the appropriate algorithm,
|
||||
// and serializes the result in SSH wire format.
|
||||
func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
|
||||
sig, err := k.Sign(rand, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return Marshal(sig), nil
|
||||
}
|
||||
|
||||
// handshake performs key exchange and user authentication.
|
||||
func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) {
|
||||
if len(config.hostKeys) == 0 {
|
||||
return nil, errors.New("ssh: server has no host keys")
|
||||
}
|
||||
|
||||
if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil {
|
||||
return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
|
||||
}
|
||||
|
||||
if config.ServerVersion != "" {
|
||||
s.serverVersion = []byte(config.ServerVersion)
|
||||
} else {
|
||||
s.serverVersion = []byte(packageVersion)
|
||||
}
|
||||
var err error
|
||||
s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */)
|
||||
s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config)
|
||||
|
||||
if err := s.transport.waitSession(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We just did the key change, so the session ID is established.
|
||||
s.sessionID = s.transport.getSessionID()
|
||||
|
||||
var packet []byte
|
||||
if packet, err = s.transport.readPacket(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var serviceRequest serviceRequestMsg
|
||||
if err = Unmarshal(packet, &serviceRequest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if serviceRequest.Service != serviceUserAuth {
|
||||
return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
|
||||
}
|
||||
serviceAccept := serviceAcceptMsg{
|
||||
Service: serviceUserAuth,
|
||||
}
|
||||
if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
perms, err := s.serverAuthenticate(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.mux = newMux(s.transport)
|
||||
return perms, err
|
||||
}
|
||||
|
||||
func isAcceptableAlgo(algo string) bool {
|
||||
switch algo {
|
||||
case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519,
|
||||
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
|
||||
if addr == nil {
|
||||
return errors.New("ssh: no address known for client, but source-address match required")
|
||||
}
|
||||
|
||||
tcpAddr, ok := addr.(*net.TCPAddr)
|
||||
if !ok {
|
||||
return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr)
|
||||
}
|
||||
|
||||
for _, sourceAddr := range strings.Split(sourceAddrs, ",") {
|
||||
if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
|
||||
if allowedIP.Equal(tcpAddr.IP) {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
_, ipNet, err := net.ParseCIDR(sourceAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
|
||||
}
|
||||
|
||||
if ipNet.Contains(tcpAddr.IP) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
|
||||
}
|
||||
|
||||
// ServerAuthError implements the error interface. It appends any authentication
|
||||
// errors that may occur, and is returned if all of the authentication methods
|
||||
// provided by the user failed to authenticate.
|
||||
type ServerAuthError struct {
|
||||
// Errors contains authentication errors returned by the authentication
|
||||
// callback methods.
|
||||
Errors []error
|
||||
}
|
||||
|
||||
func (l ServerAuthError) Error() string {
|
||||
var errs []string
|
||||
for _, err := range l.Errors {
|
||||
errs = append(errs, err.Error())
|
||||
}
|
||||
return "[" + strings.Join(errs, ", ") + "]"
|
||||
}
|
||||
|
||||
func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
|
||||
sessionID := s.transport.getSessionID()
|
||||
var cache pubKeyCache
|
||||
var perms *Permissions
|
||||
|
||||
authFailures := 0
|
||||
var authErrs []error
|
||||
|
||||
userAuthLoop:
|
||||
for {
|
||||
if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 {
|
||||
discMsg := &disconnectMsg{
|
||||
Reason: 2,
|
||||
Message: "too many authentication failures",
|
||||
}
|
||||
|
||||
if err := s.transport.writePacket(Marshal(discMsg)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, discMsg
|
||||
}
|
||||
|
||||
var userAuthReq userAuthRequestMsg
|
||||
if packet, err := s.transport.readPacket(); err != nil {
|
||||
if err == io.EOF {
|
||||
return nil, &ServerAuthError{Errors: authErrs}
|
||||
}
|
||||
return nil, err
|
||||
} else if err = Unmarshal(packet, &userAuthReq); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if userAuthReq.Service != serviceSSH {
|
||||
return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
|
||||
}
|
||||
|
||||
s.user = userAuthReq.User
|
||||
perms = nil
|
||||
authErr := errors.New("no auth passed yet")
|
||||
|
||||
switch userAuthReq.Method {
|
||||
case "none":
|
||||
if config.NoClientAuth {
|
||||
authErr = nil
|
||||
}
|
||||
|
||||
// allow initial attempt of 'none' without penalty
|
||||
if authFailures == 0 {
|
||||
authFailures--
|
||||
}
|
||||
case "password":
|
||||
if config.PasswordCallback == nil {
|
||||
authErr = errors.New("ssh: password auth not configured")
|
||||
break
|
||||
}
|
||||
payload := userAuthReq.Payload
|
||||
if len(payload) < 1 || payload[0] != 0 {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
payload = payload[1:]
|
||||
password, payload, ok := parseString(payload)
|
||||
if !ok || len(payload) > 0 {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
|
||||
perms, authErr = config.PasswordCallback(s, password)
|
||||
case "keyboard-interactive":
|
||||
if config.KeyboardInteractiveCallback == nil {
|
||||
authErr = errors.New("ssh: keyboard-interactive auth not configubred")
|
||||
break
|
||||
}
|
||||
|
||||
prompter := &sshClientKeyboardInteractive{s}
|
||||
perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
|
||||
case "publickey":
|
||||
if config.PublicKeyCallback == nil {
|
||||
authErr = errors.New("ssh: publickey auth not configured")
|
||||
break
|
||||
}
|
||||
payload := userAuthReq.Payload
|
||||
if len(payload) < 1 {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
isQuery := payload[0] == 0
|
||||
payload = payload[1:]
|
||||
algoBytes, payload, ok := parseString(payload)
|
||||
if !ok {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
algo := string(algoBytes)
|
||||
if !isAcceptableAlgo(algo) {
|
||||
authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
|
||||
break
|
||||
}
|
||||
|
||||
pubKeyData, payload, ok := parseString(payload)
|
||||
if !ok {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
|
||||
pubKey, err := ParsePublicKey(pubKeyData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
candidate, ok := cache.get(s.user, pubKeyData)
|
||||
if !ok {
|
||||
candidate.user = s.user
|
||||
candidate.pubKeyData = pubKeyData
|
||||
candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey)
|
||||
if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
|
||||
candidate.result = checkSourceAddress(
|
||||
s.RemoteAddr(),
|
||||
candidate.perms.CriticalOptions[sourceAddressCriticalOption])
|
||||
}
|
||||
cache.add(candidate)
|
||||
}
|
||||
|
||||
if isQuery {
|
||||
// The client can query if the given public key
|
||||
// would be okay.
|
||||
|
||||
if len(payload) > 0 {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
|
||||
if candidate.result == nil {
|
||||
okMsg := userAuthPubKeyOkMsg{
|
||||
Algo: algo,
|
||||
PubKey: pubKeyData,
|
||||
}
|
||||
if err = s.transport.writePacket(Marshal(&okMsg)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue userAuthLoop
|
||||
}
|
||||
authErr = candidate.result
|
||||
} else {
|
||||
sig, payload, ok := parseSignature(payload)
|
||||
if !ok || len(payload) > 0 {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
// Ensure the public key algo and signature algo
|
||||
// are supported. Compare the private key
|
||||
// algorithm name that corresponds to algo with
|
||||
// sig.Format. This is usually the same, but
|
||||
// for certs, the names differ.
|
||||
if !isAcceptableAlgo(sig.Format) {
|
||||
break
|
||||
}
|
||||
signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData)
|
||||
|
||||
if err := pubKey.Verify(signedData, sig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
authErr = candidate.result
|
||||
perms = candidate.perms
|
||||
}
|
||||
default:
|
||||
authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method)
|
||||
}
|
||||
|
||||
authErrs = append(authErrs, authErr)
|
||||
|
||||
if config.AuthLogCallback != nil {
|
||||
config.AuthLogCallback(s, userAuthReq.Method, authErr)
|
||||
}
|
||||
|
||||
if authErr == nil {
|
||||
break userAuthLoop
|
||||
}
|
||||
|
||||
authFailures++
|
||||
|
||||
var failureMsg userAuthFailureMsg
|
||||
if config.PasswordCallback != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "password")
|
||||
}
|
||||
if config.PublicKeyCallback != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "publickey")
|
||||
}
|
||||
if config.KeyboardInteractiveCallback != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive")
|
||||
}
|
||||
|
||||
if len(failureMsg.Methods) == 0 {
|
||||
return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
|
||||
}
|
||||
|
||||
if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return perms, nil
|
||||
}
|
||||
|
||||
// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by
|
||||
// asking the client on the other side of a ServerConn.
|
||||
type sshClientKeyboardInteractive struct {
|
||||
*connection
|
||||
}
|
||||
|
||||
func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
|
||||
if len(questions) != len(echos) {
|
||||
return nil, errors.New("ssh: echos and questions must have equal length")
|
||||
}
|
||||
|
||||
var prompts []byte
|
||||
for i := range questions {
|
||||
prompts = appendString(prompts, questions[i])
|
||||
prompts = appendBool(prompts, echos[i])
|
||||
}
|
||||
|
||||
if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
|
||||
Instruction: instruction,
|
||||
NumPrompts: uint32(len(questions)),
|
||||
Prompts: prompts,
|
||||
})); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
packet, err := c.transport.readPacket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if packet[0] != msgUserAuthInfoResponse {
|
||||
return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0])
|
||||
}
|
||||
packet = packet[1:]
|
||||
|
||||
n, packet, ok := parseUint32(packet)
|
||||
if !ok || int(n) != len(questions) {
|
||||
return nil, parseError(msgUserAuthInfoResponse)
|
||||
}
|
||||
|
||||
for i := uint32(0); i < n; i++ {
|
||||
ans, rest, ok := parseString(packet)
|
||||
if !ok {
|
||||
return nil, parseError(msgUserAuthInfoResponse)
|
||||
}
|
||||
|
||||
answers = append(answers, string(ans))
|
||||
packet = rest
|
||||
}
|
||||
if len(packet) != 0 {
|
||||
return nil, errors.New("ssh: junk at end of message")
|
||||
}
|
||||
|
||||
return answers, nil
|
||||
}
|
647 vendor/golang.org/x/crypto/ssh/session.go (generated, vendored)
@ -1,647 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssh
|
||||
|
||||
// Session implements an interactive session described in
|
||||
// "RFC 4254, section 6".
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Signal string
|
||||
|
||||
// POSIX signals as listed in RFC 4254 Section 6.10.
|
||||
const (
|
||||
SIGABRT Signal = "ABRT"
|
||||
SIGALRM Signal = "ALRM"
|
||||
SIGFPE Signal = "FPE"
|
||||
SIGHUP Signal = "HUP"
|
||||
SIGILL Signal = "ILL"
|
||||
SIGINT Signal = "INT"
|
||||
SIGKILL Signal = "KILL"
|
||||
SIGPIPE Signal = "PIPE"
|
||||
SIGQUIT Signal = "QUIT"
|
||||
SIGSEGV Signal = "SEGV"
|
||||
SIGTERM Signal = "TERM"
|
||||
SIGUSR1 Signal = "USR1"
|
||||
SIGUSR2 Signal = "USR2"
|
||||
)
|
||||
|
||||
var signals = map[Signal]int{
|
||||
SIGABRT: 6,
|
||||
SIGALRM: 14,
|
||||
SIGFPE: 8,
|
||||
SIGHUP: 1,
|
||||
SIGILL: 4,
|
||||
SIGINT: 2,
|
||||
SIGKILL: 9,
|
||||
SIGPIPE: 13,
|
||||
SIGQUIT: 3,
|
||||
SIGSEGV: 11,
|
||||
SIGTERM: 15,
|
||||
}
|
||||
|
||||
type TerminalModes map[uint8]uint32
|
||||
|
||||
// POSIX terminal mode flags as listed in RFC 4254 Section 8.
|
||||
const (
|
||||
tty_OP_END = 0
|
||||
VINTR = 1
|
||||
VQUIT = 2
|
||||
VERASE = 3
|
||||
VKILL = 4
|
||||
VEOF = 5
|
||||
VEOL = 6
|
||||
VEOL2 = 7
|
||||
VSTART = 8
|
||||
VSTOP = 9
|
||||
VSUSP = 10
|
||||
VDSUSP = 11
|
||||
VREPRINT = 12
|
||||
VWERASE = 13
|
||||
VLNEXT = 14
|
||||
VFLUSH = 15
|
||||
VSWTCH = 16
|
||||
VSTATUS = 17
|
||||
VDISCARD = 18
|
||||
IGNPAR = 30
|
||||
PARMRK = 31
|
||||
INPCK = 32
|
||||
ISTRIP = 33
|
||||
INLCR = 34
|
||||
IGNCR = 35
|
||||
ICRNL = 36
|
||||
IUCLC = 37
|
||||
IXON = 38
|
||||
IXANY = 39
|
||||
IXOFF = 40
|
||||
IMAXBEL = 41
|
||||
ISIG = 50
|
||||
ICANON = 51
|
||||
XCASE = 52
|
||||
ECHO = 53
|
||||
ECHOE = 54
|
||||
ECHOK = 55
|
||||
ECHONL = 56
|
||||
NOFLSH = 57
|
||||
TOSTOP = 58
|
||||
IEXTEN = 59
|
||||
ECHOCTL = 60
|
||||
ECHOKE = 61
|
||||
PENDIN = 62
|
||||
OPOST = 70
|
||||
OLCUC = 71
|
||||
ONLCR = 72
|
||||
OCRNL = 73
|
||||
ONOCR = 74
|
||||
ONLRET = 75
|
||||
CS7 = 90
|
||||
CS8 = 91
|
||||
PARENB = 92
|
||||
PARODD = 93
|
||||
TTY_OP_ISPEED = 128
|
||||
TTY_OP_OSPEED = 129
|
||||
)
|
||||
|
||||
// A Session represents a connection to a remote command or shell.
|
||||
type Session struct {
|
||||
// Stdin specifies the remote process's standard input.
|
||||
// If Stdin is nil, the remote process reads from an empty
|
||||
// bytes.Buffer.
|
||||
Stdin io.Reader
|
||||
|
||||
// Stdout and Stderr specify the remote process's standard
|
||||
// output and error.
|
||||
//
|
||||
// If either is nil, Run connects the corresponding file
|
||||
// descriptor to an instance of ioutil.Discard. There is a
|
||||
// fixed amount of buffering that is shared for the two streams.
|
||||
// If either blocks it may eventually cause the remote
|
||||
// command to block.
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
|
||||
ch Channel // the channel backing this session
|
||||
started bool // true once Start, Run or Shell is invoked.
|
||||
copyFuncs []func() error
|
||||
errors chan error // one send per copyFunc
|
||||
|
||||
// true if pipe method is active
|
||||
stdinpipe, stdoutpipe, stderrpipe bool
|
||||
|
||||
// stdinPipeWriter is non-nil if StdinPipe has not been called
|
||||
// and Stdin was specified by the user; it is the write end of
|
||||
// a pipe connecting Session.Stdin to the stdin channel.
|
||||
stdinPipeWriter io.WriteCloser
|
||||
|
||||
exitStatus chan error
|
||||
}
|
||||
|
||||
// SendRequest sends an out-of-band channel request on the SSH channel
|
||||
// underlying the session.
|
||||
func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
|
||||
return s.ch.SendRequest(name, wantReply, payload)
|
||||
}
|
||||
|
||||
func (s *Session) Close() error {
|
||||
return s.ch.Close()
|
||||
}
|
||||
|
||||
// RFC 4254 Section 6.4.
|
||||
type setenvRequest struct {
|
||||
Name string
|
||||
Value string
|
||||
}
|
||||
|
||||
// Setenv sets an environment variable that will be applied to any
|
||||
// command executed by Shell or Run.
|
||||
func (s *Session) Setenv(name, value string) error {
|
||||
msg := setenvRequest{
|
||||
Name: name,
|
||||
Value: value,
|
||||
}
|
||||
ok, err := s.ch.SendRequest("env", true, Marshal(&msg))
|
||||
if err == nil && !ok {
|
||||
err = errors.New("ssh: setenv failed")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// RFC 4254 Section 6.2.
|
||||
type ptyRequestMsg struct {
|
||||
Term string
|
||||
Columns uint32
|
||||
Rows uint32
|
||||
Width uint32
|
||||
Height uint32
|
||||
Modelist string
|
||||
}
|
||||
|
||||
// RequestPty requests the association of a pty with the session on the remote host.
|
||||
func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error {
|
||||
var tm []byte
|
||||
for k, v := range termmodes {
|
||||
kv := struct {
|
||||
Key byte
|
||||
Val uint32
|
||||
}{k, v}
|
||||
|
||||
tm = append(tm, Marshal(&kv)...)
|
||||
}
|
||||
tm = append(tm, tty_OP_END)
|
||||
req := ptyRequestMsg{
|
||||
Term: term,
|
||||
Columns: uint32(w),
|
||||
Rows: uint32(h),
|
||||
Width: uint32(w * 8),
|
||||
Height: uint32(h * 8),
|
||||
Modelist: string(tm),
|
||||
}
|
||||
ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req))
|
||||
if err == nil && !ok {
|
||||
err = errors.New("ssh: pty-req failed")
|
||||
}
|
||||
return err
|
||||
}
|
||||
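A brief usage sketch for RequestPty; it assumes an already established *ssh.Session named sess, and the terminal name and sizes are arbitrary.

package sshexample

import (
	"log"

	"golang.org/x/crypto/ssh"
)

// requestPTY asks for an interactive terminal on an existing session.
func requestPTY(sess *ssh.Session) {
	// Terminal modes are serialized exactly as RequestPty above does it:
	// one key byte plus a uint32 value per entry, terminated by tty_OP_END.
	modes := ssh.TerminalModes{
		ssh.ECHO:          1,     // enable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed  = 14.4 kbaud
		ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4 kbaud
	}
	if err := sess.RequestPty("xterm", 40, 80, modes); err != nil {
		log.Fatalf("request for pty failed: %v", err)
	}
}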
|
||||
// RFC 4254 Section 6.5.
|
||||
type subsystemRequestMsg struct {
|
||||
Subsystem string
|
||||
}
|
||||
|
||||
// RequestSubsystem requests the association of a subsystem with the session on the remote host.
|
||||
// A subsystem is a predefined command that runs in the background when the ssh session is initiated
|
||||
func (s *Session) RequestSubsystem(subsystem string) error {
|
||||
msg := subsystemRequestMsg{
|
||||
Subsystem: subsystem,
|
||||
}
|
||||
ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg))
|
||||
if err == nil && !ok {
|
||||
err = errors.New("ssh: subsystem request failed")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// RFC 4254 Section 6.7.
|
||||
type ptyWindowChangeMsg struct {
|
||||
Columns uint32
|
||||
Rows uint32
|
||||
Width uint32
|
||||
Height uint32
|
||||
}
|
||||
|
||||
// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns.
|
||||
func (s *Session) WindowChange(h, w int) error {
|
||||
req := ptyWindowChangeMsg{
|
||||
Columns: uint32(w),
|
||||
Rows: uint32(h),
|
||||
Width: uint32(w * 8),
|
||||
Height: uint32(h * 8),
|
||||
}
|
||||
_, err := s.ch.SendRequest("window-change", false, Marshal(&req))
|
||||
return err
|
||||
}
|
||||
|
||||
// RFC 4254 Section 6.9.
|
||||
type signalMsg struct {
|
||||
Signal string
|
||||
}
|
||||
|
||||
// Signal sends the given signal to the remote process.
|
||||
// sig is one of the SIG* constants.
|
||||
func (s *Session) Signal(sig Signal) error {
|
||||
msg := signalMsg{
|
||||
Signal: string(sig),
|
||||
}
|
||||
|
||||
_, err := s.ch.SendRequest("signal", false, Marshal(&msg))
|
||||
return err
|
||||
}
|
||||
|
||||
// RFC 4254 Section 6.5.
|
||||
type execMsg struct {
|
||||
Command string
|
||||
}
|
||||
|
||||
// Start runs cmd on the remote host. Typically, the remote
|
||||
// server passes cmd to the shell for interpretation.
|
||||
// A Session only accepts one call to Run, Start or Shell.
|
||||
func (s *Session) Start(cmd string) error {
|
||||
if s.started {
|
||||
return errors.New("ssh: session already started")
|
||||
}
|
||||
req := execMsg{
|
||||
Command: cmd,
|
||||
}
|
||||
|
||||
ok, err := s.ch.SendRequest("exec", true, Marshal(&req))
|
||||
if err == nil && !ok {
|
||||
err = fmt.Errorf("ssh: command %v failed", cmd)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.start()
|
||||
}
|
||||
|
||||
// Run runs cmd on the remote host. Typically, the remote
|
||||
// server passes cmd to the shell for interpretation.
|
||||
// A Session only accepts one call to Run, Start, Shell, Output,
|
||||
// or CombinedOutput.
|
||||
//
|
||||
// The returned error is nil if the command runs, has no problems
|
||||
// copying stdin, stdout, and stderr, and exits with a zero exit
|
||||
// status.
|
||||
//
|
||||
// If the remote server does not send an exit status, an error of type
|
||||
// *ExitMissingError is returned. If the command completes
|
||||
// unsuccessfully or is interrupted by a signal, the error is of type
|
||||
// *ExitError. Other error types may be returned for I/O problems.
|
||||
func (s *Session) Run(cmd string) error {
|
||||
err := s.Start(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.Wait()
|
||||
}
|
||||
|
||||
// Output runs cmd on the remote host and returns its standard output.
|
||||
func (s *Session) Output(cmd string) ([]byte, error) {
|
||||
if s.Stdout != nil {
|
||||
return nil, errors.New("ssh: Stdout already set")
|
||||
}
|
||||
var b bytes.Buffer
|
||||
s.Stdout = &b
|
||||
err := s.Run(cmd)
|
||||
return b.Bytes(), err
|
||||
}
|
||||
|
||||
type singleWriter struct {
|
||||
b bytes.Buffer
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (w *singleWriter) Write(p []byte) (int, error) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.b.Write(p)
|
||||
}
|
||||
|
||||
// CombinedOutput runs cmd on the remote host and returns its combined
|
||||
// standard output and standard error.
|
||||
func (s *Session) CombinedOutput(cmd string) ([]byte, error) {
|
||||
if s.Stdout != nil {
|
||||
return nil, errors.New("ssh: Stdout already set")
|
||||
}
|
||||
if s.Stderr != nil {
|
||||
return nil, errors.New("ssh: Stderr already set")
|
||||
}
|
||||
var b singleWriter
|
||||
s.Stdout = &b
|
||||
s.Stderr = &b
|
||||
err := s.Run(cmd)
|
||||
return b.b.Bytes(), err
|
||||
}
|
||||
|
||||
// Shell starts a login shell on the remote host. A Session only
|
||||
// accepts one call to Run, Start, Shell, Output, or CombinedOutput.
|
||||
func (s *Session) Shell() error {
|
||||
if s.started {
|
||||
return errors.New("ssh: session already started")
|
||||
}
|
||||
|
||||
ok, err := s.ch.SendRequest("shell", true, nil)
|
||||
if err == nil && !ok {
|
||||
return errors.New("ssh: could not start shell")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.start()
|
||||
}
|
||||
|
||||
func (s *Session) start() error {
|
||||
s.started = true
|
||||
|
||||
type F func(*Session)
|
||||
for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} {
|
||||
setupFd(s)
|
||||
}
|
||||
|
||||
s.errors = make(chan error, len(s.copyFuncs))
|
||||
for _, fn := range s.copyFuncs {
|
||||
go func(fn func() error) {
|
||||
s.errors <- fn()
|
||||
}(fn)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait waits for the remote command to exit.
|
||||
//
|
||||
// The returned error is nil if the command runs, has no problems
|
||||
// copying stdin, stdout, and stderr, and exits with a zero exit
|
||||
// status.
|
||||
//
|
||||
// If the remote server does not send an exit status, an error of type
|
||||
// *ExitMissingError is returned. If the command completes
|
||||
// unsuccessfully or is interrupted by a signal, the error is of type
|
||||
// *ExitError. Other error types may be returned for I/O problems.
|
||||
func (s *Session) Wait() error {
|
||||
if !s.started {
|
||||
return errors.New("ssh: session not started")
|
||||
}
|
||||
waitErr := <-s.exitStatus
|
||||
|
||||
if s.stdinPipeWriter != nil {
|
||||
s.stdinPipeWriter.Close()
|
||||
}
|
||||
var copyError error
|
||||
for _ = range s.copyFuncs {
|
||||
if err := <-s.errors; err != nil && copyError == nil {
|
||||
copyError = err
|
||||
}
|
||||
}
|
||||
if waitErr != nil {
|
||||
return waitErr
|
||||
}
|
||||
return copyError
|
||||
}
|
||||
|
||||
func (s *Session) wait(reqs <-chan *Request) error {
|
||||
wm := Waitmsg{status: -1}
|
||||
// Wait for msg channel to be closed before returning.
|
||||
for msg := range reqs {
|
||||
switch msg.Type {
|
||||
case "exit-status":
|
||||
wm.status = int(binary.BigEndian.Uint32(msg.Payload))
|
||||
case "exit-signal":
|
||||
var sigval struct {
|
||||
Signal string
|
||||
CoreDumped bool
|
||||
Error string
|
||||
Lang string
|
||||
}
|
||||
if err := Unmarshal(msg.Payload, &sigval); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Must sanitize strings?
|
||||
wm.signal = sigval.Signal
|
||||
wm.msg = sigval.Error
|
||||
wm.lang = sigval.Lang
|
||||
default:
|
||||
// This handles keepalives and matches
|
||||
// OpenSSH's behaviour.
|
||||
if msg.WantReply {
|
||||
msg.Reply(false, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
if wm.status == 0 {
|
||||
return nil
|
||||
}
|
||||
if wm.status == -1 {
|
||||
// exit-status was never sent from server
|
||||
if wm.signal == "" {
|
||||
// signal was not sent either. RFC 4254
|
||||
// section 6.10 recommends against this
|
||||
// behavior, but it is allowed, so we let
|
||||
// clients handle it.
|
||||
return &ExitMissingError{}
|
||||
}
|
||||
wm.status = 128
|
||||
if _, ok := signals[Signal(wm.signal)]; ok {
|
||||
wm.status += signals[Signal(wm.signal)]
|
||||
}
|
||||
}
|
||||
|
||||
return &ExitError{wm}
|
||||
}
|
||||
|
||||
// ExitMissingError is returned if a session is torn down cleanly, but
|
||||
// the server sends no confirmation of the exit status.
|
||||
type ExitMissingError struct{}
|
||||
|
||||
func (e *ExitMissingError) Error() string {
|
||||
return "wait: remote command exited without exit status or exit signal"
|
||||
}
|
||||
|
||||
func (s *Session) stdin() {
|
||||
if s.stdinpipe {
|
||||
return
|
||||
}
|
||||
var stdin io.Reader
|
||||
if s.Stdin == nil {
|
||||
stdin = new(bytes.Buffer)
|
||||
} else {
|
||||
r, w := io.Pipe()
|
||||
go func() {
|
||||
_, err := io.Copy(w, s.Stdin)
|
||||
w.CloseWithError(err)
|
||||
}()
|
||||
stdin, s.stdinPipeWriter = r, w
|
||||
}
|
||||
s.copyFuncs = append(s.copyFuncs, func() error {
|
||||
_, err := io.Copy(s.ch, stdin)
|
||||
if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF {
|
||||
err = err1
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Session) stdout() {
|
||||
if s.stdoutpipe {
|
||||
return
|
||||
}
|
||||
if s.Stdout == nil {
|
||||
s.Stdout = ioutil.Discard
|
||||
}
|
||||
s.copyFuncs = append(s.copyFuncs, func() error {
|
||||
_, err := io.Copy(s.Stdout, s.ch)
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Session) stderr() {
|
||||
if s.stderrpipe {
|
||||
return
|
||||
}
|
||||
if s.Stderr == nil {
|
||||
s.Stderr = ioutil.Discard
|
||||
}
|
||||
s.copyFuncs = append(s.copyFuncs, func() error {
|
||||
_, err := io.Copy(s.Stderr, s.ch.Stderr())
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// sessionStdin reroutes Close to CloseWrite.
|
||||
type sessionStdin struct {
|
||||
io.Writer
|
||||
ch Channel
|
||||
}
|
||||
|
||||
func (s *sessionStdin) Close() error {
|
||||
return s.ch.CloseWrite()
|
||||
}
|
||||
|
||||
// StdinPipe returns a pipe that will be connected to the
|
||||
// remote command's standard input when the command starts.
|
||||
func (s *Session) StdinPipe() (io.WriteCloser, error) {
|
||||
if s.Stdin != nil {
|
||||
return nil, errors.New("ssh: Stdin already set")
|
||||
}
|
||||
if s.started {
|
||||
return nil, errors.New("ssh: StdinPipe after process started")
|
||||
}
|
||||
s.stdinpipe = true
|
||||
return &sessionStdin{s.ch, s.ch}, nil
|
||||
}
|
||||
|
||||
// StdoutPipe returns a pipe that will be connected to the
|
||||
// remote command's standard output when the command starts.
|
||||
// There is a fixed amount of buffering that is shared between
|
||||
// stdout and stderr streams. If the StdoutPipe reader is
|
||||
// not serviced fast enough it may eventually cause the
|
||||
// remote command to block.
|
||||
func (s *Session) StdoutPipe() (io.Reader, error) {
|
||||
if s.Stdout != nil {
|
||||
return nil, errors.New("ssh: Stdout already set")
|
||||
}
|
||||
if s.started {
|
||||
return nil, errors.New("ssh: StdoutPipe after process started")
|
||||
}
|
||||
s.stdoutpipe = true
|
||||
return s.ch, nil
|
||||
}
|
||||
|
||||
// StderrPipe returns a pipe that will be connected to the
|
||||
// remote command's standard error when the command starts.
|
||||
// There is a fixed amount of buffering that is shared between
|
||||
// stdout and stderr streams. If the StderrPipe reader is
|
||||
// not serviced fast enough it may eventually cause the
|
||||
// remote command to block.
|
||||
func (s *Session) StderrPipe() (io.Reader, error) {
|
||||
if s.Stderr != nil {
|
||||
return nil, errors.New("ssh: Stderr already set")
|
||||
}
|
||||
if s.started {
|
||||
return nil, errors.New("ssh: StderrPipe after process started")
|
||||
}
|
||||
s.stderrpipe = true
|
||||
return s.ch.Stderr(), nil
|
||||
}
|
||||
|
||||
// newSession returns a new interactive session on the remote host.
|
||||
func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
|
||||
s := &Session{
|
||||
ch: ch,
|
||||
}
|
||||
s.exitStatus = make(chan error, 1)
|
||||
go func() {
|
||||
s.exitStatus <- s.wait(reqs)
|
||||
}()
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// An ExitError reports unsuccessful completion of a remote command.
|
||||
type ExitError struct {
|
||||
Waitmsg
|
||||
}
|
||||
|
||||
func (e *ExitError) Error() string {
|
||||
return e.Waitmsg.String()
|
||||
}
|
||||
|
||||
// Waitmsg stores the information about an exited remote command
|
||||
// as reported by Wait.
|
||||
type Waitmsg struct {
|
||||
status int
|
||||
signal string
|
||||
msg string
|
||||
lang string
|
||||
}
|
||||
|
||||
// ExitStatus returns the exit status of the remote command.
|
||||
func (w Waitmsg) ExitStatus() int {
|
||||
return w.status
|
||||
}
|
||||
|
||||
// Signal returns the exit signal of the remote command if
|
||||
// it was terminated violently.
|
||||
func (w Waitmsg) Signal() string {
|
||||
return w.signal
|
||||
}
|
||||
|
||||
// Msg returns the exit message given by the remote command
|
||||
func (w Waitmsg) Msg() string {
|
||||
return w.msg
|
||||
}
|
||||
|
||||
// Lang returns the language tag. See RFC 3066
|
||||
func (w Waitmsg) Lang() string {
|
||||
return w.lang
|
||||
}
|
||||
|
||||
func (w Waitmsg) String() string {
|
||||
str := fmt.Sprintf("Process exited with status %v", w.status)
|
||||
if w.signal != "" {
|
||||
str += fmt.Sprintf(" from signal %v", w.signal)
|
||||
}
|
||||
if w.msg != "" {
|
||||
str += fmt.Sprintf(". Reason was: %v", w.msg)
|
||||
}
|
||||
return str
|
||||
}
|
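For orientation, the Session API deleted above is driven from an *ssh.Client roughly as follows. This is a minimal sketch; the host, credentials and command are placeholder assumptions, not anything taken from this commit.

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Placeholder credentials; a real caller should verify the host key
	// instead of using InsecureIgnoreHostKey.
	config := &ssh.ClientConfig{
		User:            "user",
		Auth:            []ssh.AuthMethod{ssh.Password("password")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// One Session per command: Output wires Stdout to a buffer and calls Run.
	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	out, err := session.Output("uname -a")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}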
vendor/golang.org/x/crypto/ssh/streamlocal.go (generated, vendored; 115 lines)
@ -1,115 +0,0 @@
package ssh

import (
	"errors"
	"io"
	"net"
)

// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message
// with "direct-streamlocal@openssh.com" string.
//
// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235
type streamLocalChannelOpenDirectMsg struct {
	socketPath string
	reserved0  string
	reserved1  uint32
}

// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message
// with "forwarded-streamlocal@openssh.com" string.
type forwardedStreamLocalPayload struct {
	SocketPath string
	Reserved0  string
}

// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message
// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string.
type streamLocalChannelForwardMsg struct {
	socketPath string
}

// ListenUnix is similar to ListenTCP but uses a Unix domain socket.
func (c *Client) ListenUnix(socketPath string) (net.Listener, error) {
	m := streamLocalChannelForwardMsg{
		socketPath,
	}
	// send message
	ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m))
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer")
	}
	ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"})

	return &unixListener{socketPath, c, ch}, nil
}

func (c *Client) dialStreamLocal(socketPath string) (Channel, error) {
	msg := streamLocalChannelOpenDirectMsg{
		socketPath: socketPath,
	}
	ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg))
	if err != nil {
		return nil, err
	}
	go DiscardRequests(in)
	return ch, err
}

type unixListener struct {
	socketPath string

	conn *Client
	in   <-chan forward
}

// Accept waits for and returns the next connection to the listener.
func (l *unixListener) Accept() (net.Conn, error) {
	s, ok := <-l.in
	if !ok {
		return nil, io.EOF
	}
	ch, incoming, err := s.newCh.Accept()
	if err != nil {
		return nil, err
	}
	go DiscardRequests(incoming)

	return &chanConn{
		Channel: ch,
		laddr: &net.UnixAddr{
			Name: l.socketPath,
			Net:  "unix",
		},
		raddr: &net.UnixAddr{
			Name: "@",
			Net:  "unix",
		},
	}, nil
}

// Close closes the listener.
func (l *unixListener) Close() error {
	// this also closes the listener.
	l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"})
	m := streamLocalChannelForwardMsg{
		l.socketPath,
	}
	ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m))
	if err == nil && !ok {
		err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed")
	}
	return err
}

// Addr returns the listener's network address.
func (l *unixListener) Addr() net.Addr {
	return &net.UnixAddr{
		Name: l.socketPath,
		Net:  "unix",
	}
}
vendor/golang.org/x/crypto/ssh/tcpip.go (generated, vendored; 465 lines)
@ -1,465 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssh

import (
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"
)

// Listen requests the remote peer open a listening socket on
// addr. Incoming connections will be available by calling Accept on
// the returned net.Listener. The listener must be serviced, or the
// SSH connection may hang.
// N must be "tcp", "tcp4", "tcp6", or "unix".
func (c *Client) Listen(n, addr string) (net.Listener, error) {
	switch n {
	case "tcp", "tcp4", "tcp6":
		laddr, err := net.ResolveTCPAddr(n, addr)
		if err != nil {
			return nil, err
		}
		return c.ListenTCP(laddr)
	case "unix":
		return c.ListenUnix(addr)
	default:
		return nil, fmt.Errorf("ssh: unsupported protocol: %s", n)
	}
}

// Automatic port allocation is broken with OpenSSH before 6.0. See
// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In
// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0,
// rather than the actual port number. This means you can never open
// two different listeners with auto allocated ports. We work around
// this by trying explicit ports until we succeed.

const openSSHPrefix = "OpenSSH_"

var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano()))

// isBrokenOpenSSHVersion returns true if the given version string
// specifies a version of OpenSSH that is known to have a bug in port
// forwarding.
func isBrokenOpenSSHVersion(versionStr string) bool {
	i := strings.Index(versionStr, openSSHPrefix)
	if i < 0 {
		return false
	}
	i += len(openSSHPrefix)
	j := i
	for ; j < len(versionStr); j++ {
		if versionStr[j] < '0' || versionStr[j] > '9' {
			break
		}
	}
	version, _ := strconv.Atoi(versionStr[i:j])
	return version < 6
}

// autoPortListenWorkaround simulates automatic port allocation by
// trying random ports repeatedly.
func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) {
	var sshListener net.Listener
	var err error
	const tries = 10
	for i := 0; i < tries; i++ {
		addr := *laddr
		addr.Port = 1024 + portRandomizer.Intn(60000)
		sshListener, err = c.ListenTCP(&addr)
		if err == nil {
			laddr.Port = addr.Port
			return sshListener, err
		}
	}
	return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err)
}

// RFC 4254 7.1
type channelForwardMsg struct {
	addr  string
	rport uint32
}

// ListenTCP requests the remote peer open a listening socket
// on laddr. Incoming connections will be available by calling
// Accept on the returned net.Listener.
func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
	if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
		return c.autoPortListenWorkaround(laddr)
	}

	m := channelForwardMsg{
		laddr.IP.String(),
		uint32(laddr.Port),
	}
	// send message
	ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m))
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, errors.New("ssh: tcpip-forward request denied by peer")
	}

	// If the original port was 0, then the remote side will
	// supply a real port number in the response.
	if laddr.Port == 0 {
		var p struct {
			Port uint32
		}
		if err := Unmarshal(resp, &p); err != nil {
			return nil, err
		}
		laddr.Port = int(p.Port)
	}

	// Register this forward, using the port number we obtained.
	ch := c.forwards.add(laddr)

	return &tcpListener{laddr, c, ch}, nil
}

// forwardList stores a mapping between remote
// forward requests and the tcpListeners.
type forwardList struct {
	sync.Mutex
	entries []forwardEntry
}

// forwardEntry represents an established mapping of a laddr on a
// remote ssh server to a channel connected to a tcpListener.
type forwardEntry struct {
	laddr net.Addr
	c     chan forward
}

// forward represents an incoming forwarded tcpip connection. The
// arguments to add/remove/lookup should be address as specified in
// the original forward-request.
type forward struct {
	newCh NewChannel // the ssh client channel underlying this forward
	raddr net.Addr   // the raddr of the incoming connection
}

func (l *forwardList) add(addr net.Addr) chan forward {
	l.Lock()
	defer l.Unlock()
	f := forwardEntry{
		laddr: addr,
		c:     make(chan forward, 1),
	}
	l.entries = append(l.entries, f)
	return f.c
}

// See RFC 4254, section 7.2
type forwardedTCPPayload struct {
	Addr       string
	Port       uint32
	OriginAddr string
	OriginPort uint32
}

// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr.
func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) {
	if port == 0 || port > 65535 {
		return nil, fmt.Errorf("ssh: port number out of range: %d", port)
	}
	ip := net.ParseIP(string(addr))
	if ip == nil {
		return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr)
	}
	return &net.TCPAddr{IP: ip, Port: int(port)}, nil
}

func (l *forwardList) handleChannels(in <-chan NewChannel) {
	for ch := range in {
		var (
			laddr net.Addr
			raddr net.Addr
			err   error
		)
		switch channelType := ch.ChannelType(); channelType {
		case "forwarded-tcpip":
			var payload forwardedTCPPayload
			if err = Unmarshal(ch.ExtraData(), &payload); err != nil {
				ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error())
				continue
			}

			// RFC 4254 section 7.2 specifies that incoming
			// addresses should list the address, in string
			// format. It is implied that this should be an IP
			// address, as it would be impossible to connect to it
			// otherwise.
			laddr, err = parseTCPAddr(payload.Addr, payload.Port)
			if err != nil {
				ch.Reject(ConnectionFailed, err.Error())
				continue
			}
			raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort)
			if err != nil {
				ch.Reject(ConnectionFailed, err.Error())
				continue
			}

		case "forwarded-streamlocal@openssh.com":
			var payload forwardedStreamLocalPayload
			if err = Unmarshal(ch.ExtraData(), &payload); err != nil {
				ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error())
				continue
			}
			laddr = &net.UnixAddr{
				Name: payload.SocketPath,
				Net:  "unix",
			}
			raddr = &net.UnixAddr{
				Name: "@",
				Net:  "unix",
			}
		default:
			panic(fmt.Errorf("ssh: unknown channel type %s", channelType))
		}
		if ok := l.forward(laddr, raddr, ch); !ok {
			// Section 7.2, implementations MUST reject spurious incoming
			// connections.
			ch.Reject(Prohibited, "no forward for address")
			continue
		}

	}
}

// remove removes the forward entry, and the channel feeding its
// listener.
func (l *forwardList) remove(addr net.Addr) {
	l.Lock()
	defer l.Unlock()
	for i, f := range l.entries {
		if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() {
			l.entries = append(l.entries[:i], l.entries[i+1:]...)
			close(f.c)
			return
		}
	}
}

// closeAll closes and clears all forwards.
func (l *forwardList) closeAll() {
	l.Lock()
	defer l.Unlock()
	for _, f := range l.entries {
		close(f.c)
	}
	l.entries = nil
}

func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool {
	l.Lock()
	defer l.Unlock()
	for _, f := range l.entries {
		if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() {
			f.c <- forward{newCh: ch, raddr: raddr}
			return true
		}
	}
	return false
}

type tcpListener struct {
	laddr *net.TCPAddr

	conn *Client
	in   <-chan forward
}

// Accept waits for and returns the next connection to the listener.
func (l *tcpListener) Accept() (net.Conn, error) {
	s, ok := <-l.in
	if !ok {
		return nil, io.EOF
	}
	ch, incoming, err := s.newCh.Accept()
	if err != nil {
		return nil, err
	}
	go DiscardRequests(incoming)

	return &chanConn{
		Channel: ch,
		laddr:   l.laddr,
		raddr:   s.raddr,
	}, nil
}

// Close closes the listener.
func (l *tcpListener) Close() error {
	m := channelForwardMsg{
		l.laddr.IP.String(),
		uint32(l.laddr.Port),
	}

	// this also closes the listener.
	l.conn.forwards.remove(l.laddr)
	ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m))
	if err == nil && !ok {
		err = errors.New("ssh: cancel-tcpip-forward failed")
	}
	return err
}

// Addr returns the listener's network address.
func (l *tcpListener) Addr() net.Addr {
	return l.laddr
}

// Dial initiates a connection to the addr from the remote host.
// The resulting connection has a zero LocalAddr() and RemoteAddr().
func (c *Client) Dial(n, addr string) (net.Conn, error) {
	var ch Channel
	switch n {
	case "tcp", "tcp4", "tcp6":
		// Parse the address into host and numeric port.
		host, portString, err := net.SplitHostPort(addr)
		if err != nil {
			return nil, err
		}
		port, err := strconv.ParseUint(portString, 10, 16)
		if err != nil {
			return nil, err
		}
		ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port))
		if err != nil {
			return nil, err
		}
		// Use a zero address for local and remote address.
		zeroAddr := &net.TCPAddr{
			IP:   net.IPv4zero,
			Port: 0,
		}
		return &chanConn{
			Channel: ch,
			laddr:   zeroAddr,
			raddr:   zeroAddr,
		}, nil
	case "unix":
		var err error
		ch, err = c.dialStreamLocal(addr)
		if err != nil {
			return nil, err
		}
		return &chanConn{
			Channel: ch,
			laddr: &net.UnixAddr{
				Name: "@",
				Net:  "unix",
			},
			raddr: &net.UnixAddr{
				Name: addr,
				Net:  "unix",
			},
		}, nil
	default:
		return nil, fmt.Errorf("ssh: unsupported protocol: %s", n)
	}
}

// DialTCP connects to the remote address raddr on the network net,
// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
// as the local address for the connection.
func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) {
	if laddr == nil {
		laddr = &net.TCPAddr{
			IP:   net.IPv4zero,
			Port: 0,
		}
	}
	ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port)
	if err != nil {
		return nil, err
	}
	return &chanConn{
		Channel: ch,
		laddr:   laddr,
		raddr:   raddr,
	}, nil
}

// RFC 4254 7.2
type channelOpenDirectMsg struct {
	raddr string
	rport uint32
	laddr string
	lport uint32
}

func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) {
	msg := channelOpenDirectMsg{
		raddr: raddr,
		rport: uint32(rport),
		laddr: laddr,
		lport: uint32(lport),
	}
	ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg))
	if err != nil {
		return nil, err
	}
	go DiscardRequests(in)
	return ch, err
}

type tcpChan struct {
	Channel // the backing channel
}

// chanConn fulfills the net.Conn interface without
// the tcpChan having to hold laddr or raddr directly.
type chanConn struct {
	Channel
	laddr, raddr net.Addr
}

// LocalAddr returns the local network address.
func (t *chanConn) LocalAddr() net.Addr {
	return t.laddr
}

// RemoteAddr returns the remote network address.
func (t *chanConn) RemoteAddr() net.Addr {
	return t.raddr
}

// SetDeadline sets the read and write deadlines associated
// with the connection.
func (t *chanConn) SetDeadline(deadline time.Time) error {
	if err := t.SetReadDeadline(deadline); err != nil {
		return err
	}
	return t.SetWriteDeadline(deadline)
}

// SetReadDeadline sets the read deadline.
// A zero value for t means Read will not time out.
// After the deadline, the error from Read will implement net.Error
// with Timeout() == true.
func (t *chanConn) SetReadDeadline(deadline time.Time) error {
	// for compatibility with previous version,
	// the error message contains "tcpChan"
	return errors.New("ssh: tcpChan: deadline not supported")
}

// SetWriteDeadline exists to satisfy the net.Conn interface
// but is not implemented by this type. It always returns an error.
func (t *chanConn) SetWriteDeadline(deadline time.Time) error {
	return errors.New("ssh: tcpChan: deadline not supported")
}
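The Listen and Dial methods removed above implement port forwarding over the SSH connection. A minimal sketch of a reverse forward built on them follows, assuming an already-connected *ssh.Client (set up as in the earlier sketch); the package name sshutil and the addresses are placeholders, not part of this commit.

package sshutil

import (
	"io"
	"net"

	"golang.org/x/crypto/ssh"
)

// reverseForward asks the remote peer to listen on remoteAddr and pipes every
// accepted connection back to localAddr, similar in spirit to "ssh -R".
func reverseForward(client *ssh.Client, remoteAddr, localAddr string) error {
	// Listen also accepts "unix" with a remote socket path, via ListenUnix.
	ln, err := client.Listen("tcp", remoteAddr)
	if err != nil {
		return err
	}
	defer ln.Close()
	for {
		remote, err := ln.Accept()
		if err != nil {
			return err
		}
		go func(remote net.Conn) {
			defer remote.Close()
			// The accepted connection arrives over SSH; bridge it to a local service.
			local, err := net.Dial("tcp", localAddr)
			if err != nil {
				return
			}
			defer local.Close()
			go io.Copy(local, remote)
			io.Copy(remote, local)
		}(remote)
	}
}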
vendor/golang.org/x/crypto/ssh/transport.go (generated, vendored; 375 lines)
@ -1,375 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssh

import (
	"bufio"
	"errors"
	"io"
	"log"
)

// debugTransport if set, will print packet types as they go over the
// wire. No message decoding is done, to minimize the impact on timing.
const debugTransport = false

const (
	gcmCipherID    = "aes128-gcm@openssh.com"
	aes128cbcID    = "aes128-cbc"
	tripledescbcID = "3des-cbc"
)

// packetConn represents a transport that implements packet based
// operations.
type packetConn interface {
	// Encrypt and send a packet of data to the remote peer.
	writePacket(packet []byte) error

	// Read a packet from the connection. The read is blocking,
	// i.e. if error is nil, then the returned byte slice is
	// always non-empty.
	readPacket() ([]byte, error)

	// Close closes the write-side of the connection.
	Close() error
}

// transport is the keyingTransport that implements the SSH packet
// protocol.
type transport struct {
	reader connectionState
	writer connectionState

	bufReader *bufio.Reader
	bufWriter *bufio.Writer
	rand      io.Reader
	isClient  bool
	io.Closer
}

// packetCipher represents a combination of SSH encryption/MAC
// protocol. A single instance should be used for one direction only.
type packetCipher interface {
	// writePacket encrypts the packet and writes it to w. The
	// contents of the packet are generally scrambled.
	writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error

	// readPacket reads and decrypts a packet of data. The
	// returned packet may be overwritten by future calls of
	// readPacket.
	readPacket(seqnum uint32, r io.Reader) ([]byte, error)
}

// connectionState represents one side (read or write) of the
// connection. This is necessary because each direction has its own
// keys, and can even have its own algorithms
type connectionState struct {
	packetCipher
	seqNum           uint32
	dir              direction
	pendingKeyChange chan packetCipher
}

// prepareKeyChange sets up key material for a keychange. The key changes in
// both directions are triggered by reading and writing a msgNewKey packet
// respectively.
func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error {
	if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil {
		return err
	} else {
		t.reader.pendingKeyChange <- ciph
	}

	if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil {
		return err
	} else {
		t.writer.pendingKeyChange <- ciph
	}

	return nil
}

func (t *transport) printPacket(p []byte, write bool) {
	if len(p) == 0 {
		return
	}
	who := "server"
	if t.isClient {
		who = "client"
	}
	what := "read"
	if write {
		what = "write"
	}

	log.Println(what, who, p[0])
}

// Read and decrypt next packet.
func (t *transport) readPacket() (p []byte, err error) {
	for {
		p, err = t.reader.readPacket(t.bufReader)
		if err != nil {
			break
		}
		if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) {
			break
		}
	}
	if debugTransport {
		t.printPacket(p, false)
	}

	return p, err
}

func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
	packet, err := s.packetCipher.readPacket(s.seqNum, r)
	s.seqNum++
	if err == nil && len(packet) == 0 {
		err = errors.New("ssh: zero length packet")
	}

	if len(packet) > 0 {
		switch packet[0] {
		case msgNewKeys:
			select {
			case cipher := <-s.pendingKeyChange:
				s.packetCipher = cipher
			default:
				return nil, errors.New("ssh: got bogus newkeys message.")
			}

		case msgDisconnect:
			// Transform a disconnect message into an
			// error. Since this is lowest level at which
			// we interpret message types, doing it here
			// ensures that we don't have to handle it
			// elsewhere.
			var msg disconnectMsg
			if err := Unmarshal(packet, &msg); err != nil {
				return nil, err
			}
			return nil, &msg
		}
	}

	// The packet may point to an internal buffer, so copy the
	// packet out here.
	fresh := make([]byte, len(packet))
	copy(fresh, packet)

	return fresh, err
}

func (t *transport) writePacket(packet []byte) error {
	if debugTransport {
		t.printPacket(packet, true)
	}
	return t.writer.writePacket(t.bufWriter, t.rand, packet)
}

func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
	changeKeys := len(packet) > 0 && packet[0] == msgNewKeys

	err := s.packetCipher.writePacket(s.seqNum, w, rand, packet)
	if err != nil {
		return err
	}
	if err = w.Flush(); err != nil {
		return err
	}
	s.seqNum++
	if changeKeys {
		select {
		case cipher := <-s.pendingKeyChange:
			s.packetCipher = cipher
		default:
			panic("ssh: no key material for msgNewKeys")
		}
	}
	return err
}

func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
	t := &transport{
		bufReader: bufio.NewReader(rwc),
		bufWriter: bufio.NewWriter(rwc),
		rand:      rand,
		reader: connectionState{
			packetCipher:     &streamPacketCipher{cipher: noneCipher{}},
			pendingKeyChange: make(chan packetCipher, 1),
		},
		writer: connectionState{
			packetCipher:     &streamPacketCipher{cipher: noneCipher{}},
			pendingKeyChange: make(chan packetCipher, 1),
		},
		Closer: rwc,
	}
	t.isClient = isClient

	if isClient {
		t.reader.dir = serverKeys
		t.writer.dir = clientKeys
	} else {
		t.reader.dir = clientKeys
		t.writer.dir = serverKeys
	}

	return t
}

type direction struct {
	ivTag     []byte
	keyTag    []byte
	macKeyTag []byte
}

var (
	serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
	clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
)

// generateKeys generates key material for IV, MAC and encryption.
func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
	cipherMode := cipherModes[algs.Cipher]
	macMode := macModes[algs.MAC]

	iv = make([]byte, cipherMode.ivSize)
	key = make([]byte, cipherMode.keySize)
	macKey = make([]byte, macMode.keySize)

	generateKeyMaterial(iv, d.ivTag, kex)
	generateKeyMaterial(key, d.keyTag, kex)
	generateKeyMaterial(macKey, d.macKeyTag, kex)
	return
}

// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as
// described in RFC 4253, section 6.4. direction should either be serverKeys
// (to setup server->client keys) or clientKeys (for client->server keys).
func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
	iv, key, macKey := generateKeys(d, algs, kex)

	if algs.Cipher == gcmCipherID {
		return newGCMCipher(iv, key, macKey)
	}

	if algs.Cipher == aes128cbcID {
		return newAESCBCCipher(iv, key, macKey, algs)
	}

	if algs.Cipher == tripledescbcID {
		return newTripleDESCBCCipher(iv, key, macKey, algs)
	}

	c := &streamPacketCipher{
		mac: macModes[algs.MAC].new(macKey),
		etm: macModes[algs.MAC].etm,
	}
	c.macResult = make([]byte, c.mac.Size())

	var err error
	c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv)
	if err != nil {
		return nil, err
	}

	return c, nil
}

// generateKeyMaterial fills out with key material generated from tag, K, H
// and sessionId, as specified in RFC 4253, section 7.2.
func generateKeyMaterial(out, tag []byte, r *kexResult) {
	var digestsSoFar []byte

	h := r.Hash.New()
	for len(out) > 0 {
		h.Reset()
		h.Write(r.K)
		h.Write(r.H)

		if len(digestsSoFar) == 0 {
			h.Write(tag)
			h.Write(r.SessionID)
		} else {
			h.Write(digestsSoFar)
		}

		digest := h.Sum(nil)
		n := copy(out, digest)
		out = out[n:]
		if len(out) > 0 {
			digestsSoFar = append(digestsSoFar, digest...)
		}
	}
}

const packageVersion = "SSH-2.0-Go"

// Sends and receives a version line. The versionLine string should
// be US ASCII, start with "SSH-2.0-", and should not include a
// newline. exchangeVersions returns the other side's version line.
func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) {
	// Contrary to the RFC, we do not ignore lines that don't
	// start with "SSH-2.0-" to make the library usable with
	// nonconforming servers.
	for _, c := range versionLine {
		// The spec disallows non US-ASCII chars, and
		// specifically forbids null chars.
		if c < 32 {
			return nil, errors.New("ssh: junk character in version line")
		}
	}
	if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil {
		return
	}

	them, err = readVersion(rw)
	return them, err
}

// maxVersionStringBytes is the maximum number of bytes that we'll
// accept as a version string. RFC 4253 section 4.2 limits this at 255
// chars
const maxVersionStringBytes = 255

// Read version string as specified by RFC 4253, section 4.2.
func readVersion(r io.Reader) ([]byte, error) {
	versionString := make([]byte, 0, 64)
	var ok bool
	var buf [1]byte

	for len(versionString) < maxVersionStringBytes {
		_, err := io.ReadFull(r, buf[:])
		if err != nil {
			return nil, err
		}
		// The RFC says that the version should be terminated with \r\n
		// but several SSH servers actually only send a \n.
		if buf[0] == '\n' {
			ok = true
			break
		}

		// non ASCII chars are disallowed, but we are lenient,
		// since Go doesn't use null-terminated strings.

		// The RFC allows a comment after a space, however,
		// all of it (version and comments) goes into the
		// session hash.
		versionString = append(versionString, buf[0])
	}

	if !ok {
		return nil, errors.New("ssh: overflow reading version string")
	}

	// There might be a '\r' on the end which we should remove.
	if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' {
		versionString = versionString[:len(versionString)-1]
	}
	return versionString, nil
}
vendor/google.golang.org/appengine/cloudsql/cloudsql.go (generated, vendored; new file, 62 lines)
@ -0,0 +1,62 @@
// Copyright 2013 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

/*
Package cloudsql exposes access to Google Cloud SQL databases.

This package does not work in App Engine "flexible environment".

This package is intended for MySQL drivers to make App Engine-specific
connections. Applications should use this package through database/sql:
Select a pure Go MySQL driver that supports this package, and use sql.Open
with protocol "cloudsql" and an address of the Cloud SQL instance.

A Go MySQL driver that has been tested to work well with Cloud SQL
is the go-sql-driver:
	import "database/sql"
	import _ "github.com/go-sql-driver/mysql"

	db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname")


Another driver that works well with Cloud SQL is the mymysql driver:
	import "database/sql"
	import _ "github.com/ziutek/mymysql/godrv"

	db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password")


Using either of these drivers, you can perform a standard SQL query.
This example assumes there is a table named 'users' with
columns 'first_name' and 'last_name':

	rows, err := db.Query("SELECT first_name, last_name FROM users")
	if err != nil {
		log.Errorf(ctx, "db.Query: %v", err)
	}
	defer rows.Close()

	for rows.Next() {
		var firstName string
		var lastName string
		if err := rows.Scan(&firstName, &lastName); err != nil {
			log.Errorf(ctx, "rows.Scan: %v", err)
			continue
		}
		log.Infof(ctx, "First: %v - Last: %v", firstName, lastName)
	}
	if err := rows.Err(); err != nil {
		log.Errorf(ctx, "Row error: %v", err)
	}
*/
package cloudsql

import (
	"net"
)

// Dial connects to the named Cloud SQL instance.
func Dial(instance string) (net.Conn, error) {
	return connect(instance)
}
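The package's own doc comment above already outlines usage; folded into a single self-contained program it looks roughly like the sketch below. This is only an illustration that reuses the driver, connection string and query from that comment and otherwise only applies to the classic App Engine runtime.

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Connection string taken from the package documentation above;
	// project-id and instance-name are placeholders.
	db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT first_name, last_name FROM users")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var firstName, lastName string
		if err := rows.Scan(&firstName, &lastName); err != nil {
			log.Fatal(err)
		}
		log.Printf("First: %v - Last: %v", firstName, lastName)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}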
vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go (generated, vendored; new file, 17 lines)
@ -0,0 +1,17 @@
// Copyright 2013 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build appengine

package cloudsql

import (
	"net"

	"appengine/cloudsql"
)

func connect(instance string) (net.Conn, error) {
	return cloudsql.Dial(instance)
}
vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go (generated, vendored; new file, 16 lines)
@ -0,0 +1,16 @@
// Copyright 2013 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build !appengine

package cloudsql

import (
	"errors"
	"net"
)

func connect(instance string) (net.Conn, error) {
	return nil, errors.New(`cloudsql: not supported in App Engine "flexible environment"`)
}
vendor/vendor.json (vendored; 96 lines)
@ -508,36 +508,6 @@
			"revision": "8152e7eb6ccf8679a64582a66b78519688d156ad",
			"revisionTime": "2016-01-12T19:33:35Z"
		},
		{
			"checksumSHA1": "Lg8OHK87XRGCaipG+5+zFyN8OMw=",
			"path": "github.com/joyent/triton-go",
			"revision": "7e6a47b300b10be9449610a6ff4fbae17d6e95b6",
			"revisionTime": "2018-01-16T16:19:11Z"
		},
		{
			"checksumSHA1": "Y03+L+I0FVZ2bMGWt1MHTDEyWM4=",
			"path": "github.com/joyent/triton-go/authentication",
			"revision": "7e6a47b300b10be9449610a6ff4fbae17d6e95b6",
			"revisionTime": "2018-01-16T16:19:11Z"
		},
		{
			"checksumSHA1": "MuJsGBr6HlXQYxZY9cM5rBk+Lns=",
			"path": "github.com/joyent/triton-go/client",
			"revision": "7e6a47b300b10be9449610a6ff4fbae17d6e95b6",
			"revisionTime": "2018-01-16T16:19:11Z"
		},
		{
			"checksumSHA1": "d/Py6j/uMgOAFNFGpsQrNnSsO+k=",
			"path": "github.com/joyent/triton-go/errors",
			"revision": "7e6a47b300b10be9449610a6ff4fbae17d6e95b6",
			"revisionTime": "2018-01-16T16:19:11Z"
		},
		{
			"checksumSHA1": "5v533ELM047YOiwHsyMaVzITpR0=",
			"path": "github.com/joyent/triton-go/storage",
			"revision": "7e6a47b300b10be9449610a6ff4fbae17d6e95b6",
			"revisionTime": "2018-01-16T16:19:11Z"
		},
		{
			"checksumSHA1": "mA7isU/nIAT5ytwIzK65H0vlVqI=",
			"path": "github.com/klauspost/compress/flate",
@ -561,12 +531,6 @@
			"revision": "90b2c57fba35a1dd05cb40f9200722763808d99b",
			"revisionTime": "2018-06-06T15:09:39Z"
		},
		{
			"checksumSHA1": "/8VtN8HUS0G235mhqfj2gRMi9Eg=",
			"path": "github.com/klauspost/readahead",
			"revision": "7f90b27d81113b71920c55b7a73a071dc81bdfd8",
			"revisionTime": "2017-10-07T12:43:06Z"
		},
		{
			"checksumSHA1": "KiQa3vguztElzJkoqeIGHlfLFJA=",
			"path": "github.com/klauspost/reedsolomon",
@ -608,24 +572,6 @@
			"revision": "db96a2b759cdef4f11a34506a42eb8d1290c598e",
			"revisionTime": "2016-07-26T03:20:27Z"
		},
		{
			"checksumSHA1": "uTShVxdYNwW+3WI6SfJwOc/LQgo=",
			"path": "github.com/minio/blazer/base",
			"revision": "2081f5bf046503f576d8712253724fbf2950fffe",
			"revisionTime": "2017-11-26T20:28:54Z"
		},
		{
			"checksumSHA1": "ucCxupZ1gyxvFsBg5igP13dySLI=",
			"path": "github.com/minio/blazer/internal/b2types",
			"revision": "8e81ddf2d8deed54c6ac3f7d264d78659e72fbb8",
			"revisionTime": "2017-10-06T21:06:28Z"
		},
		{
			"checksumSHA1": "zgBbPwwuUH2sxz8smOzOA9TrD5g=",
			"path": "github.com/minio/blazer/internal/blog",
			"revision": "8e81ddf2d8deed54c6ac3f7d264d78659e72fbb8",
			"revisionTime": "2017-10-06T21:06:28Z"
		},
		{
			"checksumSHA1": "fUWokilZyc1QDKnIgCDJE8n1S9U=",
			"path": "github.com/minio/cli",
@ -799,12 +745,6 @@
			"revision": "1f6e18d34f6790fc0afea6f13a5fe3d9ab1770af",
			"revisionTime": "2018-09-11T17:58:58Z"
		},
		{
			"checksumSHA1": "xCv4GBFyw07vZkVtKF/XrUnkHRk=",
			"path": "github.com/pkg/errors",
			"revision": "e881fd58d78e04cf6d0de1217f8707c8cc2249bc",
			"revisionTime": "2017-12-16T07:03:16Z"
		},
		{
			"checksumSHA1": "C3yiSMdTQxSY3xqKJzMV9T+KnIc=",
			"path": "github.com/pkg/profile",
@ -1128,42 +1068,12 @@
			"revision": "81e90905daefcd6fd217b62423c0908922eadb30",
			"revisionTime": "2017-08-25T20:24:07Z"
		},
		{
			"checksumSHA1": "IQkUIOnvlf0tYloFx9mLaXSvXWQ=",
			"path": "golang.org/x/crypto/curve25519",
			"revision": "7d9177d70076375b9a59c8fde23d52d9c4a7ecd5",
			"revisionTime": "2017-09-15T19:08:28Z"
		},
		{
			"checksumSHA1": "X6Q8nYb+KXh+64AKHwWOOcyijHQ=",
			"path": "golang.org/x/crypto/ed25519",
			"revision": "7d9177d70076375b9a59c8fde23d52d9c4a7ecd5",
			"revisionTime": "2017-09-15T19:08:28Z"
		},
		{
			"checksumSHA1": "LXFcVx8I587SnWmKycSDEq9yvK8=",
			"path": "golang.org/x/crypto/ed25519/internal/edwards25519",
			"revision": "7d9177d70076375b9a59c8fde23d52d9c4a7ecd5",
			"revisionTime": "2017-09-15T19:08:28Z"
		},
		{
			"checksumSHA1": "kVKE0OX1Xdw5mG7XKT86DLLKE2I=",
			"path": "golang.org/x/crypto/poly1305",
			"revision": "81e90905daefcd6fd217b62423c0908922eadb30",
			"revisionTime": "2017-08-25T20:24:07Z"
		},
		{
			"checksumSHA1": "yPCMw2UX8Jad4GNWSwo8QgZS6TY=",
			"path": "golang.org/x/crypto/ssh",
			"revision": "7d9177d70076375b9a59c8fde23d52d9c4a7ecd5",
			"revisionTime": "2017-09-15T19:08:28Z"
		},
		{
			"checksumSHA1": "ujKeyWHFOYmXm5IgAxfyFCGefsY=",
			"path": "golang.org/x/crypto/ssh/agent",
			"revision": "7d9177d70076375b9a59c8fde23d52d9c4a7ecd5",
			"revisionTime": "2017-09-15T19:08:28Z"
		},
		{
			"checksumSHA1": "GtamqiJoL7PGHsN454AoffBFMa8=",
			"path": "golang.org/x/net/context",
@ -1362,6 +1272,12 @@
			"revision": "170382fa85b10b94728989dfcf6cc818b335c952",
			"revisionTime": "2017-04-10T19:43:55Z"
		},
		{
			"checksumSHA1": "LiyXfqOzaeQ8vgYZH3t2hUEdVTw=",
			"path": "google.golang.org/appengine/cloudsql",
			"revision": "b9aad5d628b283f265adf8d3557faae187a8d015",
			"revisionTime": "2018-05-16T22:56:43Z"
		},
		{
			"checksumSHA1": "4o2JkeR2LyUfZ7BQIzHUejyqKno=",
			"path": "google.golang.org/appengine/internal",