Bring etcd support for bucket DNS federation

- Supports centralized `config.json`
- Supports centralized `bucket` service records
  for client lookups
- Implements a new proxy forwarder
Harshavardhana 2018-02-02 18:18:52 -08:00 committed by kannappanr
parent 7872c192ec
commit 853ea371ce
144 changed files with 77684 additions and 81 deletions


@ -818,8 +818,8 @@ func (a adminAPIHandlers) UpdateCredentialsHandler(w http.ResponseWriter,
// Update local credentials in memory.
globalServerConfig.SetCredential(creds)
if err = globalServerConfig.Save(); err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
if err = globalServerConfig.Save(getConfigFile()); err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
return
}


@ -17,10 +17,12 @@
package cmd
import (
"context"
"encoding/xml"
"fmt"
"net/http"
"github.com/coreos/etcd/client"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
@ -864,6 +866,29 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrAdminInvalidAccessKey
case auth.ErrInvalidSecretKeyLength:
apiErr = ErrAdminInvalidSecretKey
// SSE errors
case errInsecureSSERequest:
apiErr = ErrInsecureSSECustomerRequest
case errInvalidSSEAlgorithm:
apiErr = ErrInvalidSSECustomerAlgorithm
case errInvalidSSEKey:
apiErr = ErrInvalidSSECustomerKey
case errMissingSSEKey:
apiErr = ErrMissingSSECustomerKey
case errMissingSSEKeyMD5:
apiErr = ErrMissingSSECustomerKeyMD5
case errSSEKeyMD5Mismatch:
apiErr = ErrSSECustomerKeyMD5Mismatch
case errObjectTampered:
apiErr = ErrObjectTampered
case errEncryptedObject:
apiErr = ErrSSEEncryptedObject
case errInvalidSSEParameters:
apiErr = ErrInvalidSSECustomerParameters
case errSSEKeyMismatch:
apiErr = ErrAccessDenied // no access without correct key
case context.Canceled, context.DeadlineExceeded:
apiErr = ErrOperationTimedOut
}
if apiErr != ErrNone {
@ -871,27 +896,12 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
return apiErr
}
switch err { // SSE errors
case errInsecureSSERequest:
return ErrInsecureSSECustomerRequest
case errInvalidSSEAlgorithm:
return ErrInvalidSSECustomerAlgorithm
case errInvalidSSEKey:
return ErrInvalidSSECustomerKey
case errMissingSSEKey:
return ErrMissingSSECustomerKey
case errMissingSSEKeyMD5:
return ErrMissingSSECustomerKeyMD5
case errSSEKeyMD5Mismatch:
return ErrSSECustomerKeyMD5Mismatch
case errObjectTampered:
return ErrObjectTampered
case errEncryptedObject:
return ErrSSEEncryptedObject
case errInvalidSSEParameters:
return ErrInvalidSSECustomerParameters
case errSSEKeyMismatch:
return ErrAccessDenied // no access without correct key
// etcd-specific errors: a key is always a bucket for us, so return
// ErrNoSuchBucket in such a case.
if e, ok := err.(*client.Error); ok {
if e.Code == client.ErrorCodeKeyNotFound {
return ErrNoSuchBucket
}
}
switch err.(type) {


@ -28,6 +28,7 @@ import (
"strings"
"sync"
"github.com/coreos/etcd/client"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/event"
@ -158,12 +159,29 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
return
}
// Invoke the list buckets.
bucketsInfo, err := listBuckets(ctx)
// If etcd/DNS federation is configured, list buckets from etcd.
var bucketsInfo []BucketInfo
if globalDNSConfig != nil {
dnsBuckets, err := globalDNSConfig.List()
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
for _, dnsRecord := range dnsBuckets {
bucketsInfo = append(bucketsInfo, BucketInfo{
Name: dnsRecord.Key,
Created: dnsRecord.CreationDate,
})
}
} else {
// Invoke the list buckets.
var err error
bucketsInfo, err = listBuckets(ctx)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
}
// Generate response.
response := generateListBucketsResponse(bucketsInfo)
@ -353,12 +371,33 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
return
}
bucketLock := globalNSMutex.NewNSLock(bucket, "")
if err := bucketLock.GetLock(globalObjectTimeout); err != nil {
if globalDNSConfig != nil {
if _, err := globalDNSConfig.Get(bucket); err != nil {
if client.IsKeyNotFound(err) {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
defer bucketLock.Unlock()
if err = globalDNSConfig.Put(bucket); err != nil {
objectAPI.DeleteBucket(ctx, bucket)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Make sure to add Location information here only for the bucket.
w.Header().Set("Location", getObjectLocation(r, globalDomainName, bucket, ""))
writeSuccessResponseHeadersOnly(w)
return
}
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
writeErrorResponse(w, ErrBucketAlreadyOwnedByYou, r.URL)
return
}
// Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
@ -660,6 +699,15 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
logger.LogIf(ctx, nerr.Err)
}
if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
// Deleting the DNS entry failed; attempt to create the bucket again.
objectAPI.MakeBucketWithLocation(ctx, bucket, "")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
}
// Write success response.
writeSuccessNoContent(w)
}


@ -18,15 +18,19 @@ package cmd
import (
"errors"
"net"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"time"
etcdc "github.com/coreos/etcd/client"
"github.com/minio/cli"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/dns"
)
// Check for updates and print a notification message
@ -42,6 +46,18 @@ func checkUpdate(mode string) {
}
func initConfig() {
if globalEtcdClient != nil {
if err := loadConfig(); err != nil {
if etcdc.IsKeyNotFound(err) {
logger.FatalIf(newConfig(), "Unable to initialize minio config for the first time.")
logger.Info("Created minio configuration file successfully at", globalEtcdClient.Endpoints())
} else {
logger.FatalIf(err, "Unable to load config version: '%s'.", serverConfigVersion)
}
}
return
}
// Config file does not exist, we create it fresh and return upon success.
if isFile(getConfigFile()) {
logger.FatalIf(migrateConfig(), "Config migration failed")
@ -125,6 +141,31 @@ func handleCommonEnvVars() {
globalDomainName, globalIsEnvDomainName = os.LookupEnv("MINIO_DOMAIN")
etcdEndpointsEnv, ok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
if ok {
etcdEndpoints := strings.Split(etcdEndpointsEnv, ",")
var err error
globalEtcdClient, err = etcdc.New(etcdc.Config{
Endpoints: etcdEndpoints,
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
},
})
logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
}
globalDomainIP = os.Getenv("MINIO_DOMAIN_IP")
if globalDomainName != "" && globalDomainIP != "" && globalEtcdClient != nil {
var err error
globalDNSConfig, err = dns.NewCoreDNS(globalDomainName, globalDomainIP, globalMinioPort, globalEtcdClient)
logger.FatalIf(err, "Unable to initialize DNS config for %s.", globalDomainName)
}
if drives := os.Getenv("MINIO_CACHE_DRIVES"); drives != "" {
driveList, err := parseCacheDrives(strings.Split(drives, cacheEnvDelimiter))
if err != nil {


@ -128,9 +128,9 @@ func (s *serverConfig) GetCacheConfig() CacheConfig {
}
// Save config.
func (s *serverConfig) Save() error {
func (s *serverConfig) Save(configFile string) error {
// Save config file.
return quick.Save(getConfigFile(), s)
return quick.Save(configFile, s)
}
// Returns the string describing a difference with the given
@ -230,7 +230,10 @@ func newServerConfig() *serverConfig {
// found, otherwise use default parameters
func newConfig() error {
// Initialize server config.
srvCfg := newServerConfig()
srvCfg, err := newQuickConfig(newServerConfig())
if err != nil {
return err
}
// If env is set override the credentials from config file.
if globalIsEnvCreds {
@ -269,7 +272,29 @@ func newConfig() error {
globalServerConfigMu.Unlock()
// Save config into file.
return globalServerConfig.Save()
return globalServerConfig.Save(getConfigFile())
}
// newQuickConfig - initialize a new server config, with an allocated
// quick.Config interface.
func newQuickConfig(srvCfg *serverConfig) (*serverConfig, error) {
if globalEtcdClient == nil {
qcfg, err := quick.NewLocalConfig(srvCfg)
if err != nil {
return nil, err
}
srvCfg.Config = qcfg
return srvCfg, nil
}
qcfg, err := quick.NewEtcdConfig(srvCfg, globalEtcdClient)
if err != nil {
return nil, err
}
srvCfg.Config = qcfg
return srvCfg, nil
}
// getValidConfig - returns valid server configuration
@ -279,7 +304,14 @@ func getValidConfig() (*serverConfig, error) {
Browser: true,
}
if _, err := quick.Load(getConfigFile(), srvCfg); err != nil {
var err error
srvCfg, err = newQuickConfig(srvCfg)
if err != nil {
return nil, err
}
configFile := getConfigFile()
if err = srvCfg.Load(configFile); err != nil {
return nil, err
}


@ -50,7 +50,7 @@ func TestServerConfig(t *testing.T) {
}
// Attempt to save.
if err := globalServerConfig.Save(); err != nil {
if err := globalServerConfig.Save(getConfigFile()); err != nil {
t.Fatalf("Unable to save updated config file %s", err)
}


@ -21,6 +21,7 @@ import (
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event/target"
"github.com/minio/minio/pkg/quick"
)
/////////////////// Config V1 ///////////////////
@ -585,6 +586,8 @@ type serverConfigV22 struct {
// IMPORTANT NOTE: When updating this struct make sure that
// serverConfig.ConfigDiff() is updated as necessary.
type serverConfigV23 struct {
quick.Config `json:"-"` // ignore interfaces
Version string `json:"version"`
// S3 API configuration.


@ -19,13 +19,17 @@ package cmd
import (
"bufio"
"context"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/coreos/etcd/client"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/sys"
"github.com/rs/cors"
"golang.org/x/time/rate"
@ -611,6 +615,64 @@ func (h pathValidityHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.handler.ServeHTTP(w, r)
}
// To forward path-style requests on a bucket to the right
// configured server, the bucket-to-IP mapping is obtained
// from the centralized etcd configuration service.
type bucketForwardingHandler struct {
fwd *handlers.Forwarder
handler http.Handler
}
func (f bucketForwardingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if globalDNSConfig == nil || globalDomainName == "" {
f.handler.ServeHTTP(w, r)
return
}
bucket, object := urlPath2BucketObjectName(r.URL.Path)
if r.Method == http.MethodPut && bucket != "" && object == "" {
f.handler.ServeHTTP(w, r)
return
}
sr, err := globalDNSConfig.Get(bucket)
if err != nil {
if client.IsKeyNotFound(err) {
writeErrorResponse(w, ErrNoSuchBucket, r.URL)
} else {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
}
return
}
if sr.Host != globalDomainIP {
backendURL := fmt.Sprintf("http://%s:%d", sr.Host, sr.Port)
if globalIsSSL {
backendURL = fmt.Sprintf("https://%s:%d", sr.Host, sr.Port)
}
r.URL, err = url.Parse(backendURL)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
f.fwd.ServeHTTP(w, r)
return
}
f.handler.ServeHTTP(w, r)
}
// setBucketForwardingHandler middleware forwards path-style requests
// on a bucket to the right bucket location; the bucket-to-IP mapping
// is obtained from the centralized etcd configuration service.
func setBucketForwardingHandler(h http.Handler) http.Handler {
fwd := handlers.NewForwarder(&handlers.Forwarder{
PassHost: true,
RoundTripper: NewCustomHTTPTransport(),
})
return bucketForwardingHandler{fwd, h}
}
// setRateLimitHandler middleware limits the throughput to h using a
// rate.Limiter token bucket configured with maxOpenFileLimit and
// burst set to 1. The request will idle for up to 1*time.Second.


@ -22,11 +22,13 @@ import (
"runtime"
"time"
etcdc "github.com/coreos/etcd/client"
humanize "github.com/dustin/go-humanize"
"github.com/fatih/color"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/certs"
"github.com/minio/minio/pkg/dns"
)
// minio configuration related constants.
@ -160,6 +162,7 @@ var (
globalIsEnvDomainName bool
globalDomainName string // Root domain for virtual host style requests
globalDomainIP string // Root domain IP address
globalListingTimeout = newDynamicTimeout( /*30*/ 600*time.Second /*5*/, 600*time.Second) // timeout for listing related ops
globalObjectTimeout = newDynamicTimeout( /*1*/ 10*time.Minute /*10*/, 600*time.Second) // timeout for Object API related ops
@ -175,14 +178,18 @@ var (
globalStandardStorageClass storageClass
globalIsEnvWORM bool
// Is worm enabled
globalWORMEnabled bool
// Is Disk Caching set up
globalIsDiskCacheEnabled bool
// Disk cache drives
globalCacheDrives []string
// Disk cache excludes
globalCacheExcludes []string
// Disk cache expiry
globalCacheExpiry = 90
@ -192,12 +199,18 @@ var (
// Current RPC version
globalRPCAPIVersion = RPCVersion{3, 0, 0}
// Add new variable global values here.
// Allocated etcd endpoint for config and bucket DNS.
globalEtcdClient etcdc.Client
// Allocated DNS config wrapper over etcd client.
globalDNSConfig dns.Config
// Default usage check interval value.
globalDefaultUsageCheckInterval = 12 * time.Hour // 12 hours
// Usage check interval value.
globalUsageCheckInterval = globalDefaultUsageCheckInterval
// Add new variable global values here.
)
// global colors.


@ -49,6 +49,9 @@ func registerDistXLRouters(router *mux.Router, endpoints EndpointList) {
var globalHandlers = []HandlerFunc{
// set HTTP security headers such as Content-Security-Policy.
addSecurityHeaders,
// set Bucket forwarding handler to proxy path style requests
// when federated backend is enabled.
setBucketForwardingHandler,
// Ratelimit the incoming requests using a token bucket algorithm
setRateLimitHandler,
// Validate all the incoming paths.

View File

@ -89,6 +89,13 @@ ENVIRONMENT VARIABLES:
WORM:
MINIO_WORM: To turn on Write-Once-Read-Many in server, set this value to "on".
BUCKET-DNS:
MINIO_DOMAIN: Domain suffix used to resolve buckets over DNS (virtual-host-style requests).
MINIO_DOMAIN_IP: Public IP address of this cluster, published in the bucket DNS records.
ETCD:
MINIO_ETCD_ENDPOINTS: Comma separated list of etcd endpoints.
EXAMPLES:
1. Start minio server on "/home/shared" directory.
$ {{.HelpName}} /home/shared


@ -605,7 +605,7 @@ func newTestConfig(bucketLocation string) (rootPath string, err error) {
globalServerConfig.SetRegion(bucketLocation)
// Save config.
if err = globalServerConfig.Save(); err != nil {
if err = globalServerConfig.Save(getConfigFile()); err != nil {
return "", err
}


@ -480,9 +480,9 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
// Update credentials in memory
prevCred := globalServerConfig.SetCredential(creds)
// Save credentials to config file
if err := globalServerConfig.Save(); err != nil {
// As saving the configuration failed, restore the previous credential in memory.
// Persist updated credentials.
if err = globalServerConfig.Save(getConfigFile()); err != nil {
// Save the current creds when failed to update.
globalServerConfig.SetCredential(prevCred)
logger.LogIf(context.Background(), err)
return toJSONError(err)


@ -0,0 +1,11 @@
. {
etcd churchofminio.com {
endpoint http://localhost:2379 http://localhost:4001
upstream /etc/resolv.conf
}
debug
prometheus
cache 160 churchofminio.com
loadbalance
proxy . /etc/resolv.conf
}
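As a side note (not part of this commit): with the Corefile above serving `churchofminio.com` from etcd, a client could point a resolver directly at that CoreDNS instance to look up a federated bucket. A minimal Go sketch, assuming CoreDNS listens on `127.0.0.1:53` and a bucket named `mybucket` already has a DNS record:
```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	// Resolver pinned to the CoreDNS instance configured by the Corefile above.
	resolver := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			d := net.Dialer{Timeout: 5 * time.Second}
			return d.DialContext(ctx, network, "127.0.0.1:53")
		},
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Buckets appear as hosts under the served zone.
	addrs, err := resolver.LookupHost(ctx, "mybucket.churchofminio.com")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("mybucket is served from:", addrs)
}
```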


@ -0,0 +1,48 @@
# Federation
There are primarily two types of federation
- Bucket lookup from DNS
- Buckets shared across many clusters
This document explains in detail how to configure Minio to support `Bucket lookup` federation.
## Federation (Bucket Lookup)
Bucket lookup federation requires two dependencies
- etcd (for config, bucket SRV records)
- coredns (for DNS management based on populated bucket SRV records)
## Architecture
![bucket-lookup](./bucket-lookup.png)
### Run Multiple Clusters
> cluster1
```
export MINIO_ETCD_ENDPOINTS="http://remote-etcd1:2379,http://remote-etcd2:4001"
export MINIO_DOMAIN=domain.com
export MINIO_DOMAIN_IP=44.35.2.1
minio server http://rack{1...4}.host{1...4}.domain.com/mnt/export{1...32}
```
> cluster2
```
export MINIO_ETCD_ENDPOINTS="http://remote-etcd1:2379,http://remote-etcd2:4001"
export MINIO_DOMAIN=domain.com
export MINIO_DOMAIN_IP=44.35.2.2
minio server http://rack{5...8}.host{5...8}.domain.com/mnt/export{1...32}
```
In this configuration, `MINIO_ETCD_ENDPOINTS` points to the etcd backend that manages Minio's
`config.json` and the bucket SRV records. `MINIO_DOMAIN` is the domain suffix under which buckets
are resolved via DNS. For example, for a bucket named `mybucket`, clients can now use
`mybucket.domain.com` to resolve directly to the right cluster. `MINIO_DOMAIN_IP` is the public IP
address at which each cluster is reachable; it is unique per cluster.
NOTE: `mybucket` exists on only one cluster, either `cluster1` or `cluster2`. Which cluster
provisions the bucket is effectively random and is decided by how `domain.com` resolves; with
round-robin DNS on `domain.com`, either cluster may end up creating it. This control is not yet
exposed to the client, but could be offered based on the `region` parameter as supported by the
`AWS S3` specification.
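As a rough, non-authoritative illustration of the client-side lookup described above: once DNS resolves `mybucket.domain.com` to the owning cluster, any S3 client can address the bucket virtual-host style. The sketch below assumes the bucket name, domain and port 9000 from the examples, and performs a plain unauthenticated GET purely to show which cluster answers; real clients would use an S3 SDK with credentials.
```go
package main

import (
	"fmt"
	"net"
	"net/http"
)

func main() {
	// DNS answer comes from the SRV/A records Minio published via etcd/CoreDNS.
	addrs, err := net.LookupHost("mybucket.domain.com")
	if err != nil {
		panic(err)
	}
	fmt.Println("mybucket.domain.com resolves to:", addrs)

	// Virtual-host-style request to the cluster that owns the bucket.
	resp, err := http.Get("http://mybucket.domain.com:9000/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("answered with status:", resp.Status)
}
```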

Binary file not shown (new image, 36 KiB)


@ -28,7 +28,7 @@ Minio is an object storage server released under Apache License v2.0.
It is compatible with Amazon S3 cloud storage service. It is best
suited for storing unstructured data such as photos, videos, log
files, backups and container / VM images. Size of an object can
range from a few KBs to a maximum of 5TB.
range from a few KBs to a maximum of 5TiB.
%prep
%setup -qc

pkg/dns/coredns.go (new file, 140 lines)

@ -0,0 +1,140 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dns
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"sort"
"strconv"
"strings"
"time"
"github.com/coredns/coredns/plugin/etcd/msg"
etcdc "github.com/coreos/etcd/client"
)
// create a new coredns service record for the bucket.
func newCoreDNSMsg(bucket string, ip string, port int, ttl uint32) ([]byte, error) {
return json.Marshal(&SrvRecord{
Host: ip,
Port: port,
TTL: ttl,
CreationDate: time.Now().UTC(),
})
}
// List - retrieves the list of DNS entries (bucket SRV records) under the configured domain.
func (c *coreDNS) List() ([]SrvRecord, error) {
kapi := etcdc.NewKeysAPI(c.etcdClient)
key := msg.Path(fmt.Sprintf("%s.", c.domainName), defaultPrefixPath)
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
r, err := kapi.Get(ctx, key, nil)
cancel()
if err != nil {
return nil, err
}
var srvRecords []SrvRecord
for _, n := range r.Node.Nodes {
var srvRecord SrvRecord
if err = json.Unmarshal([]byte(n.Value), &srvRecord); err != nil {
return nil, err
}
srvRecord.Key = strings.TrimPrefix(n.Key, key)
srvRecords = append(srvRecords, srvRecord)
}
sort.Slice(srvRecords, func(i int, j int) bool {
return srvRecords[i].Key < srvRecords[j].Key
})
return srvRecords, nil
}
// Retrieves DNS record for a bucket.
func (c *coreDNS) Get(bucket string) (SrvRecord, error) {
kapi := etcdc.NewKeysAPI(c.etcdClient)
key := msg.Path(fmt.Sprintf("%s.%s.", bucket, c.domainName), defaultPrefixPath)
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
r, err := kapi.Get(ctx, key, nil)
cancel()
if err != nil {
return SrvRecord{}, err
}
var sr SrvRecord
if err = json.Unmarshal([]byte(r.Node.Value), &sr); err != nil {
return SrvRecord{}, err
}
sr.Key = strings.TrimPrefix(r.Node.Key, key)
return sr, nil
}
// Put - adds a DNS entry for the bucket into etcd in the CoreDNS etcd message format.
func (c *coreDNS) Put(bucket string) error {
bucketMsg, err := newCoreDNSMsg(bucket, c.domainIP, c.domainPort, defaultTTL)
if err != nil {
return err
}
kapi := etcdc.NewKeysAPI(c.etcdClient)
key := msg.Path(fmt.Sprintf("%s.%s.", bucket, c.domainName), defaultPrefixPath)
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
_, err = kapi.Set(ctx, key, string(bucketMsg), nil)
cancel()
return err
}
// Removes DNS entries added in Put().
func (c *coreDNS) Delete(bucket string) error {
kapi := etcdc.NewKeysAPI(c.etcdClient)
key := msg.Path(fmt.Sprintf("%s.%s.", bucket, c.domainName), defaultPrefixPath)
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
_, err := kapi.Delete(ctx, key, nil)
cancel()
return err
}
// CoreDNS - represents dns config for coredns server.
type coreDNS struct {
domainName, domainIP string
domainPort int
etcdClient etcdc.Client
}
// NewCoreDNS - initializes a new CoreDNS-backed DNS configuration.
func NewCoreDNS(domainName, domainIP, domainPort string, etcdClient etcdc.Client) (Config, error) {
if domainName == "" || domainIP == "" || etcdClient == nil {
return nil, errors.New("invalid argument")
}
if net.ParseIP(domainIP) == nil {
return nil, errors.New("invalid argument")
}
port, err := strconv.Atoi(domainPort)
if err != nil {
return nil, err
}
return &coreDNS{
domainName: domainName,
domainIP: domainIP,
domainPort: port,
etcdClient: etcdClient,
}, nil
}
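For illustration only (not part of the commit), the sketch below computes the etcd key and JSON value that `Put("mybucket")` would store, assuming the `domain.com`, `44.35.2.1` and port 9000 values from the federation docs; it reuses `msg.Path` and `SrvRecord` exactly as the code above does.
```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/coredns/coredns/plugin/etcd/msg"
	"github.com/minio/minio/pkg/dns"
)

func main() {
	// Key layout used by the CoreDNS etcd plugin: labels reversed under the
	// /skydns prefix, i.e. /skydns/com/domain/mybucket
	key := msg.Path("mybucket.domain.com.", "/skydns")
	fmt.Println("etcd key:  ", key)

	// Value is the JSON form of a SrvRecord, as produced by newCoreDNSMsg().
	value, _ := json.Marshal(dns.SrvRecord{
		Host:         "44.35.2.1",
		Port:         9000,
		TTL:          30, // defaultTTL in pkg/dns
		CreationDate: time.Now().UTC(),
	})
	fmt.Println("etcd value:", string(value))
}
```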

pkg/dns/dns.go (new file, 65 lines)

@ -0,0 +1,65 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dns
import (
"time"
)
const (
defaultTTL = 30
defaultPrefixPath = "/skydns"
defaultContextTimeout = 5 * time.Minute
)
// SrvRecord - represents a DNS service record
type SrvRecord struct {
Host string `json:"host,omitempty"`
Port int `json:"port,omitempty"`
Priority int `json:"priority,omitempty"`
Weight int `json:"weight,omitempty"`
Text string `json:"text,omitempty"`
Mail bool `json:"mail,omitempty"` // Be an MX record. Priority becomes Preference.
TTL uint32 `json:"ttl,omitempty"`
// Holds info about when the entry was created first.
CreationDate time.Time `json:"creationDate"`
// When a SRV record with a "Host: IP-address" is added, we synthesize
// a srv.Target domain name. Normally we convert the full Key where
// the record lives to a DNS name and use this as the srv.Target. When
// TargetStrip > 0 we strip the left most TargetStrip labels from the
// DNS name.
TargetStrip int `json:"targetstrip,omitempty"`
// Group is used to group (or *not* to group) different services
// together. Services with an identical Group are returned in
// the same answer.
Group string `json:"group,omitempty"`
// Key carries the original key used during Put().
Key string `json:"-"`
}
// Config - represents dns put, get interface. This interface can be
// used to implement various backends as needed.
type Config interface {
Put(key string) error
List() ([]SrvRecord, error)
Get(key string) (SrvRecord, error)
Delete(key string) error
}
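A hedged usage sketch (not part of the commit) of the `Config` interface above, backed by the CoreDNS implementation from `pkg/dns/coredns.go`; the etcd endpoint, domain, IP and port are placeholder values.
```go
package main

import (
	"fmt"
	"log"

	etcdc "github.com/coreos/etcd/client"
	"github.com/minio/minio/pkg/dns"
)

func main() {
	etcdClient, err := etcdc.New(etcdc.Config{
		Endpoints: []string{"http://localhost:2379"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// NewCoreDNS validates the domain/IP and returns a dns.Config.
	cfg, err := dns.NewCoreDNS("domain.com", "44.35.2.1", "9000", etcdClient)
	if err != nil {
		log.Fatal(err)
	}

	// Publish, look up, list and remove a bucket SRV record.
	if err = cfg.Put("mybucket"); err != nil {
		log.Fatal(err)
	}
	record, err := cfg.Get("mybucket")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("mybucket -> %s:%d\n", record.Host, record.Port)

	records, _ := cfg.List()
	fmt.Println("federated buckets:", len(records))

	_ = cfg.Delete("mybucket")
}
```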

pkg/handlers/forwarder.go (new file, 169 lines)

@ -0,0 +1,169 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package handlers
import (
"context"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
)
const defaultFlushInterval = time.Duration(100) * time.Millisecond
// Forwarder forwards all incoming HTTP requests to configured transport.
type Forwarder struct {
RoundTripper http.RoundTripper
PassHost bool
// internal variables
rewriter *headerRewriter
}
// NewForwarder creates an instance of Forwarder based on the provided list of configuration options
func NewForwarder(f *Forwarder) *Forwarder {
f.rewriter = &headerRewriter{}
if f.RoundTripper == nil {
f.RoundTripper = http.DefaultTransport
}
return f
}
// ServeHTTP forwards HTTP traffic using the configured transport
func (f *Forwarder) ServeHTTP(w http.ResponseWriter, inReq *http.Request) {
outReq := new(http.Request)
*outReq = *inReq // includes shallow copies of maps, but we handle this in Director
revproxy := httputil.ReverseProxy{
Director: func(req *http.Request) {
f.modifyRequest(req, inReq.URL)
},
Transport: f.RoundTripper,
FlushInterval: defaultFlushInterval,
}
revproxy.ServeHTTP(w, outReq)
}
func (f *Forwarder) getURLFromRequest(req *http.Request) *url.URL {
// If the Request was created by Go via a real HTTP request, RequestURI will
// contain the original query string. If the Request was created in code, RequestURI
// will be empty, and we will use the URL object instead
u := req.URL
if req.RequestURI != "" {
parsedURL, err := url.ParseRequestURI(req.RequestURI)
if err == nil {
u = parsedURL
}
}
return u
}
// copyURL provides an update-safe copy by avoiding a shallow copy of the User field
func copyURL(i *url.URL) *url.URL {
out := *i
if i.User != nil {
out.User = &(*i.User)
}
return &out
}
// Modify the request to handle the target URL
func (f *Forwarder) modifyRequest(outReq *http.Request, target *url.URL) {
outReq.URL = copyURL(outReq.URL)
outReq.URL.Scheme = target.Scheme
outReq.URL.Host = target.Host
u := f.getURLFromRequest(outReq)
outReq.URL.Path = u.Path
outReq.URL.RawPath = u.RawPath
outReq.URL.RawQuery = u.RawQuery
outReq.RequestURI = "" // Outgoing request should not have RequestURI
// Do not pass client Host header unless requested.
if !f.PassHost {
outReq.Host = target.Host
}
// TODO: only supports HTTP 1.1 for now.
outReq.Proto = "HTTP/1.1"
outReq.ProtoMajor = 1
outReq.ProtoMinor = 1
f.rewriter.Rewrite(outReq)
// Disable closeNotify when method GET for http pipelining
if outReq.Method == http.MethodGet {
quietReq := outReq.WithContext(context.Background())
*outReq = *quietReq
}
}
// headerRewriter is responsible for removing hop-by-hop headers and setting forwarding headers
type headerRewriter struct{}
// Clean up the IP in case it is an IPv6 address that has {zone} information in it, like
// "[fe80::d806:a55d:eb1b:49cc%vEthernet (vmxnet3 Ethernet Adapter - Virtual Switch)]:64692"
func ipv6fix(clientIP string) string {
return strings.Split(clientIP, "%")[0]
}
func (rw *headerRewriter) Rewrite(req *http.Request) {
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
clientIP = ipv6fix(clientIP)
if req.Header.Get(xRealIP) == "" {
req.Header.Set(xRealIP, clientIP)
}
}
xfProto := req.Header.Get(xForwardedProto)
if xfProto == "" {
if req.TLS != nil {
req.Header.Set(xForwardedProto, "https")
} else {
req.Header.Set(xForwardedProto, "http")
}
}
if xfPort := req.Header.Get(xForwardedPort); xfPort == "" {
req.Header.Set(xForwardedPort, forwardedPort(req))
}
if xfHost := req.Header.Get(xForwardedHost); xfHost == "" && req.Host != "" {
req.Header.Set(xForwardedHost, req.Host)
}
}
func forwardedPort(req *http.Request) string {
if req == nil {
return ""
}
if _, port, err := net.SplitHostPort(req.Host); err == nil && port != "" {
return port
}
if req.TLS != nil {
return "443"
}
return "80"
}
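An illustrative sketch (not part of the commit) of using `Forwarder` on its own: a tiny reverse proxy that sends every request to one fixed backend, mirroring how the bucket forwarding handler above sets `r.URL` after its DNS lookup. The backend URL and listen address are placeholders.
```go
package main

import (
	"log"
	"net/http"
	"net/url"

	"github.com/minio/minio/pkg/handlers"
)

func main() {
	fwd := handlers.NewForwarder(&handlers.Forwarder{
		PassHost: true, // keep the client-supplied Host header
	})

	backend, err := url.Parse("http://remote-minio:9000") // placeholder backend
	if err != nil {
		log.Fatal(err)
	}

	proxy := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The forwarder targets whatever r.URL points at; the original
		// request path is recovered from RequestURI by modifyRequest().
		r.URL = backend
		fwd.ServeHTTP(w, r)
	})

	log.Fatal(http.ListenAndServe(":8080", proxy))
}
```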


@ -26,8 +26,11 @@ import (
var (
// De-facto standard header keys.
xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host")
xForwardedPort = http.CanonicalHeaderKey("X-Forwarded-Port")
xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto")
xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme")
xForwardedServer = http.CanonicalHeaderKey("X-Forwarded-Server")
xRealIP = http.CanonicalHeaderKey("X-Real-IP")
)


@ -41,21 +41,21 @@ type Config interface {
DeepDiff(Config) ([]structs.Field, error)
}
// config - implements quick.Config interface
type config struct {
// localConfig - implements quick.Config interface
type localConfig struct {
data interface{}
lock *sync.RWMutex
}
// Version returns the current config file format version
func (d config) Version() string {
func (d localConfig) Version() string {
st := structs.New(d.data)
f := st.Field("Version")
return f.Value().(string)
}
// String converts JSON config to printable string
func (d config) String() string {
func (d localConfig) String() string {
configBytes, _ := json.MarshalIndent(d.data, "", "\t")
return string(configBytes)
}
@ -63,7 +63,7 @@ func (d config) String() string {
// Save writes config data to a file. Data format
// is selected based on file extension or JSON if
// not provided.
func (d config) Save(filename string) error {
func (d localConfig) Save(filename string) error {
d.lock.Lock()
defer d.lock.Unlock()
@ -89,19 +89,19 @@ func (d config) Save(filename string) error {
// Load - loads config from file and merge with currently set values
// File content format is guessed from the file name extension, if not
// available, consider that we have JSON.
func (d config) Load(filename string) error {
func (d localConfig) Load(filename string) error {
d.lock.Lock()
defer d.lock.Unlock()
return loadFileConfig(filename, d.data)
}
// Data - grab internal data map for reading
func (d config) Data() interface{} {
func (d localConfig) Data() interface{} {
return d.data
}
// Diff - list fields that are in A but not in B
func (d config) Diff(c Config) ([]structs.Field, error) {
func (d localConfig) Diff(c Config) ([]structs.Field, error) {
var fields []structs.Field
currFields := structs.Fields(d.Data())
@ -123,7 +123,7 @@ func (d config) Diff(c Config) ([]structs.Field, error) {
}
// DeepDiff - list fields in A that are missing or not equal to fields in B
func (d config) DeepDiff(c Config) ([]structs.Field, error) {
func (d localConfig) DeepDiff(c Config) ([]structs.Field, error) {
var fields []structs.Field
currFields := structs.Fields(d.Data())
@ -179,13 +179,13 @@ func writeFile(filename string, data []byte) error {
return safeFile.Close()
}
// New - instantiate a new config
func New(data interface{}) (Config, error) {
// NewLocalConfig - instantiate a new config r/w configs from local files.
func NewLocalConfig(data interface{}) (Config, error) {
if err := checkData(data); err != nil {
return nil, err
}
d := new(config)
d := new(localConfig)
d.data = data
d.lock = new(sync.RWMutex)
return d, nil
@ -204,7 +204,7 @@ func GetVersion(filename string) (version string, err error) {
// Load - loads json config from filename for the a given struct data
func Load(filename string, data interface{}) (qc Config, err error) {
if qc, err = New(data); err == nil {
if qc, err = NewLocalConfig(data); err == nil {
err = qc.Load(filename)
}
return qc, err
@ -213,7 +213,7 @@ func Load(filename string, data interface{}) (qc Config, err error) {
// Save - saves given configuration data into given file as JSON.
func Save(filename string, data interface{}) (err error) {
var qc Config
if qc, err = New(data); err == nil {
if qc, err = NewLocalConfig(data); err == nil {
err = qc.Save(filename)
}

pkg/quick/quick_etcd.go (new file, 175 lines)

@ -0,0 +1,175 @@
/*
* Quick - Quick key value store for config files and persistent state files
*
* Quick (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quick
import (
"encoding/json"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"time"
etcdc "github.com/coreos/etcd/client"
"github.com/fatih/structs"
"golang.org/x/net/context"
)
// etcdConfig - implements quick.Config interface
type etcdConfig struct {
lock *sync.Mutex
data interface{}
clnt etcdc.Client
}
// Version returns the current config file format version
func (d etcdConfig) Version() string {
st := structs.New(d.data)
f := st.Field("Version")
return f.Value().(string)
}
// String converts JSON config to printable string
func (d etcdConfig) String() string {
configBytes, _ := json.MarshalIndent(d.data, "", "\t")
return string(configBytes)
}
// Save writes config data to the configured etcd endpoints. Data format
// is selected based on the file extension, or JSON if not provided.
func (d etcdConfig) Save(filename string) error {
d.lock.Lock()
defer d.lock.Unlock()
// Fetch filename's extension
ext := filepath.Ext(filename)
// Marshal data
dataBytes, err := toMarshaller(ext)(d.data)
if err != nil {
return err
}
if runtime.GOOS == "windows" {
dataBytes = []byte(strings.Replace(string(dataBytes), "\n", "\r\n", -1))
}
kapi := etcdc.NewKeysAPI(d.clnt)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
_, err = kapi.Create(ctx, filename, string(dataBytes))
cancel()
return err
}
// Load - loads config from etcd and merges with currently set values.
// The content format is guessed from the file name extension; if not
// available, JSON is assumed.
func (d etcdConfig) Load(filename string) error {
d.lock.Lock()
defer d.lock.Unlock()
kapi := etcdc.NewKeysAPI(d.clnt)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
resp, err := kapi.Get(ctx, filename, nil)
cancel()
if err != nil {
return err
}
var ev *etcdc.Node
switch {
case resp.Node.Dir:
for _, ev = range resp.Node.Nodes {
if string(ev.Key) == filename {
break
}
}
default:
ev = resp.Node
}
fileData := ev.Value
if runtime.GOOS == "windows" {
fileData = strings.Replace(ev.Value, "\r\n", "\n", -1)
}
// Unmarshal file's content
return toUnmarshaller(filepath.Ext(filename))([]byte(fileData), d.data)
}
// Data - grab internal data map for reading
func (d etcdConfig) Data() interface{} {
return d.data
}
// Diff - list fields that are in A but not in B
func (d etcdConfig) Diff(c Config) ([]structs.Field, error) {
var fields []structs.Field
currFields := structs.Fields(d.Data())
newFields := structs.Fields(c.Data())
var found bool
for _, currField := range currFields {
found = false
for _, newField := range newFields {
if reflect.DeepEqual(currField.Name(), newField.Name()) {
found = true
}
}
if !found {
fields = append(fields, *currField)
}
}
return fields, nil
}
// DeepDiff - list fields in A that are missing or not equal to fields in B
func (d etcdConfig) DeepDiff(c Config) ([]structs.Field, error) {
var fields []structs.Field
currFields := structs.Fields(d.Data())
newFields := structs.Fields(c.Data())
var found bool
for _, currField := range currFields {
found = false
for _, newField := range newFields {
if reflect.DeepEqual(currField.Value(), newField.Value()) {
found = true
}
}
if !found {
fields = append(fields, *currField)
}
}
return fields, nil
}
// NewEtcdConfig - instantiate a new etcd config
func NewEtcdConfig(data interface{}, clnt etcdc.Client) (Config, error) {
if err := checkData(data); err != nil {
return nil, err
}
d := new(etcdConfig)
d.data = data
d.clnt = clnt
d.lock = &sync.Mutex{}
return d, nil
}
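A possible usage sketch (assumptions: a local etcd at `localhost:2379` and a toy struct carrying the `Version` field the quick package expects), showing that the etcd-backed config exposes the same `Save`/`Load` surface as `NewLocalConfig`, with the "filename" acting as the etcd key.
```go
package main

import (
	"fmt"
	"log"

	etcdc "github.com/coreos/etcd/client"
	"github.com/minio/minio/pkg/quick"
)

type toyConfig struct {
	Version string
	Region  string
}

func main() {
	clnt, err := etcdc.New(etcdc.Config{Endpoints: []string{"http://localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}

	cfg := &toyConfig{Version: "1", Region: "us-east-1"}

	// Same quick.Config interface as NewLocalConfig, but Save/Load talk to etcd.
	qc, err := quick.NewEtcdConfig(cfg, clnt)
	if err != nil {
		log.Fatal(err)
	}
	if err = qc.Save("/config/config.json"); err != nil {
		log.Fatal(err)
	}

	loaded := &toyConfig{Version: "1"}
	qc2, err := quick.NewEtcdConfig(loaded, clnt)
	if err != nil {
		log.Fatal(err)
	}
	if err = qc2.Load("/config/config.json"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("region from etcd:", loaded.Region)
}
```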


@ -36,7 +36,7 @@ func TestReadVersion(t *testing.T) {
Version string
}
saveMe := myStruct{"1"}
config, err := New(&saveMe)
config, err := NewLocalConfig(&saveMe)
if err != nil {
t.Fatal(err)
}
@ -59,7 +59,7 @@ func TestReadVersionErr(t *testing.T) {
Version int
}
saveMe := myStruct{1}
_, err := New(&saveMe)
_, err := NewLocalConfig(&saveMe)
if err == nil {
t.Fatal("Unexpected should fail in initialization for bad input")
}
@ -95,7 +95,7 @@ func TestSaveFailOnDir(t *testing.T) {
Version string
}
saveMe := myStruct{"1"}
config, err := New(&saveMe)
config, err := NewLocalConfig(&saveMe)
if err != nil {
t.Fatal(err)
}
@ -171,7 +171,7 @@ func TestLoadFile(t *testing.T) {
if err == nil {
t.Fatal("Unexpected should fail to load empty JSON")
}
config, err := New(&saveMe)
config, err := NewLocalConfig(&saveMe)
if err != nil {
t.Fatal(err)
}
@ -186,7 +186,7 @@ func TestLoadFile(t *testing.T) {
}
saveMe = myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}}
config, err = New(&saveMe)
config, err = NewLocalConfig(&saveMe)
if err != nil {
t.Fatal(err)
}
@ -240,7 +240,7 @@ directories:
saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}}
// Save format using
config, err := New(&saveMe)
config, err := NewLocalConfig(&saveMe)
if err != nil {
t.Fatal(err)
}
@ -262,7 +262,7 @@ directories:
// Check if the loaded data is the same as the saved one
loadMe := myStruct{}
config, err = New(&loadMe)
config, err = NewLocalConfig(&loadMe)
if err != nil {
t.Fatal(err)
}
@ -306,7 +306,7 @@ func TestJSONFormat(t *testing.T) {
saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}}
// Save format using
config, err := New(&saveMe)
config, err := NewLocalConfig(&saveMe)
if err != nil {
t.Fatal(err)
}
@ -328,7 +328,7 @@ func TestJSONFormat(t *testing.T) {
// Check if the loaded data is the same as the saved one
loadMe := myStruct{}
config, err = New(&loadMe)
config, err = NewLocalConfig(&loadMe)
if err != nil {
t.Fatal(err)
}
@ -351,7 +351,7 @@ func TestSaveLoad(t *testing.T) {
Directories []string
}
saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}}
config, err := New(&saveMe)
config, err := NewLocalConfig(&saveMe)
if err != nil {
t.Fatal(err)
}
@ -361,7 +361,7 @@ func TestSaveLoad(t *testing.T) {
}
loadMe := myStruct{Version: "1"}
newConfig, err := New(&loadMe)
newConfig, err := NewLocalConfig(&loadMe)
if err != nil {
t.Fatal(err)
}
@ -393,7 +393,7 @@ func TestSaveBackup(t *testing.T) {
Directories []string
}
saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}}
config, err := New(&saveMe)
config, err := NewLocalConfig(&saveMe)
if err != nil {
t.Fatal(err)
}
@ -403,7 +403,7 @@ func TestSaveBackup(t *testing.T) {
}
loadMe := myStruct{Version: "1"}
newConfig, err := New(&loadMe)
newConfig, err := NewLocalConfig(&loadMe)
if err != nil {
t.Fatal(err)
}
@ -424,7 +424,7 @@ func TestSaveBackup(t *testing.T) {
t.Fatal("Expected to mismatch but succeeded instead")
}
config, err = New(&mismatch)
config, err = NewLocalConfig(&mismatch)
if err != nil {
t.Fatal(err)
}
@ -442,20 +442,20 @@ func TestDiff(t *testing.T) {
Directories []string
}
saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}}
config, err := New(&saveMe)
config, err := NewLocalConfig(&saveMe)
if err != nil {
t.Fatal(err)
}
type myNewStruct struct {
type myNewLocalConfigStruct struct {
Version string
// User string
Password string
Directories []string
}
mismatch := myNewStruct{"1", "nopassword", []string{"Work", "documents", "Music"}}
newConfig, err := New(&mismatch)
mismatch := myNewLocalConfigStruct{"1", "nopassword", []string{"Work", "documents", "Music"}}
newConfig, err := NewLocalConfig(&mismatch)
if err != nil {
t.Fatal(err)
}
@ -482,13 +482,13 @@ func TestDeepDiff(t *testing.T) {
Directories []string
}
saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}}
config, err := New(&saveMe)
config, err := NewLocalConfig(&saveMe)
if err != nil {
t.Fatal(err)
}
mismatch := myStruct{"1", "Guest", "nopassword", []string{"Work", "documents", "Music"}}
newConfig, err := New(&mismatch)
newConfig, err := NewLocalConfig(&mismatch)
if err != nil {
t.Fatal(err)
}

vendor/github.com/coredns/coredns/LICENSE (new vendored file, 201 lines)

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,48 @@
package msg
import (
"path"
"strings"
"github.com/coredns/coredns/plugin/pkg/dnsutil"
"github.com/miekg/dns"
)
// Path converts a domainname to an etcd path. If s looks like service.staging.skydns.local.,
// the resulting key will be /skydns/local/skydns/staging/service .
func Path(s, prefix string) string {
l := dns.SplitDomainName(s)
for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
l[i], l[j] = l[j], l[i]
}
return path.Join(append([]string{"/" + prefix + "/"}, l...)...)
}
// Domain is the opposite of Path.
func Domain(s string) string {
l := strings.Split(s, "/")
// start with 1, to strip /skydns
for i, j := 1, len(l)-1; i < j; i, j = i+1, j-1 {
l[i], l[j] = l[j], l[i]
}
return dnsutil.Join(l[1 : len(l)-1])
}
// PathWithWildcard acts as Path, but if a name contains wildcards (* or any), the name will be
// chopped off before the (first) wildcard, and we do a higher level search and
// later find the matching names. So service.*.skydns.local, will look for all
// services under skydns.local and will later check for names that match
// service.*.skydns.local. If a wildcard is found the returned bool is true.
func PathWithWildcard(s, prefix string) (string, bool) {
l := dns.SplitDomainName(s)
for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
l[i], l[j] = l[j], l[i]
}
for i, k := range l {
if k == "*" || k == "any" {
return path.Join(append([]string{"/" + prefix + "/"}, l[:i]...)...), true
}
}
return path.Join(append([]string{"/" + prefix + "/"}, l...)...), false
}


@ -0,0 +1,203 @@
// Package msg defines the Service structure which is used for service discovery.
package msg
import (
"fmt"
"net"
"strings"
"github.com/miekg/dns"
)
// Service defines a discoverable service in etcd. It is the rdata from a SRV
// record, but with a twist. Host (Target in SRV) must be a domain name, but
// if it looks like an IP address (4/6), we will treat it like an IP address.
type Service struct {
Host string `json:"host,omitempty"`
Port int `json:"port,omitempty"`
Priority int `json:"priority,omitempty"`
Weight int `json:"weight,omitempty"`
Text string `json:"text,omitempty"`
Mail bool `json:"mail,omitempty"` // Be an MX record. Priority becomes Preference.
TTL uint32 `json:"ttl,omitempty"`
// When a SRV record with a "Host: IP-address" is added, we synthesize
// a srv.Target domain name. Normally we convert the full Key where
// the record lives to a DNS name and use this as the srv.Target. When
// TargetStrip > 0 we strip the left most TargetStrip labels from the
// DNS name.
TargetStrip int `json:"targetstrip,omitempty"`
// Group is used to group (or *not* to group) different services
// together. Services with an identical Group are returned in the same
// answer.
Group string `json:"group,omitempty"`
// Etcd key where we found this service and ignored from json un-/marshalling
Key string `json:"-"`
}
// RR returns an RR representation of s. It is in a condensed form to minimize space
// when this is returned in a DNS message.
// The RR will look like:
// 1.rails.production.east.skydns.local. 300 CH TXT "service1.example.com:8080(10,0,,false)[0,]"
// etcd Key Ttl Host:Port < see below >
// between parens: (Priority, Weight, Text (only first 200 bytes!), Mail)
// between blockquotes: [TargetStrip,Group]
// If the record is synthesised by CoreDNS (i.e. no lookup in etcd happened):
//
// TODO(miek): what to put here?
//
func (s *Service) RR() *dns.TXT {
l := len(s.Text)
if l > 200 {
l = 200
}
t := new(dns.TXT)
t.Hdr.Class = dns.ClassCHAOS
t.Hdr.Ttl = s.TTL
t.Hdr.Rrtype = dns.TypeTXT
t.Hdr.Name = Domain(s.Key)
t.Txt = make([]string, 1)
t.Txt[0] = fmt.Sprintf("%s:%d(%d,%d,%s,%t)[%d,%s]",
s.Host, s.Port,
s.Priority, s.Weight, s.Text[:l], s.Mail,
s.TargetStrip, s.Group)
return t
}
// NewSRV returns a new SRV record based on the Service.
func (s *Service) NewSRV(name string, weight uint16) *dns.SRV {
host := targetStrip(dns.Fqdn(s.Host), s.TargetStrip)
return &dns.SRV{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: s.TTL},
Priority: uint16(s.Priority), Weight: weight, Port: uint16(s.Port), Target: dns.Fqdn(host)}
}
// NewMX returns a new MX record based on the Service.
func (s *Service) NewMX(name string) *dns.MX {
host := targetStrip(dns.Fqdn(s.Host), s.TargetStrip)
return &dns.MX{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: s.TTL},
Preference: uint16(s.Priority), Mx: host}
}
// NewA returns a new A record based on the Service.
func (s *Service) NewA(name string, ip net.IP) *dns.A {
return &dns.A{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: s.TTL}, A: ip}
}
// NewAAAA returns a new AAAA record based on the Service.
func (s *Service) NewAAAA(name string, ip net.IP) *dns.AAAA {
return &dns.AAAA{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: s.TTL}, AAAA: ip}
}
// NewCNAME returns a new CNAME record based on the Service.
func (s *Service) NewCNAME(name string, target string) *dns.CNAME {
return &dns.CNAME{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: s.TTL}, Target: dns.Fqdn(target)}
}
// NewTXT returns a new TXT record based on the Service.
func (s *Service) NewTXT(name string) *dns.TXT {
return &dns.TXT{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: s.TTL}, Txt: split255(s.Text)}
}
// NewPTR returns a new PTR record based on the Service.
func (s *Service) NewPTR(name string, target string) *dns.PTR {
return &dns.PTR{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: s.TTL}, Ptr: dns.Fqdn(target)}
}
// NewNS returns a new NS record based on the Service.
func (s *Service) NewNS(name string) *dns.NS {
host := targetStrip(dns.Fqdn(s.Host), s.TargetStrip)
return &dns.NS{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: s.TTL}, Ns: host}
}
// Group checks the services in sx; it looks for a Group attribute on the shortest
// keys. If there are multiple shortest keys *and* the group attribute disagrees (and
// is not empty), we don't consider it a group.
// If a group is found, only services with *that* group (or no group) will be returned.
func Group(sx []Service) []Service {
if len(sx) == 0 {
return sx
}
// Shortest key with group attribute sets the group for this set.
group := sx[0].Group
slashes := strings.Count(sx[0].Key, "/")
length := make([]int, len(sx))
for i, s := range sx {
x := strings.Count(s.Key, "/")
length[i] = x
if x < slashes {
if s.Group == "" {
break
}
slashes = x
group = s.Group
}
}
if group == "" {
return sx
}
ret := []Service{} // with slice-tricks in sx we can probably save this allocation (TODO)
for i, s := range sx {
if s.Group == "" {
ret = append(ret, s)
continue
}
// Disagreement on the same level
if length[i] == slashes && s.Group != group {
return sx
}
if s.Group == group {
ret = append(ret, s)
}
}
return ret
}
// split255 splits a string into 255-byte chunks.
func split255(s string) []string {
if len(s) < 255 {
return []string{s}
}
sx := []string{}
p, i := 0, 255
for {
if i <= len(s) {
sx = append(sx, s[p:i])
} else {
sx = append(sx, s[p:])
break
}
p, i = p+255, i+255
}
return sx
}
// targetStrip strips "targetstrip" labels from the left side of the fully qualified name.
func targetStrip(name string, targetStrip int) string {
if targetStrip == 0 {
return name
}
offset, end := 0, false
for i := 0; i < targetStrip; i++ {
offset, end = dns.NextLabel(name, offset)
}
if end {
// We overshot the name, use the original one.
offset = 0
}
name = name[offset:]
return name
}
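The grouping rule above is easiest to see with a concrete input. The following sketch (keys, hosts and group names are hypothetical, and the example is assumed to live in the same `msg` package) shows that once the shortest key fixes the group, services from other groups at that level are dropped:

```go
package msg

import "fmt"

func ExampleGroup() {
	sx := []Service{
		// Shortest key (fewest "/" separators) carries Group "g1".
		{Host: "c.example.org", Key: "/skydns/org/example/prod/c", Group: "g1"},
		{Host: "a.example.org", Key: "/skydns/org/example/prod/web/a", Group: "g1"},
		{Host: "b.example.org", Key: "/skydns/org/example/prod/web/b", Group: "g2"},
	}
	for _, s := range Group(sx) {
		fmt.Println(s.Host) // c.example.org, a.example.org -- the "g2" service is filtered out
	}
}
```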


@ -0,0 +1,33 @@
package msg
import (
"net"
"github.com/miekg/dns"
)
// HostType returns the DNS type of what is encoded in the Service Host field. We're reusing
// dns.TypeXXX to not reinvent a new set of identifiers.
//
// dns.TypeA: the service's Host field contains an A record.
// dns.TypeAAAA: the service's Host field contains an AAAA record.
// dns.TypeCNAME: the service's Host field contains a name.
//
// Note that a service can double/triple as a TXT record or MX record.
func (s *Service) HostType() (what uint16, normalized net.IP) {
ip := net.ParseIP(s.Host)
switch {
case ip == nil:
return dns.TypeCNAME, nil
case ip.To4() != nil:
return dns.TypeA, ip.To4()
case ip.To4() == nil:
return dns.TypeAAAA, ip.To16()
}
// This should never be reached.
return dns.TypeNone, nil
}
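A short sketch of the three cases HostType distinguishes (hosts are hypothetical; assumed to sit in the same `msg` package):

```go
package msg

import (
	"fmt"

	"github.com/miekg/dns"
)

func ExampleService_HostType() {
	for _, host := range []string{"10.0.0.1", "2001:db8::1", "web.example.org"} {
		t, ip := (&Service{Host: host}).HostType()
		// Prints: A 10.0.0.1, then AAAA 2001:db8::1, then CNAME <nil>
		fmt.Println(dns.TypeToString[t], ip)
	}
}
```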


@ -0,0 +1,15 @@
package dnsutil
import "github.com/miekg/dns"
// DuplicateCNAME returns true if r already exists in records.
func DuplicateCNAME(r *dns.CNAME, records []dns.RR) bool {
for _, rec := range records {
if v, ok := rec.(*dns.CNAME); ok {
if v.Target == r.Target {
return true
}
}
}
return false
}


@ -0,0 +1,12 @@
package dnsutil
import "github.com/miekg/dns"
// Dedup de-duplicates a message.
func Dedup(m *dns.Msg) *dns.Msg {
// TODO(miek): expensive!
m.Answer = dns.Dedup(m.Answer, nil)
m.Ns = dns.Dedup(m.Ns, nil)
m.Extra = dns.Dedup(m.Extra, nil)
return m
}


@ -0,0 +1,2 @@
// Package dnsutil contains DNS related helper functions.
package dnsutil


@ -0,0 +1,82 @@
package dnsutil
import (
"fmt"
"net"
"os"
"github.com/miekg/dns"
)
// ParseHostPortOrFile parses the strings in s; each string can either be an address,
// an address:port, or a filename. The address part is checked; in the filename case a
// resolv.conf-like file is parsed and the nameservers found are returned.
func ParseHostPortOrFile(s ...string) ([]string, error) {
var servers []string
for _, host := range s {
addr, _, err := net.SplitHostPort(host)
if err != nil {
// Parse didn't work, it is not an addr:port combo
if net.ParseIP(host) == nil {
// Not an IP address.
ss, err := tryFile(host)
if err == nil {
servers = append(servers, ss...)
continue
}
return servers, fmt.Errorf("not an IP address or file: %q", host)
}
ss := net.JoinHostPort(host, "53")
servers = append(servers, ss)
continue
}
if net.ParseIP(addr) == nil {
// Not an IP address.
ss, err := tryFile(host)
if err == nil {
servers = append(servers, ss...)
continue
}
return servers, fmt.Errorf("not an IP address or file: %q", host)
}
servers = append(servers, host)
}
return servers, nil
}
// Try to open this as a file first.
func tryFile(s string) ([]string, error) {
c, err := dns.ClientConfigFromFile(s)
if err == os.ErrNotExist {
return nil, fmt.Errorf("failed to open file %q: %q", s, err)
} else if err != nil {
return nil, err
}
servers := []string{}
for _, s := range c.Servers {
servers = append(servers, net.JoinHostPort(s, c.Port))
}
return servers, nil
}
// ParseHostPort will check if the host part is a valid IP address; if the
// IP address is valid but no port is found, defaultPort is added.
func ParseHostPort(s, defaultPort string) (string, error) {
addr, port, err := net.SplitHostPort(s)
if port == "" {
port = defaultPort
}
if err != nil {
if net.ParseIP(s) == nil {
return "", fmt.Errorf("must specify an IP address: `%s'", s)
}
return net.JoinHostPort(s, port), nil
}
if net.ParseIP(addr) == nil {
return "", fmt.Errorf("must specify an IP address: `%s'", addr)
}
return net.JoinHostPort(addr, port), nil
}
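A minimal usage sketch of the two parsers above (addresses and the resolv.conf path are hypothetical):

```go
package dnsutil

import "fmt"

func ExampleParseHostPortOrFile() {
	// A bare IP gets port 53 appended, an addr:port is kept as-is, and a
	// filename is parsed as a resolv.conf-like file.
	servers, err := ParseHostPortOrFile("8.8.8.8", "10.0.0.1:5353", "/etc/resolv.conf")
	fmt.Println(servers, err)
}

func ExampleParseHostPort() {
	addr, err := ParseHostPort("10.0.0.1", "53")
	fmt.Println(addr, err) // 10.0.0.1:53 <nil>
}
```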


@ -0,0 +1,19 @@
package dnsutil
import (
"strings"
"github.com/miekg/dns"
)
// Join joins labels to form a fully qualified domain name. If the last label is
// the root label it is ignored. No other syntax checks are performed.
func Join(labels []string) string {
ll := len(labels)
if labels[ll-1] == "." {
s := strings.Join(labels[:ll-1], ".")
return dns.Fqdn(s)
}
s := strings.Join(labels, ".")
return dns.Fqdn(s)
}


@ -0,0 +1,68 @@
package dnsutil
import (
"net"
"strings"
)
// ExtractAddressFromReverse turns a standard PTR reverse record name
// into an IP address. This works for IPv4 and IPv6.
//
// 54.119.58.176.in-addr.arpa. becomes 176.58.119.54. If the conversion
// fails the empty string is returned.
func ExtractAddressFromReverse(reverseName string) string {
search := ""
f := reverse
switch {
case strings.HasSuffix(reverseName, v4arpaSuffix):
search = strings.TrimSuffix(reverseName, v4arpaSuffix)
case strings.HasSuffix(reverseName, v6arpaSuffix):
search = strings.TrimSuffix(reverseName, v6arpaSuffix)
f = reverse6
default:
return ""
}
// Reverse the segments and then combine them.
return f(strings.Split(search, "."))
}
func reverse(slice []string) string {
for i := 0; i < len(slice)/2; i++ {
j := len(slice) - i - 1
slice[i], slice[j] = slice[j], slice[i]
}
ip := net.ParseIP(strings.Join(slice, ".")).To4()
if ip == nil {
return ""
}
return ip.String()
}
// reverse6 reverses the segments and combines them according to RFC 3596:
// b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2
// is reversed to 2001:db8::567:89ab
func reverse6(slice []string) string {
for i := 0; i < len(slice)/2; i++ {
j := len(slice) - i - 1
slice[i], slice[j] = slice[j], slice[i]
}
slice6 := []string{}
for i := 0; i < len(slice)/4; i++ {
slice6 = append(slice6, strings.Join(slice[i*4:i*4+4], ""))
}
ip := net.ParseIP(strings.Join(slice6, ":")).To16()
if ip == nil {
return ""
}
return ip.String()
}
const (
// v4arpaSuffix is the reverse tree suffix for v4 IP addresses.
v4arpaSuffix = ".in-addr.arpa."
// v6arpaSuffix is the reverse tree suffix for v6 IP addresses.
v6arpaSuffix = ".ip6.arpa."
)
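The conversion is symmetric with the examples in the comments above; a small sketch:

```go
package dnsutil

import "fmt"

func ExampleExtractAddressFromReverse() {
	// IPv4 reverse name -> 176.58.119.54
	fmt.Println(ExtractAddressFromReverse("54.119.58.176.in-addr.arpa."))
	// IPv6 reverse name -> 2001:db8::567:89ab
	fmt.Println(ExtractAddressFromReverse("b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa."))
}
```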


@ -0,0 +1,20 @@
package dnsutil
import (
"errors"
"github.com/miekg/dns"
)
// TrimZone removes the zone component from q. It returns the trimmed
// name or an error if zone is longer than qname. The trimmed name will be returned
// without a trailing dot.
func TrimZone(q string, z string) (string, error) {
zl := dns.CountLabel(z)
i, ok := dns.PrevLabel(q, zl)
if ok || i-1 < 0 {
return "", errors.New("trimzone: overshot qname: " + q + "for zone " + z)
}
// This includes the '.', remove on return
return q[:i-1], nil
}
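For example (hypothetical names), trimming the zone from a query name leaves only the host labels:

```go
package dnsutil

import "fmt"

func ExampleTrimZone() {
	host, err := TrimZone("bucket.federation.example.org.", "federation.example.org.")
	fmt.Println(host, err) // bucket <nil>
}
```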

vendor/github.com/coreos/etcd/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/coreos/etcd/NOTICE generated vendored Normal file

@ -0,0 +1,5 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).

vendor/github.com/coreos/etcd/client/README.md generated vendored Normal file

@ -0,0 +1,117 @@
# etcd/client
etcd/client is the Go client library for etcd.
[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client)
etcd uses the `cmd/vendor` directory to store external dependencies, which are
compiled into the etcd release binaries. `client` can be imported without
vendoring. For full compatibility, it is recommended to vendor builds using
etcd's vendored packages, using tools like godep, as described in
[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
For more detail, please read the [Go vendor design](https://golang.org/s/go15vendor).
## Install
```bash
go get github.com/coreos/etcd/client
```
## Usage
```go
package main
import (
"log"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/client"
)
func main() {
cfg := client.Config{
Endpoints: []string{"http://127.0.0.1:2379"},
Transport: client.DefaultTransport,
// set timeout per request to fail fast when the target endpoint is unavailable
HeaderTimeoutPerRequest: time.Second,
}
c, err := client.New(cfg)
if err != nil {
log.Fatal(err)
}
kapi := client.NewKeysAPI(c)
// set "/foo" key with "bar" value
log.Print("Setting '/foo' key with 'bar' value")
resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
if err != nil {
log.Fatal(err)
} else {
// print common key info
log.Printf("Set is done. Metadata is %q\n", resp)
}
// get "/foo" key's value
log.Print("Getting '/foo' key value")
resp, err = kapi.Get(context.Background(), "/foo", nil)
if err != nil {
log.Fatal(err)
} else {
// print common key info
log.Printf("Get is done. Metadata is %q\n", resp)
// print value
log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
}
}
```
## Error Handling
etcd client might return three types of errors.
- context error
Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal errors the API call has already encountered.
- cluster error
Each API call tries to send the request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
- response error
If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
Here is the example code to handle client errors:
```go
cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
c, err := client.New(cfg)
if err != nil {
log.Fatal(err)
}
kapi := client.NewKeysAPI(c)
resp, err := kapi.Set(ctx, "test", "bar", nil)
if err != nil {
if err == context.Canceled {
// ctx is canceled by another routine
} else if err == context.DeadlineExceeded {
// ctx is attached with a deadline and it exceeded
} else if cerr, ok := err.(*client.ClusterError); ok {
// process (cerr.Errors)
} else {
// bad cluster endpoints, which are not etcd servers
}
}
```
## Caveat
1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process.
2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all of the errors it encountered.
3. The default etcd/client currently cannot handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help in this scenario because the operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't a high priority because a real-life case in which a server is stopped but the connection is kept alive hasn't been brought to our attention.
4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
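For caveat 4, a quorum (linearized) read is the usual workaround; a minimal sketch, assuming a reachable endpoint at `http://127.0.0.1:2379` and a hypothetical key `/foo`:

```go
package main

import (
	"log"

	"golang.org/x/net/context"

	"github.com/coreos/etcd/client"
)

func main() {
	c, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)
	// Quorum routes the read through the cluster leader, so an isolated
	// member cannot serve stale data.
	resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%q = %q", resp.Node.Key, resp.Node.Value)
}
```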

vendor/github.com/coreos/etcd/client/auth_role.go generated vendored Normal file

@ -0,0 +1,237 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"golang.org/x/net/context"
)
type Role struct {
Role string `json:"role"`
Permissions Permissions `json:"permissions"`
Grant *Permissions `json:"grant,omitempty"`
Revoke *Permissions `json:"revoke,omitempty"`
}
type Permissions struct {
KV rwPermission `json:"kv"`
}
type rwPermission struct {
Read []string `json:"read"`
Write []string `json:"write"`
}
type PermissionType int
const (
ReadPermission PermissionType = iota
WritePermission
ReadWritePermission
)
// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
// interact with etcd's role creation and modification features.
func NewAuthRoleAPI(c Client) AuthRoleAPI {
return &httpAuthRoleAPI{
client: c,
}
}
type AuthRoleAPI interface {
// AddRole adds a role.
AddRole(ctx context.Context, role string) error
// RemoveRole removes a role.
RemoveRole(ctx context.Context, role string) error
// GetRole retrieves role details.
GetRole(ctx context.Context, role string) (*Role, error)
// GrantRoleKV grants a role some permission prefixes for the KV store.
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// ListRoles lists roles.
ListRoles(ctx context.Context) ([]string, error)
}
type httpAuthRoleAPI struct {
client httpClient
}
type authRoleAPIAction struct {
verb string
name string
role *Role
}
type authRoleAPIList struct{}
func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", l.name)
if l.role == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.role)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var roleList struct {
Roles []Role `json:"roles"`
}
if err = json.Unmarshal(body, &roleList); err != nil {
return nil, err
}
ret := make([]string, 0, len(roleList.Roles))
for _, r := range roleList.Roles {
ret = append(ret, r.Role)
}
return ret, nil
}
func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
role := &Role{
Role: rolename,
}
return r.addRemoveRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
return r.addRemoveRole(ctx, &authRoleAPIAction{
verb: "DELETE",
name: rolename,
})
}
func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
return r.modRole(ctx, &authRoleAPIAction{
verb: "GET",
name: rolename,
})
}
func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
var out rwPermission
switch permType {
case ReadPermission:
out.Read = prefixes
case WritePermission:
out.Write = prefixes
case ReadWritePermission:
out.Read = prefixes
out.Write = prefixes
}
return out
}
func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Grant: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Revoke: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var role Role
if err = json.Unmarshal(body, &role); err != nil {
return nil, err
}
return &role, nil
}
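A caller-side sketch of the role API defined above (the role name and key prefix are hypothetical; `c` is a `client.Client` created as in the README):

```go
package example

import (
	"golang.org/x/net/context"

	"github.com/coreos/etcd/client"
)

// grantBucketRole creates a role and grants it read/write access to a key prefix.
func grantBucketRole(ctx context.Context, c client.Client) error {
	roleAPI := client.NewAuthRoleAPI(c)
	if err := roleAPI.AddRole(ctx, "bucket-admin"); err != nil {
		return err
	}
	_, err := roleAPI.GrantRoleKV(ctx, "bucket-admin", []string{"/skydns/*"}, client.ReadWritePermission)
	return err
}
```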

vendor/github.com/coreos/etcd/client/auth_user.go generated vendored Normal file

@ -0,0 +1,320 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"path"
"golang.org/x/net/context"
)
var (
defaultV2AuthPrefix = "/v2/auth"
)
type User struct {
User string `json:"user"`
Password string `json:"password,omitempty"`
Roles []string `json:"roles"`
Grant []string `json:"grant,omitempty"`
Revoke []string `json:"revoke,omitempty"`
}
// userListEntry is the user representation given by the server for ListUsers
type userListEntry struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
type UserRoles struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
func v2AuthURL(ep url.URL, action string, name string) *url.URL {
if name != "" {
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
return &ep
}
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
return &ep
}
// NewAuthAPI constructs a new AuthAPI that uses HTTP to
// interact with etcd's general auth features.
func NewAuthAPI(c Client) AuthAPI {
return &httpAuthAPI{
client: c,
}
}
type AuthAPI interface {
// Enable auth.
Enable(ctx context.Context) error
// Disable auth.
Disable(ctx context.Context) error
}
type httpAuthAPI struct {
client httpClient
}
func (s *httpAuthAPI) Enable(ctx context.Context) error {
return s.enableDisable(ctx, &authAPIAction{"PUT"})
}
func (s *httpAuthAPI) Disable(ctx context.Context) error {
return s.enableDisable(ctx, &authAPIAction{"DELETE"})
}
func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
resp, body, err := s.client.Do(ctx, req)
if err != nil {
return err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
type authAPIAction struct {
verb string
}
func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "enable", "")
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
type authError struct {
Message string `json:"message"`
Code int `json:"-"`
}
func (e authError) Error() string {
return e.Message
}
// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
// interact with etcd's user creation and modification features.
func NewAuthUserAPI(c Client) AuthUserAPI {
return &httpAuthUserAPI{
client: c,
}
}
type AuthUserAPI interface {
// AddUser adds a user.
AddUser(ctx context.Context, username string, password string) error
// RemoveUser removes a user.
RemoveUser(ctx context.Context, username string) error
// GetUser retrieves user details.
GetUser(ctx context.Context, username string) (*User, error)
// GrantUser grants a user some permission roles.
GrantUser(ctx context.Context, username string, roles []string) (*User, error)
// RevokeUser revokes some permission roles from a user.
RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
// ChangePassword changes the user's password.
ChangePassword(ctx context.Context, username string, password string) (*User, error)
// ListUsers lists the users.
ListUsers(ctx context.Context) ([]string, error)
}
type httpAuthUserAPI struct {
client httpClient
}
type authUserAPIAction struct {
verb string
username string
user *User
}
type authUserAPIList struct{}
func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "users", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "users", l.username)
if l.user == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.user)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
resp, body, err := u.client.Do(ctx, &authUserAPIList{})
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var userList struct {
Users []userListEntry `json:"users"`
}
if err = json.Unmarshal(body, &userList); err != nil {
return nil, err
}
ret := make([]string, 0, len(userList.Users))
for _, u := range userList.Users {
ret = append(ret, u.User)
}
return ret, nil
}
func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
user := &User{
User: username,
Password: password,
}
return u.addRemoveUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
return u.addRemoveUser(ctx, &authUserAPIAction{
verb: "DELETE",
username: username,
})
}
func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
resp, body, err := u.client.Do(ctx, req)
if err != nil {
return err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
return u.modUser(ctx, &authUserAPIAction{
verb: "GET",
username: username,
})
}
func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
user := &User{
User: username,
Grant: roles,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
user := &User{
User: username,
Revoke: roles,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
user := &User{
User: username,
Password: password,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
resp, body, err := u.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var user User
if err = json.Unmarshal(body, &user); err != nil {
var userR UserRoles
if urerr := json.Unmarshal(body, &userR); urerr != nil {
return nil, err
}
user.User = userR.User
for _, r := range userR.Roles {
user.Roles = append(user.Roles, r.Role)
}
}
return &user, nil
}
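And a matching caller-side sketch for the user API above (user name, password, and role are hypothetical):

```go
package example

import (
	"golang.org/x/net/context"

	"github.com/coreos/etcd/client"
)

// addFederationUser creates a user and attaches an existing role to it.
func addFederationUser(ctx context.Context, c client.Client) error {
	userAPI := client.NewAuthUserAPI(c)
	if err := userAPI.AddUser(ctx, "minio", "minio-secret"); err != nil {
		return err
	}
	_, err := userAPI.GrantUser(ctx, "minio", []string{"bucket-admin"})
	return err
}
```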

vendor/github.com/coreos/etcd/client/cancelreq.go generated vendored Normal file

@ -0,0 +1,18 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// borrowed from golang/net/context/ctxhttp/cancelreq.go
package client
import "net/http"
func requestCanceler(tr CancelableTransport, req *http.Request) func() {
ch := make(chan struct{})
req.Cancel = ch
return func() {
close(ch)
}
}

vendor/github.com/coreos/etcd/client/client.go generated vendored Normal file

@ -0,0 +1,704 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"sort"
"strconv"
"sync"
"time"
"github.com/coreos/etcd/version"
"golang.org/x/net/context"
)
var (
ErrNoEndpoints = errors.New("client: no endpoints available")
ErrTooManyRedirects = errors.New("client: too many redirects")
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
// oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
// that Do() will not retry a request
oneShotCtxValue interface{}
)
var DefaultRequestTimeout = 5 * time.Second
var DefaultTransport CancelableTransport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
}
type EndpointSelectionMode int
const (
// EndpointSelectionRandom is the default value of the 'SelectionMode'.
// As the name implies, the client object will pick a node from the members
// of the cluster in a random fashion. If the cluster has three members, A, B,
// and C, the client picks any node from its three members as its request
// destination.
EndpointSelectionRandom EndpointSelectionMode = iota
// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
// requests are sent directly to the cluster leader. This reduces
// forwarding roundtrips compared to making requests to etcd followers
// who then forward them to the cluster leader. In the event of a leader
// failure, however, clients configured this way cannot prioritize among
// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
// maintain its knowledge of current cluster state.
//
// This mode should be used with Client.AutoSync().
EndpointSelectionPrioritizeLeader
)
type Config struct {
// Endpoints defines a set of URLs (schemes, hosts and ports only)
// that can be used to communicate with a logical etcd cluster. For
// example, a three-node cluster could be provided like so:
//
// Endpoints: []string{
// "http://node1.example.com:2379",
// "http://node2.example.com:2379",
// "http://node3.example.com:2379",
// }
//
// If multiple endpoints are provided, the Client will attempt to
// use them all in the event that one or more of them are unusable.
//
// If Client.Sync is ever called, the Client may cache an alternate
// set of endpoints to continue operation.
Endpoints []string
// Transport is used by the Client to drive HTTP requests. If not
// provided, DefaultTransport will be used.
Transport CancelableTransport
// CheckRedirect specifies the policy for handling HTTP redirects.
// If CheckRedirect is not nil, the Client calls it before
// following an HTTP redirect. The sole argument is the number of
// requests that have already been made. If CheckRedirect returns
// an error, Client.Do will not make any further requests and return
// the error back to the caller.
//
// If CheckRedirect is nil, the Client uses its default policy,
// which is to stop after 10 consecutive requests.
CheckRedirect CheckRedirectFunc
// Username specifies the user credential to add as an authorization header
Username string
// Password is the password for the specified user to add as an authorization header
// to the request.
Password string
// HeaderTimeoutPerRequest specifies the time limit to wait for response
// header in a single request made by the Client. The timeout includes
// connection time, any redirects, and header wait time.
//
// For non-watch GET request, server returns the response body immediately.
// For PUT/POST/DELETE request, server will attempt to commit request
// before responding, which is expected to take `100ms + 2 * RTT`.
// For watch request, server returns the header immediately to notify Client
// watch start. But if server is behind some kind of proxy, the response
// header may be cached at proxy, and Client cannot rely on this behavior.
//
// In particular, a wait request will ignore this timeout.
//
// One API call may send multiple requests to different etcd servers until it
// succeeds. Use context of the API to specify the overall timeout.
//
// A HeaderTimeoutPerRequest of zero means no timeout.
HeaderTimeoutPerRequest time.Duration
// SelectionMode is an EndpointSelectionMode enum that specifies the
// policy for choosing the etcd cluster node to which requests are sent.
SelectionMode EndpointSelectionMode
}
func (cfg *Config) transport() CancelableTransport {
if cfg.Transport == nil {
return DefaultTransport
}
return cfg.Transport
}
func (cfg *Config) checkRedirect() CheckRedirectFunc {
if cfg.CheckRedirect == nil {
return DefaultCheckRedirect
}
return cfg.CheckRedirect
}
// CancelableTransport mimics net/http.Transport, but requires that
// the object also support request cancellation.
type CancelableTransport interface {
http.RoundTripper
CancelRequest(req *http.Request)
}
type CheckRedirectFunc func(via int) error
// DefaultCheckRedirect follows up to 10 redirects, but no more.
var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
if via > 10 {
return ErrTooManyRedirects
}
return nil
}
type Client interface {
// Sync updates the internal cache of the etcd cluster's membership.
Sync(context.Context) error
// AutoSync periodically calls Sync() every given interval.
// The recommended sync interval is 10 seconds to 1 minute, which does
// not add much overhead to the server and lets the client catch up with
// cluster changes in time.
//
// The example to use it:
//
// for {
// err := client.AutoSync(ctx, 10*time.Second)
// if err == context.DeadlineExceeded || err == context.Canceled {
// break
// }
// log.Print(err)
// }
AutoSync(context.Context, time.Duration) error
// Endpoints returns a copy of the current set of API endpoints used
// by Client to resolve HTTP requests. If Sync has ever been called,
// this may differ from the initial Endpoints provided in the Config.
Endpoints() []string
// SetEndpoints sets the set of API endpoints used by Client to resolve
// HTTP requests. If the given endpoints are not valid, an error will be
// returned.
SetEndpoints(eps []string) error
// GetVersion retrieves the current etcd server and cluster version
GetVersion(ctx context.Context) (*version.Versions, error)
httpClient
}
func New(cfg Config) (Client, error) {
c := &httpClusterClient{
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
selectionMode: cfg.SelectionMode,
}
if cfg.Username != "" {
c.credentials = &credentials{
username: cfg.Username,
password: cfg.Password,
}
}
if err := c.SetEndpoints(cfg.Endpoints); err != nil {
return nil, err
}
return c, nil
}
type httpClient interface {
Do(context.Context, httpAction) (*http.Response, []byte, error)
}
func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
return func(ep url.URL) httpClient {
return &redirectFollowingHTTPClient{
checkRedirect: cr,
client: &simpleHTTPClient{
transport: tr,
endpoint: ep,
headerTimeout: headerTimeout,
},
}
}
}
type credentials struct {
username string
password string
}
type httpClientFactory func(url.URL) httpClient
type httpAction interface {
HTTPRequest(url.URL) *http.Request
}
type httpClusterClient struct {
clientFactory httpClientFactory
endpoints []url.URL
pinned int
credentials *credentials
sync.RWMutex
rand *rand.Rand
selectionMode EndpointSelectionMode
}
func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
ceps := make([]url.URL, len(eps))
copy(ceps, eps)
// To perform a lookup on the new endpoint list without using the current
// client, we'll copy it
clientCopy := &httpClusterClient{
clientFactory: c.clientFactory,
credentials: c.credentials,
rand: c.rand,
pinned: 0,
endpoints: ceps,
}
mAPI := NewMembersAPI(clientCopy)
leader, err := mAPI.Leader(ctx)
if err != nil {
return "", err
}
if len(leader.ClientURLs) == 0 {
return "", ErrNoLeaderEndpoint
}
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
}
func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
if len(eps) == 0 {
return []url.URL{}, ErrNoEndpoints
}
neps := make([]url.URL, len(eps))
for i, ep := range eps {
u, err := url.Parse(ep)
if err != nil {
return []url.URL{}, err
}
neps[i] = *u
}
return neps, nil
}
func (c *httpClusterClient) SetEndpoints(eps []string) error {
neps, err := c.parseEndpoints(eps)
if err != nil {
return err
}
c.Lock()
defer c.Unlock()
c.endpoints = shuffleEndpoints(c.rand, neps)
// We're not doing anything for PrioritizeLeader here. This is
// because we don't have a context, so we can't call getLeaderEndpoint.
// However, if you're using PrioritizeLeader, you've already been told
// to regularly call Sync, where we do have a ctx and can figure out the
// leader. PrioritizeLeader is also quite a loose guarantee, so deal
// with it.
c.pinned = 0
return nil
}
func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
action := act
c.RLock()
leps := len(c.endpoints)
eps := make([]url.URL, leps)
n := copy(eps, c.endpoints)
pinned := c.pinned
if c.credentials != nil {
action = &authedAction{
act: act,
credentials: *c.credentials,
}
}
c.RUnlock()
if leps == 0 {
return nil, nil, ErrNoEndpoints
}
if leps != n {
return nil, nil, errors.New("unable to pick endpoint: copy failed")
}
var resp *http.Response
var body []byte
var err error
cerr := &ClusterError{}
isOneShot := ctx.Value(&oneShotCtxValue) != nil
for i := pinned; i < leps+pinned; i++ {
k := i % leps
hc := c.clientFactory(eps[k])
resp, body, err = hc.Do(ctx, action)
if err != nil {
cerr.Errors = append(cerr.Errors, err)
if err == ctx.Err() {
return nil, nil, ctx.Err()
}
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
} else if resp.StatusCode/100 == 5 {
switch resp.StatusCode {
case http.StatusInternalServerError, http.StatusServiceUnavailable:
// TODO: make sure this is a no leader response
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
}
err = cerr.Errors[0]
}
if err != nil {
if !isOneShot {
continue
}
c.Lock()
c.pinned = (k + 1) % leps
c.Unlock()
return nil, nil, err
}
if k != pinned {
c.Lock()
c.pinned = k
c.Unlock()
}
return resp, body, nil
}
return nil, nil, cerr
}
func (c *httpClusterClient) Endpoints() []string {
c.RLock()
defer c.RUnlock()
eps := make([]string, len(c.endpoints))
for i, ep := range c.endpoints {
eps[i] = ep.String()
}
return eps
}
func (c *httpClusterClient) Sync(ctx context.Context) error {
mAPI := NewMembersAPI(c)
ms, err := mAPI.List(ctx)
if err != nil {
return err
}
var eps []string
for _, m := range ms {
eps = append(eps, m.ClientURLs...)
}
neps, err := c.parseEndpoints(eps)
if err != nil {
return err
}
npin := 0
switch c.selectionMode {
case EndpointSelectionRandom:
c.RLock()
eq := endpointsEqual(c.endpoints, neps)
c.RUnlock()
if eq {
return nil
}
// When the items in the endpoint list change, we choose a new pin
neps = shuffleEndpoints(c.rand, neps)
case EndpointSelectionPrioritizeLeader:
nle, err := c.getLeaderEndpoint(ctx, neps)
if err != nil {
return ErrNoLeaderEndpoint
}
for i, n := range neps {
if n.String() == nle {
npin = i
break
}
}
default:
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
}
c.Lock()
defer c.Unlock()
c.endpoints = neps
c.pinned = npin
return nil
}
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
err := c.Sync(ctx)
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
}
func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
act := &getAction{Prefix: "/version"}
resp, body, err := c.Do(ctx, act)
if err != nil {
return nil, err
}
switch resp.StatusCode {
case http.StatusOK:
if len(body) == 0 {
return nil, ErrEmptyBody
}
var vresp version.Versions
if err := json.Unmarshal(body, &vresp); err != nil {
return nil, ErrInvalidJSON
}
return &vresp, nil
default:
var etcdErr Error
if err := json.Unmarshal(body, &etcdErr); err != nil {
return nil, ErrInvalidJSON
}
return nil, etcdErr
}
}
type roundTripResponse struct {
resp *http.Response
err error
}
type simpleHTTPClient struct {
transport CancelableTransport
endpoint url.URL
headerTimeout time.Duration
}
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
req := act.HTTPRequest(c.endpoint)
if err := printcURL(req); err != nil {
return nil, nil, err
}
isWait := false
if req != nil && req.URL != nil {
ws := req.URL.Query().Get("wait")
if len(ws) != 0 {
var err error
isWait, err = strconv.ParseBool(ws)
if err != nil {
return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
}
}
}
var hctx context.Context
var hcancel context.CancelFunc
if !isWait && c.headerTimeout > 0 {
hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
} else {
hctx, hcancel = context.WithCancel(ctx)
}
defer hcancel()
reqcancel := requestCanceler(c.transport, req)
rtchan := make(chan roundTripResponse, 1)
go func() {
resp, err := c.transport.RoundTrip(req)
rtchan <- roundTripResponse{resp: resp, err: err}
close(rtchan)
}()
var resp *http.Response
var err error
select {
case rtresp := <-rtchan:
resp, err = rtresp.resp, rtresp.err
case <-hctx.Done():
// cancel and wait for request to actually exit before continuing
reqcancel()
rtresp := <-rtchan
resp = rtresp.resp
switch {
case ctx.Err() != nil:
err = ctx.Err()
case hctx.Err() != nil:
err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
default:
panic("failed to get error from context")
}
}
// always check for resp nil-ness to deal with possible
// race conditions between channels above
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
if err != nil {
return nil, nil, err
}
var body []byte
done := make(chan struct{})
go func() {
body, err = ioutil.ReadAll(resp.Body)
done <- struct{}{}
}()
select {
case <-ctx.Done():
resp.Body.Close()
<-done
return nil, nil, ctx.Err()
case <-done:
}
return resp, body, err
}
type authedAction struct {
act httpAction
credentials credentials
}
func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
r := a.act.HTTPRequest(url)
r.SetBasicAuth(a.credentials.username, a.credentials.password)
return r
}
type redirectFollowingHTTPClient struct {
client httpClient
checkRedirect CheckRedirectFunc
}
func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
next := act
for i := 0; i < 100; i++ {
if i > 0 {
if err := r.checkRedirect(i); err != nil {
return nil, nil, err
}
}
resp, body, err := r.client.Do(ctx, next)
if err != nil {
return nil, nil, err
}
if resp.StatusCode/100 == 3 {
hdr := resp.Header.Get("Location")
if hdr == "" {
return nil, nil, fmt.Errorf("Location header not set")
}
loc, err := url.Parse(hdr)
if err != nil {
return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
}
next = &redirectedHTTPAction{
action: act,
location: *loc,
}
continue
}
return resp, body, nil
}
return nil, nil, errTooManyRedirectChecks
}
type redirectedHTTPAction struct {
action httpAction
location url.URL
}
func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
orig := r.action.HTTPRequest(ep)
orig.URL = &r.location
return orig
}
func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
p := r.Perm(len(eps))
neps := make([]url.URL, len(eps))
for i, k := range p {
neps[i] = eps[k]
}
return neps
}
func endpointsEqual(left, right []url.URL) bool {
if len(left) != len(right) {
return false
}
sLeft := make([]string, len(left))
sRight := make([]string, len(right))
for i, l := range left {
sLeft[i] = l.String()
}
for i, r := range right {
sRight[i] = r.String()
}
sort.Strings(sLeft)
sort.Strings(sRight)
for i := range sLeft {
if sLeft[i] != sRight[i] {
return false
}
}
return true
}
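The SelectionMode and AutoSync pieces above combine as in the following sketch (endpoints are hypothetical): the leader is only discovered during Sync, so the periodic AutoSync keeps the pinned endpoint pointing at the current leader.

```go
package example

import (
	"log"
	"time"

	"golang.org/x/net/context"

	"github.com/coreos/etcd/client"
)

// newLeaderClient builds a client that prefers the cluster leader and keeps
// its membership view fresh with AutoSync.
func newLeaderClient() (client.Client, error) {
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://etcd1:2379", "http://etcd2:2379", "http://etcd3:2379"},
		HeaderTimeoutPerRequest: time.Second,
		SelectionMode:           client.EndpointSelectionPrioritizeLeader,
	})
	if err != nil {
		return nil, err
	}
	go func() {
		// Resync membership (and the pinned leader) every 10 seconds.
		if err := c.AutoSync(context.Background(), 10*time.Second); err != nil {
			log.Print(err)
		}
	}()
	return c, nil
}
```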

vendor/github.com/coreos/etcd/client/cluster_error.go generated vendored Normal file

@ -0,0 +1,37 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import "fmt"
type ClusterError struct {
Errors []error
}
func (ce *ClusterError) Error() string {
s := ErrClusterUnavailable.Error()
for i, e := range ce.Errors {
s += fmt.Sprintf("; error #%d: %s\n", i, e)
}
return s
}
func (ce *ClusterError) Detail() string {
s := ""
for i, e := range ce.Errors {
s += fmt.Sprintf("error #%d: %s\n", i, e)
}
return s
}

vendor/github.com/coreos/etcd/client/curl.go generated vendored Normal file

@ -0,0 +1,70 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"os"
)
var (
cURLDebug = false
)
func EnablecURLDebug() {
cURLDebug = true
}
func DisablecURLDebug() {
cURLDebug = false
}
// printcURL prints the cURL equivalent request to stderr.
// It returns an error if the body of the request cannot
// be read.
// The caller MUST cancel the request if there is an error.
func printcURL(req *http.Request) error {
if !cURLDebug {
return nil
}
var (
command string
b []byte
err error
)
if req.URL != nil {
command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
}
if req.Body != nil {
b, err = ioutil.ReadAll(req.Body)
if err != nil {
return err
}
command += fmt.Sprintf(" -d %q", string(b))
}
fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
// reset body
body := bytes.NewBuffer(b)
req.Body = ioutil.NopCloser(body)
return nil
}

40
vendor/github.com/coreos/etcd/client/discover.go generated vendored Normal file
View File

@ -0,0 +1,40 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"github.com/coreos/etcd/pkg/srv"
)
// Discoverer is an interface that wraps the Discover method.
type Discoverer interface {
// Discover looks up the etcd servers for the domain.
Discover(domain string) ([]string, error)
}
type srvDiscover struct{}
// NewSRVDiscover constructs a new Discoverer that uses the stdlib to look up SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}
func (d *srvDiscover) Discover(domain string) ([]string, error) {
srvs, err := srv.GetClient("etcd-client", domain)
if err != nil {
return nil, err
}
return srvs.Endpoints, nil
}
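A hedged sketch of the SRV-based Discoverer above; the domain is hypothetical and only illustrates the _etcd-client._tcp / _etcd-client-ssl._tcp lookup performed by srv.GetClient:
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
)

func main() {
	d := client.NewSRVDiscover()

	// Resolves _etcd-client-ssl._tcp and _etcd-client._tcp SRV records for
	// the domain and returns the discovered client endpoints.
	endpoints, err := d.Discover("example.com") // hypothetical domain
	if err != nil {
		log.Fatalln(err)
	}
	for _, ep := range endpoints {
		fmt.Println(ep)
	}
}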

73
vendor/github.com/coreos/etcd/client/doc.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package client provides bindings for the etcd APIs.
Create a Config and exchange it for a Client:
import (
"net/http"
"github.com/coreos/etcd/client"
"golang.org/x/net/context"
)
cfg := client.Config{
Endpoints: []string{"http://127.0.0.1:2379"},
Transport: DefaultTransport,
}
c, err := client.New(cfg)
if err != nil {
// handle error
}
Clients are safe for concurrent use by multiple goroutines.
Create a KeysAPI using the Client, then use it to interact with etcd:
kAPI := client.NewKeysAPI(c)
// create a new key /foo with the value "bar"
_, err = kAPI.Create(context.Background(), "/foo", "bar")
if err != nil {
// handle error
}
// delete the newly created key only if the value is still "bar"
_, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
if err != nil {
// handle error
}
Use a custom context to set timeouts on your operations:
import "time"
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// set a new key, ignoring its previous state
_, err := kAPI.Set(ctx, "/ping", "pong", nil)
if err != nil {
if err == context.DeadlineExceeded {
// request took longer than 5s
} else {
// handle error
}
}
*/
package client
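As a hedged complement to the package documentation, a minimal program that builds a client from a comma-separated endpoint list and sets a key under a timeout; the endpoints and key are illustrative only, not this commit's configuration schema:
package main

import (
	"log"
	"strings"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	// Hypothetical comma-separated endpoint list, e.g. read from configuration.
	endpoints := strings.Split("http://127.0.0.1:2379,http://127.0.0.1:4001", ",")

	c, err := client.New(client.Config{
		Endpoints: endpoints,
		Transport: client.DefaultTransport,
	})
	if err != nil {
		log.Fatalln(err)
	}
	kapi := client.NewKeysAPI(c)

	// Bound the request with a timeout, as the package documentation suggests.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := kapi.Set(ctx, "/ping", "pong", nil); err != nil {
		log.Fatalln(err)
	}
}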

1087
vendor/github.com/coreos/etcd/client/keys.generated.go generated vendored Normal file

File diff suppressed because it is too large

682
vendor/github.com/coreos/etcd/client/keys.go generated vendored Normal file
View File

@ -0,0 +1,682 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/coreos/etcd/pkg/pathutil"
"github.com/ugorji/go/codec"
"golang.org/x/net/context"
)
const (
ErrorCodeKeyNotFound = 100
ErrorCodeTestFailed = 101
ErrorCodeNotFile = 102
ErrorCodeNotDir = 104
ErrorCodeNodeExist = 105
ErrorCodeRootROnly = 107
ErrorCodeDirNotEmpty = 108
ErrorCodeUnauthorized = 110
ErrorCodePrevValueRequired = 201
ErrorCodeTTLNaN = 202
ErrorCodeIndexNaN = 203
ErrorCodeInvalidField = 209
ErrorCodeInvalidForm = 210
ErrorCodeRaftInternal = 300
ErrorCodeLeaderElect = 301
ErrorCodeWatcherCleared = 400
ErrorCodeEventIndexCleared = 401
)
type Error struct {
Code int `json:"errorCode"`
Message string `json:"message"`
Cause string `json:"cause"`
Index uint64 `json:"index"`
}
func (e Error) Error() string {
return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
}
var (
ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.")
ErrEmptyBody = errors.New("client: response body is empty")
)
// PrevExistType is used to define an existence condition when setting
// or deleting Nodes.
type PrevExistType string
const (
PrevIgnore = PrevExistType("")
PrevExist = PrevExistType("true")
PrevNoExist = PrevExistType("false")
)
var (
defaultV2KeysPrefix = "/v2/keys"
)
// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
// API over HTTP.
func NewKeysAPI(c Client) KeysAPI {
return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
}
// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
// to provide a custom base URL path. This should only be used in
// very rare cases.
func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
return &httpKeysAPI{
client: c,
prefix: p,
}
}
type KeysAPI interface {
// Get retrieves a set of Nodes from etcd
Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
// Set assigns a new value to a Node identified by a given key. The caller
// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
// then value is ignored.
Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
// Delete removes a Node identified by the given key, optionally destroying
// all of its children as well. The caller may define a set of required
// conditions in a DeleteOptions object.
Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
// Create is an alias for Set w/ PrevExist=false
Create(ctx context.Context, key, value string) (*Response, error)
// CreateInOrder is used to atomically create in-order keys within the given directory.
CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
// Update is an alias for Set w/ PrevExist=true
Update(ctx context.Context, key, value string) (*Response, error)
// Watcher builds a new Watcher targeted at a specific Node identified
// by the given key. The Watcher may be configured at creation time
// through a WatcherOptions object. The returned Watcher is designed
// to emit events that happen to a Node, and optionally to its children.
Watcher(key string, opts *WatcherOptions) Watcher
}
type WatcherOptions struct {
// AfterIndex defines the index after-which the Watcher should
// start emitting events. For example, if a value of 5 is
// provided, the first event will have an index >= 6.
//
// Setting AfterIndex to 0 (default) means that the Watcher
// should start watching for events starting at the current
// index, whatever that may be.
AfterIndex uint64
// Recursive specifies whether or not the Watcher should emit
// events that occur in children of the given keyspace. If set
// to false (default), events will be limited to those that
// occur for the exact key.
Recursive bool
}
type CreateInOrderOptions struct {
// TTL defines a period of time after-which the Node should
// expire and no longer exist. Values <= 0 are ignored. Given
// that the zero-value is ignored, TTL cannot be used to set
// a TTL of 0.
TTL time.Duration
}
type SetOptions struct {
// PrevValue specifies what the current value of the Node must
// be in order for the Set operation to succeed.
//
// Leaving this field empty means that the caller wishes to
// ignore the current value of the Node. This cannot be used
// to compare the Node's current value to an empty string.
//
// PrevValue is ignored if Dir=true
PrevValue string
// PrevIndex indicates what the current ModifiedIndex of the
// Node must be in order for the Set operation to succeed.
//
// If PrevIndex is set to 0 (default), no comparison is made.
PrevIndex uint64
// PrevExist specifies whether the Node must currently exist
// (PrevExist) or not (PrevNoExist). If the caller does not
// care about existence, set PrevExist to PrevIgnore, or simply
// leave it unset.
PrevExist PrevExistType
// TTL defines a period of time after-which the Node should
// expire and no longer exist. Values <= 0 are ignored. Given
// that the zero-value is ignored, TTL cannot be used to set
// a TTL of 0.
TTL time.Duration
// Refresh set to true means a TTL value can be updated
// without firing a watch or changing the node value. A
// value must not be provided when refreshing a key.
Refresh bool
// Dir specifies whether or not this Node should be created as a directory.
Dir bool
// NoValueOnSuccess specifies whether the response contains the current value of the Node.
// If set, the response will only contain the current value when the request fails.
NoValueOnSuccess bool
}
type GetOptions struct {
// Recursive defines whether or not all children of the Node
// should be returned.
Recursive bool
// Sort instructs the server whether or not to sort the Nodes.
// If true, the Nodes are sorted alphabetically by key in
// ascending order (A to z). If false (default), the Nodes will
// not be sorted and the ordering used should not be considered
// predictable.
Sort bool
// Quorum specifies whether it gets the latest committed value that
// has been applied in quorum of members, which ensures external
// consistency (or linearizability).
Quorum bool
}
type DeleteOptions struct {
// PrevValue specifies what the current value of the Node must
// be in order for the Delete operation to succeed.
//
// Leaving this field empty means that the caller wishes to
// ignore the current value of the Node. This cannot be used
// to compare the Node's current value to an empty string.
PrevValue string
// PrevIndex indicates what the current ModifiedIndex of the
// Node must be in order for the Delete operation to succeed.
//
// If PrevIndex is set to 0 (default), no comparison is made.
PrevIndex uint64
// Recursive defines whether or not all children of the Node
// should be deleted. If set to true, all children of the Node
// identified by the given key will be deleted. If left unset
// or explicitly set to false, only a single Node will be
// deleted.
Recursive bool
// Dir specifies whether or not this Node should be removed as a directory.
Dir bool
}
type Watcher interface {
// Next blocks until an etcd event occurs, then returns a Response
// representing that event. The behavior of Next depends on the
// WatcherOptions used to construct the Watcher. Next is designed to
// be called repeatedly, each time blocking until a subsequent event
// is available.
//
// If the provided context is cancelled, Next will return a non-nil
// error. Any other failures encountered while waiting for the next
// event (connection issues, deserialization failures, etc) will
// also result in a non-nil error.
Next(context.Context) (*Response, error)
}
type Response struct {
// Action is the name of the operation that occurred. Possible values
// include get, set, delete, update, create, compareAndSwap,
// compareAndDelete and expire.
Action string `json:"action"`
// Node represents the state of the relevant etcd Node.
Node *Node `json:"node"`
// PrevNode represents the previous state of the Node. PrevNode is non-nil
// only if the Node existed before the action occurred and the action
// caused a change to the Node.
PrevNode *Node `json:"prevNode"`
// Index holds the cluster-level index at the time the Response was generated.
// This index is not tied to the Node(s) contained in this Response.
Index uint64 `json:"-"`
// ClusterID holds the cluster-level ID reported by the server. This
// should be different for different etcd clusters.
ClusterID string `json:"-"`
}
type Node struct {
// Key represents the unique location of this Node (e.g. "/foo/bar").
Key string `json:"key"`
// Dir reports whether node describes a directory.
Dir bool `json:"dir,omitempty"`
// Value is the current data stored on this Node. If this Node
// is a directory, Value will be empty.
Value string `json:"value"`
// Nodes holds the children of this Node, only if this Node is a directory.
// This slice will be arbitrarily deep (children, grandchildren, great-
// grandchildren, etc.) if a recursive Get or Watch request were made.
Nodes Nodes `json:"nodes"`
// CreatedIndex is the etcd index at-which this Node was created.
CreatedIndex uint64 `json:"createdIndex"`
// ModifiedIndex is the etcd index at-which this Node was last modified.
ModifiedIndex uint64 `json:"modifiedIndex"`
// Expiration is the server side expiration time of the key.
Expiration *time.Time `json:"expiration,omitempty"`
// TTL is the time to live of the key, in seconds.
TTL int64 `json:"ttl,omitempty"`
}
func (n *Node) String() string {
return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
}
// TTLDuration returns the Node's TTL as a time.Duration object
func (n *Node) TTLDuration() time.Duration {
return time.Duration(n.TTL) * time.Second
}
type Nodes []*Node
// interfaces for sorting
func (ns Nodes) Len() int { return len(ns) }
func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
type httpKeysAPI struct {
client httpClient
prefix string
}
func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
act := &setAction{
Prefix: k.prefix,
Key: key,
Value: val,
}
if opts != nil {
act.PrevValue = opts.PrevValue
act.PrevIndex = opts.PrevIndex
act.PrevExist = opts.PrevExist
act.TTL = opts.TTL
act.Refresh = opts.Refresh
act.Dir = opts.Dir
act.NoValueOnSuccess = opts.NoValueOnSuccess
}
doCtx := ctx
if act.PrevExist == PrevNoExist {
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
}
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
}
func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
act := &createInOrderAction{
Prefix: k.prefix,
Dir: dir,
Value: val,
}
if opts != nil {
act.TTL = opts.TTL
}
resp, body, err := k.client.Do(ctx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
}
func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
act := &deleteAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.PrevValue = opts.PrevValue
act.PrevIndex = opts.PrevIndex
act.Dir = opts.Dir
act.Recursive = opts.Recursive
}
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
act := &getAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.Recursive = opts.Recursive
act.Sorted = opts.Sort
act.Quorum = opts.Quorum
}
resp, body, err := k.client.Do(ctx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
act := waitAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.Recursive = opts.Recursive
if opts.AfterIndex > 0 {
act.WaitIndex = opts.AfterIndex + 1
}
}
return &httpWatcher{
client: k.client,
nextWait: act,
}
}
type httpWatcher struct {
client httpClient
nextWait waitAction
}
func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
for {
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
if err != nil {
return nil, err
}
resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
if err != nil {
if err == ErrEmptyBody {
continue
}
return nil, err
}
hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
return resp, nil
}
}
// v2KeysURL forms a URL representing the location of a key.
// The endpoint argument represents the base URL of an etcd
// server. The prefix is the path needed to route from the
// provided endpoint's path to the root of the keys API
// (typically "/v2/keys").
func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
// We concatenate all parts together manually. We cannot use
// path.Join because it does not preserve the trailing slash.
// We call CanonicalURLPath to further cleanup the path.
if prefix != "" && prefix[0] != '/' {
prefix = "/" + prefix
}
if key != "" && key[0] != '/' {
key = "/" + key
}
ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
return &ep
}
type getAction struct {
Prefix string
Key string
Recursive bool
Sorted bool
Quorum bool
}
func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, g.Prefix, g.Key)
params := u.Query()
params.Set("recursive", strconv.FormatBool(g.Recursive))
params.Set("sorted", strconv.FormatBool(g.Sorted))
params.Set("quorum", strconv.FormatBool(g.Quorum))
u.RawQuery = params.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type waitAction struct {
Prefix string
Key string
WaitIndex uint64
Recursive bool
}
func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, w.Prefix, w.Key)
params := u.Query()
params.Set("wait", "true")
params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
params.Set("recursive", strconv.FormatBool(w.Recursive))
u.RawQuery = params.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type setAction struct {
Prefix string
Key string
Value string
PrevValue string
PrevIndex uint64
PrevExist PrevExistType
TTL time.Duration
Refresh bool
Dir bool
NoValueOnSuccess bool
}
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Key)
params := u.Query()
form := url.Values{}
// we're either creating a directory or setting a key
if a.Dir {
params.Set("dir", strconv.FormatBool(a.Dir))
} else {
// These options are only valid for setting a key
if a.PrevValue != "" {
params.Set("prevValue", a.PrevValue)
}
form.Add("value", a.Value)
}
// Options which apply to both setting a key and creating a dir
if a.PrevIndex != 0 {
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
}
if a.PrevExist != PrevIgnore {
params.Set("prevExist", string(a.PrevExist))
}
if a.TTL > 0 {
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
}
if a.Refresh {
form.Add("refresh", "true")
}
if a.NoValueOnSuccess {
params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
}
u.RawQuery = params.Encode()
body := strings.NewReader(form.Encode())
req, _ := http.NewRequest("PUT", u.String(), body)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
type deleteAction struct {
Prefix string
Key string
PrevValue string
PrevIndex uint64
Dir bool
Recursive bool
}
func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Key)
params := u.Query()
if a.PrevValue != "" {
params.Set("prevValue", a.PrevValue)
}
if a.PrevIndex != 0 {
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
}
if a.Dir {
params.Set("dir", "true")
}
if a.Recursive {
params.Set("recursive", "true")
}
u.RawQuery = params.Encode()
req, _ := http.NewRequest("DELETE", u.String(), nil)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
type createInOrderAction struct {
Prefix string
Dir string
Value string
TTL time.Duration
}
func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Dir)
form := url.Values{}
form.Add("value", a.Value)
if a.TTL > 0 {
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
}
body := strings.NewReader(form.Encode())
req, _ := http.NewRequest("POST", u.String(), body)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
switch code {
case http.StatusOK, http.StatusCreated:
if len(body) == 0 {
return nil, ErrEmptyBody
}
res, err = unmarshalSuccessfulKeysResponse(header, body)
default:
err = unmarshalFailedKeysResponse(body)
}
return
}
func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
var res Response
err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res)
if err != nil {
return nil, ErrInvalidJSON
}
if header.Get("X-Etcd-Index") != "" {
res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
if err != nil {
return nil, err
}
}
res.ClusterID = header.Get("X-Etcd-Cluster-ID")
return &res, nil
}
func unmarshalFailedKeysResponse(body []byte) error {
var etcdErr Error
if err := json.Unmarshal(body, &etcdErr); err != nil {
return ErrInvalidJSON
}
return etcdErr
}
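Tying the pieces of this file together, a hedged sketch of Create (set-if-absent), a quorum Get, and a Watcher; the endpoint and the /buckets/demo key are illustrative, not this commit's key layout:
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
		Transport: client.DefaultTransport,
	})
	if err != nil {
		log.Fatalln(err)
	}
	kapi := client.NewKeysAPI(c)
	ctx := context.Background()

	// Create is Set with PrevExist=false: it fails with ErrorCodeNodeExist
	// if the key is already present.
	if _, err := kapi.Create(ctx, "/buckets/demo", "record"); err != nil {
		log.Fatalln(err)
	}

	// Quorum read returns the latest committed value.
	resp, err := kapi.Get(ctx, "/buckets/demo", &client.GetOptions{Quorum: true})
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(resp.Node.Value)

	// Watch from the current index; Next blocks until the key changes.
	w := kapi.Watcher("/buckets/demo", &client.WatcherOptions{AfterIndex: 0})
	ev, err := w.Next(ctx)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(ev.Action, ev.Node.Key)
}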

304
vendor/github.com/coreos/etcd/client/members.go generated vendored Normal file
View File

@ -0,0 +1,304 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/url"
"path"
"golang.org/x/net/context"
"github.com/coreos/etcd/pkg/types"
)
var (
defaultV2MembersPrefix = "/v2/members"
defaultLeaderSuffix = "/leader"
)
type Member struct {
// ID is the unique identifier of this Member.
ID string `json:"id"`
// Name is a human-readable, non-unique identifier of this Member.
Name string `json:"name"`
// PeerURLs represents the HTTP(S) endpoints this Member uses to
// participate in etcd's consensus protocol.
PeerURLs []string `json:"peerURLs"`
// ClientURLs represents the HTTP(S) endpoints on which this Member
// serves its client-facing APIs.
ClientURLs []string `json:"clientURLs"`
}
type memberCollection []Member
func (c *memberCollection) UnmarshalJSON(data []byte) error {
d := struct {
Members []Member
}{}
if err := json.Unmarshal(data, &d); err != nil {
return err
}
if d.Members == nil {
*c = make([]Member, 0)
return nil
}
*c = d.Members
return nil
}
type memberCreateOrUpdateRequest struct {
PeerURLs types.URLs
}
func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
s := struct {
PeerURLs []string `json:"peerURLs"`
}{
PeerURLs: make([]string, len(m.PeerURLs)),
}
for i, u := range m.PeerURLs {
s.PeerURLs[i] = u.String()
}
return json.Marshal(&s)
}
// NewMembersAPI constructs a new MembersAPI that uses HTTP to
// interact with etcd's membership API.
func NewMembersAPI(c Client) MembersAPI {
return &httpMembersAPI{
client: c,
}
}
type MembersAPI interface {
// List enumerates the current cluster membership.
List(ctx context.Context) ([]Member, error)
// Add instructs etcd to accept a new Member into the cluster.
Add(ctx context.Context, peerURL string) (*Member, error)
// Remove demotes an existing Member out of the cluster.
Remove(ctx context.Context, mID string) error
// Update instructs etcd to update an existing Member in the cluster.
Update(ctx context.Context, mID string, peerURLs []string) error
// Leader gets current leader of the cluster
Leader(ctx context.Context) (*Member, error)
}
type httpMembersAPI struct {
client httpClient
}
func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
req := &membersAPIActionList{}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var mCollection memberCollection
if err := json.Unmarshal(body, &mCollection); err != nil {
return nil, err
}
return []Member(mCollection), nil
}
func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
urls, err := types.NewURLs([]string{peerURL})
if err != nil {
return nil, err
}
req := &membersAPIActionAdd{peerURLs: urls}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
return nil, err
}
if resp.StatusCode != http.StatusCreated {
var merr membersError
if err := json.Unmarshal(body, &merr); err != nil {
return nil, err
}
return nil, merr
}
var memb Member
if err := json.Unmarshal(body, &memb); err != nil {
return nil, err
}
return &memb, nil
}
func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
urls, err := types.NewURLs(peerURLs)
if err != nil {
return err
}
req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
var merr membersError
if err := json.Unmarshal(body, &merr); err != nil {
return err
}
return merr
}
return nil
}
func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
req := &membersAPIActionRemove{memberID: memberID}
resp, _, err := m.client.Do(ctx, req)
if err != nil {
return err
}
return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
}
func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
req := &membersAPIActionLeader{}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var leader Member
if err := json.Unmarshal(body, &leader); err != nil {
return nil, err
}
return &leader, nil
}
type membersAPIActionList struct{}
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type membersAPIActionRemove struct {
memberID string
}
func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, d.memberID)
req, _ := http.NewRequest("DELETE", u.String(), nil)
return req
}
type membersAPIActionAdd struct {
peerURLs types.URLs
}
func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
b, _ := json.Marshal(&m)
req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
req.Header.Set("Content-Type", "application/json")
return req
}
type membersAPIActionUpdate struct {
memberID string
peerURLs types.URLs
}
func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
u.Path = path.Join(u.Path, a.memberID)
b, _ := json.Marshal(&m)
req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
req.Header.Set("Content-Type", "application/json")
return req
}
func assertStatusCode(got int, want ...int) (err error) {
for _, w := range want {
if w == got {
return nil
}
}
return fmt.Errorf("unexpected status code %d", got)
}
type membersAPIActionLeader struct{}
func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, defaultLeaderSuffix)
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
// v2MembersURL adds the necessary path to the provided endpoint
// to route requests to the default v2 members API.
func v2MembersURL(ep url.URL) *url.URL {
ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
return &ep
}
type membersError struct {
Message string `json:"message"`
Code int `json:"-"`
}
func (e membersError) Error() string {
return e.Message
}
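A hedged sketch of the members API: listing the membership and asking for the current leader (the endpoint is illustrative):
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
		Transport: client.DefaultTransport,
	})
	if err != nil {
		log.Fatalln(err)
	}
	mapi := client.NewMembersAPI(c)
	ctx := context.Background()

	members, err := mapi.List(ctx)
	if err != nil {
		log.Fatalln(err)
	}
	for _, m := range members {
		fmt.Println(m.ID, m.Name, m.ClientURLs)
	}

	leader, err := mapi.Leader(ctx)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("leader:", leader.Name)
}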

53
vendor/github.com/coreos/etcd/client/util.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"regexp"
)
var (
roleNotFoundRegExp *regexp.Regexp
userNotFoundRegExp *regexp.Regexp
)
func init() {
roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
}
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
func IsKeyNotFound(err error) bool {
if cErr, ok := err.(Error); ok {
return cErr.Code == ErrorCodeKeyNotFound
}
return false
}
// IsRoleNotFound returns true if the error means the role was not found in the v2 API.
func IsRoleNotFound(err error) bool {
if ae, ok := err.(authError); ok {
return roleNotFoundRegExp.MatchString(ae.Message)
}
return false
}
// IsUserNotFound returns true if the error means the user was not found in the v2 API.
func IsUserNotFound(err error) bool {
if ae, ok := err.(authError); ok {
return userNotFoundRegExp.MatchString(ae.Message)
}
return false
}
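IsKeyNotFound is the natural hook for the bucket lookup path of this change: a missing etcd key can be surfaced as a missing bucket. A hedged sketch; lookupBucket, errBucketNotFound and the /buckets/ key layout are hypothetical helpers, not code from this commit:
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

var errBucketNotFound = errors.New("bucket not found")

// lookupBucket maps an etcd key-not-found error to a bucket-not-found error.
func lookupBucket(kapi client.KeysAPI, bucket string) (string, error) {
	resp, err := kapi.Get(context.Background(), "/buckets/"+bucket, nil)
	if err != nil {
		if client.IsKeyNotFound(err) {
			return "", errBucketNotFound
		}
		return "", err
	}
	return resp.Node.Value, nil
}

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
		Transport: client.DefaultTransport,
	})
	if err != nil {
		log.Fatalln(err)
	}
	record, err := lookupBucket(client.NewKeysAPI(c), "demo")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(record)
}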

31
vendor/github.com/coreos/etcd/pkg/pathutil/path.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pathutil implements utility functions for handling slash-separated
// paths.
package pathutil
import "path"
// CanonicalURLPath returns the canonical url path for p, which follows the rules:
// 1. the path always starts with "/"
// 2. replace multiple slashes with a single slash
// 3. replace each '.' or '..' path name element with its equivalent
// 4. keep the trailing slash
// The function is borrowed from stdlib http.cleanPath in server.go.
func CanonicalURLPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root,
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
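For reference, the four rules above play out as follows (a small hedged sketch; the expected outputs are noted in comments):
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/pathutil"
)

func main() {
	fmt.Println(pathutil.CanonicalURLPath(""))            // "/"
	fmt.Println(pathutil.CanonicalURLPath("v2//keys"))    // "/v2/keys"
	fmt.Println(pathutil.CanonicalURLPath("/v2/keys/a/")) // "/v2/keys/a/" (trailing slash kept)
	fmt.Println(pathutil.CanonicalURLPath("/a/b/../c"))   // "/a/c"
}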

140
vendor/github.com/coreos/etcd/pkg/srv/srv.go generated vendored Normal file
View File

@ -0,0 +1,140 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package srv looks up DNS SRV records.
package srv
import (
"fmt"
"net"
"net/url"
"strings"
"github.com/coreos/etcd/pkg/types"
)
var (
// indirection for testing
lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
resolveTCPAddr = net.ResolveTCPAddr
)
// GetCluster gets the cluster information via DNS discovery.
// Each DNS SRV entry is treated as a separate instance.
func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) {
tempName := int(0)
tcp2ap := make(map[string]url.URL)
// First, resolve the apurls
for _, url := range apurls {
tcpAddr, err := resolveTCPAddr("tcp", url.Host)
if err != nil {
return nil, err
}
tcp2ap[tcpAddr.String()] = url
}
stringParts := []string{}
updateNodeMap := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", dns)
if err != nil {
return err
}
for _, srv := range addrs {
port := fmt.Sprintf("%d", srv.Port)
host := net.JoinHostPort(srv.Target, port)
tcpAddr, terr := resolveTCPAddr("tcp", host)
if terr != nil {
err = terr
continue
}
n := ""
url, ok := tcp2ap[tcpAddr.String()]
if ok {
n = name
}
if n == "" {
n = fmt.Sprintf("%d", tempName)
tempName++
}
// SRV records have a trailing dot but URLs shouldn't.
shortHost := strings.TrimSuffix(srv.Target, ".")
urlHost := net.JoinHostPort(shortHost, port)
stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
if ok && url.Scheme != scheme {
err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
}
}
if len(stringParts) == 0 {
return err
}
return nil
}
failCount := 0
err := updateNodeMap(service+"-ssl", "https")
srvErr := make([]string, 2)
if err != nil {
srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err)
failCount++
}
err = updateNodeMap(service, "http")
if err != nil {
srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err)
failCount++
}
if failCount == 2 {
return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1])
}
return stringParts, nil
}
type SRVClients struct {
Endpoints []string
SRVs []*net.SRV
}
// GetClient looks up the client endpoints for a service and domain.
func GetClient(service, domain string) (*SRVClients, error) {
var urls []*url.URL
var srvs []*net.SRV
updateURLs := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", domain)
if err != nil {
return err
}
for _, srv := range addrs {
urls = append(urls, &url.URL{
Scheme: scheme,
Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
})
}
srvs = append(srvs, addrs...)
return nil
}
errHTTPS := updateURLs(service+"-ssl", "https")
errHTTP := updateURLs(service, "http")
if errHTTPS != nil && errHTTP != nil {
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
}
endpoints := make([]string, len(urls))
for i := range urls {
endpoints[i] = urls[i].String()
}
return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
}
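A hedged sketch of DNS-based peer bootstrap with GetCluster; the service name, member name, domain and peer URL are all illustrative:
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/pkg/srv"
	"github.com/coreos/etcd/pkg/types"
)

func main() {
	// Advertised peer URLs of this member (hypothetical address).
	apurls := types.MustNewURLs([]string{"http://10.0.0.1:2380"})

	// Resolves _etcd-server-ssl._tcp and _etcd-server._tcp SRV records for
	// example.com and returns name=scheme://host:port bootstrap pairs.
	parts, err := srv.GetCluster("etcd-server", "infra0", "example.com", apurls)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(parts)
}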

17
vendor/github.com/coreos/etcd/pkg/types/doc.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package types declares various data types and implements type-checking
// functions.
package types

41
vendor/github.com/coreos/etcd/pkg/types/id.go generated vendored Normal file
View File

@ -0,0 +1,41 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"strconv"
)
// ID represents a generic identifier which is canonically
// stored as a uint64 but is typically represented as a
// base-16 string for input/output
type ID uint64
func (i ID) String() string {
return strconv.FormatUint(uint64(i), 16)
}
// IDFromString attempts to create an ID from a base-16 string.
func IDFromString(s string) (ID, error) {
i, err := strconv.ParseUint(s, 16, 64)
return ID(i), err
}
// IDSlice implements the sort interface
type IDSlice []ID
func (p IDSlice) Len() int { return len(p) }
func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
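A quick hedged illustration of the base-16 round trip (the ID value is arbitrary):
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	id := types.ID(0x8e9e05c52164694d)
	fmt.Println(id.String()) // 8e9e05c52164694d

	parsed, err := types.IDFromString("8e9e05c52164694d")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(parsed == id) // true
}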

178
vendor/github.com/coreos/etcd/pkg/types/set.go generated vendored Normal file
View File

@ -0,0 +1,178 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"reflect"
"sort"
"sync"
)
type Set interface {
Add(string)
Remove(string)
Contains(string) bool
Equals(Set) bool
Length() int
Values() []string
Copy() Set
Sub(Set) Set
}
func NewUnsafeSet(values ...string) *unsafeSet {
set := &unsafeSet{make(map[string]struct{})}
for _, v := range values {
set.Add(v)
}
return set
}
func NewThreadsafeSet(values ...string) *tsafeSet {
us := NewUnsafeSet(values...)
return &tsafeSet{us, sync.RWMutex{}}
}
type unsafeSet struct {
d map[string]struct{}
}
// Add adds a new value to the set (no-op if the value is already present)
func (us *unsafeSet) Add(value string) {
us.d[value] = struct{}{}
}
// Remove removes the given value from the set
func (us *unsafeSet) Remove(value string) {
delete(us.d, value)
}
// Contains returns whether the set contains the given value
func (us *unsafeSet) Contains(value string) (exists bool) {
_, exists = us.d[value]
return
}
// ContainsAll returns whether the set contains all given values
func (us *unsafeSet) ContainsAll(values []string) bool {
for _, s := range values {
if !us.Contains(s) {
return false
}
}
return true
}
// Equals returns whether the contents of two sets are identical
func (us *unsafeSet) Equals(other Set) bool {
v1 := sort.StringSlice(us.Values())
v2 := sort.StringSlice(other.Values())
v1.Sort()
v2.Sort()
return reflect.DeepEqual(v1, v2)
}
// Length returns the number of elements in the set
func (us *unsafeSet) Length() int {
return len(us.d)
}
// Values returns the values of the Set in an unspecified order.
func (us *unsafeSet) Values() (values []string) {
values = make([]string, 0)
for val := range us.d {
values = append(values, val)
}
return
}
// Copy creates a new Set containing the values of the first
func (us *unsafeSet) Copy() Set {
cp := NewUnsafeSet()
for val := range us.d {
cp.Add(val)
}
return cp
}
// Sub removes all elements in other from the set
func (us *unsafeSet) Sub(other Set) Set {
oValues := other.Values()
result := us.Copy().(*unsafeSet)
for _, val := range oValues {
if _, ok := result.d[val]; !ok {
continue
}
delete(result.d, val)
}
return result
}
type tsafeSet struct {
us *unsafeSet
m sync.RWMutex
}
func (ts *tsafeSet) Add(value string) {
ts.m.Lock()
defer ts.m.Unlock()
ts.us.Add(value)
}
func (ts *tsafeSet) Remove(value string) {
ts.m.Lock()
defer ts.m.Unlock()
ts.us.Remove(value)
}
func (ts *tsafeSet) Contains(value string) (exists bool) {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Contains(value)
}
func (ts *tsafeSet) Equals(other Set) bool {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Equals(other)
}
func (ts *tsafeSet) Length() int {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Length()
}
func (ts *tsafeSet) Values() (values []string) {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Values()
}
func (ts *tsafeSet) Copy() Set {
ts.m.RLock()
defer ts.m.RUnlock()
usResult := ts.us.Copy().(*unsafeSet)
return &tsafeSet{usResult, sync.RWMutex{}}
}
func (ts *tsafeSet) Sub(other Set) Set {
ts.m.RLock()
defer ts.m.RUnlock()
usResult := ts.us.Sub(other).(*unsafeSet)
return &tsafeSet{usResult, sync.RWMutex{}}
}
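A short hedged sketch of the set helpers in use (values are arbitrary):
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	a := types.NewThreadsafeSet("x", "y", "z")
	b := types.NewUnsafeSet("y")

	a.Remove("z")
	fmt.Println(a.Contains("x"))                        // true
	fmt.Println(a.Length())                             // 2
	fmt.Println(a.Sub(b).Values())                      // [x]
	fmt.Println(a.Equals(types.NewUnsafeSet("x", "y"))) // true
}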

22
vendor/github.com/coreos/etcd/pkg/types/slice.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
// Uint64Slice implements the sort interface.
type Uint64Slice []uint64
func (p Uint64Slice) Len() int { return len(p) }
func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

82
vendor/github.com/coreos/etcd/pkg/types/urls.go generated vendored Normal file
View File

@ -0,0 +1,82 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"errors"
"fmt"
"net"
"net/url"
"sort"
"strings"
)
type URLs []url.URL
func NewURLs(strs []string) (URLs, error) {
all := make([]url.URL, len(strs))
if len(all) == 0 {
return nil, errors.New("no valid URLs given")
}
for i, in := range strs {
in = strings.TrimSpace(in)
u, err := url.Parse(in)
if err != nil {
return nil, err
}
if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
}
if _, _, err := net.SplitHostPort(u.Host); err != nil {
return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
}
if u.Path != "" {
return nil, fmt.Errorf("URL must not contain a path: %s", in)
}
all[i] = *u
}
us := URLs(all)
us.Sort()
return us, nil
}
func MustNewURLs(strs []string) URLs {
urls, err := NewURLs(strs)
if err != nil {
panic(err)
}
return urls
}
func (us URLs) String() string {
return strings.Join(us.StringSlice(), ",")
}
func (us *URLs) Sort() {
sort.Sort(us)
}
func (us URLs) Len() int { return len(us) }
func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] }
func (us URLs) StringSlice() []string {
out := make([]string, len(us))
for i := range us {
out[i] = us[i].String()
}
return out
}
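A hedged sketch of URL validation and sorting with NewURLs (addresses are examples only):
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	urls, err := types.NewURLs([]string{
		"https://10.0.0.2:2380",
		"http://10.0.0.1:2380",
	})
	if err != nil {
		log.Fatalln(err)
	}
	// Sorted lexicographically by the full URL string.
	fmt.Println(urls.String()) // http://10.0.0.1:2380,https://10.0.0.2:2380

	// A URL carrying a path is rejected.
	if _, err := types.NewURLs([]string{"http://10.0.0.1:2380/path"}); err != nil {
		fmt.Println(err)
	}
}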

107
vendor/github.com/coreos/etcd/pkg/types/urlsmap.go generated vendored Normal file
View File

@ -0,0 +1,107 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"fmt"
"sort"
"strings"
)
// URLsMap is a map from a name to its URLs.
type URLsMap map[string]URLs
// NewURLsMap returns a URLsMap instantiated from the given string,
// which consists of discovery-formatted names-to-URLs, like:
// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
func NewURLsMap(s string) (URLsMap, error) {
m := parse(s)
cl := URLsMap{}
for name, urls := range m {
us, err := NewURLs(urls)
if err != nil {
return nil, err
}
cl[name] = us
}
return cl, nil
}
// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The
// string values in the map can be multiple values separated by the sep string.
func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
var err error
um := URLsMap{}
for k, v := range m {
um[k], err = NewURLs(strings.Split(v, sep))
if err != nil {
return nil, err
}
}
return um, nil
}
// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
func (c URLsMap) String() string {
var pairs []string
for name, urls := range c {
for _, url := range urls {
pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
}
}
sort.Strings(pairs)
return strings.Join(pairs, ",")
}
// URLs returns a list of all URLs.
// The returned list is sorted in ascending lexicographical order.
func (c URLsMap) URLs() []string {
var urls []string
for _, us := range c {
for _, u := range us {
urls = append(urls, u.String())
}
}
sort.Strings(urls)
return urls
}
// Len returns the size of URLsMap.
func (c URLsMap) Len() int {
return len(c)
}
// parse parses the given string and returns a map listing the values specified for each key.
func parse(s string) map[string][]string {
m := make(map[string][]string)
for s != "" {
key := s
if i := strings.IndexAny(key, ","); i >= 0 {
key, s = key[:i], key[i+1:]
} else {
s = ""
}
if key == "" {
continue
}
value := ""
if i := strings.Index(key, "="); i >= 0 {
key, value = key[:i], key[i+1:]
}
m[key] = append(m[key], value)
}
return m
}
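A hedged sketch of parsing a discovery-formatted string into a URLsMap (names and addresses are illustrative):
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	m, err := types.NewURLsMap("infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(m.Len())    // 2
	fmt.Println(m.URLs())   // [http://10.0.0.1:2380 http://10.0.0.2:2380]
	fmt.Println(m.String()) // infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380
}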

56
vendor/github.com/coreos/etcd/version/version.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package version implements etcd version parsing and contains latest version
// information.
package version
import (
"fmt"
"strings"
"github.com/coreos/go-semver/semver"
)
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0"
Version = "3.2.9"
APIVersion = "unknown"
// Git SHA Value will be set during build
GitSHA = "Not provided (use ./build instead of go build)"
)
func init() {
ver, err := semver.NewVersion(Version)
if err == nil {
APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
}
}
type Versions struct {
Server string `json:"etcdserver"`
Cluster string `json:"etcdcluster"`
// TODO: raft state machine version
}
// Cluster returns only the major.minor part of the given version string.
func Cluster(v string) string {
vs := strings.Split(v, ".")
if len(vs) <= 2 {
return v
}
return fmt.Sprintf("%s.%s", vs[0], vs[1])
}
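Cluster simply truncates a version string to major.minor; a hedged illustration:
package main

import (
	"fmt"

	"github.com/coreos/etcd/version"
)

func main() {
	fmt.Println(version.Cluster("3.2.9")) // 3.2
	fmt.Println(version.Cluster("3.2"))   // 3.2
	fmt.Println(version.Version, version.APIVersion) // 3.2.9 3.2
}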

202
vendor/github.com/coreos/go-semver/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

5
vendor/github.com/coreos/go-semver/NOTICE generated vendored Normal file
View File

@ -0,0 +1,5 @@
CoreOS Project
Copyright 2018 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).

296
vendor/github.com/coreos/go-semver/semver/semver.go generated vendored Normal file
View File

@ -0,0 +1,296 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Semantic Versions http://semver.org
package semver
import (
"bytes"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
type Version struct {
Major int64
Minor int64
Patch int64
PreRelease PreRelease
Metadata string
}
type PreRelease string
func splitOff(input *string, delim string) (val string) {
parts := strings.SplitN(*input, delim, 2)
if len(parts) == 2 {
*input = parts[0]
val = parts[1]
}
return val
}
func New(version string) *Version {
return Must(NewVersion(version))
}
func NewVersion(version string) (*Version, error) {
v := Version{}
if err := v.Set(version); err != nil {
return nil, err
}
return &v, nil
}
// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
if err != nil {
panic(err)
}
return v
}
// Set parses and updates v from the given version string. Implements flag.Value
func (v *Version) Set(version string) error {
metadata := splitOff(&version, "+")
preRelease := PreRelease(splitOff(&version, "-"))
dotParts := strings.SplitN(version, ".", 3)
if len(dotParts) != 3 {
return fmt.Errorf("%s is not in dotted-tri format", version)
}
if err := validateIdentifier(string(preRelease)); err != nil {
return fmt.Errorf("failed to validate pre-release: %v", err)
}
if err := validateIdentifier(metadata); err != nil {
return fmt.Errorf("failed to validate metadata: %v", err)
}
parsed := make([]int64, 3, 3)
for i, v := range dotParts[:3] {
val, err := strconv.ParseInt(v, 10, 64)
parsed[i] = val
if err != nil {
return err
}
}
v.Metadata = metadata
v.PreRelease = preRelease
v.Major = parsed[0]
v.Minor = parsed[1]
v.Patch = parsed[2]
return nil
}
func (v Version) String() string {
var buffer bytes.Buffer
fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
if v.PreRelease != "" {
fmt.Fprintf(&buffer, "-%s", v.PreRelease)
}
if v.Metadata != "" {
fmt.Fprintf(&buffer, "+%s", v.Metadata)
}
return buffer.String()
}
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
var data string
if err := unmarshal(&data); err != nil {
return err
}
return v.Set(data)
}
func (v Version) MarshalJSON() ([]byte, error) {
return []byte(`"` + v.String() + `"`), nil
}
func (v *Version) UnmarshalJSON(data []byte) error {
l := len(data)
if l == 0 || string(data) == `""` {
return nil
}
if l < 2 || data[0] != '"' || data[l-1] != '"' {
return errors.New("invalid semver string")
}
return v.Set(string(data[1 : l-1]))
}
// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively.
func (v Version) Compare(versionB Version) int {
if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
return cmp
}
return preReleaseCompare(v, versionB)
}
// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
return v.Compare(versionB) == 0
}
// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
return v.Compare(versionB) < 0
}
// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
return []int64{v.Major, v.Minor, v.Patch}
}
func (p PreRelease) Slice() []string {
preRelease := string(p)
return strings.Split(preRelease, ".")
}
func preReleaseCompare(versionA Version, versionB Version) int {
a := versionA.PreRelease
b := versionB.PreRelease
/* Handle the case where if two versions are otherwise equal it is the
* one without a PreRelease that is greater */
if len(a) == 0 && (len(b) > 0) {
return 1
} else if len(b) == 0 && (len(a) > 0) {
return -1
}
// If there is a prerelease, check and compare each part.
return recursivePreReleaseCompare(a.Slice(), b.Slice())
}
func recursiveCompare(versionA []int64, versionB []int64) int {
if len(versionA) == 0 {
return 0
}
a := versionA[0]
b := versionB[0]
if a > b {
return 1
} else if a < b {
return -1
}
return recursiveCompare(versionA[1:], versionB[1:])
}
func recursivePreReleaseCompare(versionA []string, versionB []string) int {
// A larger set of pre-release fields has a higher precedence than a smaller set,
// if all of the preceding identifiers are equal.
if len(versionA) == 0 {
if len(versionB) > 0 {
return -1
}
return 0
} else if len(versionB) == 0 {
// We're longer than versionB so return 1.
return 1
}
a := versionA[0]
b := versionB[0]
aInt := false
bInt := false
aI, err := strconv.Atoi(versionA[0])
if err == nil {
aInt = true
}
bI, err := strconv.Atoi(versionB[0])
if err == nil {
bInt = true
}
// Numeric identifiers always have lower precedence than non-numeric identifiers.
if aInt && !bInt {
return -1
} else if !aInt && bInt {
return 1
}
// Handle Integer Comparison
if aInt && bInt {
if aI > bI {
return 1
} else if aI < bI {
return -1
}
}
// Handle String Comparison
if a > b {
return 1
} else if a < b {
return -1
}
return recursivePreReleaseCompare(versionA[1:], versionB[1:])
}
// BumpMajor increments the Major field by 1 and resets all other fields to their default values
func (v *Version) BumpMajor() {
v.Major += 1
v.Minor = 0
v.Patch = 0
v.PreRelease = PreRelease("")
v.Metadata = ""
}
// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
func (v *Version) BumpMinor() {
v.Minor += 1
v.Patch = 0
v.PreRelease = PreRelease("")
v.Metadata = ""
}
// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
func (v *Version) BumpPatch() {
v.Patch += 1
v.PreRelease = PreRelease("")
v.Metadata = ""
}
// validateIdentifier makes sure the provided identifier satisfies semver spec
func validateIdentifier(id string) error {
if id != "" && !reIdentifier.MatchString(id) {
return fmt.Errorf("%s is not a valid semver identifier", id)
}
return nil
}
// reIdentifier is a regular expression used to check that pre-release and metadata
// identifiers satisfy the spec requirements
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
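// An illustrative sketch (hypothetical helper, not part of the upstream file):
// parse, compare and bump versions using only the helpers defined above.
func exampleCompareAndBump() (string, error) {
	a, err := NewVersion("1.4.0-rc.1+build.7") // pre-release with build metadata
	if err != nil {
		return "", err
	}
	b := New("1.4.0") // Must-style constructor, panics on malformed input
	_ = a.LessThan(*b) // true: a pre-release sorts before the plain release
	_ = a.Compare(*b)  // -1 for the same reason
	b.BumpMinor()          // resets Patch, PreRelease and Metadata
	return b.String(), nil // "1.5.0"
}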

38
vendor/github.com/coreos/go-semver/semver/sort.go generated vendored Normal file
View File

@ -0,0 +1,38 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semver
import (
"sort"
)
type Versions []*Version
func (s Versions) Len() int {
return len(s)
}
func (s Versions) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s Versions) Less(i, j int) bool {
return s[i].LessThan(*s[j])
}
// Sort sorts the given slice of Version
func Sort(versions []*Version) {
sort.Sort(Versions(versions))
}

1
vendor/github.com/miekg/dns/AUTHORS generated vendored Normal file
View File

@ -0,0 +1 @@
Miek Gieben <miek@miek.nl>

9
vendor/github.com/miekg/dns/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,9 @@
Alex A. Skinner
Andrew Tunnell-Jones
Ask Bjørn Hansen
Dave Cheney
Dusty Wilson
Marek Majkowski
Peter van Dijk
Omri Bahumi
Alex Sergeyev

9
vendor/github.com/miekg/dns/COPYRIGHT generated vendored Normal file
View File

@ -0,0 +1,9 @@
Copyright 2009 The Go Authors. All rights reserved. Use of this source code
is governed by a BSD-style license that can be found in the LICENSE file.
Extensions of the original work are copyright (c) 2011 Miek Gieben
Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
governed by a BSD-style license that can be found in the LICENSE file.
Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
governed by a BSD-style license that can be found in the LICENSE file.

32
vendor/github.com/miekg/dns/LICENSE generated vendored Normal file
View File

@ -0,0 +1,32 @@
Extensions of the original work are copyright (c) 2011 Miek Gieben
As this is fork of the official Go code the same license applies:
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

151
vendor/github.com/miekg/dns/README.md generated vendored Normal file
View File

@ -0,0 +1,151 @@
[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns)
# Alternative (more granular) approach to a DNS library
> Less is more.
Complete and usable DNS library. All widely used Resource Records are
supported, including the DNSSEC types. It follows a lean and mean philosophy.
If there is stuff you should know as a DNS programmer there isn't a convenience
function for it. Server side and client side programming is supported, i.e. you
can build servers and resolvers with it.
We try to keep the "master" branch as sane as possible and at the bleeding edge
of standards, avoiding breaking changes wherever reasonable. We support the last
two versions of Go, currently: 1.5 and 1.6.
# Goals
* KISS;
* Fast;
* Small API, if it's easy to code in Go, don't make a function for it.
# Users
A not-so-up-to-date-list-that-may-be-actually-current:
* https://cloudflare.com
* https://github.com/abh/geodns
* http://www.statdns.com/
* http://www.dnsinspect.com/
* https://github.com/chuangbo/jianbing-dictionary-dns
* http://www.dns-lg.com/
* https://github.com/fcambus/rrda
* https://github.com/kenshinx/godns
* https://github.com/skynetservices/skydns
* https://github.com/hashicorp/consul
* https://github.com/DevelopersPL/godnsagent
* https://github.com/duedil-ltd/discodns
* https://github.com/StalkR/dns-reverse-proxy
* https://github.com/tianon/rawdns
* https://mesosphere.github.io/mesos-dns/
* https://pulse.turbobytes.com/
* https://play.google.com/store/apps/details?id=com.turbobytes.dig
* https://github.com/fcambus/statzone
* https://github.com/benschw/dns-clb-go
* https://github.com/corny/dnscheck for http://public-dns.info/
* https://namesmith.io
* https://github.com/miekg/unbound
* https://github.com/miekg/exdns
* https://dnslookup.org
* https://github.com/looterz/grimd
* https://github.com/phamhongviet/serf-dns
Send pull request if you want to be listed here.
# Features
* UDP/TCP queries, IPv4 and IPv6;
* RFC 1035 zone file parsing: $INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported;
* Fast:
* Reply speed around ~ 80K qps (faster hardware results in more qps);
* Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds;
* Server side programming (mimicking the net/http package);
* Client side programming;
* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA;
* EDNS0, NSID, Cookies;
* AXFR/IXFR;
* TSIG, SIG(0);
* DNS over TLS: optional encrypted connection between client and server;
* DNS name compression;
* Depends only on the standard library.
Have fun!
Miek Gieben - 2010-2012 - <miek@miek.nl>
# Building
Building is done with the `go` tool. If you have set up your GOPATH
correctly, the following should work:
go get github.com/miekg/dns
go build github.com/miekg/dns
## Examples
A short "how to use the API" is at the beginning of doc.go (this also will show
when you call `godoc github.com/miekg/dns`).
Example programs can be found in the `github.com/miekg/exdns` repository.
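For orientation, a minimal client query using this package might look like the sketch below (illustrative only; the resolver address and query name are placeholders):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA) // ask for A records
	m.SetEdns0(4096, false)                           // advertise a larger UDP buffer

	c := new(dns.Client)
	in, rtt, err := c.Exchange(m, "127.0.0.1:53") // placeholder resolver address
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	fmt.Println("round trip:", rtt)
	for _, rr := range in.Answer {
		fmt.Println(rr)
	}
}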
## Supported RFCs
*all of them*
* 103{4,5} - DNS standard
* 1348 - NSAP record (removed the record)
* 1982 - Serial Arithmetic
* 1876 - LOC record
* 1995 - IXFR
* 1996 - DNS notify
* 2136 - DNS Update (dynamic updates)
* 2181 - RRset definition - there is no RRset type though, just []RR
* 2537 - RSAMD5 DNS keys
* 2065 - DNSSEC (updated in later RFCs)
* 2671 - EDNS record
* 2782 - SRV record
* 2845 - TSIG record
* 2915 - NAPTR record
* 2929 - DNS IANA Considerations
* 3110 - RSASHA1 DNS keys
* 3225 - DO bit (DNSSEC OK)
* 340{1,2,3} - NAPTR record
* 3445 - Limiting the scope of (DNS)KEY
* 3597 - Unknown RRs
* 403{3,4,5} - DNSSEC + validation functions
* 4255 - SSHFP record
* 4343 - Case insensitivity
* 4408 - SPF record
* 4509 - SHA256 Hash in DS
* 4592 - Wildcards in the DNS
* 4635 - HMAC SHA TSIG
* 4701 - DHCID
* 4892 - id.server
* 5001 - NSID
* 5155 - NSEC3 record
* 5205 - HIP record
* 5702 - SHA2 in the DNS
* 5936 - AXFR
* 5966 - TCP implementation recommendations
* 6605 - ECDSA
* 6725 - IANA Registry Update
* 6742 - ILNP DNS
* 6840 - Clarifications and Implementation Notes for DNS Security
* 6844 - CAA record
* 6891 - EDNS0 update
* 6895 - DNS IANA considerations
* 6975 - Algorithm Understanding in DNSSEC
* 7043 - EUI48/EUI64 records
* 7314 - DNS (EDNS) EXPIRE Option
* 7553 - URI record
* 7858 - DNS over TLS: Initiation and Performance Considerations (draft)
* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies)
* xxxx - EDNS0 DNS Update Lease (draft)
## Loosely based upon
* `ldns`
* `NSD`
* `Net::DNS`
* `GRONG`

455
vendor/github.com/miekg/dns/client.go generated vendored Normal file
View File

@ -0,0 +1,455 @@
package dns
// A client implementation.
import (
"bytes"
"crypto/tls"
"encoding/binary"
"io"
"net"
"time"
)
const dnsTimeout time.Duration = 2 * time.Second
const tcpIdleTimeout time.Duration = 8 * time.Second
// A Conn represents a connection to a DNS server.
type Conn struct {
net.Conn // a net.Conn holding the connection
UDPSize uint16 // minimum receive buffer for UDP messages
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
rtt time.Duration
t time.Time
tsigRequestMAC string
}
// A Client defines parameters for a DNS client.
type Client struct {
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
UDPSize uint16 // minimum receive buffer for UDP messages
TLSConfig *tls.Config // TLS connection configuration
Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
group singleflight
}
// Exchange performs a synchronous UDP query. It sends the message m to the address
// contained in a and waits for a reply. Exchange does not retry a failed query, nor
// will it fall back to TCP in case of truncation.
// See client.Exchange for more information on setting larger buffer sizes.
func Exchange(m *Msg, a string) (r *Msg, err error) {
var co *Conn
co, err = DialTimeout("udp", a, dnsTimeout)
if err != nil {
return nil, err
}
defer co.Close()
opt := m.IsEdns0()
// If EDNS0 is used use that for size.
if opt != nil && opt.UDPSize() >= MinMsgSize {
co.UDPSize = opt.UDPSize()
}
co.SetWriteDeadline(time.Now().Add(dnsTimeout))
if err = co.WriteMsg(m); err != nil {
return nil, err
}
co.SetReadDeadline(time.Now().Add(dnsTimeout))
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
}
return r, err
}
// ExchangeConn performs a synchronous query. It sends the message m via the connection
// c and waits for a reply. The connection c is not closed by ExchangeConn.
// This function is going away, but can easily be mimicked:
//
// co := &dns.Conn{Conn: c} // c is your net.Conn
// co.WriteMsg(m)
// in, _ := co.ReadMsg()
// co.Close()
//
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
println("dns: this function is deprecated")
co := new(Conn)
co.Conn = c
if err = co.WriteMsg(m); err != nil {
return nil, err
}
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
}
return r, err
}
// Exchange performs a synchronous query. It sends the message m to the address
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
//
// c := new(dns.Client)
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
//
// Exchange does not retry a failed query, nor will it fall back to TCP in
// case of truncation.
// It is up to the caller to create a message that allows for larger responses to be
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
// buffer, see SetEdns0. Messages without an OPT RR will fall back to the historic limit
// of 512 bytes.
func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight {
return c.exchange(m, a)
}
// This adds a bunch of garbage, TODO(miek).
t := "nop"
if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
t = t1
}
cl := "nop"
if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
cl = cl1
}
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
return c.exchange(m, a)
})
if err != nil {
return r, rtt, err
}
if shared {
return r.Copy(), rtt, nil
}
return r, rtt, nil
}
func (c *Client) dialTimeout() time.Duration {
if c.Timeout != 0 {
return c.Timeout
}
if c.DialTimeout != 0 {
return c.DialTimeout
}
return dnsTimeout
}
func (c *Client) readTimeout() time.Duration {
if c.ReadTimeout != 0 {
return c.ReadTimeout
}
return dnsTimeout
}
func (c *Client) writeTimeout() time.Duration {
if c.WriteTimeout != 0 {
return c.WriteTimeout
}
return dnsTimeout
}
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
var co *Conn
network := "udp"
tls := false
switch c.Net {
case "tcp-tls":
network = "tcp"
tls = true
case "tcp4-tls":
network = "tcp4"
tls = true
case "tcp6-tls":
network = "tcp6"
tls = true
default:
if c.Net != "" {
network = c.Net
}
}
var deadline time.Time
if c.Timeout != 0 {
deadline = time.Now().Add(c.Timeout)
}
if tls {
co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout())
} else {
co, err = DialTimeout(network, a, c.dialTimeout())
}
if err != nil {
return nil, 0, err
}
defer co.Close()
opt := m.IsEdns0()
// If EDNS0 is used use that for size.
if opt != nil && opt.UDPSize() >= MinMsgSize {
co.UDPSize = opt.UDPSize()
}
// Otherwise use the client's configured UDP size.
if opt == nil && c.UDPSize >= MinMsgSize {
co.UDPSize = c.UDPSize
}
co.TsigSecret = c.TsigSecret
co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout()))
if err = co.WriteMsg(m); err != nil {
return nil, 0, err
}
co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout()))
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
}
return r, co.rtt, err
}
// ReadMsg reads a message from the connection co.
// If the received message contains a TSIG record the transaction
// signature is verified.
func (co *Conn) ReadMsg() (*Msg, error) {
p, err := co.ReadMsgHeader(nil)
if err != nil {
return nil, err
}
m := new(Msg)
if err := m.Unpack(p); err != nil {
// If ErrTruncated was returned, we still want to allow the user to use
// the message, but naively they can just check err if they don't want
// to use a truncated message
if err == ErrTruncated {
return m, err
}
return nil, err
}
if t := m.IsTsig(); t != nil {
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
return m, ErrSecret
}
// Need to work on the original message p, as that was used to calculate the tsig.
err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
}
return m, err
}
// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
// Returns message as a byte slice to be parsed with Msg.Unpack later on.
// Note that error handling on the message body is not possible as only the header is parsed.
func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
var (
p []byte
n int
err error
)
switch t := co.Conn.(type) {
case *net.TCPConn, *tls.Conn:
r := t.(io.Reader)
// First two bytes specify the length of the entire message.
l, err := tcpMsgLen(r)
if err != nil {
return nil, err
}
p = make([]byte, l)
n, err = tcpRead(r, p)
co.rtt = time.Since(co.t)
default:
if co.UDPSize > MinMsgSize {
p = make([]byte, co.UDPSize)
} else {
p = make([]byte, MinMsgSize)
}
n, err = co.Read(p)
co.rtt = time.Since(co.t)
}
if err != nil {
return nil, err
} else if n < headerSize {
return nil, ErrShortRead
}
p = p[:n]
if hdr != nil {
dh, _, err := unpackMsgHdr(p, 0)
if err != nil {
return nil, err
}
*hdr = dh
}
return p, err
}
// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length.
func tcpMsgLen(t io.Reader) (int, error) {
p := []byte{0, 0}
n, err := t.Read(p)
if err != nil {
return 0, err
}
if n != 2 {
return 0, ErrShortRead
}
l := binary.BigEndian.Uint16(p)
if l == 0 {
return 0, ErrShortRead
}
return int(l), nil
}
// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
func tcpRead(t io.Reader, p []byte) (int, error) {
n, err := t.Read(p)
if err != nil {
return n, err
}
for n < len(p) {
j, err := t.Read(p[n:])
if err != nil {
return n, err
}
n += j
}
return n, err
}
// Read implements the net.Conn read method.
func (co *Conn) Read(p []byte) (n int, err error) {
if co.Conn == nil {
return 0, ErrConnEmpty
}
if len(p) < 2 {
return 0, io.ErrShortBuffer
}
switch t := co.Conn.(type) {
case *net.TCPConn, *tls.Conn:
r := t.(io.Reader)
l, err := tcpMsgLen(r)
if err != nil {
return 0, err
}
if l > len(p) {
return int(l), io.ErrShortBuffer
}
return tcpRead(r, p[:l])
}
// UDP connection
n, err = co.Conn.Read(p)
if err != nil {
return n, err
}
return n, err
}
// WriteMsg sends a message through the connection co.
// If the message m contains a TSIG record the transaction
// signature is calculated.
func (co *Conn) WriteMsg(m *Msg) (err error) {
var out []byte
if t := m.IsTsig(); t != nil {
mac := ""
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
return ErrSecret
}
out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
// Set for the next read, although only used in zone transfers
co.tsigRequestMAC = mac
} else {
out, err = m.Pack()
}
if err != nil {
return err
}
co.t = time.Now()
if _, err = co.Write(out); err != nil {
return err
}
return nil
}
// Write implements the net.Conn Write method.
func (co *Conn) Write(p []byte) (n int, err error) {
switch t := co.Conn.(type) {
case *net.TCPConn, *tls.Conn:
w := t.(io.Writer)
lp := len(p)
if lp < 2 {
return 0, io.ErrShortBuffer
}
if lp > MaxMsgSize {
return 0, &Error{err: "message too large"}
}
l := make([]byte, 2, lp+2)
binary.BigEndian.PutUint16(l, uint16(lp))
p = append(l, p...)
n, err := io.Copy(w, bytes.NewReader(p))
return int(n), err
}
n, err = co.Conn.(*net.UDPConn).Write(p)
return n, err
}
// Dial connects to the address on the named network.
func Dial(network, address string) (conn *Conn, err error) {
conn = new(Conn)
conn.Conn, err = net.Dial(network, address)
if err != nil {
return nil, err
}
return conn, nil
}
// DialTimeout acts like Dial but takes a timeout.
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
conn = new(Conn)
conn.Conn, err = net.DialTimeout(network, address, timeout)
if err != nil {
return nil, err
}
return conn, nil
}
// DialWithTLS connects to the address on the named network with TLS.
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
conn = new(Conn)
conn.Conn, err = tls.Dial(network, address, tlsConfig)
if err != nil {
return nil, err
}
return conn, nil
}
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
var dialer net.Dialer
dialer.Timeout = timeout
conn = new(Conn)
conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig)
if err != nil {
return nil, err
}
return conn, nil
}
func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time {
if deadline.IsZero() {
return time.Now().Add(timeout)
}
return deadline
}

99
vendor/github.com/miekg/dns/clientconfig.go generated vendored Normal file
View File

@ -0,0 +1,99 @@
package dns
import (
"bufio"
"os"
"strconv"
"strings"
)
// ClientConfig wraps the contents of the /etc/resolv.conf file.
type ClientConfig struct {
Servers []string // servers to use
Search []string // suffixes to append to local name
Port string // what port to use
Ndots int // number of dots in name to trigger absolute lookup
Timeout int // seconds before giving up on packet
Attempts int // lost packets before giving up on server, not used in the package dns
}
// ClientConfigFromFile parses a resolv.conf(5) like file and returns
// a *ClientConfig.
func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
file, err := os.Open(resolvconf)
if err != nil {
return nil, err
}
defer file.Close()
c := new(ClientConfig)
scanner := bufio.NewScanner(file)
c.Servers = make([]string, 0)
c.Search = make([]string, 0)
c.Port = "53"
c.Ndots = 1
c.Timeout = 5
c.Attempts = 2
for scanner.Scan() {
if err := scanner.Err(); err != nil {
return nil, err
}
line := scanner.Text()
f := strings.Fields(line)
if len(f) < 1 {
continue
}
switch f[0] {
case "nameserver": // add one name server
if len(f) > 1 {
// One more check: make sure server name is
// just an IP address. Otherwise we need DNS
// to look it up.
name := f[1]
c.Servers = append(c.Servers, name)
}
case "domain": // set search path to just this domain
if len(f) > 1 {
c.Search = make([]string, 1)
c.Search[0] = f[1]
} else {
c.Search = make([]string, 0)
}
case "search": // set search path to given servers
c.Search = make([]string, len(f)-1)
for i := 0; i < len(c.Search); i++ {
c.Search[i] = f[i+1]
}
case "options": // magic options
for i := 1; i < len(f); i++ {
s := f[i]
switch {
case len(s) >= 6 && s[:6] == "ndots:":
n, _ := strconv.Atoi(s[6:])
if n < 1 {
n = 1
}
c.Ndots = n
case len(s) >= 8 && s[:8] == "timeout:":
n, _ := strconv.Atoi(s[8:])
if n < 1 {
n = 1
}
c.Timeout = n
case len(s) >= 9 && s[:9] == "attempts:":
n, _ := strconv.Atoi(s[9:])
if n < 1 {
n = 1
}
c.Attempts = n
case s == "rotate":
/* not imp */
}
}
}
}
return c, nil
}
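// An illustrative sketch (hypothetical helper, not part of the upstream file):
// combine ClientConfig with the Client from client.go, using the conventional
// resolv.conf path and assuming at least one nameserver entry.
func exampleResolvConfLookup() ([]RR, error) {
	conf, err := ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		return nil, err
	}
	m := new(Msg)
	m.SetQuestion(Fqdn("example.org"), TypeA)
	c := new(Client)
	r, _, err := c.Exchange(m, conf.Servers[0]+":"+conf.Port)
	if err != nil {
		return nil, err
	}
	return r.Answer, nil // answer section of the reply, possibly empty
}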

282
vendor/github.com/miekg/dns/defaults.go generated vendored Normal file
View File

@ -0,0 +1,282 @@
package dns
import (
"errors"
"net"
"strconv"
)
const hexDigit = "0123456789abcdef"
// Everything is assumed in ClassINET.
// SetReply creates a reply message from a request message.
func (dns *Msg) SetReply(request *Msg) *Msg {
dns.Id = request.Id
dns.RecursionDesired = request.RecursionDesired // Copy rd bit
dns.Response = true
dns.Opcode = OpcodeQuery
dns.Rcode = RcodeSuccess
if len(request.Question) > 0 {
dns.Question = make([]Question, 1)
dns.Question[0] = request.Question[0]
}
return dns
}
// SetQuestion creates a question message, it sets the Question
// section, generates an Id and sets the RecursionDesired (RD)
// bit to true.
func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
dns.Id = Id()
dns.RecursionDesired = true
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, t, ClassINET}
return dns
}
// SetNotify creates a notify message, it sets the Question
// section, generates an Id and sets the Authoritative (AA)
// bit to true.
func (dns *Msg) SetNotify(z string) *Msg {
dns.Opcode = OpcodeNotify
dns.Authoritative = true
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeSOA, ClassINET}
return dns
}
// SetRcode creates an error message suitable for the request.
func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
dns.SetReply(request)
dns.Rcode = rcode
return dns
}
// SetRcodeFormatError creates a message with FormError set.
func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
dns.Rcode = RcodeFormatError
dns.Opcode = OpcodeQuery
dns.Response = true
dns.Authoritative = false
dns.Id = request.Id
return dns
}
// SetUpdate makes the message a dynamic update message. It
// sets the ZONE section to: z, TypeSOA, ClassINET.
func (dns *Msg) SetUpdate(z string) *Msg {
dns.Id = Id()
dns.Response = false
dns.Opcode = OpcodeUpdate
dns.Compress = false // BIND9 cannot handle compression
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeSOA, ClassINET}
return dns
}
// SetIxfr creates message for requesting an IXFR.
func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Ns = make([]RR, 1)
s := new(SOA)
s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
s.Serial = serial
s.Ns = ns
s.Mbox = mbox
dns.Question[0] = Question{z, TypeIXFR, ClassINET}
dns.Ns[0] = s
return dns
}
// SetAxfr creates message for requesting an AXFR.
func (dns *Msg) SetAxfr(z string) *Msg {
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeAXFR, ClassINET}
return dns
}
// SetTsig appends a TSIG RR to the message.
// This is only a skeleton TSIG RR that is added as the last RR in the
// additional section. The TSIG is calculated when the message is being sent.
func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg {
t := new(TSIG)
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
t.Algorithm = algo
t.Fudge = 300
t.TimeSigned = uint64(timesigned)
t.OrigId = dns.Id
dns.Extra = append(dns.Extra, t)
return dns
}
// SetEdns0 appends an EDNS0 OPT RR to the message.
// TSIG should always be the last RR in a message.
func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
e := new(OPT)
e.Hdr.Name = "."
e.Hdr.Rrtype = TypeOPT
e.SetUDPSize(udpsize)
if do {
e.SetDo()
}
dns.Extra = append(dns.Extra, e)
return dns
}
// IsTsig checks if the message has a TSIG record as the last record
// in the additional section. It returns the TSIG record found or nil.
func (dns *Msg) IsTsig() *TSIG {
if len(dns.Extra) > 0 {
if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
return dns.Extra[len(dns.Extra)-1].(*TSIG)
}
}
return nil
}
// IsEdns0 checks if the message has an EDNS0 (OPT) record, any EDNS0
// record in the additional section will do. It returns the OPT record
// found or nil.
func (dns *Msg) IsEdns0() *OPT {
// EDNS0 is at the end of the additional section, start there.
// We might want to change this to *only* look at the last two
// records. So we see TSIG and/or OPT - this is a slightly bigger
// change though.
for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT {
return dns.Extra[i].(*OPT)
}
}
return nil
}
// IsDomainName checks if s is a valid domain name; it returns the number of
// labels and true when the name is valid. Note that a non-fully-qualified
// domain name is considered valid; in that case the last label is counted in
// the number of labels. When false is returned the number of labels is not
// defined. Also note that this function is extremely liberal; almost any
// string is a valid domain name as the DNS is an 8-bit protocol. It checks that each
// label fits in 63 characters, but there is no length check for the entire
// string s, i.e. a domain name longer than 255 characters is considered valid.
func IsDomainName(s string) (labels int, ok bool) {
_, labels, err := packDomainName(s, nil, 0, nil, false)
return labels, err == nil
}
// IsSubDomain checks if child is indeed a child of the parent. If child and parent
// are the same domain true is returned as well.
func IsSubDomain(parent, child string) bool {
// Entire child is contained in parent
return CompareDomainName(parent, child) == CountLabel(parent)
}
// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
// The checking is performed on the binary payload.
func IsMsg(buf []byte) error {
// Header
if len(buf) < 12 {
return errors.New("dns: bad message header")
}
// Header: Opcode
// TODO(miek): more checks here, e.g. check all header bits.
return nil
}
// IsFqdn checks if a domain name is fully qualified.
func IsFqdn(s string) bool {
l := len(s)
if l == 0 {
return false
}
return s[l-1] == '.'
}
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
// This means the RRs need to have the same type, name, and class. Returns true
// if the RR set is valid, otherwise false.
func IsRRset(rrset []RR) bool {
if len(rrset) == 0 {
return false
}
if len(rrset) == 1 {
return true
}
rrHeader := rrset[0].Header()
rrType := rrHeader.Rrtype
rrClass := rrHeader.Class
rrName := rrHeader.Name
for _, rr := range rrset[1:] {
curRRHeader := rr.Header()
if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
// Mismatch between the records, so this is not a valid rrset for
//signing/verifying
return false
}
}
return true
}
// Fqdn returns the fully qualified domain name from s.
// If s is already fully qualified, it behaves as the identity function.
func Fqdn(s string) string {
if IsFqdn(s) {
return s
}
return s + "."
}
// Copied from the official Go code.
// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
// address suitable for reverse DNS (PTR) record lookups or an error if it fails
// to parse the IP address.
func ReverseAddr(addr string) (arpa string, err error) {
ip := net.ParseIP(addr)
if ip == nil {
return "", &Error{err: "unrecognized address: " + addr}
}
if ip.To4() != nil {
return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." +
strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil
}
// Must be IPv6
buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
// Add it, in reverse, to the buffer
for i := len(ip) - 1; i >= 0; i-- {
v := ip[i]
buf = append(buf, hexDigit[v&0xF])
buf = append(buf, '.')
buf = append(buf, hexDigit[v>>4])
buf = append(buf, '.')
}
// Append "ip6.arpa." and return (buf already has the final .)
buf = append(buf, "ip6.arpa."...)
return string(buf), nil
}
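// A short illustrative sketch (hypothetical helper, not part of the upstream
// file): build a PTR question for an IPv4 address using ReverseAddr above.
func examplePTRQuestion() (*Msg, error) {
	arpa, err := ReverseAddr("192.0.2.1") // -> "1.2.0.192.in-addr.arpa."
	if err != nil {
		return nil, err
	}
	m := new(Msg)
	m.SetQuestion(arpa, TypePTR) // ReverseAddr already returns a fully qualified name
	return m, nil
}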
// String returns the string representation for the type t.
func (t Type) String() string {
if t1, ok := TypeToString[uint16(t)]; ok {
return t1
}
return "TYPE" + strconv.Itoa(int(t))
}
// String returns the string representation for the class c.
func (c Class) String() string {
if c1, ok := ClassToString[uint16(c)]; ok {
return c1
}
return "CLASS" + strconv.Itoa(int(c))
}
// String returns the string representation for the name n.
func (n Name) String() string {
return sprintName(string(n))
}

104
vendor/github.com/miekg/dns/dns.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
package dns
import "strconv"
const (
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
defaultTtl = 3600 // Default internal TTL.
DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes.
MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet.
MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet.
)
// Error represents a DNS error.
type Error struct{ err string }
func (e *Error) Error() string {
if e == nil {
return "dns: <nil>"
}
return "dns: " + e.err
}
// An RR represents a resource record.
type RR interface {
// Header returns the header of a resource record. The header contains
// everything up to the rdata.
Header() *RR_Header
// String returns the text representation of the resource record.
String() string
// copy returns a copy of the RR
copy() RR
// len returns the length (in octets) of the uncompressed RR in wire format.
len() int
// pack packs an RR into wire format.
pack([]byte, int, map[string]int, bool) (int, error)
}
// RR_Header is the header all DNS resource records share.
type RR_Header struct {
Name string `dns:"cdomain-name"`
Rrtype uint16
Class uint16
Ttl uint32
Rdlength uint16 // Length of data after header.
}
// Header returns itself. This is here to make RR_Header implement the RR interface.
func (h *RR_Header) Header() *RR_Header { return h }
// Just to implement the RR interface.
func (h *RR_Header) copy() RR { return nil }
func (h *RR_Header) copyHeader() *RR_Header {
r := new(RR_Header)
r.Name = h.Name
r.Rrtype = h.Rrtype
r.Class = h.Class
r.Ttl = h.Ttl
r.Rdlength = h.Rdlength
return r
}
func (h *RR_Header) String() string {
var s string
if h.Rrtype == TypeOPT {
s = ";"
// and maybe other things
}
s += sprintName(h.Name) + "\t"
s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
s += Class(h.Class).String() + "\t"
s += Type(h.Rrtype).String() + "\t"
return s
}
func (h *RR_Header) len() int {
l := len(h.Name) + 1
l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
return l
}
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error {
buf := make([]byte, r.len()*2)
off, err := PackRR(r, buf, 0, nil, false)
if err != nil {
return err
}
buf = buf[:off]
if int(r.Header().Rdlength) > off {
return ErrBuf
}
rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength))
if err != nil {
return err
}
*rr = *rfc3597.(*RFC3597)
return nil
}

721
vendor/github.com/miekg/dns/dnssec.go generated vendored Normal file
View File

@ -0,0 +1,721 @@
package dns
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
_ "crypto/md5"
"crypto/rand"
"crypto/rsa"
_ "crypto/sha1"
_ "crypto/sha256"
_ "crypto/sha512"
"encoding/asn1"
"encoding/binary"
"encoding/hex"
"math/big"
"sort"
"strings"
"time"
)
// DNSSEC encryption algorithm codes.
const (
_ uint8 = iota
RSAMD5
DH
DSA
_ // Skip 4, RFC 6725, section 2.1
RSASHA1
DSANSEC3SHA1
RSASHA1NSEC3SHA1
RSASHA256
_ // Skip 9, RFC 6725, section 2.1
RSASHA512
_ // Skip 11, RFC 6725, section 2.1
ECCGOST
ECDSAP256SHA256
ECDSAP384SHA384
INDIRECT uint8 = 252
PRIVATEDNS uint8 = 253 // Private (experimental keys)
PRIVATEOID uint8 = 254
)
// Map for algorithm names.
var AlgorithmToString = map[uint8]string{
RSAMD5: "RSAMD5",
DH: "DH",
DSA: "DSA",
RSASHA1: "RSASHA1",
DSANSEC3SHA1: "DSA-NSEC3-SHA1",
RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
RSASHA256: "RSASHA256",
RSASHA512: "RSASHA512",
ECCGOST: "ECC-GOST",
ECDSAP256SHA256: "ECDSAP256SHA256",
ECDSAP384SHA384: "ECDSAP384SHA384",
INDIRECT: "INDIRECT",
PRIVATEDNS: "PRIVATEDNS",
PRIVATEOID: "PRIVATEOID",
}
// Map of algorithm strings.
var StringToAlgorithm = reverseInt8(AlgorithmToString)
// Map of algorithm crypto hashes.
var AlgorithmToHash = map[uint8]crypto.Hash{
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
RSASHA1: crypto.SHA1,
RSASHA1NSEC3SHA1: crypto.SHA1,
RSASHA256: crypto.SHA256,
ECDSAP256SHA256: crypto.SHA256,
ECDSAP384SHA384: crypto.SHA384,
RSASHA512: crypto.SHA512,
}
// DNSSEC hashing algorithm codes.
const (
_ uint8 = iota
SHA1 // RFC 4034
SHA256 // RFC 4509
GOST94 // RFC 5933
SHA384 // Experimental
SHA512 // Experimental
)
// Map for hash names.
var HashToString = map[uint8]string{
SHA1: "SHA1",
SHA256: "SHA256",
GOST94: "GOST94",
SHA384: "SHA384",
SHA512: "SHA512",
}
// Map of hash strings.
var StringToHash = reverseInt8(HashToString)
// DNSKEY flag values.
const (
SEP = 1
REVOKE = 1 << 7
ZONE = 1 << 8
)
// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
type rrsigWireFmt struct {
TypeCovered uint16
Algorithm uint8
Labels uint8
OrigTtl uint32
Expiration uint32
Inception uint32
KeyTag uint16
SignerName string `dns:"domain-name"`
/* No Signature */
}
// Used for converting DNSKEY's rdata to wirefmt.
type dnskeyWireFmt struct {
Flags uint16
Protocol uint8
Algorithm uint8
PublicKey string `dns:"base64"`
/* Nothing is left out */
}
func divRoundUp(a, b int) int {
return (a + b - 1) / b
}
// KeyTag calculates the keytag (or key-id) of the DNSKEY.
func (k *DNSKEY) KeyTag() uint16 {
if k == nil {
return 0
}
var keytag int
switch k.Algorithm {
case RSAMD5:
// Look at the bottom two bytes of the modulus, which is the last
// item in the pubkey. We could do this faster by looking directly
// at the base64 values. But I'm lazy.
modulus, _ := fromBase64([]byte(k.PublicKey))
if len(modulus) > 1 {
x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
keytag = int(x)
}
default:
keywire := new(dnskeyWireFmt)
keywire.Flags = k.Flags
keywire.Protocol = k.Protocol
keywire.Algorithm = k.Algorithm
keywire.PublicKey = k.PublicKey
wire := make([]byte, DefaultMsgSize)
n, err := packKeyWire(keywire, wire)
if err != nil {
return 0
}
wire = wire[:n]
for i, v := range wire {
if i&1 != 0 {
keytag += int(v) // must be larger than uint32
} else {
keytag += int(v) << 8
}
}
keytag += (keytag >> 16) & 0xFFFF
keytag &= 0xFFFF
}
return uint16(keytag)
}
// ToDS converts a DNSKEY record to a DS record.
func (k *DNSKEY) ToDS(h uint8) *DS {
if k == nil {
return nil
}
ds := new(DS)
ds.Hdr.Name = k.Hdr.Name
ds.Hdr.Class = k.Hdr.Class
ds.Hdr.Rrtype = TypeDS
ds.Hdr.Ttl = k.Hdr.Ttl
ds.Algorithm = k.Algorithm
ds.DigestType = h
ds.KeyTag = k.KeyTag()
keywire := new(dnskeyWireFmt)
keywire.Flags = k.Flags
keywire.Protocol = k.Protocol
keywire.Algorithm = k.Algorithm
keywire.PublicKey = k.PublicKey
wire := make([]byte, DefaultMsgSize)
n, err := packKeyWire(keywire, wire)
if err != nil {
return nil
}
wire = wire[:n]
owner := make([]byte, 255)
off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
if err1 != nil {
return nil
}
owner = owner[:off]
// RFC4034:
// digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
// "|" denotes concatenation
// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
// digest buffer
digest := append(owner, wire...) // another copy
var hash crypto.Hash
switch h {
case SHA1:
hash = crypto.SHA1
case SHA256:
hash = crypto.SHA256
case SHA384:
hash = crypto.SHA384
case SHA512:
hash = crypto.SHA512
default:
return nil
}
s := hash.New()
s.Write(digest)
ds.Digest = hex.EncodeToString(s.Sum(nil))
return ds
}
// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
c := &CDNSKEY{DNSKEY: *k}
c.Hdr = *k.Hdr.copyHeader()
c.Hdr.Rrtype = TypeCDNSKEY
return c
}
// ToCDS converts a DS record to a CDS record.
func (d *DS) ToCDS() *CDS {
c := &CDS{DS: *d}
c.Hdr = *d.Hdr.copyHeader()
c.Hdr.Rrtype = TypeCDS
return c
}
// Sign signs an RRSet. The signature needs to be filled in with the values:
// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
// from the RRset. Sign returns a non-nil error when the signing fails.
// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non
// zero, it is used as-is, otherwise the TTL of the RRset is used as the
// OrigTTL.
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
if k == nil {
return ErrPrivKey
}
// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return ErrKey
}
rr.Hdr.Rrtype = TypeRRSIG
rr.Hdr.Name = rrset[0].Header().Name
rr.Hdr.Class = rrset[0].Header().Class
if rr.OrigTtl == 0 { // If set don't override
rr.OrigTtl = rrset[0].Header().Ttl
}
rr.TypeCovered = rrset[0].Header().Rrtype
rr.Labels = uint8(CountLabel(rrset[0].Header().Name))
if strings.HasPrefix(rrset[0].Header().Name, "*") {
rr.Labels-- // wildcard, remove from label count
}
sigwire := new(rrsigWireFmt)
sigwire.TypeCovered = rr.TypeCovered
sigwire.Algorithm = rr.Algorithm
sigwire.Labels = rr.Labels
sigwire.OrigTtl = rr.OrigTtl
sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag
// For signing, lowercase this name
sigwire.SignerName = strings.ToLower(rr.SignerName)
// Create the desired binary blob
signdata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signdata)
if err != nil {
return err
}
signdata = signdata[:n]
wire, err := rawSignatureData(rrset, rr)
if err != nil {
return err
}
signdata = append(signdata, wire...)
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return ErrAlg
}
h := hash.New()
h.Write(signdata)
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
if err != nil {
return err
}
rr.Signature = toBase64(signature)
return nil
}
func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
signature, err := k.Sign(rand.Reader, hashed, hash)
if err != nil {
return nil, err
}
switch alg {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
return signature, nil
case ECDSAP256SHA256, ECDSAP384SHA384:
ecdsaSignature := &struct {
R, S *big.Int
}{}
if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
return nil, err
}
var intlen int
switch alg {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
signature := intToBytes(ecdsaSignature.R, intlen)
signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
return signature, nil
// There is no defined interface for what a DSA backed crypto.Signer returns
case DSA, DSANSEC3SHA1:
// t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
// signature := []byte{byte(t)}
// signature = append(signature, intToBytes(r1, 20)...)
// signature = append(signature, intToBytes(s1, 20)...)
// rr.Signature = signature
}
return nil, ErrAlg
}
// Verify validates an RRSet with the signature and key. This is only the
// cryptographic test, the signature validity period must be checked separately.
// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
// First the easy checks
if !IsRRset(rrset) {
return ErrRRset
}
if rr.KeyTag != k.KeyTag() {
return ErrKey
}
if rr.Hdr.Class != k.Hdr.Class {
return ErrKey
}
if rr.Algorithm != k.Algorithm {
return ErrKey
}
if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) {
return ErrKey
}
if k.Protocol != 3 {
return ErrKey
}
// IsRRset checked that we have at least one RR and that the RRs in
// the set have consistent type, class, and name. Also check that type and
// class matches the RRSIG record.
if rrset[0].Header().Class != rr.Hdr.Class {
return ErrRRset
}
if rrset[0].Header().Rrtype != rr.TypeCovered {
return ErrRRset
}
// RFC 4035 5.3.2. Reconstructing the Signed Data
// Copy the sig, except the rrsig data
sigwire := new(rrsigWireFmt)
sigwire.TypeCovered = rr.TypeCovered
sigwire.Algorithm = rr.Algorithm
sigwire.Labels = rr.Labels
sigwire.OrigTtl = rr.OrigTtl
sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag
sigwire.SignerName = strings.ToLower(rr.SignerName)
// Create the desired binary blob
signeddata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signeddata)
if err != nil {
return err
}
signeddata = signeddata[:n]
wire, err := rawSignatureData(rrset, rr)
if err != nil {
return err
}
signeddata = append(signeddata, wire...)
sigbuf := rr.sigBuf() // Get the binary signature data
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
// TODO(miek)
// remove the domain name and assume its ours?
}
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return ErrAlg
}
switch rr.Algorithm {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
pubkey := k.publicKeyRSA() // Get the key
if pubkey == nil {
return ErrKey
}
h := hash.New()
h.Write(signeddata)
return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
case ECDSAP256SHA256, ECDSAP384SHA384:
pubkey := k.publicKeyECDSA()
if pubkey == nil {
return ErrKey
}
// Split sigbuf into the r and s coordinates
r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
h := hash.New()
h.Write(signeddata)
if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
return nil
}
return ErrSig
default:
return ErrAlg
}
}
// ValidityPeriod uses RFC1982 serial arithmetic to calculate
// if a signature period is valid. If t is the zero time, the
// current time is used; otherwise t is. Returns true if the signature
// is valid at the given time, otherwise returns false.
func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
var utc int64
if t.IsZero() {
utc = time.Now().UTC().Unix()
} else {
utc = t.UTC().Unix()
}
modi := (int64(rr.Inception) - utc) / year68
mode := (int64(rr.Expiration) - utc) / year68
ti := int64(rr.Inception) + (modi * year68)
te := int64(rr.Expiration) + (mode * year68)
return ti <= utc && utc <= te
}
// sigBuf decodes the base64 Signature field and returns the raw signature bytes.
func (rr *RRSIG) sigBuf() []byte {
sigbuf, err := fromBase64([]byte(rr.Signature))
if err != nil {
return nil
}
return sigbuf
}
// publicKeyRSA returns the RSA public key from a DNSKEY record.
func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
// RFC 2537/3110, section 2. RSA Public KEY Resource Records
// Length is in the 0th byte, unless it's zero, then it
// is in bytes 1 and 2 and it's a 16-bit number
explen := uint16(keybuf[0])
keyoff := 1
if explen == 0 {
explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
keyoff = 3
}
pubkey := new(rsa.PublicKey)
pubkey.N = big.NewInt(0)
shift := uint64((explen - 1) * 8)
expo := uint64(0)
for i := int(explen - 1); i > 0; i-- {
expo += uint64(keybuf[keyoff+i]) << shift
shift -= 8
}
// Remainder
expo += uint64(keybuf[keyoff])
if expo > 2<<31 {
// Larger expo than supported.
// println("dns: F5 primes (or larger) are not supported")
return nil
}
pubkey.E = int(expo)
pubkey.N.SetBytes(keybuf[keyoff+int(explen):])
return pubkey
}
// publicKeyECDSA returns the Curve public key from the DNSKEY record.
func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
pubkey := new(ecdsa.PublicKey)
switch k.Algorithm {
case ECDSAP256SHA256:
pubkey.Curve = elliptic.P256()
if len(keybuf) != 64 {
// wrongly encoded key
return nil
}
case ECDSAP384SHA384:
pubkey.Curve = elliptic.P384()
if len(keybuf) != 96 {
// Wrongly encoded key
return nil
}
}
pubkey.X = big.NewInt(0)
pubkey.X.SetBytes(keybuf[:len(keybuf)/2])
pubkey.Y = big.NewInt(0)
pubkey.Y.SetBytes(keybuf[len(keybuf)/2:])
return pubkey
}
func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
if len(keybuf) < 22 {
return nil
}
t, keybuf := int(keybuf[0]), keybuf[1:]
size := 64 + t*8
q, keybuf := keybuf[:20], keybuf[20:]
if len(keybuf) != 3*size {
return nil
}
p, keybuf := keybuf[:size], keybuf[size:]
g, y := keybuf[:size], keybuf[size:]
pubkey := new(dsa.PublicKey)
pubkey.Parameters.Q = big.NewInt(0).SetBytes(q)
pubkey.Parameters.P = big.NewInt(0).SetBytes(p)
pubkey.Parameters.G = big.NewInt(0).SetBytes(g)
pubkey.Y = big.NewInt(0).SetBytes(y)
return pubkey
}
type wireSlice [][]byte
func (p wireSlice) Len() int { return len(p) }
func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p wireSlice) Less(i, j int) bool {
_, ioff, _ := UnpackDomainName(p[i], 0)
_, joff, _ := UnpackDomainName(p[j], 0)
return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
}
// Return the raw signature data.
func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
wires := make(wireSlice, len(rrset))
for i, r := range rrset {
r1 := r.copy()
r1.Header().Ttl = s.OrigTtl
labels := SplitDomainName(r1.Header().Name)
// 6.2. Canonical RR Form. (4) - wildcards
if len(labels) > int(s.Labels) {
// Wildcard
r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
}
// RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
r1.Header().Name = strings.ToLower(r1.Header().Name)
// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
// SRV, DNAME, A6
//
// RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
// Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
// that needs conversion to lowercase, and twice at that. Since HINFO
// records contain no domain names, they are not subject to case
// conversion.
switch x := r1.(type) {
case *NS:
x.Ns = strings.ToLower(x.Ns)
case *CNAME:
x.Target = strings.ToLower(x.Target)
case *SOA:
x.Ns = strings.ToLower(x.Ns)
x.Mbox = strings.ToLower(x.Mbox)
case *MB:
x.Mb = strings.ToLower(x.Mb)
case *MG:
x.Mg = strings.ToLower(x.Mg)
case *MR:
x.Mr = strings.ToLower(x.Mr)
case *PTR:
x.Ptr = strings.ToLower(x.Ptr)
case *MINFO:
x.Rmail = strings.ToLower(x.Rmail)
x.Email = strings.ToLower(x.Email)
case *MX:
x.Mx = strings.ToLower(x.Mx)
case *NAPTR:
x.Replacement = strings.ToLower(x.Replacement)
case *KX:
x.Exchanger = strings.ToLower(x.Exchanger)
case *SRV:
x.Target = strings.ToLower(x.Target)
case *DNAME:
x.Target = strings.ToLower(x.Target)
}
// 6.2. Canonical RR Form. (5) - origTTL
wire := make([]byte, r1.len()+1) // +1 to be safe(r)
off, err1 := PackRR(r1, wire, 0, nil, false)
if err1 != nil {
return nil, err1
}
wire = wire[:off]
wires[i] = wire
}
sort.Sort(wires)
for i, wire := range wires {
if i > 0 && bytes.Equal(wire, wires[i-1]) {
continue
}
buf = append(buf, wire...)
}
return buf, nil
}
func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
// copied from zmsg.go RRSIG packing
off, err := packUint16(sw.TypeCovered, msg, 0)
if err != nil {
return off, err
}
off, err = packUint8(sw.Algorithm, msg, off)
if err != nil {
return off, err
}
off, err = packUint8(sw.Labels, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.OrigTtl, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.Expiration, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.Inception, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(sw.KeyTag, msg, off)
if err != nil {
return off, err
}
off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
if err != nil {
return off, err
}
return off, nil
}
func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
// copied from zmsg.go DNSKEY packing
off, err := packUint16(dw.Flags, msg, 0)
if err != nil {
return off, err
}
off, err = packUint8(dw.Protocol, msg, off)
if err != nil {
return off, err
}
off, err = packUint8(dw.Algorithm, msg, off)
if err != nil {
return off, err
}
off, err = packStringBase64(dw.PublicKey, msg, off)
if err != nil {
return off, err
}
return off, nil
}
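
The helpers above (rawSignatureData, packSigWire, packKeyWire) feed the RRSIG signing and verification code earlier in this file. A minimal sketch of how that surface is typically driven, assuming the RRSIG.Sign (crypto.Signer) and RRSIG.Verify methods defined there; the zone name and record data are illustrative:

package main

import (
    "crypto"
    "log"
    "time"

    "github.com/miekg/dns"
)

func main() {
    // Zone-signing key; Generate is defined in dnssec_keygen.go below.
    key := &dns.DNSKEY{
        Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
        Flags:     256,
        Protocol:  3,
        Algorithm: dns.ECDSAP256SHA256,
    }
    priv, err := key.Generate(256)
    if err != nil {
        log.Fatal(err)
    }

    a, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
    if err != nil {
        log.Fatal(err)
    }

    sig := &dns.RRSIG{
        KeyTag:     key.KeyTag(),
        SignerName: key.Hdr.Name,
        Algorithm:  key.Algorithm,
        Inception:  uint32(time.Now().Add(-time.Hour).Unix()),
        Expiration: uint32(time.Now().Add(24 * time.Hour).Unix()),
    }
    signer, ok := priv.(crypto.Signer)
    if !ok {
        log.Fatal("generated key does not implement crypto.Signer")
    }
    // Sign canonicalizes the rrset (rawSignatureData above) before signing it.
    if err := sig.Sign(signer, []dns.RR{a}); err != nil {
        log.Fatal(err)
    }
    // Verify recomputes the same canonical form and checks the signature.
    if err := sig.Verify(key, []dns.RR{a}); err != nil {
        log.Fatal(err)
    }
    log.Println("rrset signed and verified")
}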

156
vendor/github.com/miekg/dns/dnssec_keygen.go generated vendored Normal file

@ -0,0 +1,156 @@
package dns
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"math/big"
)
// Generate generates a DNSKEY of the given bit size.
// The public part is put inside the DNSKEY record.
// The Algorithm in the key must be set as this will define
// what kind of DNSKEY will be generated.
// The ECDSA algorithms imply a fixed keysize, in that case
// bits should be set to the size of the algorithm.
func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
switch k.Algorithm {
case DSA, DSANSEC3SHA1:
if bits != 1024 {
return nil, ErrKeySize
}
case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
if bits < 512 || bits > 4096 {
return nil, ErrKeySize
}
case RSASHA512:
if bits < 1024 || bits > 4096 {
return nil, ErrKeySize
}
case ECDSAP256SHA256:
if bits != 256 {
return nil, ErrKeySize
}
case ECDSAP384SHA384:
if bits != 384 {
return nil, ErrKeySize
}
}
switch k.Algorithm {
case DSA, DSANSEC3SHA1:
params := new(dsa.Parameters)
if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil {
return nil, err
}
priv := new(dsa.PrivateKey)
priv.PublicKey.Parameters = *params
err := dsa.GenerateKey(priv, rand.Reader)
if err != nil {
return nil, err
}
k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
return priv, nil
case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
priv, err := rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return nil, err
}
k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
return priv, nil
case ECDSAP256SHA256, ECDSAP384SHA384:
var c elliptic.Curve
switch k.Algorithm {
case ECDSAP256SHA256:
c = elliptic.P256()
case ECDSAP384SHA384:
c = elliptic.P384()
}
priv, err := ecdsa.GenerateKey(c, rand.Reader)
if err != nil {
return nil, err
}
k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
return priv, nil
default:
return nil, ErrAlg
}
}
// Set the public key (the value E and N)
func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
if _E == 0 || _N == nil {
return false
}
buf := exponentToBuf(_E)
buf = append(buf, _N.Bytes()...)
k.PublicKey = toBase64(buf)
return true
}
// Set the public key for Elliptic Curves
func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
if _X == nil || _Y == nil {
return false
}
var intlen int
switch k.Algorithm {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
return true
}
// Set the public key for DSA
func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
if _Q == nil || _P == nil || _G == nil || _Y == nil {
return false
}
buf := dsaToBuf(_Q, _P, _G, _Y)
k.PublicKey = toBase64(buf)
return true
}
// Set the public key (the values E and N) for RSA
// RFC 3110: Section 2. RSA Public KEY Resource Records
func exponentToBuf(_E int) []byte {
var buf []byte
i := big.NewInt(int64(_E))
if len(i.Bytes()) < 256 {
buf = make([]byte, 1)
buf[0] = uint8(len(i.Bytes()))
} else {
buf = make([]byte, 3)
buf[0] = 0
buf[1] = uint8(len(i.Bytes()) >> 8)
buf[2] = uint8(len(i.Bytes()))
}
buf = append(buf, i.Bytes()...)
return buf
}
// Set the public key for X and Y for Curve. The two
// values are just concatenated.
func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
buf := intToBytes(_X, intlen)
buf = append(buf, intToBytes(_Y, intlen)...)
return buf
}
// Set the public key for X and Y for Curve. The two
// values are just concatenated.
func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte {
t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8)
buf := []byte{byte(t)}
buf = append(buf, intToBytes(_Q, 20)...)
buf = append(buf, intToBytes(_P, 64+t*8)...)
buf = append(buf, intToBytes(_G, 64+t*8)...)
buf = append(buf, intToBytes(_Y, 64+t*8)...)
return buf
}
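
A minimal sketch of Generate in use, assuming only the API shown above; the zone name is illustrative. Note that for the ECDSA algorithms the bit size must match the curve exactly:

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    key := &dns.DNSKEY{
        Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
        Flags:     257, // key-signing key
        Protocol:  3,
        Algorithm: dns.ECDSAP384SHA384,
    }
    // For ECDSAP384SHA384 the bit size must be exactly 384, per the checks above.
    priv, err := key.Generate(384)
    if err != nil {
        log.Fatal(err)
    }
    _ = priv                  // the crypto.PrivateKey half; see PrivateKeyString below
    fmt.Println(key.String()) // DNSKEY RR with the base64 public key filled in
}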

249
vendor/github.com/miekg/dns/dnssec_keyscan.go generated vendored Normal file

@ -0,0 +1,249 @@
package dns
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"io"
"math/big"
"strconv"
"strings"
)
// NewPrivateKey returns a PrivateKey by parsing the string s.
// s should be in the same form of the BIND private key files.
func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
if s[len(s)-1] != '\n' { // We need a closing newline
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
}
return k.ReadPrivateKey(strings.NewReader(s), "")
}
// ReadPrivateKey reads a private key from the io.Reader q. The string file is
// only used in error reporting.
// The public key must be known, because some cryptographic algorithms embed
// the public key inside the private key.
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
m, err := parseKey(q, file)
if m == nil {
return nil, err
}
if _, ok := m["private-key-format"]; !ok {
return nil, ErrPrivKey
}
if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
return nil, ErrPrivKey
}
// TODO(mg): check if the pubkey matches the private key
algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0])
if err != nil {
return nil, ErrPrivKey
}
switch uint8(algo) {
case DSA:
priv, err := readPrivateKeyDSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyDSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, nil
case RSAMD5:
fallthrough
case RSASHA1:
fallthrough
case RSASHA1NSEC3SHA1:
fallthrough
case RSASHA256:
fallthrough
case RSASHA512:
priv, err := readPrivateKeyRSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyRSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, nil
case ECCGOST:
return nil, ErrPrivKey
case ECDSAP256SHA256:
fallthrough
case ECDSAP384SHA384:
priv, err := readPrivateKeyECDSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyECDSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, nil
default:
return nil, ErrPrivKey
}
}
// Read a private key (file) string and create a public key. Return the private key.
func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
p := new(rsa.PrivateKey)
p.Primes = []*big.Int{nil, nil}
for k, v := range m {
switch k {
case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
v1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
switch k {
case "modulus":
p.PublicKey.N = big.NewInt(0)
p.PublicKey.N.SetBytes(v1)
case "publicexponent":
i := big.NewInt(0)
i.SetBytes(v1)
p.PublicKey.E = int(i.Int64()) // int64 should be large enough
case "privateexponent":
p.D = big.NewInt(0)
p.D.SetBytes(v1)
case "prime1":
p.Primes[0] = big.NewInt(0)
p.Primes[0].SetBytes(v1)
case "prime2":
p.Primes[1] = big.NewInt(0)
p.Primes[1].SetBytes(v1)
}
case "exponent1", "exponent2", "coefficient":
// not used in Go (yet)
case "created", "publish", "activate":
// not used in Go (yet)
}
}
return p, nil
}
func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
p := new(dsa.PrivateKey)
p.X = big.NewInt(0)
for k, v := range m {
switch k {
case "private_value(x)":
v1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
p.X.SetBytes(v1)
case "created", "publish", "activate":
/* not used in Go (yet) */
}
}
return p, nil
}
func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
p := new(ecdsa.PrivateKey)
p.D = big.NewInt(0)
// TODO: validate that the required flags are present
for k, v := range m {
switch k {
case "privatekey":
v1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
p.D.SetBytes(v1)
case "created", "publish", "activate":
/* not used in Go (yet) */
}
}
return p, nil
}
// parseKey reads a private key from r. It returns a map[string]string,
// with the key-value pairs, or an error when the file is not correct.
func parseKey(r io.Reader, file string) (map[string]string, error) {
s := scanInit(r)
m := make(map[string]string)
c := make(chan lex)
k := ""
// Start the lexer
go klexer(s, c)
for l := range c {
// It should alternate
switch l.value {
case zKey:
k = l.token
case zValue:
if k == "" {
return nil, &ParseError{file, "no private key seen", l}
}
//println("Setting", strings.ToLower(k), "to", l.token, "b")
m[strings.ToLower(k)] = l.token
k = ""
}
}
return m, nil
}
// klexer scans the sourcefile and returns tokens on the channel c.
func klexer(s *scan, c chan lex) {
var l lex
str := "" // Hold the current read text
commt := false
key := true
x, err := s.tokenText()
defer close(c)
for err == nil {
l.column = s.position.Column
l.line = s.position.Line
switch x {
case ':':
if commt {
break
}
l.token = str
if key {
l.value = zKey
c <- l
// Next token is a space, eat it
s.tokenText()
key = false
str = ""
} else {
l.value = zValue
}
case ';':
commt = true
case '\n':
if commt {
// Reset a comment
commt = false
}
l.value = zValue
l.token = str
c <- l
str = ""
commt = false
key = true
default:
if commt {
break
}
str += string(x)
}
x, err = s.tokenText()
}
if len(str) > 0 {
// Send remainder
l.token = str
l.value = zValue
c <- l
}
}

85
vendor/github.com/miekg/dns/dnssec_privkey.go generated vendored Normal file

@ -0,0 +1,85 @@
package dns
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"math/big"
"strconv"
)
const format = "Private-key-format: v1.3\n"
// PrivateKeyString converts a PrivateKey to a string. This string has the same
// format as the private-key-file of BIND9 (Private-key-format: v1.3).
// It needs some info from the key (the algorithm), so it's a method of the DNSKEY.
// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey.
func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
algorithm := strconv.Itoa(int(r.Algorithm))
algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
switch p := p.(type) {
case *rsa.PrivateKey:
modulus := toBase64(p.PublicKey.N.Bytes())
e := big.NewInt(int64(p.PublicKey.E))
publicExponent := toBase64(e.Bytes())
privateExponent := toBase64(p.D.Bytes())
prime1 := toBase64(p.Primes[0].Bytes())
prime2 := toBase64(p.Primes[1].Bytes())
// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
// and from: http://code.google.com/p/go/issues/detail?id=987
one := big.NewInt(1)
p1 := big.NewInt(0).Sub(p.Primes[0], one)
q1 := big.NewInt(0).Sub(p.Primes[1], one)
exp1 := big.NewInt(0).Mod(p.D, p1)
exp2 := big.NewInt(0).Mod(p.D, q1)
coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
exponent1 := toBase64(exp1.Bytes())
exponent2 := toBase64(exp2.Bytes())
coefficient := toBase64(coeff.Bytes())
return format +
"Algorithm: " + algorithm + "\n" +
"Modulus: " + modulus + "\n" +
"PublicExponent: " + publicExponent + "\n" +
"PrivateExponent: " + privateExponent + "\n" +
"Prime1: " + prime1 + "\n" +
"Prime2: " + prime2 + "\n" +
"Exponent1: " + exponent1 + "\n" +
"Exponent2: " + exponent2 + "\n" +
"Coefficient: " + coefficient + "\n"
case *ecdsa.PrivateKey:
var intlen int
switch r.Algorithm {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
private := toBase64(intToBytes(p.D, intlen))
return format +
"Algorithm: " + algorithm + "\n" +
"PrivateKey: " + private + "\n"
case *dsa.PrivateKey:
T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
priv := toBase64(intToBytes(p.X, 20))
pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
return format +
"Algorithm: " + algorithm + "\n" +
"Prime(p): " + prime + "\n" +
"Subprime(q): " + subprime + "\n" +
"Base(g): " + base + "\n" +
"Private_value(x): " + priv + "\n" +
"Public_value(y): " + pub + "\n"
default:
return ""
}
}
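
PrivateKeyString above and NewPrivateKey/ReadPrivateKey in dnssec_keyscan.go are two halves of one round trip: serialize to the BIND v1.3 text format, then parse back. A minimal sketch, assuming only that API; the zone name is illustrative:

package main

import (
    "log"

    "github.com/miekg/dns"
)

func main() {
    k := &dns.DNSKEY{
        Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
        Flags:     256,
        Protocol:  3,
        Algorithm: dns.RSASHA256,
    }
    priv, err := k.Generate(2048)
    if err != nil {
        log.Fatal(err)
    }

    // Serialize to the "Private-key-format: v1.3" text form...
    s := k.PrivateKeyString(priv)

    // ...and parse it back. NewPrivateKey needs the matching public DNSKEY (k)
    // because the text format does not carry the public half for all algorithms.
    restored, err := k.NewPrivateKey(s)
    if err != nil {
        log.Fatal(err)
    }
    _ = restored // same RSA key material as priv
}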

251
vendor/github.com/miekg/dns/doc.go generated vendored Normal file

@ -0,0 +1,251 @@
/*
Package dns implements a full featured interface to the Domain Name System.
Server- and client-side programming is supported.
The package allows complete control over what is sent out to the DNS. The package
API follows the less-is-more principle, by presenting a small, clean interface.
The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
Note that domain names MUST be fully qualified before sending them; unqualified
names in a message will result in a packing failure.
Resource records are native types. They are not stored in wire format.
Basic usage pattern for creating a new resource record:
r := new(dns.MX)
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX,
Class: dns.ClassINET, Ttl: 3600}
r.Preference = 10
r.Mx = "mx.miek.nl."
Or directly from a string:
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
Or when the default TTL (3600) and class (IN) suit you:
mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
Or even:
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
In the DNS, messages are exchanged; these messages contain resource
records (sets). Basic use pattern for creating a message:
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
Or when not certain if the domain name is fully qualified:
m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
The message m is now a message with the question section set to ask
the MX records for the miek.nl. zone.
The following is slightly more verbose, but more flexible:
m1 := new(dns.Msg)
m1.Id = dns.Id()
m1.RecursionDesired = true
m1.Question = make([]dns.Question, 1)
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
After creating a message it can be sent.
Basic use pattern for synchronously querying the DNS at a
server configured on 127.0.0.1 and port 53:
c := new(dns.Client)
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
Suppressing multiple outstanding queries (with the same question, type and
class) is as easy as setting:
c.SingleInflight = true
If these "advanced" features are not needed, a simple UDP query can be send,
with:
in, err := dns.Exchange(m1, "127.0.0.1:53")
When this function returns you will get a DNS message. A DNS message consists
of four sections.
The question section: in.Question, the answer section: in.Answer,
the authority section: in.Ns and the additional section: in.Extra.
Each of these sections (except the Question section) contains a []RR. Basic
use pattern for accessing the rdata of a TXT RR as the first RR in
the Answer section:
if t, ok := in.Answer[0].(*dns.TXT); ok {
// do something with t.Txt
}
Domain Name and TXT Character String Representations
Both domain names and TXT character strings are converted to presentation
form both when unpacked and when converted to strings.
For TXT character strings, tabs, carriage returns and line feeds will be
converted to \t, \r and \n respectively. Backslashes and quotation marks
will be escaped. Bytes below 32 and above 127 will be converted to \DDD
form.
For domain names, in addition to the above rules, brackets, periods,
spaces, semicolons and the at symbol are escaped.
DNSSEC
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
uses public key cryptography to sign resource records. The
public keys are stored in DNSKEY records and the signatures in RRSIG records.
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
to a request.
m := new(dns.Msg)
m.SetEdns0(4096, true)
Signature generation, signature verification and key generation are all supported.
DYNAMIC UPDATES
Dynamic updates reuse the DNS message format, but rename three of
the sections. Question is Zone, Answer is Prerequisite, Authority is
Update, only the Additional is not renamed. See RFC 2136 for the gory details.
You can set a rather complex set of rules for the existence or absence of
certain resource records or names in a zone to specify if resource records
should be added or removed. The table from RFC 2136, supplemented with the Go
DNS function names, shows which functions exist to specify the prerequisites.
3.2.4 - Table Of Metavalues Used In Prerequisite Section
CLASS TYPE RDATA Meaning Function
--------------------------------------------------------------
ANY ANY empty Name is in use dns.NameUsed
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
NONE ANY empty Name is not in use dns.NameNotUsed
NONE rrset empty RRset does not exist dns.RRsetNotUsed
zone rrset rr RRset exists (value dep) dns.Used
The prerequisite section can also be left empty.
If you have decided on the prerequisites you can tell what RRs should
be added or deleted. The next table shows the options you have and
what functions to call.
3.4.2.6 - Table Of Metavalues Used In Update Section
CLASS TYPE RDATA Meaning Function
---------------------------------------------------------------
ANY ANY empty Delete all RRsets from name dns.RemoveName
ANY rrset empty Delete an RRset dns.RemoveRRset
NONE rrset rr Delete an RR from RRset dns.Remove
zone rrset rr Add to an RRset dns.Insert
TRANSACTION SIGNATURE
A TSIG, or transaction signature, adds an HMAC TSIG record to each message sent.
The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
Basic use pattern when querying with a TSIG name "axfr." (note that these key names
must be fully qualified - as they are domain names) and the base64 secret
"so6ZGir4GPAqINNh9U5c3A==":
c := new(dns.Client)
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
...
// When sending the TSIG RR is calculated and filled in before sending
When requesting a zone transfer (almost all TSIG usage is when requesting zone transfers), with
TSIG, this is the basic use pattern. In this example we request an AXFR for
miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
and using the server 176.58.119.54:
t := new(dns.Transfer)
m := new(dns.Msg)
t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m.SetAxfr("miek.nl.")
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
c, err := t.In(m, "176.58.119.54:53")
for r := range c { ... }
You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
If something is not correct an error is returned.
Basic use pattern validating and replying to a message that has TSIG set.
server := &dns.Server{Addr: ":53", Net: "udp"}
server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
go server.ListenAndServe()
dns.HandleFunc(".", handleRequest)
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
m := new(dns.Msg)
m.SetReply(r)
if r.IsTsig() != nil {
if w.TsigStatus() == nil {
// *Msg r has a TSIG record and it was validated
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
} else {
// *Msg r has a TSIG record and it was not validated
}
}
w.WriteMsg(m)
}
PRIVATE RRS
RFC 6895 sets aside a range of type codes for private use. This range
is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
can be used, before requesting an official type code from IANA.
see http://miek.nl/posts/2014/Sep/21/Private%20RRs%20and%20IDN%20in%20Go%20DNS/ for more
information.
EDNS0
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
by RFC 6891. It defines a new RR type, the OPT RR, which is then completely
abused.
Basic use pattern for creating an (empty) OPT RR:
o := new(dns.OPT)
o.Hdr.Name = "." // MUST be the root zone, per definition.
o.Hdr.Rrtype = dns.TypeOPT
The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891)
interfaces. Currently only a few have been standardized: EDNS0_NSID
(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
that these options may be combined in an OPT RR.
Basic use pattern for a server to check if (and which) options are set:
// o is a dns.OPT
for _, s := range o.Option {
switch e := s.(type) {
case *dns.EDNS0_NSID:
// do stuff with e.Nsid
case *dns.EDNS0_SUBNET:
// access e.Family, e.Address, etc.
}
}
SIG(0)
From RFC 2931:
SIG(0) provides protection for DNS transactions and requests ....
... protection for glue records, DNS requests, protection for message headers
on requests and responses, and protection of the overall integrity of a response.
It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
secret approach in TSIG.
Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
RSASHA512.
Signing subsequent messages in multi-message sessions is not implemented.
*/
package dns
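
The query pattern documented above, assembled into one small program; a minimal sketch in which the resolver address 127.0.0.1:53 is an assumption (any reachable recursive resolver will do):

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    m := new(dns.Msg)
    m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
    m.RecursionDesired = true

    c := new(dns.Client)
    in, rtt, err := c.Exchange(m, "127.0.0.1:53")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("round trip:", rtt)
    for _, ans := range in.Answer {
        if mx, ok := ans.(*dns.MX); ok {
            fmt.Println(mx.Preference, mx.Mx)
        }
    }
}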

532
vendor/github.com/miekg/dns/edns.go generated vendored Normal file

@ -0,0 +1,532 @@
package dns
import (
"encoding/binary"
"encoding/hex"
"errors"
"net"
"strconv"
)
// EDNS0 Option codes.
const (
EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
EDNS0NSID = 0x3 // nsid (RFC5001)
EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
EDNS0DHU = 0x6 // DS Hash Understood
EDNS0N3U = 0x7 // NSEC3 Hash Understood
EDNS0SUBNET = 0x8 // client-subnet (RFC6891)
EDNS0EXPIRE = 0x9 // EDNS0 expire
EDNS0COOKIE = 0xa // EDNS0 Cookie
EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891)
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891)
_DO = 1 << 15 // dnssec ok
)
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
// See RFC 6891.
type OPT struct {
Hdr RR_Header
Option []EDNS0 `dns:"opt"`
}
func (rr *OPT) String() string {
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
if rr.Do() {
s += "flags: do; "
} else {
s += "flags: ; "
}
s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
for _, o := range rr.Option {
switch o.(type) {
case *EDNS0_NSID:
s += "\n; NSID: " + o.String()
h, e := o.pack()
var r string
if e == nil {
for _, c := range h {
r += "(" + string(c) + ")"
}
s += " " + r
}
case *EDNS0_SUBNET:
s += "\n; SUBNET: " + o.String()
if o.(*EDNS0_SUBNET).DraftOption {
s += " (draft)"
}
case *EDNS0_COOKIE:
s += "\n; COOKIE: " + o.String()
case *EDNS0_UL:
s += "\n; UPDATE LEASE: " + o.String()
case *EDNS0_LLQ:
s += "\n; LONG LIVED QUERIES: " + o.String()
case *EDNS0_DAU:
s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
case *EDNS0_DHU:
s += "\n; DS HASH UNDERSTOOD: " + o.String()
case *EDNS0_N3U:
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
case *EDNS0_LOCAL:
s += "\n; LOCAL OPT: " + o.String()
}
}
return s
}
func (rr *OPT) len() int {
l := rr.Hdr.len()
for i := 0; i < len(rr.Option); i++ {
l += 4 // Account for 2-byte option code and 2-byte option length.
lo, _ := rr.Option[i].pack()
l += len(lo)
}
return l
}
// return the old value -> delete SetVersion?
// Version returns the EDNS version used. Only zero is defined.
func (rr *OPT) Version() uint8 {
return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16)
}
// SetVersion sets the version of EDNS. This is usually zero.
func (rr *OPT) SetVersion(v uint8) {
rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16)
}
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
func (rr *OPT) ExtendedRcode() int {
return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15
}
// SetExtendedRcode sets the EDNS extended RCODE field.
func (rr *OPT) SetExtendedRcode(v uint8) {
if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have!
return
}
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24)
}
// UDPSize returns the UDP buffer size.
func (rr *OPT) UDPSize() uint16 {
return rr.Hdr.Class
}
// SetUDPSize sets the UDP buffer size.
func (rr *OPT) SetUDPSize(size uint16) {
rr.Hdr.Class = size
}
// Do returns the value of the DO (DNSSEC OK) bit.
func (rr *OPT) Do() bool {
return rr.Hdr.Ttl&_DO == _DO
}
// SetDo sets the DO (DNSSEC OK) bit.
func (rr *OPT) SetDo() {
rr.Hdr.Ttl |= _DO
}
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
type EDNS0 interface {
// Option returns the option code for the option.
Option() uint16
// pack returns the bytes of the option data.
pack() ([]byte, error)
// unpack sets the data as found in the buffer. It also sets
// the length of the slice as the length of the option data.
unpack([]byte) error
// String returns the string representation of the option.
String() string
}
// The nsid EDNS0 option is used to retrieve a nameserver
// identifier. When sending a request, Nsid must be set to the empty string.
// The identifier is an opaque string encoded as hex.
// Basic use pattern for creating an nsid option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_NSID)
// e.Code = dns.EDNS0NSID
// e.Nsid = "AA"
// o.Option = append(o.Option, e)
type EDNS0_NSID struct {
Code uint16 // Always EDNS0NSID
Nsid string // This string needs to be hex encoded
}
func (e *EDNS0_NSID) pack() ([]byte, error) {
h, err := hex.DecodeString(e.Nsid)
if err != nil {
return nil, err
}
return h, nil
}
func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID }
func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
func (e *EDNS0_NSID) String() string { return string(e.Nsid) }
// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
// an idea of where the client lives. It can then give back a different
// answer depending on the location or network topology.
// Basic use pattern for creating a subnet option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_SUBNET)
// e.Code = dns.EDNS0SUBNET
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
// e.NetMask = 32 // 32 for IPV4, 128 for IPv6
// e.SourceScope = 0
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
// o.Option = append(o.Option, e)
//
// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic
// for which netmask applies to the address. This code will parse all the
// available bits when unpacking (up to optlen). When packing it will apply
// SourceNetmask. If you need more advanced logic, patches welcome and good luck.
type EDNS0_SUBNET struct {
Code uint16 // Always EDNS0SUBNET
Family uint16 // 1 for IP, 2 for IP6
SourceNetmask uint8
SourceScope uint8
Address net.IP
DraftOption bool // Set to true if using the old (0x50fa) option code
}
func (e *EDNS0_SUBNET) Option() uint16 {
if e.DraftOption {
return EDNS0SUBNETDRAFT
}
return EDNS0SUBNET
}
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
b := make([]byte, 4)
binary.BigEndian.PutUint16(b[0:], e.Family)
b[2] = e.SourceNetmask
b[3] = e.SourceScope
switch e.Family {
case 1:
if e.SourceNetmask > net.IPv4len*8 {
return nil, errors.New("dns: bad netmask")
}
if len(e.Address.To4()) != net.IPv4len {
return nil, errors.New("dns: bad address")
}
ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
b = append(b, ip[:needLength]...)
case 2:
if e.SourceNetmask > net.IPv6len*8 {
return nil, errors.New("dns: bad netmask")
}
if len(e.Address) != net.IPv6len {
return nil, errors.New("dns: bad address")
}
ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
b = append(b, ip[:needLength]...)
default:
return nil, errors.New("dns: bad address family")
}
return b, nil
}
func (e *EDNS0_SUBNET) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Family = binary.BigEndian.Uint16(b)
e.SourceNetmask = b[2]
e.SourceScope = b[3]
switch e.Family {
case 1:
if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
return errors.New("dns: bad netmask")
}
addr := make([]byte, net.IPv4len)
for i := 0; i < net.IPv4len && 4+i < len(b); i++ {
addr[i] = b[4+i]
}
e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3])
case 2:
if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
return errors.New("dns: bad netmask")
}
addr := make([]byte, net.IPv6len)
for i := 0; i < net.IPv6len && 4+i < len(b); i++ {
addr[i] = b[4+i]
}
e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4],
addr[5], addr[6], addr[7], addr[8], addr[9], addr[10],
addr[11], addr[12], addr[13], addr[14], addr[15]}
default:
return errors.New("dns: bad address family")
}
return nil
}
func (e *EDNS0_SUBNET) String() (s string) {
if e.Address == nil {
s = "<nil>"
} else if e.Address.To4() != nil {
s = e.Address.String()
} else {
s = "[" + e.Address.String() + "]"
}
s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
return
}
// The Cookie EDNS0 option
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_COOKIE)
// e.Code = dns.EDNS0COOKIE
// e.Cookie = "24a5ac.."
// o.Option = append(o.Option, e)
//
// The Cookie field consists of a client cookie (RFC 7873 Section 4), which is
// always 8 bytes. It may then optionally be followed by the server cookie. The server
// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
//
// cCookie := o.Cookie[:16]
// sCookie := o.Cookie[16:]
//
// There is no guarantee that the Cookie string has a specific length.
type EDNS0_COOKIE struct {
Code uint16 // Always EDNS0COOKIE
Cookie string // Hex-encoded cookie data
}
func (e *EDNS0_COOKIE) pack() ([]byte, error) {
h, err := hex.DecodeString(e.Cookie)
if err != nil {
return nil, err
}
return h, nil
}
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
func (e *EDNS0_COOKIE) String() string { return e.Cookie }
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
// an expiration on an update RR. This is helpful for clients that cannot clean
// up after themselves. This is a draft RFC and more information can be found at
// http://files.dns-sd.org/draft-sekar-dns-ul.txt
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_UL)
// e.Code = dns.EDNS0UL
// e.Lease = 120 // in seconds
// o.Option = append(o.Option, e)
type EDNS0_UL struct {
Code uint16 // Always EDNS0UL
Lease uint32
}
func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
// Copied: http://golang.org/src/pkg/net/dnsmsg.go
func (e *EDNS0_UL) pack() ([]byte, error) {
b := make([]byte, 4)
binary.BigEndian.PutUint32(b, e.Lease)
return b, nil
}
func (e *EDNS0_UL) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Lease = binary.BigEndian.Uint32(b)
return nil
}
// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
// Implemented for completeness, as the EDNS0 type code is assigned.
type EDNS0_LLQ struct {
Code uint16 // Always EDNS0LLQ
Version uint16
Opcode uint16
Error uint16
Id uint64
LeaseLife uint32
}
func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
func (e *EDNS0_LLQ) pack() ([]byte, error) {
b := make([]byte, 18)
binary.BigEndian.PutUint16(b[0:], e.Version)
binary.BigEndian.PutUint16(b[2:], e.Opcode)
binary.BigEndian.PutUint16(b[4:], e.Error)
binary.BigEndian.PutUint64(b[6:], e.Id)
binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
return b, nil
}
func (e *EDNS0_LLQ) unpack(b []byte) error {
if len(b) < 18 {
return ErrBuf
}
e.Version = binary.BigEndian.Uint16(b[0:])
e.Opcode = binary.BigEndian.Uint16(b[2:])
e.Error = binary.BigEndian.Uint16(b[4:])
e.Id = binary.BigEndian.Uint64(b[6:])
e.LeaseLife = binary.BigEndian.Uint32(b[14:])
return nil
}
func (e *EDNS0_LLQ) String() string {
s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) +
" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
return s
}
type EDNS0_DAU struct {
Code uint16 // Always EDNS0DAU
AlgCode []uint8
}
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_DAU) String() string {
s := ""
for i := 0; i < len(e.AlgCode); i++ {
if a, ok := AlgorithmToString[e.AlgCode[i]]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(e.AlgCode[i]))
}
}
return s
}
type EDNS0_DHU struct {
Code uint16 // Always EDNS0DHU
AlgCode []uint8
}
func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_DHU) String() string {
s := ""
for i := 0; i < len(e.AlgCode); i++ {
if a, ok := HashToString[e.AlgCode[i]]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(e.AlgCode[i]))
}
}
return s
}
type EDNS0_N3U struct {
Code uint16 // Always EDNS0N3U
AlgCode []uint8
}
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_N3U) String() string {
// Re-use the hash map
s := ""
for i := 0; i < len(e.AlgCode); i++ {
if a, ok := HashToString[e.AlgCode[i]]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(e.AlgCode[i]))
}
}
return s
}
type EDNS0_EXPIRE struct {
Code uint16 // Always EDNS0EXPIRE
Expire uint32
}
func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
b := make([]byte, 4)
b[0] = byte(e.Expire >> 24)
b[1] = byte(e.Expire >> 16)
b[2] = byte(e.Expire >> 8)
b[3] = byte(e.Expire)
return b, nil
}
func (e *EDNS0_EXPIRE) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Expire = binary.BigEndian.Uint32(b)
return nil
}
// The EDNS0_LOCAL option is used for local/experimental purposes. The option
// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
// (RFC6891), although any unassigned code can actually be used. The content of
// the option is made available in Data, unaltered.
// Basic use pattern for creating a local option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_LOCAL)
// e.Code = dns.EDNS0LOCALSTART
// e.Data = []byte{72, 82, 74}
// o.Option = append(o.Option, e)
type EDNS0_LOCAL struct {
Code uint16
Data []byte
}
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
func (e *EDNS0_LOCAL) String() string {
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
}
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
b := make([]byte, len(e.Data))
copied := copy(b, e.Data)
if copied != len(e.Data) {
return nil, ErrBuf
}
return b, nil
}
func (e *EDNS0_LOCAL) unpack(b []byte) error {
e.Data = make([]byte, len(b))
copied := copy(e.Data, b)
if copied != len(b) {
return ErrBuf
}
return nil
}
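
Putting the OPT and EDNS0 option types above together: a minimal sketch that builds the OPT pseudo-RR by hand with a larger UDP buffer, the DO bit and an empty NSID option; the question name is illustrative. For the common case, Msg.SetEdns0(size, do), as used in doc.go, covers the first two steps.

package main

import (
    "fmt"

    "github.com/miekg/dns"
)

func main() {
    m := new(dns.Msg)
    m.SetQuestion("example.org.", dns.TypeSOA)

    o := new(dns.OPT)
    o.Hdr.Name = "." // must be the root, per RFC 6891
    o.Hdr.Rrtype = dns.TypeOPT
    o.SetUDPSize(4096) // stored in the OPT class field
    o.SetDo()          // DNSSEC OK bit in the OPT TTL field
    o.Option = append(o.Option, &dns.EDNS0_NSID{Code: dns.EDNS0NSID})

    m.Extra = append(m.Extra, o)
    fmt.Println(m.String())
}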

87
vendor/github.com/miekg/dns/format.go generated vendored Normal file

@ -0,0 +1,87 @@
package dns
import (
"net"
"reflect"
"strconv"
)
// NumField returns the number of rdata fields r has.
func NumField(r RR) int {
return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
}
// Field returns the rdata field i as a string. Fields are indexed starting from 1.
// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
// string where the types are concatenated using a space.
// Accessing non-existing fields will cause a panic.
func Field(r RR, i int) string {
if i == 0 {
return ""
}
d := reflect.ValueOf(r).Elem().Field(i)
switch k := d.Kind(); k {
case reflect.String:
return d.String()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(d.Int(), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return strconv.FormatUint(d.Uint(), 10)
case reflect.Slice:
switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
case `dns:"a"`:
// TODO(miek): Hmm store this as 16 bytes
if d.Len() < net.IPv6len {
return net.IPv4(byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()),
byte(d.Index(2).Uint()),
byte(d.Index(3).Uint())).String()
}
return net.IPv4(byte(d.Index(12).Uint()),
byte(d.Index(13).Uint()),
byte(d.Index(14).Uint()),
byte(d.Index(15).Uint())).String()
case `dns:"aaaa"`:
return net.IP{
byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()),
byte(d.Index(2).Uint()),
byte(d.Index(3).Uint()),
byte(d.Index(4).Uint()),
byte(d.Index(5).Uint()),
byte(d.Index(6).Uint()),
byte(d.Index(7).Uint()),
byte(d.Index(8).Uint()),
byte(d.Index(9).Uint()),
byte(d.Index(10).Uint()),
byte(d.Index(11).Uint()),
byte(d.Index(12).Uint()),
byte(d.Index(13).Uint()),
byte(d.Index(14).Uint()),
byte(d.Index(15).Uint()),
}.String()
case `dns:"nsec"`:
if d.Len() == 0 {
return ""
}
s := Type(d.Index(0).Uint()).String()
for i := 1; i < d.Len(); i++ {
s += " " + Type(d.Index(i).Uint()).String()
}
return s
default:
// if it does not have a tag it's a string slice
fallthrough
case `dns:"txt"`:
if d.Len() == 0 {
return ""
}
s := d.Index(0).String()
for i := 1; i < d.Len(); i++ {
s += " " + d.Index(i).String()
}
return s
}
}
return ""
}
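
NumField and Field give reflection-based, read-only string access to rdata, indexed from 1. A minimal sketch:

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    rr, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(dns.NumField(rr)) // 2: Preference and Mx
    fmt.Println(dns.Field(rr, 1)) // "10"
    fmt.Println(dns.Field(rr, 2)) // "mx.miek.nl."
}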

159
vendor/github.com/miekg/dns/generate.go generated vendored Normal file

@ -0,0 +1,159 @@
package dns
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
)
// Parse the $GENERATE statement as used in BIND9 zones.
// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
// We are called after '$GENERATE '. After which we expect:
// * the range (12-24/2)
// * lhs (ownername)
// * [[ttl][class]]
// * type
// * rhs (rdata)
// But we are lazy here: only the range is parsed; *all* occurrences
// of $ after that are interpreted.
// Any errors are returned as a string value; the empty string signals
// "no error".
func generate(l lex, c chan lex, t chan *Token, o string) string {
step := 1
if i := strings.IndexAny(l.token, "/"); i != -1 {
if i+1 == len(l.token) {
return "bad step in $GENERATE range"
}
if s, err := strconv.Atoi(l.token[i+1:]); err == nil {
if s < 0 {
return "bad step in $GENERATE range"
}
step = s
} else {
return "bad step in $GENERATE range"
}
l.token = l.token[:i]
}
sx := strings.SplitN(l.token, "-", 2)
if len(sx) != 2 {
return "bad start-stop in $GENERATE range"
}
start, err := strconv.Atoi(sx[0])
if err != nil {
return "bad start in $GENERATE range"
}
end, err := strconv.Atoi(sx[1])
if err != nil {
return "bad stop in $GENERATE range"
}
if end < 0 || start < 0 || end < start {
return "bad range in $GENERATE range"
}
<-c // _BLANK
// Create a complete new string, which we then parse again.
s := ""
BuildRR:
l = <-c
if l.value != zNewline && l.value != zEOF {
s += l.token
goto BuildRR
}
for i := start; i <= end; i += step {
var (
escape bool
dom bytes.Buffer
mod string
err error
offset int
)
for j := 0; j < len(s); j++ { // No 'range' because we need to jump around
switch s[j] {
case '\\':
if escape {
dom.WriteByte('\\')
escape = false
continue
}
escape = true
case '$':
mod = "%d"
offset = 0
if escape {
dom.WriteByte('$')
escape = false
continue
}
escape = false
if j+1 >= len(s) { // End of the string
dom.WriteString(fmt.Sprintf(mod, i+offset))
continue
} else {
if s[j+1] == '$' {
dom.WriteByte('$')
j++
continue
}
}
// Search for { and }
if s[j+1] == '{' { // Modifier block
sep := strings.Index(s[j+2:], "}")
if sep == -1 {
return "bad modifier in $GENERATE"
}
mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
if err != nil {
return err.Error()
}
j += 2 + sep // Jump to it
}
dom.WriteString(fmt.Sprintf(mod, i+offset))
default:
if escape { // Pretty useless here
escape = false
continue
}
dom.WriteByte(s[j])
}
}
// Re-parse the RR and send it on the current channel t
rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String())
if err != nil {
return err.Error()
}
t <- &Token{RR: rx}
// It's more efficient to first build the rrlist and then parse it in
// one go! But is this a problem?
}
return ""
}
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
func modToPrintf(s string) (string, int, error) {
xs := strings.SplitN(s, ",", 3)
if len(xs) != 3 {
return "", 0, errors.New("bad modifier in $GENERATE")
}
// xs[0] is offset, xs[1] is width, xs[2] is base
if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
return "", 0, errors.New("bad base in $GENERATE")
}
offset, err := strconv.Atoi(xs[0])
if err != nil || offset > 255 {
return "", 0, errors.New("bad offset in $GENERATE")
}
width, err := strconv.Atoi(xs[1])
if err != nil || width > 255 {
return "", offset, errors.New("bad width in $GENERATE")
}
switch {
case width < 0:
return "", offset, errors.New("bad width in $GENERATE")
case width == 0:
return "%" + xs[1] + xs[2], offset, nil
}
return "%0" + xs[1] + xs[2], offset, nil
}
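
The $GENERATE handling above is driven by the zone parser, which delivers each expanded RR as a separate token. A minimal sketch, assuming the package's ParseZone channel API; the zone content is illustrative:

package main

import (
    "fmt"
    "strings"

    "github.com/miekg/dns"
)

func main() {
    zone := "$ORIGIN example.org.\n$GENERATE 1-3 host-$ IN A 10.0.0.$\n"
    for tok := range dns.ParseZone(strings.NewReader(zone), "example.org.", "inline") {
        if tok.Error != nil {
            fmt.Println("parse error:", tok.Error)
            continue
        }
        fmt.Println(tok.RR.String()) // host-1.example.org. ... A 10.0.0.1, and so on
    }
}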

168
vendor/github.com/miekg/dns/labels.go generated vendored Normal file
View File

@ -0,0 +1,168 @@
package dns
// Holds a bunch of helper functions for dealing with labels.
// SplitDomainName splits a name string into its labels.
// www.miek.nl. returns []string{"www", "miek", "nl"}
// .www.miek.nl. returns []string{"", "www", "miek", "nl"},
// The root label (.) returns nil. Note that using
// strings.Split(s) will work in most cases, but does not handle
// escaped dots (\.) for instance.
// s must be a syntactically valid domain name, see IsDomainName.
func SplitDomainName(s string) (labels []string) {
if len(s) == 0 {
return nil
}
fqdnEnd := 0 // offset of the final '.' or the length of the name
idx := Split(s)
begin := 0
if s[len(s)-1] == '.' {
fqdnEnd = len(s) - 1
} else {
fqdnEnd = len(s)
}
switch len(idx) {
case 0:
return nil
case 1:
// no-op
default:
end := 0
for i := 1; i < len(idx); i++ {
end = idx[i]
labels = append(labels, s[begin:end-1])
begin = end
}
}
labels = append(labels, s[begin:fqdnEnd])
return labels
}
// CompareDomainName compares the names s1 and s2 and
// returns how many labels they have in common starting from the *right*.
// The comparison stops at the first inequality. The names are not downcased
// before the comparison.
//
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
// www.miek.nl. and www.bla.nl. have one label in common: nl
//
// s1 and s2 must be syntactically valid domain names.
func CompareDomainName(s1, s2 string) (n int) {
s1 = Fqdn(s1)
s2 = Fqdn(s2)
l1 := Split(s1)
l2 := Split(s2)
// the first check: root label
if l1 == nil || l2 == nil {
return
}
j1 := len(l1) - 1 // end
i1 := len(l1) - 2 // start
j2 := len(l2) - 1
i2 := len(l2) - 2
// the second check can be done here: last/only label
// before we fall through into the for-loop below
if s1[l1[j1]:] == s2[l2[j2]:] {
n++
} else {
return
}
for {
if i1 < 0 || i2 < 0 {
break
}
if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] {
n++
} else {
break
}
j1--
i1--
j2--
i2--
}
return
}
// CountLabel counts the number of labels in the string s.
// s must be a syntactically valid domain name.
func CountLabel(s string) (labels int) {
if s == "." {
return
}
off := 0
end := false
for {
off, end = NextLabel(s, off)
labels++
if end {
return
}
}
}
// Split splits a name s into its label indexes.
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
// The root name (.) returns nil. Also see SplitDomainName.
// s must be a syntactically valid domain name.
func Split(s string) []int {
if s == "." {
return nil
}
idx := make([]int, 1, 3)
off := 0
end := false
for {
off, end = NextLabel(s, off)
if end {
return idx
}
idx = append(idx, off)
}
}
// NextLabel returns the index of the start of the next label in the
// string s starting at offset.
// The bool end is true when the end of the string has been reached.
// Also see PrevLabel.
func NextLabel(s string, offset int) (i int, end bool) {
quote := false
for i = offset; i < len(s)-1; i++ {
switch s[i] {
case '\\':
quote = !quote
default:
quote = false
case '.':
if quote {
quote = !quote
continue
}
return i + 1, false
}
}
return i + 1, true
}
// PrevLabel returns the index of the label when starting from the right and
// jumping n labels to the left.
// The bool start is true when the start of the string has been overshot.
// Also see NextLabel.
func PrevLabel(s string, n int) (i int, start bool) {
if n == 0 {
return len(s), false
}
lab := Split(s)
if lab == nil {
return 0, true
}
if n > len(lab) {
return 0, true
}
return lab[len(lab)-n], false
}
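
A minimal sketch exercising the label helpers above, with the values their doc comments promise:

package main

import (
    "fmt"

    "github.com/miekg/dns"
)

func main() {
    fmt.Println(dns.SplitDomainName("www.miek.nl."))               // [www miek nl]
    fmt.Println(dns.Split("www.miek.nl."))                         // [0 4 9]
    fmt.Println(dns.CountLabel("www.miek.nl."))                    // 3
    fmt.Println(dns.CompareDomainName("www.miek.nl.", "miek.nl.")) // 2
}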

1231
vendor/github.com/miekg/dns/msg.go generated vendored Normal file

File diff suppressed because it is too large

340
vendor/github.com/miekg/dns/msg_generate.go generated vendored Normal file

@ -0,0 +1,340 @@
//+build ignore
// msg_generate.go is meant to run with go generate. It will use
// go/{importer,types} to track down all the RR struct types. Then for each type
// it will generate pack/unpack methods based on the struct tags. The generated source is
// written to zmsg.go, and is meant to be checked into git.
package main
import (
"bytes"
"fmt"
"go/format"
"go/importer"
"go/types"
"log"
"os"
"strings"
)
var packageHdr = `
// *** DO NOT MODIFY ***
// AUTOGENERATED BY go generate from msg_generate.go
package dns
`
// getTypeStruct will take a type and the package scope, and return the
// (innermost) struct if the type is considered a RR type (currently defined as
// those structs beginning with a RR_Header, could be redefined as implementing
// the RR interface). The bool return value indicates if embedded structs were
// resolved.
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
st, ok := t.Underlying().(*types.Struct)
if !ok {
return nil, false
}
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
return st, false
}
if st.Field(0).Anonymous() {
st, _ := getTypeStruct(st.Field(0).Type(), scope)
return st, true
}
return nil, false
}
func main() {
// Import and type-check the package
pkg, err := importer.Default().Import("github.com/miekg/dns")
fatalIfErr(err)
scope := pkg.Scope()
// Collect actual types (*X)
var namedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
continue
}
if name == "PrivateRR" {
continue
}
// Check if corresponding TypeX exists
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
log.Fatalf("Constant Type%s does not exist.", o.Name())
}
namedTypes = append(namedTypes, o.Name())
}
b := &bytes.Buffer{}
b.WriteString(packageHdr)
fmt.Fprint(b, "// pack*() functions\n\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name)
fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress)
if err != nil {
return off, err
}
headerEnd := off
`)
for i := 1; i < st.NumFields(); i++ {
o := func(s string) {
fmt.Fprintf(b, s, st.Field(i).Name())
fmt.Fprint(b, `if err != nil {
return off, err
}
`)
}
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"-"`: // ignored
case `dns:"txt"`:
o("off, err = packStringTxt(rr.%s, msg, off)\n")
case `dns:"opt"`:
o("off, err = packDataOpt(rr.%s, msg, off)\n")
case `dns:"nsec"`:
o("off, err = packDataNsec(rr.%s, msg, off)\n")
case `dns:"domain-name"`:
o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n")
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
continue
}
switch {
case st.Tag(i) == `dns:"-"`: // ignored
case st.Tag(i) == `dns:"cdomain-name"`:
fallthrough
case st.Tag(i) == `dns:"domain-name"`:
o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n")
case st.Tag(i) == `dns:"a"`:
o("off, err = packDataA(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"aaaa"`:
o("off, err = packDataAAAA(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"uint48"`:
o("off, err = packUint48(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"txt"`:
o("off, err = packString(rr.%s, msg, off)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32
fallthrough
case st.Tag(i) == `dns:"base32"`:
o("off, err = packStringBase32(rr.%s, msg, off)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64
fallthrough
case st.Tag(i) == `dns:"base64"`:
o("off, err = packStringBase64(rr.%s, msg, off)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): // Hack to fix empty salt length for NSEC3
o("if rr.%s == \"-\" { /* do nothing, empty salt */ }\n")
continue
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex
fallthrough
case st.Tag(i) == `dns:"hex"`:
o("off, err = packStringHex(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"octet"`:
o("off, err = packStringOctet(rr.%s, msg, off)\n")
case st.Tag(i) == "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("off, err = packUint8(rr.%s, msg, off)\n")
case types.Uint16:
o("off, err = packUint16(rr.%s, msg, off)\n")
case types.Uint32:
o("off, err = packUint32(rr.%s, msg, off)\n")
case types.Uint64:
o("off, err = packUint64(rr.%s, msg, off)\n")
case types.String:
o("off, err = packString(rr.%s, msg, off)\n")
default:
log.Fatalln(name, st.Field(i).Name())
}
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
}
// We have packed everything, only now we know the rdlength of this RR
fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)")
fmt.Fprintln(b, "return off, nil }\n")
}
fmt.Fprint(b, "// unpack*() functions\n\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name)
fmt.Fprintf(b, "rr := new(%s)\n", name)
fmt.Fprint(b, "rr.Hdr = h\n")
fmt.Fprint(b, `if noRdata(h) {
return rr, off, nil
}
var err error
rdStart := off
_ = rdStart
`)
for i := 1; i < st.NumFields(); i++ {
o := func(s string) {
fmt.Fprintf(b, s, st.Field(i).Name())
fmt.Fprint(b, `if err != nil {
return rr, off, err
}
`)
}
// size-* are special, because they reference a struct member we should use for the length.
if strings.HasPrefix(st.Tag(i), `dns:"size-`) {
structMember := structMember(st.Tag(i))
structTag := structTag(st.Tag(i))
switch structTag {
case "hex":
fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
case "base32":
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
case "base64":
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
fmt.Fprint(b, `if err != nil {
return rr, off, err
}
`)
continue
}
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"-"`: // ignored
case `dns:"txt"`:
o("rr.%s, off, err = unpackStringTxt(msg, off)\n")
case `dns:"opt"`:
o("rr.%s, off, err = unpackDataOpt(msg, off)\n")
case `dns:"nsec"`:
o("rr.%s, off, err = unpackDataNsec(msg, off)\n")
case `dns:"domain-name"`:
o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
continue
}
switch st.Tag(i) {
case `dns:"-"`: // ignored
case `dns:"cdomain-name"`:
fallthrough
case `dns:"domain-name"`:
o("rr.%s, off, err = UnpackDomainName(msg, off)\n")
case `dns:"a"`:
o("rr.%s, off, err = unpackDataA(msg, off)\n")
case `dns:"aaaa"`:
o("rr.%s, off, err = unpackDataAAAA(msg, off)\n")
case `dns:"uint48"`:
o("rr.%s, off, err = unpackUint48(msg, off)\n")
case `dns:"txt"`:
o("rr.%s, off, err = unpackString(msg, off)\n")
case `dns:"base32"`:
o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"base64"`:
o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"hex"`:
o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"octet"`:
o("rr.%s, off, err = unpackStringOctet(msg, off)\n")
case "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("rr.%s, off, err = unpackUint8(msg, off)\n")
case types.Uint16:
o("rr.%s, off, err = unpackUint16(msg, off)\n")
case types.Uint32:
o("rr.%s, off, err = unpackUint32(msg, off)\n")
case types.Uint64:
o("rr.%s, off, err = unpackUint64(msg, off)\n")
case types.String:
o("rr.%s, off, err = unpackString(msg, off)\n")
default:
log.Fatalln(name, st.Field(i).Name())
}
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
// If we've hit len(msg) we return without error.
if i < st.NumFields()-1 {
fmt.Fprintf(b, `if off == len(msg) {
return rr, off, nil
}
`)
}
}
fmt.Fprintf(b, "return rr, off, err }\n\n")
}
// Generate typeToUnpack map
fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){")
for _, name := range namedTypes {
if name == "RFC3597" {
continue
}
fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name)
}
fmt.Fprintln(b, "}\n")
// gofmt
res, err := format.Source(b.Bytes())
if err != nil {
b.WriteTo(os.Stderr)
log.Fatal(err)
}
// write result
f, err := os.Create("zmsg.go")
fatalIfErr(err)
defer f.Close()
f.Write(res)
}
// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string.
func structMember(s string) string {
fields := strings.Split(s, ":")
if len(fields) == 0 {
return ""
}
f := fields[len(fields)-1]
// f should have a closing "
if len(f) > 1 {
return f[:len(f)-1]
}
return f
}
// structTag will take a tag like dns:"size-base32:SaltLength" and return base32.
func structTag(s string) string {
fields := strings.Split(s, ":")
if len(fields) < 2 {
return ""
}
return fields[1][len("\"size-"):]
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}

630
vendor/github.com/miekg/dns/msg_helpers.go generated vendored Normal file

@ -0,0 +1,630 @@
package dns
import (
"encoding/base32"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"net"
"strconv"
)
// helper functions called from the generated zmsg.go
// These functions are named after the tag they help pack/unpack; if there is no tag, it is the name
// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or
// packDataDomainName.
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv4len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking a"}
}
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
off += net.IPv4len
return a, off, nil
}
func packDataA(a net.IP, msg []byte, off int) (int, error) {
// It must be a slice of 4, even if it is 16, we encode only the first 4
if off+net.IPv4len > len(msg) {
return len(msg), &Error{err: "overflow packing a"}
}
switch len(a) {
case net.IPv4len, net.IPv6len:
copy(msg[off:], a.To4())
off += net.IPv4len
case 0:
// Allowed, for dynamic updates.
default:
return len(msg), &Error{err: "overflow packing a"}
}
return off, nil
}
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv6len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
}
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
off += net.IPv6len
return aaaa, off, nil
}
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
if off+net.IPv6len > len(msg) {
return len(msg), &Error{err: "overflow packing aaaa"}
}
switch len(aaaa) {
case net.IPv6len:
copy(msg[off:], aaaa)
off += net.IPv6len
case 0:
// Allowed, dynamic updates.
default:
return len(msg), &Error{err: "overflow packing aaaa"}
}
return off, nil
}
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
// re-sliced msg according to the expected length of the RR.
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
hdr := RR_Header{}
if off == len(msg) {
return hdr, off, msg, nil
}
hdr.Name, off, err = UnpackDomainName(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rrtype, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Class, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Ttl, off, err = unpackUint32(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rdlength, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
return hdr, off, msg, err
}
// pack packs an RR header, returning the offset to the end of the header.
// See PackDomainName for documentation about the compression.
func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
if off == len(msg) {
return off, nil
}
off, err = PackDomainName(hdr.Name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Rrtype, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Class, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint32(hdr.Ttl, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Rdlength, msg, off)
if err != nil {
return len(msg), err
}
return off, nil
}
// Helpers used by the helper functions above.
// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
// Returns an error if msg is smaller than the expected size.
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
lenrd := off + int(rdlength)
if lenrd > len(msg) {
return msg, &Error{err: "overflowing header size"}
}
return msg[:lenrd], nil
}
func fromBase32(s []byte) (buf []byte, err error) {
buflen := base32.HexEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base32.HexEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) }
func fromBase64(s []byte) (buf []byte, err error) {
buflen := base64.StdEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base64.StdEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
// noRdata returns true if the Rdlength is zero, i.e. the RR carries no rdata (as in dynamic updates).
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
if off+1 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
}
return uint8(msg[off]), off + 1, nil
}
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
if off+1 > len(msg) {
return len(msg), &Error{err: "overflow packing uint8"}
}
msg[off] = byte(i)
return off + 1, nil
}
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
if off+2 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
}
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
}
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
if off+2 > len(msg) {
return len(msg), &Error{err: "overflow packing uint16"}
}
binary.BigEndian.PutUint16(msg[off:], i)
return off + 2, nil
}
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
if off+4 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
}
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
}
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
if off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing uint32"}
}
binary.BigEndian.PutUint32(msg[off:], i)
return off + 4, nil
}
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
if off+6 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
}
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
uint64(msg[off+4])<<8 | uint64(msg[off+5])))
off += 6
return i, off, nil
}
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
if off+6 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
}
msg[off] = byte(i >> 40)
msg[off+1] = byte(i >> 32)
msg[off+2] = byte(i >> 24)
msg[off+3] = byte(i >> 16)
msg[off+4] = byte(i >> 8)
msg[off+5] = byte(i)
off += 6
return off, nil
}
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
if off+8 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
}
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
}
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
if off+8 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64"}
}
binary.BigEndian.PutUint64(msg[off:], i)
off += 8
return off, nil
}
func unpackString(msg []byte, off int) (string, int, error) {
if off+1 > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
l := int(msg[off])
if off+l+1 > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
s := make([]byte, 0, l)
for _, b := range msg[off+1 : off+1+l] {
switch b {
case '"', '\\':
s = append(s, '\\', b)
case '\t', '\r', '\n':
s = append(s, b)
default:
if b < 32 || b > 127 { // unprintable
var buf [3]byte
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
s = append(s, '\\')
for i := 0; i < 3-len(bufs); i++ {
s = append(s, '0')
}
for _, r := range bufs {
s = append(s, r)
}
} else {
s = append(s, b)
}
}
}
off += 1 + l
return string(s), off, nil
}
func packString(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packTxtString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base32"}
}
s := toBase32(msg[off:end])
return s, end, nil
}
func packStringBase32(s string, msg []byte, off int) (int, error) {
b32, err := fromBase32([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b32) > len(msg) {
return len(msg), &Error{err: "overflow packing base32"}
}
copy(msg[off:off+len(b32)], b32)
off += len(b32)
return off, nil
}
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
// Rest of the RR is base64 encoded value, so we don't need an explicit length
// to be set. Thus far all RR's that have base64 encoded fields have those as their
// last one. What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base64"}
}
s := toBase64(msg[off:end])
return s, end, nil
}
func packStringBase64(s string, msg []byte, off int) (int, error) {
b64, err := fromBase64([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b64) > len(msg) {
return len(msg), &Error{err: "overflow packing base64"}
}
copy(msg[off:off+len(b64)], b64)
off += len(b64)
return off, nil
}
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
// Rest of the RR is hex encoded value, so we don't need an explicit length
// to be set. NSEC and TSIG have hex fields with a length field.
// What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking hex"}
}
s := hex.EncodeToString(msg[off:end])
return s, end, nil
}
func packStringHex(s string, msg []byte, off int) (int, error) {
h, err := hex.DecodeString(s)
if err != nil {
return len(msg), err
}
if off+(len(h)) > len(msg) {
return len(msg), &Error{err: "overflow packing hex"}
}
copy(msg[off:off+len(h)], h)
off += len(h)
return off, nil
}
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
txt, off, err := unpackTxt(msg, off)
if err != nil {
return nil, len(msg), err
}
return txt, off, nil
}
func packStringTxt(s []string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
off, err := packTxt(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
var edns []EDNS0
Option:
code := uint16(0)
if off+4 > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
code = binary.BigEndian.Uint16(msg[off:])
off += 2
optlen := binary.BigEndian.Uint16(msg[off:])
off += 2
if off+int(optlen) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
switch code {
case EDNS0NSID:
e := new(EDNS0_NSID)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0SUBNET, EDNS0SUBNETDRAFT:
e := new(EDNS0_SUBNET)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
if code == EDNS0SUBNETDRAFT {
e.DraftOption = true
}
case EDNS0COOKIE:
e := new(EDNS0_COOKIE)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0UL:
e := new(EDNS0_UL)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0LLQ:
e := new(EDNS0_LLQ)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0DAU:
e := new(EDNS0_DAU)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0DHU:
e := new(EDNS0_DHU)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0N3U:
e := new(EDNS0_N3U)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
default:
e := new(EDNS0_LOCAL)
e.Code = code
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
}
if off < len(msg) {
goto Option
}
return edns, off, nil
}
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
for _, el := range options {
b, err := el.pack()
if err != nil || off+3 > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
off += 4
if off+len(b) > len(msg) {
copy(msg[off:], b)
off = len(msg)
continue
}
// Actual data
copy(msg[off:off+len(b)], b)
off += len(b)
}
return off, nil
}
func unpackStringOctet(msg []byte, off int) (string, int, error) {
s := string(msg[off:])
return s, len(msg), nil
}
func packStringOctet(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packOctetString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
var nsec []uint16
length, window, lastwindow := 0, 0, -1
for off < len(msg) {
if off+2 > len(msg) {
return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
}
window = int(msg[off])
length = int(msg[off+1])
off += 2
if window <= lastwindow {
// RFC 4034: Blocks are present in the NSEC RR RDATA in
// increasing numerical order.
return nsec, len(msg), &Error{err: "out of order NSEC block"}
}
if length == 0 {
// RFC 4034: Blocks with no types present MUST NOT be included.
return nsec, len(msg), &Error{err: "empty NSEC block"}
}
if length > 32 {
return nsec, len(msg), &Error{err: "NSEC block too long"}
}
if off+length > len(msg) {
return nsec, len(msg), &Error{err: "overflowing NSEC block"}
}
// Walk the bytes in the window and extract the type bits
for j := 0; j < length; j++ {
b := msg[off+j]
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
nsec = append(nsec, uint16(window*256+j*8+0))
}
if b&0x40 == 0x40 {
nsec = append(nsec, uint16(window*256+j*8+1))
}
if b&0x20 == 0x20 {
nsec = append(nsec, uint16(window*256+j*8+2))
}
if b&0x10 == 0x10 {
nsec = append(nsec, uint16(window*256+j*8+3))
}
if b&0x8 == 0x8 {
nsec = append(nsec, uint16(window*256+j*8+4))
}
if b&0x4 == 0x4 {
nsec = append(nsec, uint16(window*256+j*8+5))
}
if b&0x2 == 0x2 {
nsec = append(nsec, uint16(window*256+j*8+6))
}
if b&0x1 == 0x1 {
nsec = append(nsec, uint16(window*256+j*8+7))
}
}
off += length
lastwindow = window
}
return nsec, off, nil
}
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
if len(bitmap) == 0 {
return off, nil
}
var lastwindow, lastlength uint16
for j := 0; j < len(bitmap); j++ {
t := bitmap[j]
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
off += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
return len(msg), &Error{err: "nsec bits out of order"}
}
if off+2+int(length) > len(msg) {
return len(msg), &Error{err: "overflow packing nsec"}
}
// Setting the window #
msg[off] = byte(window)
// Setting the octets length
msg[off+1] = byte(length)
// Setting the bit value for the type in the right octet
msg[off+1+int(length)] |= byte(1 << (7 - (t % 8)))
lastwindow, lastlength = window, length
}
off += int(lastlength) + 2
return off, nil
}
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
var (
servers []string
s string
err error
)
if end > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
}
for off < end {
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return servers, len(msg), err
}
servers = append(servers, s)
}
return servers, off, nil
}
func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) {
var err error
for j := 0; j < len(names); j++ {
off, err = PackDomainName(names[j], msg, off, compression, false && compress)
if err != nil {
return len(msg), err
}
}
return off, nil
}
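As an illustrative aside, the 6-byte big-endian layout used by packUint48/unpackUint48 above (TSIG time fields) can be sketched standalone like this:
package main

import "fmt"

// put48 stores a 48-bit value in 6 big-endian bytes, mirroring packUint48 above.
func put48(i uint64, b []byte) {
	b[0] = byte(i >> 40)
	b[1] = byte(i >> 32)
	b[2] = byte(i >> 24)
	b[3] = byte(i >> 16)
	b[4] = byte(i >> 8)
	b[5] = byte(i)
}

// get48 is the inverse, mirroring unpackUint48 above.
func get48(b []byte) uint64 {
	return uint64(b[0])<<40 | uint64(b[1])<<32 | uint64(b[2])<<24 |
		uint64(b[3])<<16 | uint64(b[4])<<8 | uint64(b[5])
}

func main() {
	buf := make([]byte, 6)
	put48(0x0123456789AB, buf)
	fmt.Printf("% x -> %#x\n", buf, get48(buf)) // 01 23 45 67 89 ab -> 0x123456789ab
}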

119
vendor/github.com/miekg/dns/nsecx.go generated vendored Normal file
View File

@ -0,0 +1,119 @@
package dns
import (
"crypto/sha1"
"hash"
"io"
"strings"
)
type saltWireFmt struct {
Salt string `dns:"size-hex"`
}
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
func HashName(label string, ha uint8, iter uint16, salt string) string {
saltwire := new(saltWireFmt)
saltwire.Salt = salt
wire := make([]byte, DefaultMsgSize)
n, err := packSaltWire(saltwire, wire)
if err != nil {
return ""
}
wire = wire[:n]
name := make([]byte, 255)
off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
if err != nil {
return ""
}
name = name[:off]
var s hash.Hash
switch ha {
case SHA1:
s = sha1.New()
default:
return ""
}
// k = 0
name = append(name, wire...)
io.WriteString(s, string(name))
nsec3 := s.Sum(nil)
// k > 0
for k := uint16(0); k < iter; k++ {
s.Reset()
nsec3 = append(nsec3, wire...)
io.WriteString(s, string(nsec3))
nsec3 = s.Sum(nil)
}
return toBase32(nsec3)
}
// Denialer is an interface that should be implemented by types that are used to deny
// answers in DNSSEC.
type Denialer interface {
// Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
Cover(name string) bool
// Match will check if the ownername matches the (unhashed) name for this NSEC3 or NSEC3.
Match(name string) bool
}
// Cover implements the Denialer interface.
func (rr *NSEC) Cover(name string) bool {
return true
}
// Match implements the Denialer interface.
func (rr *NSEC) Match(name string) bool {
return true
}
// Cover implements the Denialer interface.
func (rr *NSEC3) Cover(name string) bool {
// FIXME(miek): check if the zones match
// FIXME(miek): check if we're not dealing with parent nsec3
hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
labels := Split(rr.Hdr.Name)
if len(labels) < 2 {
return false
}
hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot
if hash == rr.NextDomain {
return false // empty interval
}
if hash > rr.NextDomain { // last name, points to apex
// hname > hash
// hname > rr.NextDomain
// TODO(miek)
}
if hname <= hash {
return false
}
if hname >= rr.NextDomain {
return false
}
return true
}
// Match implements the Denialer interface.
func (rr *NSEC3) Match(name string) bool {
// FIXME(miek): Check if we are in the same zone
hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
labels := Split(rr.Hdr.Name)
if len(labels) < 2 {
return false
}
hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the .
if hash == hname {
return true
}
return false
}
func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) {
off, err := packStringHex(sw.Salt, msg, 0)
if err != nil {
return off, err
}
return off, nil
}
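A minimal usage sketch for the exported HashName above, assuming the vendored package is imported as github.com/miekg/dns; the salt is given as a hex string and the result is Base32 in the extended-hex alphabet:
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// NSEC3-style hashing per RFC 5155: SHA-1, 12 extra iterations, hex salt.
	fmt.Println(dns.HashName("example.org.", dns.SHA1, 12, "aabbccdd"))
}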

149
vendor/github.com/miekg/dns/privaterr.go generated vendored Normal file
View File

@ -0,0 +1,149 @@
package dns
import (
"fmt"
"strings"
)
// PrivateRdata is an interface used for implementing "Private Use" RR types, see
// RFC 6895. This allows one to experiment with new RR types, without requesting an
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
type PrivateRdata interface {
// String returns the text presentation of the Rdata of the Private RR.
String() string
// Parse parses the Rdata of the private RR.
Parse([]string) error
// Pack is used when packing a private RR into a buffer.
Pack([]byte) (int, error)
// Unpack is used when unpacking a private RR from a buffer.
// TODO(miek): diff. signature than Pack, see edns0.go for instance.
Unpack([]byte) (int, error)
// Copy copies the Rdata.
Copy(PrivateRdata) error
// Len returns the length in octets of the Rdata.
Len() int
}
// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
// It mocks normal RRs and implements dns.RR interface.
type PrivateRR struct {
Hdr RR_Header
Data PrivateRdata
}
func mkPrivateRR(rrtype uint16) *PrivateRR {
// Panics if RR is not an instance of PrivateRR.
rrfunc, ok := TypeToRR[rrtype]
if !ok {
panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
}
anyrr := rrfunc()
switch rr := anyrr.(type) {
case *PrivateRR:
return rr
}
panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
}
// Header returns the RR header of r.
func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
// Private len and copy parts to satisfy RR interface.
func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
func (r *PrivateRR) copy() RR {
// make new RR like this:
rr := mkPrivateRR(r.Hdr.Rrtype)
newh := r.Hdr.copyHeader()
rr.Hdr = *newh
err := r.Data.Copy(rr.Data)
if err != nil {
panic("dns: got value that could not be used to copy Private rdata")
}
return rr
}
func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
off, err := r.Hdr.pack(msg, off, compression, compress)
if err != nil {
return off, err
}
headerEnd := off
n, err := r.Data.Pack(msg[off:])
if err != nil {
return len(msg), err
}
off += n
r.Header().Rdlength = uint16(off - headerEnd)
return off, nil
}
// PrivateHandle registers a private resource record type. It requires
// the string and numeric representations of the private RR type and a generator function as arguments.
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
rtypestr = strings.ToUpper(rtypestr)
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
TypeToString[rtype] = rtypestr
StringToType[rtypestr] = rtype
typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) {
if noRdata(h) {
return &h, off, nil
}
var err error
rr := mkPrivateRR(h.Rrtype)
rr.Hdr = h
off1, err := rr.Data.Unpack(msg[off:])
off += off1
if err != nil {
return rr, off, err
}
return rr, off, err
}
setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := mkPrivateRR(h.Rrtype)
rr.Hdr = h
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
Fetch:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l = <-c; l.value {
case zNewline, zEOF:
break Fetch
case zString:
text = append(text, l.token)
}
}
err := rr.Data.Parse(text)
if err != nil {
return nil, &ParseError{f, err.Error(), l}, ""
}
return rr, nil, ""
}
typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
}
// PrivateHandleRemove removes the definitions required to support the private RR type.
func PrivateHandleRemove(rtype uint16) {
rtypestr, ok := TypeToString[rtype]
if ok {
delete(TypeToRR, rtype)
delete(TypeToString, rtype)
delete(typeToparserFunc, rtype)
delete(StringToType, rtypestr)
delete(typeToUnpack, rtype)
}
return
}
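A minimal sketch of registering a private RR type with PrivateHandle above; the type code, the name "EXAMPLE", and the exampleRdata type are hypothetical, and the vendored package is assumed to be imported as github.com/miekg/dns:
package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

// typeEXAMPLE is a hypothetical code from the RFC 6895 private-use range.
const typeEXAMPLE uint16 = 0xFF01

// exampleRdata carries a single free-form text field.
type exampleRdata struct {
	Txt string
}

func (r *exampleRdata) String() string           { return r.Txt }
func (r *exampleRdata) Parse(txt []string) error { r.Txt = strings.Join(txt, " "); return nil }
func (r *exampleRdata) Pack(buf []byte) (int, error)   { return copy(buf, r.Txt), nil }
func (r *exampleRdata) Unpack(buf []byte) (int, error) { r.Txt = string(buf); return len(buf), nil }
func (r *exampleRdata) Len() int                 { return len(r.Txt) }

func (r *exampleRdata) Copy(dest dns.PrivateRdata) error {
	d, ok := dest.(*exampleRdata)
	if !ok {
		return errors.New("dest is not *exampleRdata")
	}
	d.Txt = r.Txt
	return nil
}

func main() {
	dns.PrivateHandle("EXAMPLE", typeEXAMPLE, func() dns.PrivateRdata { return new(exampleRdata) })
	defer dns.PrivateHandleRemove(typeEXAMPLE)

	rr, err := dns.NewRR("x.example.org. 3600 IN EXAMPLE hello world")
	if err != nil {
		panic(err)
	}
	fmt.Println(rr) // prints the RR header followed by "hello world"
}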

49
vendor/github.com/miekg/dns/rawmsg.go generated vendored Normal file
View File

@ -0,0 +1,49 @@
package dns
import "encoding/binary"
// rawSetRdlength sets the rdlength in the header of
// the RR. The offset 'off' must be positioned at the
// start of the header of the RR, 'end' must be the
// end of the RR.
func rawSetRdlength(msg []byte, off, end int) bool {
l := len(msg)
Loop:
for {
if off+1 > l {
return false
}
c := int(msg[off])
off++
switch c & 0xC0 {
case 0x00:
if c == 0x00 {
// End of the domainname
break Loop
}
if off+c > l {
return false
}
off += c
case 0xC0:
// pointer, next byte included, ends domainname
off++
break Loop
}
}
// The domainname has been seen, we are at the start of the fixed part of the header.
// Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length.
off += 2 + 2 + 4
if off+2 > l {
return false
}
// off+2 is the end of the header, 'end' is the end of the rr
// so 'end' - (off+2) is the length of the rdata
rdatalen := end - (off + 2)
if rdatalen > 0xFFFF {
return false
}
binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen))
return true
}

38
vendor/github.com/miekg/dns/reverse.go generated vendored Normal file
View File

@ -0,0 +1,38 @@
package dns
// StringToType is the reverse of TypeToString, needed for string parsing.
var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)
// StringToOpcode is the reverse of OpcodeToString, needed for string parsing.
var StringToOpcode = reverseInt(OpcodeToString)
// StringToRcode is the reverse of RcodeToString, needed for string parsing.
var StringToRcode = reverseInt(RcodeToString)
// Reverse a map
func reverseInt8(m map[uint8]string) map[string]uint8 {
n := make(map[string]uint8, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt16(m map[uint16]string) map[string]uint16 {
n := make(map[string]uint16, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt(m map[int]string) map[string]int {
n := make(map[string]int, len(m))
for u, s := range m {
n[s] = u
}
return n
}

84
vendor/github.com/miekg/dns/sanitize.go generated vendored Normal file
View File

@ -0,0 +1,84 @@
package dns
// Dedup removes identical RRs from rrs. It preserves the original ordering.
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
// rrs.
// m is used to store the RRs temporarily. If it is nil a new map will be allocated.
func Dedup(rrs []RR, m map[string]RR) []RR {
if m == nil {
m = make(map[string]RR)
}
// Save the keys, so we don't have to call normalizedString twice.
keys := make([]*string, 0, len(rrs))
for _, r := range rrs {
key := normalizedString(r)
keys = append(keys, &key)
if _, ok := m[key]; ok {
// Shortest TTL wins.
if m[key].Header().Ttl > r.Header().Ttl {
m[key].Header().Ttl = r.Header().Ttl
}
continue
}
m[key] = r
}
// If the length of the result map equals the amount of RRs we got,
// it means they were all different. We can then just return the original rrset.
if len(m) == len(rrs) {
return rrs
}
j := 0
for i, r := range rrs {
// If keys[i] lives in the map, we should copy and remove it.
if _, ok := m[*keys[i]]; ok {
delete(m, *keys[i])
rrs[j] = r
j++
}
if len(m) == 0 {
break
}
}
return rrs[:j]
}
// normalizedString returns a normalized string from r. The TTL
// is removed and the domain name is lowercased. We go from this:
// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
// lowercasename<TAB>CLASS<TAB>TYPE...
func normalizedString(r RR) string {
// A string Go DNS makes has: domainname<TAB>TTL<TAB>...
b := []byte(r.String())
// find the first non-escaped tab, then another, so we capture where the TTL lives.
esc := false
ttlStart, ttlEnd := 0, 0
for i := 0; i < len(b) && ttlEnd == 0; i++ {
switch {
case b[i] == '\\':
esc = !esc
case b[i] == '\t' && !esc:
if ttlStart == 0 {
ttlStart = i
continue
}
if ttlEnd == 0 {
ttlEnd = i
}
case b[i] >= 'A' && b[i] <= 'Z' && !esc:
b[i] += 32
default:
esc = false
}
}
// remove TTL.
copy(b[ttlStart:], b[ttlEnd:])
cut := ttlEnd - ttlStart
return string(b[:len(b)-cut])
}
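A short usage sketch for Dedup above (the vendored package is assumed to be imported as github.com/miekg/dns); the second record differs only in case and TTL, so it is dropped and the survivor keeps the lowest TTL:
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func mustRR(s string) dns.RR {
	rr, err := dns.NewRR(s)
	if err != nil {
		panic(err)
	}
	return rr
}

func main() {
	rrs := []dns.RR{
		mustRR("example.org. 300 IN A 192.0.2.1"),
		mustRR("EXAMPLE.ORG. 60 IN A 192.0.2.1"), // same record, different case and TTL
		mustRR("example.org. 300 IN A 192.0.2.2"),
	}
	for _, rr := range dns.Dedup(rrs, nil) {
		fmt.Println(rr) // two records remain; the first keeps TTL 60
	}
}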

974
vendor/github.com/miekg/dns/scan.go generated vendored Normal file
View File

@ -0,0 +1,974 @@
package dns
import (
"io"
"log"
"os"
"strconv"
"strings"
)
type debugging bool
const debug debugging = false
func (d debugging) Printf(format string, args ...interface{}) {
if d {
log.Printf(format, args...)
}
}
const maxTok = 2048 // Largest token we can return.
const maxUint16 = 1<<16 - 1
// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
// * Suppress sequences of spaces;
// * Make each RR fit on one line (zNewline is sent as the last token)
// * Handle comments: ;
// * Handle braces - anywhere.
const (
// Zonefile
zEOF = iota
zString
zBlank
zQuote
zNewline
zRrtpe
zOwner
zClass
zDirOrigin // $ORIGIN
zDirTtl // $TTL
zDirInclude // $INCLUDE
zDirGenerate // $GENERATE
// Privatekey file
zValue
zKey
zExpectOwnerDir // Ownername
zExpectOwnerBl // Whitespace after the ownername
zExpectAny // Expect rrtype, ttl or class
zExpectAnyNoClass // Expect rrtype or ttl
zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS
zExpectAnyNoTtl // Expect rrtype or class
zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL
zExpectRrtype // Expect rrtype
zExpectRrtypeBl // Whitespace BEFORE rrtype
zExpectRdata // The first element of the rdata
zExpectDirTtlBl // Space after directive $TTL
zExpectDirTtl // Directive $TTL
zExpectDirOriginBl // Space after directive $ORIGIN
zExpectDirOrigin // Directive $ORIGIN
zExpectDirIncludeBl // Space after directive $INCLUDE
zExpectDirInclude // Directive $INCLUDE
zExpectDirGenerate // Directive $GENERATE
zExpectDirGenerateBl // Space after directive $GENERATE
)
// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
// where the error occurred.
type ParseError struct {
file string
err string
lex lex
}
func (e *ParseError) Error() (s string) {
if e.file != "" {
s = e.file + ": "
}
s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
return
}
type lex struct {
token string // text of the token
tokenUpper string // uppercase text of the token
length int // length of the token
err bool // when true, token text has lexer error
value uint8 // value: zString, _BLANK, etc.
line int // line in the file
column int // column in the file
torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
comment string // any comment text seen
}
// Token holds the token that are returned when a zone file is parsed.
type Token struct {
// The scanned resource record when error is not nil.
RR
// When an error occurred, this has the error specifics.
Error *ParseError
// A potential comment positioned after the RR and on the same line.
Comment string
}
// NewRR reads the RR contained in the string s. Only the first RR is
// returned. If s contains no RR, return nil with no error. The class
// defaults to IN and TTL defaults to 3600. The full zone file syntax
// like $TTL, $ORIGIN, etc. is supported. All fields of the returned
// RR are set, except RR.Header().Rdlength which is set to 0.
func NewRR(s string) (RR, error) {
if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
return ReadRR(strings.NewReader(s+"\n"), "")
}
return ReadRR(strings.NewReader(s), "")
}
// ReadRR reads the RR contained in q.
// See NewRR for more documentation.
func ReadRR(q io.Reader, filename string) (RR, error) {
r := <-parseZoneHelper(q, ".", filename, 1)
if r == nil {
return nil, nil
}
if r.Error != nil {
return nil, r.Error
}
return r.RR, nil
}
// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the
// returned channel, which consist out the parsed RR, a potential comment or an error.
// If there is an error the RR is nil. The string file is only used
// in error reporting. The string origin is used as the initial origin, as
// if the file would start with: $ORIGIN origin .
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported.
// The channel t is closed by ParseZone when the end of r is reached.
//
// Basic usage pattern when reading from a string (z) containing the
// zone data:
//
// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
// if x.Error != nil {
// // log.Println(x.Error)
// } else {
// // Do something with x.RR
// }
// }
//
// Comments specified after an RR (and on the same line!) are returned too:
//
// foo. IN A 10.0.0.1 ; this is a comment
//
// The text "; this is comment" is returned in Token.Comment. Comments inside the
// RR are discarded. Comments on a line by themselves are discarded too.
func ParseZone(r io.Reader, origin, file string) chan *Token {
return parseZoneHelper(r, origin, file, 10000)
}
func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token {
t := make(chan *Token, chansize)
go parseZone(r, origin, file, t, 0)
return t
}
func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
defer func() {
if include == 0 {
close(t)
}
}()
s := scanInit(r)
c := make(chan lex)
// Start the lexer
go zlexer(s, c)
// 6 possible beginnings of a line, _ is a space
// 0. zRRTYPE -> all omitted until the rrtype
// 1. zOwner _ zRrtype -> class/ttl omitted
// 2. zOwner _ zString _ zRrtype -> class omitted
// 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
// 4. zOwner _ zClass _ zRrtype -> ttl omitted
// 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
// After detecting these, we know the zRrtype so we can jump to functions
// handling the rdata for each of these types.
if origin == "" {
origin = "."
}
origin = Fqdn(origin)
if _, ok := IsDomainName(origin); !ok {
t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
return
}
st := zExpectOwnerDir // initial state
var h RR_Header
var defttl uint32 = defaultTtl
var prevName string
for l := range c {
// Lexer spotted an error already
if l.err == true {
t <- &Token{Error: &ParseError{f, l.token, l}}
return
}
switch st {
case zExpectOwnerDir:
// We can also expect a directive, like $TTL or $ORIGIN
h.Ttl = defttl
h.Class = ClassINET
switch l.value {
case zNewline:
st = zExpectOwnerDir
case zOwner:
h.Name = l.token
if l.token[0] == '@' {
h.Name = origin
prevName = h.Name
st = zExpectOwnerBl
break
}
if h.Name[l.length-1] != '.' {
h.Name = appendOrigin(h.Name, origin)
}
_, ok := IsDomainName(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "bad owner name", l}}
return
}
prevName = h.Name
st = zExpectOwnerBl
case zDirTtl:
st = zExpectDirTtlBl
case zDirOrigin:
st = zExpectDirOriginBl
case zDirInclude:
st = zExpectDirIncludeBl
case zDirGenerate:
st = zExpectDirGenerateBl
case zRrtpe:
h.Name = prevName
h.Rrtype = l.torc
st = zExpectRdata
case zClass:
h.Name = prevName
h.Class = l.torc
st = zExpectAnyNoClassBl
case zBlank:
// Discard, can happen when there is nothing on the
// line except the RR type
case zString:
ttl, ok := stringToTtl(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
return
}
h.Ttl = ttl
// Don't update the defttl here, we should take the $TTL value
// defttl = ttl
st = zExpectAnyNoTtlBl
default:
t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}}
return
}
case zExpectDirIncludeBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}}
return
}
st = zExpectDirInclude
case zExpectDirInclude:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}}
return
}
neworigin := origin // There may be optionally a new origin set after the filename, if not use current one
l := <-c
switch l.value {
case zBlank:
l := <-c
if l.value == zString {
if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err {
t <- &Token{Error: &ParseError{f, "bad origin name", l}}
return
}
// a new origin is specified.
if l.token[l.length-1] != '.' {
if origin != "." { // Prevent .. endings
neworigin = l.token + "." + origin
} else {
neworigin = l.token + origin
}
} else {
neworigin = l.token
}
}
case zNewline, zEOF:
// Ok
default:
t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}}
return
}
// Start with the new file
r1, e1 := os.Open(l.token)
if e1 != nil {
t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}}
return
}
if include+1 > 7 {
t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}}
return
}
parseZone(r1, l.token, neworigin, t, include+1)
st = zExpectOwnerDir
case zExpectDirTtlBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
return
}
st = zExpectDirTtl
case zExpectDirTtl:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
return
}
if e, _ := slurpRemainder(c, f); e != nil {
t <- &Token{Error: e}
return
}
ttl, ok := stringToTtl(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
return
}
defttl = ttl
st = zExpectOwnerDir
case zExpectDirOriginBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}}
return
}
st = zExpectDirOrigin
case zExpectDirOrigin:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}}
return
}
if e, _ := slurpRemainder(c, f); e != nil {
t <- &Token{Error: e}
}
if _, ok := IsDomainName(l.token); !ok {
t <- &Token{Error: &ParseError{f, "bad origin name", l}}
return
}
if l.token[l.length-1] != '.' {
if origin != "." { // Prevent .. endings
origin = l.token + "." + origin
} else {
origin = l.token + origin
}
} else {
origin = l.token
}
st = zExpectOwnerDir
case zExpectDirGenerateBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}}
return
}
st = zExpectDirGenerate
case zExpectDirGenerate:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
return
}
if errMsg := generate(l, c, t, origin); errMsg != "" {
t <- &Token{Error: &ParseError{f, errMsg, l}}
return
}
st = zExpectOwnerDir
case zExpectOwnerBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after owner", l}}
return
}
st = zExpectAny
case zExpectAny:
switch l.value {
case zRrtpe:
h.Rrtype = l.torc
st = zExpectRdata
case zClass:
h.Class = l.torc
st = zExpectAnyNoClassBl
case zString:
ttl, ok := stringToTtl(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
return
}
h.Ttl = ttl
// defttl = ttl // don't set the defttl here
st = zExpectAnyNoTtlBl
default:
t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
return
}
case zExpectAnyNoClassBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank before class", l}}
return
}
st = zExpectAnyNoClass
case zExpectAnyNoTtlBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank before TTL", l}}
return
}
st = zExpectAnyNoTtl
case zExpectAnyNoTtl:
switch l.value {
case zClass:
h.Class = l.torc
st = zExpectRrtypeBl
case zRrtpe:
h.Rrtype = l.torc
st = zExpectRdata
default:
t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}}
return
}
case zExpectAnyNoClass:
switch l.value {
case zString:
ttl, ok := stringToTtl(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
return
}
h.Ttl = ttl
// defttl = ttl // don't set the def ttl anymore
st = zExpectRrtypeBl
case zRrtpe:
h.Rrtype = l.torc
st = zExpectRdata
default:
t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}}
return
}
case zExpectRrtypeBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank before RR type", l}}
return
}
st = zExpectRrtype
case zExpectRrtype:
if l.value != zRrtpe {
t <- &Token{Error: &ParseError{f, "unknown RR type", l}}
return
}
h.Rrtype = l.torc
st = zExpectRdata
case zExpectRdata:
r, e, c1 := setRR(h, c, origin, f)
if e != nil {
// If e.lex is nil then we have encountered an unknown RR type;
// in that case we substitute our current lex token
if e.lex.token == "" && e.lex.value == 0 {
e.lex = l // Uh, dirty
}
t <- &Token{Error: e}
return
}
t <- &Token{RR: r, Comment: c1}
st = zExpectOwnerDir
}
}
// If we get here and h.Rrtype is still zero, we haven't parsed anything; this
// is not an error, because an empty zone file is still a zone file.
}
// zlexer scans the sourcefile and returns tokens on the channel c.
func zlexer(s *scan, c chan lex) {
var l lex
str := make([]byte, maxTok) // Should be enough for any token
stri := 0 // Offset in str (0 means empty)
com := make([]byte, maxTok) // Hold comment text
comi := 0
quote := false
escape := false
space := false
commt := false
rrtype := false
owner := true
brace := 0
x, err := s.tokenText()
defer close(c)
for err == nil {
l.column = s.position.Column
l.line = s.position.Line
if stri >= maxTok {
l.token = "token length insufficient for parsing"
l.err = true
debug.Printf("[%+v]", l.token)
c <- l
return
}
if comi >= maxTok {
l.token = "comment length insufficient for parsing"
l.err = true
debug.Printf("[%+v]", l.token)
c <- l
return
}
switch x {
case ' ', '\t':
if escape {
escape = false
str[stri] = x
stri++
break
}
if quote {
// Inside quotes this is legal
str[stri] = x
stri++
break
}
if commt {
com[comi] = x
comi++
break
}
if stri == 0 {
// Space directly in the beginning, handled in the grammar
} else if owner {
// If we have a string and it's the first, make it an owner
l.value = zOwner
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
// escape $... start with a \ not a $, so this will work
switch l.tokenUpper {
case "$TTL":
l.value = zDirTtl
case "$ORIGIN":
l.value = zDirOrigin
case "$INCLUDE":
l.value = zDirInclude
case "$GENERATE":
l.value = zDirGenerate
}
debug.Printf("[7 %+v]", l.token)
c <- l
} else {
l.value = zString
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
if !rrtype {
if t, ok := StringToType[l.tokenUpper]; ok {
l.value = zRrtpe
l.torc = t
rrtype = true
} else {
if strings.HasPrefix(l.tokenUpper, "TYPE") {
t, ok := typeToInt(l.token)
if !ok {
l.token = "unknown RR type"
l.err = true
c <- l
return
}
l.value = zRrtpe
l.torc = t
}
}
if t, ok := StringToClass[l.tokenUpper]; ok {
l.value = zClass
l.torc = t
} else {
if strings.HasPrefix(l.tokenUpper, "CLASS") {
t, ok := classToInt(l.token)
if !ok {
l.token = "unknown class"
l.err = true
c <- l
return
}
l.value = zClass
l.torc = t
}
}
}
debug.Printf("[6 %+v]", l.token)
c <- l
}
stri = 0
// I reverse space stuff here
if !space && !commt {
l.value = zBlank
l.token = " "
l.length = 1
debug.Printf("[5 %+v]", l.token)
c <- l
}
owner = false
space = true
case ';':
if escape {
escape = false
str[stri] = x
stri++
break
}
if quote {
// Inside quotes this is legal
str[stri] = x
stri++
break
}
if stri > 0 {
l.value = zString
l.token = string(str[:stri])
l.length = stri
debug.Printf("[4 %+v]", l.token)
c <- l
stri = 0
}
commt = true
com[comi] = ';'
comi++
case '\r':
escape = false
if quote {
str[stri] = x
stri++
break
}
// discard if outside of quotes
case '\n':
escape = false
// Escaped newline
if quote {
str[stri] = x
stri++
break
}
// inside quotes this is legal
if commt {
// Reset a comment
commt = false
rrtype = false
stri = 0
// If not in a brace this ends the comment AND the RR
if brace == 0 {
owner = true
l.value = zNewline
l.token = "\n"
l.length = 1
l.comment = string(com[:comi])
debug.Printf("[3 %+v %+v]", l.token, l.comment)
c <- l
l.comment = ""
comi = 0
break
}
com[comi] = ' ' // convert newline to space
comi++
break
}
if brace == 0 {
// If there is previous text, we should output it here
if stri != 0 {
l.value = zString
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
if !rrtype {
if t, ok := StringToType[l.tokenUpper]; ok {
l.value = zRrtpe
l.torc = t
rrtype = true
}
}
debug.Printf("[2 %+v]", l.token)
c <- l
}
l.value = zNewline
l.token = "\n"
l.length = 1
debug.Printf("[1 %+v]", l.token)
c <- l
stri = 0
commt = false
rrtype = false
owner = true
comi = 0
}
case '\\':
// comments do not get escaped chars, everything is copied
if commt {
com[comi] = x
comi++
break
}
// something already escaped must be in string
if escape {
str[stri] = x
stri++
escape = false
break
}
// something escaped outside of string gets added to string
str[stri] = x
stri++
escape = true
case '"':
if commt {
com[comi] = x
comi++
break
}
if escape {
str[stri] = x
stri++
escape = false
break
}
space = false
// send previous gathered text and the quote
if stri != 0 {
l.value = zString
l.token = string(str[:stri])
l.length = stri
debug.Printf("[%+v]", l.token)
c <- l
stri = 0
}
// send quote itself as separate token
l.value = zQuote
l.token = "\""
l.length = 1
c <- l
quote = !quote
case '(', ')':
if commt {
com[comi] = x
comi++
break
}
if escape {
str[stri] = x
stri++
escape = false
break
}
if quote {
str[stri] = x
stri++
break
}
switch x {
case ')':
brace--
if brace < 0 {
l.token = "extra closing brace"
l.err = true
debug.Printf("[%+v]", l.token)
c <- l
return
}
case '(':
brace++
}
default:
escape = false
if commt {
com[comi] = x
comi++
break
}
str[stri] = x
stri++
space = false
}
x, err = s.tokenText()
}
if stri > 0 {
// Send remainder
l.token = string(str[:stri])
l.length = stri
l.value = zString
debug.Printf("[%+v]", l.token)
c <- l
}
}
// Extract the class number from CLASSxx
func classToInt(token string) (uint16, bool) {
offset := 5
if len(token) < offset+1 {
return 0, false
}
class, ok := strconv.Atoi(token[offset:])
if ok != nil || class > maxUint16 {
return 0, false
}
return uint16(class), true
}
// Extract the rr number from TYPExxx
func typeToInt(token string) (uint16, bool) {
offset := 4
if len(token) < offset+1 {
return 0, false
}
typ, ok := strconv.Atoi(token[offset:])
if ok != nil || typ > maxUint16 {
return 0, false
}
return uint16(typ), true
}
// Parse things like 2w, 2m, etc, Return the time in seconds.
func stringToTtl(token string) (uint32, bool) {
s := uint32(0)
i := uint32(0)
for _, c := range token {
switch c {
case 's', 'S':
s += i
i = 0
case 'm', 'M':
s += i * 60
i = 0
case 'h', 'H':
s += i * 60 * 60
i = 0
case 'd', 'D':
s += i * 60 * 60 * 24
i = 0
case 'w', 'W':
s += i * 60 * 60 * 24 * 7
i = 0
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
i *= 10
i += uint32(c) - '0'
default:
return 0, false
}
}
return s + i, true
}
// Parse LOC records' <digits>[.<digits>][mM] into a
// mantissa exponent format. Token should contain the entire
// string (i.e. no spaces allowed)
func stringToCm(token string) (e, m uint8, ok bool) {
if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
token = token[0 : len(token)-1]
}
s := strings.SplitN(token, ".", 2)
var meters, cmeters, val int
var err error
switch len(s) {
case 2:
if cmeters, err = strconv.Atoi(s[1]); err != nil {
return
}
fallthrough
case 1:
if meters, err = strconv.Atoi(s[0]); err != nil {
return
}
case 0:
// huh?
return 0, 0, false
}
ok = true
if meters > 0 {
e = 2
val = meters
} else {
e = 0
val = cmeters
}
for val > 10 {
e++
val /= 10
}
if e > 9 {
ok = false
}
m = uint8(val)
return
}
func appendOrigin(name, origin string) string {
if origin == "." {
return name + origin
}
return name + "." + origin
}
// LOC record helper function
func locCheckNorth(token string, latitude uint32) (uint32, bool) {
switch token {
case "n", "N":
return LOC_EQUATOR + latitude, true
case "s", "S":
return LOC_EQUATOR - latitude, true
}
return latitude, false
}
// LOC record helper function
func locCheckEast(token string, longitude uint32) (uint32, bool) {
switch token {
case "e", "E":
return LOC_EQUATOR + longitude, true
case "w", "W":
return LOC_EQUATOR - longitude, true
}
return longitude, false
}
// "Eat" the rest of the "line". Return potential comments
func slurpRemainder(c chan lex, f string) (*ParseError, string) {
l := <-c
com := ""
switch l.value {
case zBlank:
l = <-c
com = l.comment
if l.value != zNewline && l.value != zEOF {
return &ParseError{f, "garbage after rdata", l}, ""
}
case zNewline:
com = l.comment
case zEOF:
default:
return &ParseError{f, "garbage after rdata", l}, ""
}
return nil, com
}
// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64"
// Used for NID and L64 record.
func stringToNodeID(l lex) (uint64, *ParseError) {
if len(l.token) < 19 {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
// There must be three colons at fixed positions, if not it's a parse error
if l.token[4] != ':' || l.token[9] != ':' || l.token[14] != ':' {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
u, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
return u, nil
}
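A short usage sketch for the zone parser above, following the pattern documented at ParseZone (the vendored package is assumed to be imported as github.com/miekg/dns; the zone contents are made up for illustration):
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := `$TTL 1h
@    IN SOA ns1.example.org. hostmaster.example.org. 2018020201 7200 1800 1209600 3600
www  IN A   192.0.2.10 ; web server
ftp  IN CNAME www
`
	for tok := range dns.ParseZone(strings.NewReader(zone), "example.org.", "example.zone") {
		if tok.Error != nil {
			fmt.Println("parse error:", tok.Error)
			continue
		}
		fmt.Printf("%s\t; comment=%q\n", tok.RR, tok.Comment)
	}
}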

2143
vendor/github.com/miekg/dns/scan_rr.go generated vendored Normal file

File diff suppressed because it is too large

43
vendor/github.com/miekg/dns/scanner.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
package dns
// Implement a simple scanner, return a byte stream from an io reader.
import (
"bufio"
"io"
"text/scanner"
)
type scan struct {
src *bufio.Reader
position scanner.Position
eof bool // Have we just seen a eof
}
func scanInit(r io.Reader) *scan {
s := new(scan)
s.src = bufio.NewReader(r)
s.position.Line = 1
return s
}
// tokenText returns the next byte from the input
func (s *scan) tokenText() (byte, error) {
c, err := s.src.ReadByte()
if err != nil {
return c, err
}
// delay the newline handling until the next token is delivered,
// fixes off-by-one errors when reporting a parse error.
if s.eof == true {
s.position.Line++
s.position.Column = 0
s.eof = false
}
if c == '\n' {
s.eof = true
return c, nil
}
s.position.Column++
return c, nil
}

732
vendor/github.com/miekg/dns/server.go generated vendored Normal file
View File

@ -0,0 +1,732 @@
// DNS server implementation.
package dns
import (
"bytes"
"crypto/tls"
"encoding/binary"
"io"
"net"
"sync"
"time"
)
// Maximum number of TCP queries before we close the socket.
const maxTCPQueries = 128
// Handler is implemented by any value that implements ServeDNS.
type Handler interface {
ServeDNS(w ResponseWriter, r *Msg)
}
// A ResponseWriter interface is used by a DNS handler to
// construct a DNS response.
type ResponseWriter interface {
// LocalAddr returns the net.Addr of the server
LocalAddr() net.Addr
// RemoteAddr returns the net.Addr of the client that sent the current request.
RemoteAddr() net.Addr
// WriteMsg writes a reply back to the client.
WriteMsg(*Msg) error
// Write writes a raw buffer back to the client.
Write([]byte) (int, error)
// Close closes the connection.
Close() error
// TsigStatus returns the status of the Tsig.
TsigStatus() error
// TsigTimersOnly sets the tsig timers only boolean.
TsigTimersOnly(bool)
// Hijack lets the caller take over the connection.
// After a call to Hijack(), the DNS package will not do anything with the connection.
Hijack()
}
type response struct {
hijacked bool // connection has been hijacked by handler
tsigStatus error
tsigTimersOnly bool
tsigRequestMAC string
tsigSecret map[string]string // the tsig secrets
udp *net.UDPConn // i/o connection if UDP was used
tcp net.Conn // i/o connection if TCP was used
udpSession *SessionUDP // oob data to get egress interface right
remoteAddr net.Addr // address of the client
writer Writer // writer to output the raw DNS bits
}
// ServeMux is a DNS request multiplexer. It matches the
// zone name of each incoming request against a list of
// registered patterns and calls the handler for the pattern
// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning
// that queries for the DS record are redirected to the parent zone (if that
// is also registered), otherwise the child gets the query.
// ServeMux is also safe for concurrent access from multiple goroutines.
type ServeMux struct {
z map[string]Handler
m *sync.RWMutex
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} }
// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = NewServeMux()
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as DNS handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler object that calls f.
type HandlerFunc func(ResponseWriter, *Msg)
// ServeDNS calls f(w, r).
func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
f(w, r)
}
// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets.
func HandleFailed(w ResponseWriter, r *Msg) {
m := new(Msg)
m.SetRcode(r, RcodeServerFailure)
// does not matter if this write fails
w.WriteMsg(m)
}
func failedHandler() Handler { return HandlerFunc(HandleFailed) }
// ListenAndServe starts a server on the specified address and network, invoking handler
// for incoming queries.
func ListenAndServe(addr string, network string, handler Handler) error {
server := &Server{Addr: addr, Net: network, Handler: handler}
return server.ListenAndServe()
}
// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
// http://golang.org/pkg/net/http/#ListenAndServeTLS
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return err
}
config := tls.Config{
Certificates: []tls.Certificate{cert},
}
server := &Server{
Addr: addr,
Net: "tcp-tls",
TLSConfig: &config,
Handler: handler,
}
return server.ListenAndServe()
}
// ActivateAndServe activates a server with a listener from systemd,
// l and p should not both be non-nil.
// If both l and p are non-nil, only p will be used.
// Invoke handler for incoming queries.
func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
server := &Server{Listener: l, PacketConn: p, Handler: handler}
return server.ActivateAndServe()
}
func (mux *ServeMux) match(q string, t uint16) Handler {
mux.m.RLock()
defer mux.m.RUnlock()
var handler Handler
b := make([]byte, len(q)) // worst case, one label of length q
off := 0
end := false
for {
l := len(q[off:])
for i := 0; i < l; i++ {
b[i] = q[off+i]
if b[i] >= 'A' && b[i] <= 'Z' {
b[i] |= ('a' - 'A')
}
}
if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key
if t != TypeDS {
return h
}
// Continue for DS to see if we have a parent too, if so delegate to the parent
handler = h
}
off, end = NextLabel(q, off)
if end {
break
}
}
// Wildcard match, if we have found nothing try the root zone as a last resort.
if h, ok := mux.z["."]; ok {
return h
}
return handler
}
// Handle adds a handler to the ServeMux for pattern.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
mux.z[Fqdn(pattern)] = handler
mux.m.Unlock()
}
// HandleFunc adds a handler function to the ServeMux for pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
mux.Handle(pattern, HandlerFunc(handler))
}
// HandleRemove deregisters the handler for pattern from the ServeMux.
func (mux *ServeMux) HandleRemove(pattern string) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
delete(mux.z, Fqdn(pattern))
mux.m.Unlock()
}
// ServeDNS dispatches the request to the handler whose
// pattern most closely matches the request message. If DefaultServeMux
// is used the correct thing for DS queries is done: a possible parent
// is sought.
// If no handler is found a standard SERVFAIL message is returned.
// If the request message does not have exactly one question in the
// question section a SERVFAIL is returned, unless Unsafe is true.
func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) {
var h Handler
if len(request.Question) < 1 { // allow more than one question
h = failedHandler()
} else {
if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil {
h = failedHandler()
}
}
h.ServeDNS(w, request)
}
// Handle registers the handler with the given pattern
// in the DefaultServeMux. The documentation for
// ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
// HandleRemove deregisters the handle with the given pattern
// in the DefaultServeMux.
func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
// HandleFunc registers the handler function with the given pattern
// in the DefaultServeMux.
func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
DefaultServeMux.HandleFunc(pattern, handler)
}
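A minimal sketch tying the handler and server pieces above together: register a handler on the DefaultServeMux and serve UDP (the vendored package is assumed to be imported as github.com/miekg/dns; the address and the answer data are made up):
package main

import (
	"log"
	"net"

	"github.com/miekg/dns"
)

// handleExample answers A queries for the registered zone with a fixed address.
func handleExample(w dns.ResponseWriter, r *dns.Msg) {
	m := new(dns.Msg)
	m.SetReply(r)
	if len(r.Question) == 1 && r.Question[0].Qtype == dns.TypeA {
		m.Answer = append(m.Answer, &dns.A{
			Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},
			A:   net.ParseIP("192.0.2.53"),
		})
	}
	w.WriteMsg(m)
}

func main() {
	dns.HandleFunc("example.org.", handleExample)
	// A nil Handler means the DefaultServeMux is used.
	log.Fatal(dns.ListenAndServe("127.0.0.1:5353", "udp", nil))
}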
// Writer writes raw DNS messages; each call to Write should send an entire message.
type Writer interface {
io.Writer
}
// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
type Reader interface {
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
}
// defaultReader is an adapter for the Server struct that implements the Reader interface
// using the readTCP and readUDP func of the embedded Server.
type defaultReader struct {
*Server
}
func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
return dr.readTCP(conn, timeout)
}
func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
return dr.readUDP(conn, timeout)
}
// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
// Implementations should never return a nil Reader.
type DecorateReader func(Reader) Reader
// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
// Implementations should never return a nil Writer.
type DecorateWriter func(Writer) Writer
// A Server defines parameters for running a DNS server.
type Server struct {
// Address to listen on, ":dns" if empty.
Addr string
// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one
Net string
// TCP Listener to use, this is to aid in systemd's socket activation.
Listener net.Listener
// TLS connection configuration
TLSConfig *tls.Config
// UDP "Listener" to use, this is to aid in systemd's socket activation.
PacketConn net.PacketConn
// Handler to invoke, dns.DefaultServeMux if nil.
Handler Handler
// Default buffer size to use to read incoming UDP messages. If not set
// it defaults to MinMsgSize (512 B).
UDPSize int
// The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
ReadTimeout time.Duration
// The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
WriteTimeout time.Duration
// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
IdleTimeout func() time.Duration
// Secret(s) for Tsig map[<zonename>]<base64 secret>.
TsigSecret map[string]string
// Unsafe instructs the server to disregard any sanity checks and directly hand the message to
// the handler. In particular it will not drop messages that already have the QR (response) bit set.
Unsafe bool
// If NotifyStartedFunc is set it is called once the server has started listening.
NotifyStartedFunc func()
// DecorateReader is optional, allows customization of the process that reads raw DNS messages.
DecorateReader DecorateReader
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
DecorateWriter DecorateWriter
// Graceful shutdown handling
inFlight sync.WaitGroup
lock sync.RWMutex
started bool
}
// ListenAndServe starts a nameserver on the configured address in *Server.
func (srv *Server) ListenAndServe() error {
srv.lock.Lock()
defer srv.lock.Unlock()
if srv.started {
return &Error{err: "server already started"}
}
addr := srv.Addr
if addr == "" {
addr = ":domain"
}
if srv.UDPSize == 0 {
srv.UDPSize = MinMsgSize
}
switch srv.Net {
case "tcp", "tcp4", "tcp6":
a, err := net.ResolveTCPAddr(srv.Net, addr)
if err != nil {
return err
}
l, err := net.ListenTCP(srv.Net, a)
if err != nil {
return err
}
srv.Listener = l
srv.started = true
srv.lock.Unlock()
err = srv.serveTCP(l)
srv.lock.Lock() // to satisfy the defer at the top
return err
case "tcp-tls", "tcp4-tls", "tcp6-tls":
network := "tcp"
if srv.Net == "tcp4-tls" {
network = "tcp4"
} else if srv.Net == "tcp6" {
network = "tcp6"
}
l, err := tls.Listen(network, addr, srv.TLSConfig)
if err != nil {
return err
}
srv.Listener = l
srv.started = true
srv.lock.Unlock()
err = srv.serveTCP(l)
srv.lock.Lock() // to satisfy the defer at the top
return err
case "udp", "udp4", "udp6":
a, err := net.ResolveUDPAddr(srv.Net, addr)
if err != nil {
return err
}
l, err := net.ListenUDP(srv.Net, a)
if err != nil {
return err
}
if e := setUDPSocketOptions(l); e != nil {
return e
}
srv.PacketConn = l
srv.started = true
srv.lock.Unlock()
err = srv.serveUDP(l)
srv.lock.Lock() // to satisfy the defer at the top
return err
}
return &Error{err: "bad network"}
}
// ActivateAndServe starts a nameserver with the PacketConn or Listener
// configured in *Server. Its main use is to start a server from systemd.
func (srv *Server) ActivateAndServe() error {
srv.lock.Lock()
defer srv.lock.Unlock()
if srv.started {
return &Error{err: "server already started"}
}
pConn := srv.PacketConn
l := srv.Listener
if pConn != nil {
if srv.UDPSize == 0 {
srv.UDPSize = MinMsgSize
}
if t, ok := pConn.(*net.UDPConn); ok {
if e := setUDPSocketOptions(t); e != nil {
return e
}
srv.started = true
srv.lock.Unlock()
e := srv.serveUDP(t)
srv.lock.Lock() // to satisfy the defer at the top
return e
}
}
if l != nil {
srv.started = true
srv.lock.Unlock()
e := srv.serveTCP(l)
srv.lock.Lock() // to satisfy the defer at the top
return e
}
return &Error{err: "bad listeners"}
}
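// Illustrative sketch (not part of the upstream file): serving on a listener
// that a socket-activation manager (for example a systemd socket unit) opened
// and handed to the process already bound.
func exampleActivateAndServe(l net.Listener) error {
srv := &Server{Listener: l, Handler: DefaultServeMux}
return srv.ActivateAndServe()
}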
// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and
// ActivateAndServe will return. All in-progress queries are completed before the server
// is taken down. If Shutdown takes longer than the read timeout an error
// is returned.
func (srv *Server) Shutdown() error {
srv.lock.Lock()
if !srv.started {
srv.lock.Unlock()
return &Error{err: "server not started"}
}
srv.started = false
srv.lock.Unlock()
if srv.PacketConn != nil {
srv.PacketConn.Close()
}
if srv.Listener != nil {
srv.Listener.Close()
}
fin := make(chan bool)
go func() {
srv.inFlight.Wait()
fin <- true
}()
select {
case <-time.After(srv.getReadTimeout()):
return &Error{err: "server shutdown is pending"}
case <-fin:
return nil
}
}
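// Illustrative sketch (not part of the upstream file): start a UDP nameserver,
// wait for NotifyStartedFunc, then shut it down again. The address is an
// arbitrary test value.
func exampleListenAndShutdown() error {
started := make(chan struct{})
srv := &Server{
Addr:              "127.0.0.1:8053",
Net:               "udp",
Handler:           DefaultServeMux,
NotifyStartedFunc: func() { close(started) },
}
errCh := make(chan error, 1)
go func() { errCh <- srv.ListenAndServe() }()
select {
case <-started:
case err := <-errCh: // e.g. the port was already taken
return err
}
if err := srv.Shutdown(); err != nil {
return err
}
return <-errCh
}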
// getReadTimeout returns the read timeout to use: ReadTimeout if set, otherwise the package default.
func (srv *Server) getReadTimeout() time.Duration {
rtimeout := dnsTimeout
if srv.ReadTimeout != 0 {
rtimeout = srv.ReadTimeout
}
return rtimeout
}
// serveTCP starts a TCP listener for the server.
// Each request is handled in a separate goroutine.
func (srv *Server) serveTCP(l net.Listener) error {
defer l.Close()
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
}
reader := Reader(&defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
handler := srv.Handler
if handler == nil {
handler = DefaultServeMux
}
rtimeout := srv.getReadTimeout()
// deadline is not used here
for {
rw, err := l.Accept()
if err != nil {
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
continue
}
return err
}
m, err := reader.ReadTCP(rw, rtimeout)
srv.lock.RLock()
if !srv.started {
srv.lock.RUnlock()
return nil
}
srv.lock.RUnlock()
if err != nil {
continue
}
srv.inFlight.Add(1)
go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
}
}
// serveUDP starts a UDP listener for the server.
// Each request is handled in a separate goroutine.
func (srv *Server) serveUDP(l *net.UDPConn) error {
defer l.Close()
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
}
reader := Reader(&defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
handler := srv.Handler
if handler == nil {
handler = DefaultServeMux
}
rtimeout := srv.getReadTimeout()
// deadline is not used here
for {
m, s, err := reader.ReadUDP(l, rtimeout)
srv.lock.RLock()
if !srv.started {
srv.lock.RUnlock()
return nil
}
srv.lock.RUnlock()
if err != nil {
continue
}
srv.inFlight.Add(1)
go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
}
}
// Serve a new connection.
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) {
defer srv.inFlight.Done()
w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
} else {
w.writer = w
}
q := 0 // number of TCP queries handled on this connection
reader := Reader(&defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
Redo:
req := new(Msg)
err := req.Unpack(m)
if err != nil { // Send a FormatError back
x := new(Msg)
x.SetRcodeFormatError(req)
w.WriteMsg(x)
goto Exit
}
if !srv.Unsafe && req.Response {
goto Exit
}
w.tsigStatus = nil
if w.tsigSecret != nil {
if t := req.IsTsig(); t != nil {
secret := t.Hdr.Name
if _, ok := w.tsigSecret[secret]; !ok {
// Unknown key name: record the error rather than letting TsigVerify overwrite it below.
w.tsigStatus = ErrKeyAlg
} else {
w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false)
}
w.tsigTimersOnly = false
w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
}
}
h.ServeDNS(w, req) // Writes back to the client
Exit:
if w.tcp == nil {
return
}
// TODO(miek): make this number configurable?
if q > maxTCPQueries { // close socket after this many queries
w.Close()
return
}
if w.hijacked {
return // client calls Close()
}
if u != nil { // UDP, "close" and return
w.Close()
return
}
idleTimeout := tcpIdleTimeout
if srv.IdleTimeout != nil {
idleTimeout = srv.IdleTimeout()
}
m, err = reader.ReadTCP(w.tcp, idleTimeout)
if err == nil {
q++
goto Redo
}
w.Close()
return
}
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
conn.SetReadDeadline(time.Now().Add(timeout))
l := make([]byte, 2)
n, err := conn.Read(l)
if err != nil || n != 2 {
if err != nil {
return nil, err
}
return nil, ErrShortRead
}
length := binary.BigEndian.Uint16(l)
if length == 0 {
return nil, ErrShortRead
}
m := make([]byte, int(length))
n, err = conn.Read(m[:int(length)])
if err != nil || n == 0 {
if err != nil {
return nil, err
}
return nil, ErrShortRead
}
i := n
for i < int(length) {
j, err := conn.Read(m[i:int(length)])
if err != nil {
return nil, err
}
i += j
}
n = i
m = m[:n]
return m, nil
}
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
conn.SetReadDeadline(time.Now().Add(timeout))
m := make([]byte, srv.UDPSize)
n, s, err := ReadFromSessionUDP(conn, m)
if err != nil || n == 0 {
if err != nil {
return nil, nil, err
}
return nil, nil, ErrShortRead
}
m = m[:n]
return m, s, nil
}
// WriteMsg implements the ResponseWriter.WriteMsg method.
func (w *response) WriteMsg(m *Msg) (err error) {
var data []byte
if w.tsigSecret != nil { // if no secrets, don't check for the tsig (which is a longer check)
if t := m.IsTsig(); t != nil {
data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
if err != nil {
return err
}
_, err = w.writer.Write(data)
return err
}
}
data, err = m.Pack()
if err != nil {
return err
}
_, err = w.writer.Write(data)
return err
}
// Write implements the ResponseWriter.Write method.
func (w *response) Write(m []byte) (int, error) {
switch {
case w.udp != nil:
n, err := WriteToSessionUDP(w.udp, m, w.udpSession)
return n, err
case w.tcp != nil:
lm := len(m)
if lm < 2 {
return 0, io.ErrShortBuffer
}
if lm > MaxMsgSize {
return 0, &Error{err: "message too large"}
}
l := make([]byte, 2, 2+lm)
binary.BigEndian.PutUint16(l, uint16(lm))
m = append(l, m...)
n, err := io.Copy(w.tcp, bytes.NewReader(m))
return int(n), err
}
panic("not reached")
}
// LocalAddr implements the ResponseWriter.LocalAddr method.
func (w *response) LocalAddr() net.Addr {
if w.tcp != nil {
return w.tcp.LocalAddr()
}
return w.udp.LocalAddr()
}
// RemoteAddr implements the ResponseWriter.RemoteAddr method.
func (w *response) RemoteAddr() net.Addr { return w.remoteAddr }
// TsigStatus implements the ResponseWriter.TsigStatus method.
func (w *response) TsigStatus() error { return w.tsigStatus }
// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
// Hijack implements the ResponseWriter.Hijack method.
func (w *response) Hijack() { w.hijacked = true }
// Close implements the ResponseWriter.Close method
func (w *response) Close() error {
// Can't close the udp conn, as that is actually the listener.
if w.tcp != nil {
e := w.tcp.Close()
w.tcp = nil
return e
}
return nil
}
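// Illustrative sketch (not part of the upstream file): a TSIG-aware setup. The
// server is given the shared secret keyed by zone name, and the handler only
// signs its reply when TsigStatus reports a valid request signature.
// Msg.SetReply and Msg.SetTsig are assumed from the wider package API; the
// key name ("axfr.") and port are placeholders.
func exampleTsigServer(b64secret string) *Server {
HandleFunc("example.org.", func(w ResponseWriter, r *Msg) {
m := new(Msg)
m.SetReply(r)
if r.IsTsig() != nil && w.TsigStatus() == nil {
// The MAC itself is filled in by WriteMsg using srv.TsigSecret.
m.SetTsig("axfr.", HmacSHA256, 300, time.Now().Unix())
}
w.WriteMsg(m)
})
return &Server{
Addr:       ":8053",
Net:        "udp",
TsigSecret: map[string]string{"axfr.": b64secret},
}
}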

219
vendor/github.com/miekg/dns/sig0.go generated vendored Normal file
View File

@ -0,0 +1,219 @@
package dns
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"encoding/binary"
"math/big"
"strings"
"time"
)
// Sign signs a dns.Msg. It fills the signature with the appropriate data.
// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
// and Expiration set.
func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
if k == nil {
return nil, ErrPrivKey
}
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return nil, ErrKey
}
rr.Header().Rrtype = TypeSIG
rr.Header().Class = ClassANY
rr.Header().Ttl = 0
rr.Header().Name = "."
rr.OrigTtl = 0
rr.TypeCovered = 0
rr.Labels = 0
buf := make([]byte, m.Len()+rr.len())
mbuf, err := m.PackBuffer(buf)
if err != nil {
return nil, err
}
if &buf[0] != &mbuf[0] {
return nil, ErrBuf
}
off, err := PackRR(rr, buf, len(mbuf), nil, false)
if err != nil {
return nil, err
}
buf = buf[:off:cap(buf)]
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return nil, ErrAlg
}
hasher := hash.New()
// Write SIG rdata
hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
// Write message
hasher.Write(buf[:len(mbuf)])
signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm)
if err != nil {
return nil, err
}
rr.Signature = toBase64(signature)
sig := string(signature)
buf = append(buf, sig...)
if len(buf) > int(^uint16(0)) {
return nil, ErrBuf
}
// Adjust sig data length
rdoff := len(mbuf) + 1 + 2 + 2 + 4
rdlen := binary.BigEndian.Uint16(buf[rdoff:])
rdlen += uint16(len(sig))
binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
// Adjust additional count
adc := binary.BigEndian.Uint16(buf[10:])
adc++
binary.BigEndian.PutUint16(buf[10:], adc)
return buf, nil
}
// Verify validates the message buf using the key k.
// It's assumed that buf is a valid message from which rr was unpacked.
func (rr *SIG) Verify(k *KEY, buf []byte) error {
if k == nil {
return ErrKey
}
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return ErrKey
}
var hash crypto.Hash
switch rr.Algorithm {
case DSA, RSASHA1:
hash = crypto.SHA1
case RSASHA256, ECDSAP256SHA256:
hash = crypto.SHA256
case ECDSAP384SHA384:
hash = crypto.SHA384
case RSASHA512:
hash = crypto.SHA512
default:
return ErrAlg
}
hasher := hash.New()
buflen := len(buf)
qdc := binary.BigEndian.Uint16(buf[4:])
anc := binary.BigEndian.Uint16(buf[6:])
auc := binary.BigEndian.Uint16(buf[8:])
adc := binary.BigEndian.Uint16(buf[10:])
offset := 12
var err error
for i := uint16(0); i < qdc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip past Type and Class
offset += 2 + 2
}
for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip past Type, Class and TTL
offset += 2 + 2 + 4
if offset+1 >= buflen {
continue
}
var rdlen uint16
rdlen = binary.BigEndian.Uint16(buf[offset:])
offset += 2
offset += int(rdlen)
}
if offset >= buflen {
return &Error{err: "overflowing unpacking signed message"}
}
// offset should be just prior to SIG
bodyend := offset
// owner name SHOULD be root
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip Type, Class, TTL, RDLen
offset += 2 + 2 + 4 + 2
sigstart := offset
// Skip Type Covered, Algorithm, Labels, Original TTL
offset += 2 + 1 + 1 + 4
if offset+4+4 >= buflen {
return &Error{err: "overflow unpacking signed message"}
}
expire := binary.BigEndian.Uint32(buf[offset:])
offset += 4
incept := binary.BigEndian.Uint32(buf[offset:])
offset += 4
now := uint32(time.Now().Unix())
if now < incept || now > expire {
return ErrTime
}
// Skip key tag
offset += 2
var signername string
signername, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// If the key came from the DNS, name compression might
// have mangled the case of the name.
if strings.ToLower(signername) != strings.ToLower(k.Header().Name) {
return &Error{err: "signer name doesn't match key name"}
}
sigend := offset
hasher.Write(buf[sigstart:sigend])
hasher.Write(buf[:10])
hasher.Write([]byte{
byte((adc - 1) >> 8), // high byte of the adjusted additional count
byte(adc - 1),
})
hasher.Write(buf[12:bodyend])
hashed := hasher.Sum(nil)
sig := buf[sigend:]
switch k.Algorithm {
case DSA:
pk := k.publicKeyDSA()
sig = sig[1:]
r := big.NewInt(0)
r.SetBytes(sig[:len(sig)/2])
s := big.NewInt(0)
s.SetBytes(sig[len(sig)/2:])
if pk != nil {
if dsa.Verify(pk, hashed, r, s) {
return nil
}
return ErrSig
}
case RSASHA1, RSASHA256, RSASHA512:
pk := k.publicKeyRSA()
if pk != nil {
return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
}
case ECDSAP256SHA256, ECDSAP384SHA384:
pk := k.publicKeyECDSA()
r := big.NewInt(0)
r.SetBytes(sig[:len(sig)/2])
s := big.NewInt(0)
s.SetBytes(sig[len(sig)/2:])
if pk != nil {
if ecdsa.Verify(pk, hashed, r, s) {
return nil
}
return ErrSig
}
}
return ErrKeyAlg
}
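// Illustrative sketch (not part of the upstream file): signing a query with
// SIG(0) and checking it against the matching KEY. The crypto.Signer and KEY
// are assumed to exist already; Msg.SetQuestion comes from the wider package
// API, and key generation and the network exchange are not shown.
func exampleSIG0(k *KEY, signer crypto.Signer) error {
m := new(Msg)
m.SetQuestion("example.org.", TypeSOA)
sig := new(SIG)
sig.SignerName = k.Hdr.Name
sig.KeyTag = k.KeyTag()
sig.Algorithm = k.Algorithm
now := uint32(time.Now().Unix())
sig.Inception, sig.Expiration = now-300, now+300
wire, err := sig.Sign(signer, m) // full message with the SIG RR appended
if err != nil {
return err
}
// The receiver would unpack the SIG from the wire form and verify it.
return sig.Verify(k, wire)
}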

57
vendor/github.com/miekg/dns/singleinflight.go generated vendored Normal file
View File

@ -0,0 +1,57 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted for dns package usage by Miek Gieben.
package dns
import "sync"
import "time"
// call is an in-flight or completed singleflight.Do call
type call struct {
wg sync.WaitGroup
val *Msg
rtt time.Duration
err error
dups int
}
// singleflight represents a class of work and forms a namespace in
// which units of work can be executed with duplicate suppression.
type singleflight struct {
sync.Mutex // protects m
m map[string]*call // lazily initialized
}
// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
// The return value shared indicates whether v was given to multiple callers.
func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
g.Lock()
if g.m == nil {
g.m = make(map[string]*call)
}
if c, ok := g.m[key]; ok {
c.dups++
g.Unlock()
c.wg.Wait()
return c.val, c.rtt, c.err, true
}
c := new(call)
c.wg.Add(1)
g.m[key] = c
g.Unlock()
c.val, c.rtt, c.err = fn()
c.wg.Done()
g.Lock()
delete(g.m, key)
g.Unlock()
return c.val, c.rtt, c.err, c.dups > 0
}
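// Illustrative sketch (not part of the upstream file): coalescing duplicate
// in-flight lookups. Callers that pass the same key while fn is still running
// block and receive the shared result instead of triggering another query.
func exampleSingleflight(g *singleflight, q *Msg, fn func() (*Msg, time.Duration, error)) (*Msg, error) {
key := q.Question[0].Name // a real key would also fold in qtype and qclass
r, _, err, shared := g.Do(key, fn)
_ = shared // true when the answer was shared with another caller
return r, err
}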

86
vendor/github.com/miekg/dns/tlsa.go generated vendored Normal file
View File

@ -0,0 +1,86 @@
package dns
import (
"crypto/sha256"
"crypto/sha512"
"crypto/x509"
"encoding/hex"
"errors"
"io"
"net"
"strconv"
)
// CertificateToDANE converts a certificate to a hex string as used in the TLSA record.
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
switch matchingType {
case 0:
switch selector {
case 0:
return hex.EncodeToString(cert.Raw), nil
case 1:
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
}
case 1:
h := sha256.New()
switch selector {
case 0:
io.WriteString(h, string(cert.Raw))
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
return hex.EncodeToString(h.Sum(nil)), nil
}
case 2:
h := sha512.New()
switch selector {
case 0:
io.WriteString(h, string(cert.Raw))
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
return hex.EncodeToString(h.Sum(nil)), nil
}
}
return "", errors.New("dns: bad TLSA MatchingType or TLSA Selector")
}
// Sign creates a TLSA record from an SSL certificate.
func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
r.Hdr.Rrtype = TypeTLSA
r.Usage = uint8(usage)
r.Selector = uint8(selector)
r.MatchingType = uint8(matchingType)
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil {
return err
}
return nil
}
// Verify verifies a TLSA record against an SSL certificate. If it is OK
// a nil error is returned.
func (r *TLSA) Verify(cert *x509.Certificate) error {
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil {
return err // Not also ErrSig?
}
if r.Certificate == c {
return nil
}
return ErrSig // ErrSig, really?
}
// TLSAName returns the owner name of a TLSA resource record as per the
// rules specified in RFC 6698, Section 3.
func TLSAName(name, service, network string) (string, error) {
if !IsFqdn(name) {
return "", ErrFqdn
}
p, err := net.LookupPort(network, service)
if err != nil {
return "", err
}
return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
}
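// Illustrative sketch (not part of the upstream file): deriving the TLSA owner
// name for a service and checking a certificate against the published record.
// Usage 3 / selector 1 / matching type 1 (DANE-EE, SPKI, SHA-256) are example
// parameter choices, not a recommendation; the host name is a placeholder.
func exampleTLSA(cert *x509.Certificate) error {
name, err := TLSAName("mail.example.org.", "smtp", "tcp") // "_25._tcp.mail.example.org."
if err != nil {
return err
}
r := new(TLSA)
r.Hdr.Name = name
if err := r.Sign(3, 1, 1, cert); err != nil {
return err
}
// A client that fetched r over DNS compares it to the certificate
// presented in the TLS handshake.
return r.Verify(cert)
}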

384
vendor/github.com/miekg/dns/tsig.go generated vendored Normal file
View File

@ -0,0 +1,384 @@
package dns
import (
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/binary"
"encoding/hex"
"hash"
"io"
"strconv"
"strings"
"time"
)
// HMAC hashing codes. These are transmitted as domain names.
const (
HmacMD5 = "hmac-md5.sig-alg.reg.int."
HmacSHA1 = "hmac-sha1."
HmacSHA256 = "hmac-sha256."
HmacSHA512 = "hmac-sha512."
)
// TSIG is the RR that holds the transaction signature of a message.
// See RFC 2845 and RFC 4635.
type TSIG struct {
Hdr RR_Header
Algorithm string `dns:"domain-name"`
TimeSigned uint64 `dns:"uint48"`
Fudge uint16
MACSize uint16
MAC string `dns:"size-hex:MACSize"`
OrigId uint16
Error uint16
OtherLen uint16
OtherData string `dns:"size-hex:OtherLen"`
}
// TSIG has no official presentation format, but this will suffice.
func (rr *TSIG) String() string {
s := "\n;; TSIG PSEUDOSECTION:\n"
s += rr.Hdr.String() +
" " + rr.Algorithm +
" " + tsigTimeToString(rr.TimeSigned) +
" " + strconv.Itoa(int(rr.Fudge)) +
" " + strconv.Itoa(int(rr.MACSize)) +
" " + strings.ToUpper(rr.MAC) +
" " + strconv.Itoa(int(rr.OrigId)) +
" " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
" " + strconv.Itoa(int(rr.OtherLen)) +
" " + rr.OtherData
return s
}
// The following values must be put in wireformat, so that the MAC can be calculated.
// RFC 2845, section 3.4.2. TSIG Variables.
type tsigWireFmt struct {
// From RR_Header
Name string `dns:"domain-name"`
Class uint16
Ttl uint32
// Rdata of the TSIG
Algorithm string `dns:"domain-name"`
TimeSigned uint64 `dns:"uint48"`
Fudge uint16
// MACSize, MAC and OrigId excluded
Error uint16
OtherLen uint16
OtherData string `dns:"size-hex:OtherLen"`
}
// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC
type macWireFmt struct {
MACSize uint16
MAC string `dns:"size-hex:MACSize"`
}
// 3.3. Time values used in TSIG calculations
type timerWireFmt struct {
TimeSigned uint64 `dns:"uint48"`
Fudge uint16
}
// TsigGenerate fills out the TSIG record attached to the message.
// The message should contain a "stub" TSIG RR with the algorithm, key name
// (owner name of the RR), time fudge (defaults to 300 seconds) and the
// current time. The TSIG MAC is saved in that TSIG RR.
// When TsigGenerate is called for the first time requestMAC is set to the empty string and
// timersOnly is false.
// If something goes wrong an error is returned, otherwise it is nil.
func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
if m.IsTsig() == nil {
panic("dns: TSIG not last RR in additional")
}
// If we barf here, the caller is to blame
rawsecret, err := fromBase64([]byte(secret))
if err != nil {
return nil, "", err
}
rr := m.Extra[len(m.Extra)-1].(*TSIG)
m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
mbuf, err := m.Pack()
if err != nil {
return nil, "", err
}
buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
t := new(TSIG)
var h hash.Hash
switch strings.ToLower(rr.Algorithm) {
case HmacMD5:
h = hmac.New(md5.New, []byte(rawsecret))
case HmacSHA1:
h = hmac.New(sha1.New, []byte(rawsecret))
case HmacSHA256:
h = hmac.New(sha256.New, []byte(rawsecret))
case HmacSHA512:
h = hmac.New(sha512.New, []byte(rawsecret))
default:
return nil, "", ErrKeyAlg
}
io.WriteString(h, string(buf))
t.MAC = hex.EncodeToString(h.Sum(nil))
t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
t.Fudge = rr.Fudge
t.TimeSigned = rr.TimeSigned
t.Algorithm = rr.Algorithm
t.OrigId = m.Id
tbuf := make([]byte, t.len())
if off, err := PackRR(t, tbuf, 0, nil, false); err == nil {
tbuf = tbuf[:off] // reset to actual size used
} else {
return nil, "", err
}
mbuf = append(mbuf, tbuf...)
// Update the ArCount directly in the buffer.
binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1))
return mbuf, t.MAC, nil
}
// TsigVerify verifies the TSIG on a message.
// If the signature does not validate, a non-nil error describing the
// failure is returned.
func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
rawsecret, err := fromBase64([]byte(secret))
if err != nil {
return err
}
// Strip the TSIG from the incoming msg
stripped, tsig, err := stripTsig(msg)
if err != nil {
return err
}
msgMAC, err := hex.DecodeString(tsig.MAC)
if err != nil {
return err
}
buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
// Fudge factor works both ways. A message can arrive before it was signed because
// of clock skew.
now := uint64(time.Now().Unix())
ti := now - tsig.TimeSigned
if now < tsig.TimeSigned {
ti = tsig.TimeSigned - now
}
if uint64(tsig.Fudge) < ti {
return ErrTime
}
var h hash.Hash
switch strings.ToLower(tsig.Algorithm) {
case HmacMD5:
h = hmac.New(md5.New, rawsecret)
case HmacSHA1:
h = hmac.New(sha1.New, rawsecret)
case HmacSHA256:
h = hmac.New(sha256.New, rawsecret)
case HmacSHA512:
h = hmac.New(sha512.New, rawsecret)
default:
return ErrKeyAlg
}
h.Write(buf)
if !hmac.Equal(h.Sum(nil), msgMAC) {
return ErrSig
}
return nil
}
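// Illustrative sketch (not part of the upstream file): signing an outgoing
// message and verifying it with the same base64 secret. Msg.SetQuestion and
// Msg.SetTsig are assumed from the wider package API; a responder would pass
// the request MAC instead of "" when verifying and signing its answer.
func exampleTsigRoundTrip(b64secret string) error {
m := new(Msg)
m.SetQuestion("example.org.", TypeSOA)
// Attach the "stub" TSIG RR; TsigGenerate fills in the MAC.
m.SetTsig("axfr.", HmacSHA256, 300, time.Now().Unix())
wire, _, err := TsigGenerate(m, b64secret, "", false)
if err != nil {
return err
}
return TsigVerify(wire, b64secret, "", false)
}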
// Create a wiredata buffer for the MAC calculation.
func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte {
var buf []byte
if rr.TimeSigned == 0 {
rr.TimeSigned = uint64(time.Now().Unix())
}
if rr.Fudge == 0 {
rr.Fudge = 300 // Standard (RFC) default.
}
if requestMAC != "" {
m := new(macWireFmt)
m.MACSize = uint16(len(requestMAC) / 2)
m.MAC = requestMAC
buf = make([]byte, len(requestMAC)) // long enough
n, _ := packMacWire(m, buf)
buf = buf[:n]
}
tsigvar := make([]byte, DefaultMsgSize)
if timersOnly {
tsig := new(timerWireFmt)
tsig.TimeSigned = rr.TimeSigned
tsig.Fudge = rr.Fudge
n, _ := packTimerWire(tsig, tsigvar)
tsigvar = tsigvar[:n]
} else {
tsig := new(tsigWireFmt)
tsig.Name = strings.ToLower(rr.Hdr.Name)
tsig.Class = ClassANY
tsig.Ttl = rr.Hdr.Ttl
tsig.Algorithm = strings.ToLower(rr.Algorithm)
tsig.TimeSigned = rr.TimeSigned
tsig.Fudge = rr.Fudge
tsig.Error = rr.Error
tsig.OtherLen = rr.OtherLen
tsig.OtherData = rr.OtherData
n, _ := packTsigWire(tsig, tsigvar)
tsigvar = tsigvar[:n]
}
if requestMAC != "" {
x := append(buf, msgbuf...)
buf = append(x, tsigvar...)
} else {
buf = append(msgbuf, tsigvar...)
}
return buf
}
// Strip the TSIG from the raw message.
func stripTsig(msg []byte) ([]byte, *TSIG, error) {
// Copied from msg.go's Unpack() Header, but modified.
var (
dh Header
err error
)
off, tsigoff := 0, 0
if dh, off, err = unpackMsgHdr(msg, off); err != nil {
return nil, nil, err
}
if dh.Arcount == 0 {
return nil, nil, ErrNoSig
}
// Rcode, see msg.go Unpack()
if int(dh.Bits&0xF) == RcodeNotAuth {
return nil, nil, ErrAuth
}
for i := 0; i < int(dh.Qdcount); i++ {
_, off, err = unpackQuestion(msg, off)
if err != nil {
return nil, nil, err
}
}
_, off, err = unpackRRslice(int(dh.Ancount), msg, off)
if err != nil {
return nil, nil, err
}
_, off, err = unpackRRslice(int(dh.Nscount), msg, off)
if err != nil {
return nil, nil, err
}
rr := new(TSIG)
var extra RR
for i := 0; i < int(dh.Arcount); i++ {
tsigoff = off
extra, off, err = UnpackRR(msg, off)
if err != nil {
return nil, nil, err
}
if extra.Header().Rrtype == TypeTSIG {
rr = extra.(*TSIG)
// Adjust Arcount.
arcount := binary.BigEndian.Uint16(msg[10:])
binary.BigEndian.PutUint16(msg[10:], arcount-1)
break
}
}
if rr == nil {
return nil, nil, ErrNoSig
}
return msg[:tsigoff], rr, nil
}
// Translate the TSIG time signed into a date. There is no
// need for RFC1982 calculations as this date is 48 bits.
func tsigTimeToString(t uint64) string {
ti := time.Unix(int64(t), 0).UTC()
return ti.Format("20060102150405")
}
func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) {
// copied from zmsg.go TSIG packing
// RR_Header
off, err := PackDomainName(tw.Name, msg, 0, nil, false)
if err != nil {
return off, err
}
off, err = packUint16(tw.Class, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(tw.Ttl, msg, off)
if err != nil {
return off, err
}
off, err = PackDomainName(tw.Algorithm, msg, off, nil, false)
if err != nil {
return off, err
}
off, err = packUint48(tw.TimeSigned, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(tw.Fudge, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(tw.Error, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(tw.OtherLen, msg, off)
if err != nil {
return off, err
}
off, err = packStringHex(tw.OtherData, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func packMacWire(mw *macWireFmt, msg []byte) (int, error) {
off, err := packUint16(mw.MACSize, msg, 0)
if err != nil {
return off, err
}
off, err = packStringHex(mw.MAC, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) {
off, err := packUint48(tw.TimeSigned, msg, 0)
if err != nil {
return off, err
}
off, err = packUint16(tw.Fudge, msg, off)
if err != nil {
return off, err
}
return off, nil
}

1249
vendor/github.com/miekg/dns/types.go generated vendored Normal file

File diff suppressed because it is too large

271
vendor/github.com/miekg/dns/types_generate.go generated vendored Normal file
View File

@ -0,0 +1,271 @@
//+build ignore
// types_generate.go is meant to run with go generate. It will use
// go/{importer,types} to track down all the RR struct types. Then for each type
// it will generate conversion tables (TypeToRR and TypeToString) and banal
// methods (len, Header, copy) based on the struct tags. The generated source is
// written to ztypes.go, and is meant to be checked into git.
package main
import (
"bytes"
"fmt"
"go/format"
"go/importer"
"go/types"
"log"
"os"
"strings"
"text/template"
)
var skipLen = map[string]struct{}{
"NSEC": {},
"NSEC3": {},
"OPT": {},
}
var packageHdr = `
// *** DO NOT MODIFY ***
// AUTOGENERATED BY go generate from type_generate.go
package dns
import (
"encoding/base64"
"net"
)
`
var TypeToRR = template.Must(template.New("TypeToRR").Parse(`
// TypeToRR is a map of constructors for each RR type.
var TypeToRR = map[uint16]func() RR{
{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) },
{{end}}{{end}} }
`))
var typeToString = template.Must(template.New("typeToString").Parse(`
// TypeToString is a map of strings for each RR type.
var TypeToString = map[uint16]string{
{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}",
{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR",
}
`))
var headerFunc = template.Must(template.New("headerFunc").Parse(`
// Header() functions
{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr }
{{end}}
`))
// getTypeStruct will take a type and the package scope, and return the
// (innermost) struct if the type is considered a RR type (currently defined as
// those structs beginning with a RR_Header, could be redefined as implementing
// the RR interface). The bool return value indicates if embedded structs were
// resolved.
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
st, ok := t.Underlying().(*types.Struct)
if !ok {
return nil, false
}
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
return st, false
}
if st.Field(0).Anonymous() {
st, _ := getTypeStruct(st.Field(0).Type(), scope)
return st, true
}
return nil, false
}
func main() {
// Import and type-check the package
pkg, err := importer.Default().Import("github.com/miekg/dns")
fatalIfErr(err)
scope := pkg.Scope()
// Collect constants like TypeX
var numberedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
b, ok := o.Type().(*types.Basic)
if !ok || b.Kind() != types.Uint16 {
continue
}
if !strings.HasPrefix(o.Name(), "Type") {
continue
}
name := strings.TrimPrefix(o.Name(), "Type")
if name == "PrivateRR" {
continue
}
numberedTypes = append(numberedTypes, name)
}
// Collect actual types (*X)
var namedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
continue
}
if name == "PrivateRR" {
continue
}
// Check if corresponding TypeX exists
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
log.Fatalf("Constant Type%s does not exist.", o.Name())
}
namedTypes = append(namedTypes, o.Name())
}
b := &bytes.Buffer{}
b.WriteString(packageHdr)
// Generate TypeToRR
fatalIfErr(TypeToRR.Execute(b, namedTypes))
// Generate typeToString
fatalIfErr(typeToString.Execute(b, numberedTypes))
// Generate headerFunc
fatalIfErr(headerFunc.Execute(b, namedTypes))
// Generate len()
fmt.Fprint(b, "// len() functions\n")
for _, name := range namedTypes {
if _, ok := skipLen[name]; ok {
continue
}
o := scope.Lookup(name)
st, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "func (rr *%s) len() int {\n", name)
fmt.Fprintf(b, "l := rr.Hdr.len()\n")
for i := 1; i < st.NumFields(); i++ {
o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) }
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"-"`:
// ignored
case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`:
o("for _, x := range rr.%s { l += len(x) + 1 }\n")
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
continue
}
switch {
case st.Tag(i) == `dns:"-"`:
// ignored
case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`:
o("l += len(rr.%s) + 1\n")
case st.Tag(i) == `dns:"octet"`:
o("l += len(rr.%s)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`):
fallthrough
case st.Tag(i) == `dns:"base64"`:
o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
fallthrough
case st.Tag(i) == `dns:"hex"`:
o("l += len(rr.%s)/2 + 1\n")
case st.Tag(i) == `dns:"a"`:
o("l += net.IPv4len // %s\n")
case st.Tag(i) == `dns:"aaaa"`:
o("l += net.IPv6len // %s\n")
case st.Tag(i) == `dns:"txt"`:
o("for _, t := range rr.%s { l += len(t) + 1 }\n")
case st.Tag(i) == `dns:"uint48"`:
o("l += 6 // %s\n")
case st.Tag(i) == "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("l += 1 // %s\n")
case types.Uint16:
o("l += 2 // %s\n")
case types.Uint32:
o("l += 4 // %s\n")
case types.Uint64:
o("l += 8 // %s\n")
case types.String:
o("l += len(rr.%s) + 1\n")
default:
log.Fatalln(name, st.Field(i).Name())
}
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
}
fmt.Fprintf(b, "return l }\n")
}
// Generate copy()
fmt.Fprint(b, "// copy() functions\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
fields := []string{"*rr.Hdr.copyHeader()"}
for i := 1; i < st.NumFields(); i++ {
f := st.Field(i).Name()
if sl, ok := st.Field(i).Type().(*types.Slice); ok {
t := sl.Underlying().String()
t = strings.TrimPrefix(t, "[]")
if strings.Contains(t, ".") {
splits := strings.Split(t, ".")
t = splits[len(splits)-1]
}
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n",
f, t, f, f, f)
fields = append(fields, f)
continue
}
if st.Field(i).Type().String() == "net.IP" {
fields = append(fields, "copyIP(rr."+f+")")
continue
}
fields = append(fields, "rr."+f)
}
fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ","))
fmt.Fprintf(b, "}\n")
}
// gofmt
res, err := format.Source(b.Bytes())
if err != nil {
b.WriteTo(os.Stderr)
log.Fatal(err)
}
// write result
f, err := os.Create("ztypes.go")
fatalIfErr(err)
defer f.Close()
f.Write(res)
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}
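// Illustrative note (not part of the upstream file): this generator is
// typically wired to a go:generate directive in the package source, e.g.
// "//go:generate go run types_generate.go", so that running "go generate"
// rewrites ztypes.go whenever the RR struct definitions change.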

58
vendor/github.com/miekg/dns/udp.go generated vendored Normal file
View File

@ -0,0 +1,58 @@
// +build !windows,!plan9
package dns
import (
"net"
"syscall"
)
// SessionUDP holds the remote address and the associated
// out-of-band data.
type SessionUDP struct {
raddr *net.UDPAddr
context []byte
}
// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
// setUDPSocketOptions sets the UDP socket options.
// This function is implemented on a per-platform basis. See udp_*.go for more details.
func setUDPSocketOptions(conn *net.UDPConn) error {
sa, err := getUDPSocketName(conn)
if err != nil {
return err
}
switch sa.(type) {
case *syscall.SockaddrInet6:
v6only, err := getUDPSocketOptions6Only(conn)
if err != nil {
return err
}
setUDPSocketOptions6(conn)
if !v6only {
setUDPSocketOptions4(conn)
}
case *syscall.SockaddrInet4:
setUDPSocketOptions4(conn)
}
return nil
}
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
oob := make([]byte, 40)
n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
if err != nil {
return n, nil, err
}
return n, &SessionUDP{raddr, oob[:oobn]}, err
}
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
return n, err
}
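// Illustrative sketch (not part of the upstream file): an echo loop that keeps
// the per-packet session data so every reply leaves from the same local
// address the query arrived on.
func exampleSessionEcho(conn *net.UDPConn) error {
buf := make([]byte, MinMsgSize)
for {
n, session, err := ReadFromSessionUDP(conn, buf)
if err != nil {
return err
}
if _, err := WriteToSessionUDP(conn, buf[:n], session); err != nil {
return err
}
}
}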

Some files were not shown because too many files have changed in this diff.