Release v0.3.0

Manu Herrera
2020-11-09 10:05:29 -03:00
parent 4e9aa7a3c5
commit 8107c4478b
1265 changed files with 440488 additions and 107809 deletions

View File

@@ -86,6 +86,12 @@ type AcceptChannel struct {
// base point in order to derive the revocation keys that are placed
// within the commitment transaction of the sender.
FirstCommitmentPoint *btcec.PublicKey
// UpfrontShutdownScript is the script to which the channel funds should
// be paid when mutually closing the channel. This field is optional and
// has a length prefix, so a zero will be written if it is not set
// and its length followed by the script will be written if it is set.
UpfrontShutdownScript DeliveryAddress
}
// A compile time check to ensure AcceptChannel implements the lnwire.Message
@@ -113,6 +119,7 @@ func (a *AcceptChannel) Encode(w io.Writer, pver uint32) error {
a.DelayedPaymentPoint,
a.HtlcPoint,
a.FirstCommitmentPoint,
a.UpfrontShutdownScript,
)
}
@@ -122,7 +129,8 @@ func (a *AcceptChannel) Encode(w io.Writer, pver uint32) error {
//
// This is part of the lnwire.Message interface.
func (a *AcceptChannel) Decode(r io.Reader, pver uint32) error {
return ReadElements(r,
// Read all the mandatory fields in the accept message.
err := ReadElements(r,
a.PendingChannelID[:],
&a.DustLimit,
&a.MaxValueInFlight,
@@ -138,6 +146,17 @@ func (a *AcceptChannel) Decode(r io.Reader, pver uint32) error {
&a.HtlcPoint,
&a.FirstCommitmentPoint,
)
if err != nil {
return err
}
// Check for the optional upfront shutdown script field. If it is not there,
// silence the EOF error.
err = ReadElement(r, &a.UpfrontShutdownScript)
if err != nil && err != io.EOF {
return err
}
return nil
}
// MsgType returns the MessageType code which uniquely identifies this message
@@ -154,5 +173,10 @@ func (a *AcceptChannel) MsgType() MessageType {
// This is part of the lnwire.Message interface.
func (a *AcceptChannel) MaxPayloadLength(uint32) uint32 {
// 32 + (8 * 4) + (4 * 1) + (2 * 2) + (33 * 6)
return 270
var length uint32 = 270 // base length
// Upfront shutdown script max length.
length += 2 + deliveryAddressMaxSize
return length
}
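As an aside, the optional script rides at the tail of the message, framed as a two-byte big-endian length followed by the script bytes, and the decoder above tolerates io.EOF when the field is absent; the maximum payload grows from 270 to 270 + 2 + 34 = 306 bytes. A minimal sketch of that framing using the exported WriteElements/ReadElement helpers from this release (the 22-byte script is only a placeholder):

package main

import (
	"bytes"
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	// A placeholder 22-byte (P2WPKH-sized) script stands in for a real
	// upfront shutdown script.
	addr := lnwire.DeliveryAddress(make([]byte, 22))

	var buf bytes.Buffer
	if err := lnwire.WriteElements(&buf, addr); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // 24: 2-byte length prefix + 22 script bytes

	var got lnwire.DeliveryAddress
	if err := lnwire.ReadElement(&buf, &got); err != nil {
		panic(err)
	}
	fmt.Println(len(got)) // 22
}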

View File

@@ -56,11 +56,11 @@ func NewChanIDFromOutPoint(op *wire.OutPoint) ChannelID {
// ChannelID. To do this, we expect the cid parameter to contain the txid
// unaltered and the outputIndex to be the output index
func xorTxid(cid *ChannelID, outputIndex uint16) {
var buf [32]byte
binary.BigEndian.PutUint16(buf[30:], outputIndex)
var buf [2]byte
binary.BigEndian.PutUint16(buf[:], outputIndex)
cid[30] = cid[30] ^ buf[30]
cid[31] = cid[31] ^ buf[31]
cid[30] ^= buf[0]
cid[31] ^= buf[1]
}
// GenPossibleOutPoints generates all the possible outputs given a channel ID.
@@ -69,13 +69,13 @@ func xorTxid(cid *ChannelID, outputIndex uint16) {
// mapping from channelID back to OutPoint.
func (c *ChannelID) GenPossibleOutPoints() [MaxFundingTxOutputs]wire.OutPoint {
var possiblePoints [MaxFundingTxOutputs]wire.OutPoint
for i := uint32(0); i < MaxFundingTxOutputs; i++ {
for i := uint16(0); i < MaxFundingTxOutputs; i++ {
cidCopy := *c
xorTxid(&cidCopy, uint16(i))
xorTxid(&cidCopy, i)
possiblePoints[i] = wire.OutPoint{
Hash: chainhash.Hash(cidCopy),
Index: i,
Index: uint32(i),
}
}
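The hunk above only tightens the integer types; for context, a ChannelID is the funding txid with the output index XORed into its final two bytes, which is why GenPossibleOutPoints can enumerate every candidate funding outpoint. A rough sketch of that round trip, with a zero hash standing in for a real funding txid:

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	// A zero hash stands in for the real funding txid.
	op := wire.OutPoint{Hash: chainhash.Hash{}, Index: 1}

	cid := lnwire.NewChanIDFromOutPoint(&op)

	// Exactly one of the enumerated candidates is the original outpoint.
	for _, candidate := range cid.GenPossibleOutPoints() {
		if candidate == op {
			fmt.Println("recovered funding outpoint:", candidate)
		}
	}
}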

View File

@@ -2,10 +2,16 @@ package lnwire
import (
"encoding/binary"
"fmt"
"errors"
"io"
)
var (
// ErrFeaturePairExists signals an error in feature vector construction
// where the opposing bit in a feature pair has already been set.
ErrFeaturePairExists = errors.New("feature pair exists")
)
// FeatureBit represents a feature that can be enabled in either a local or
// global feature vector at a specific bit position. Feature bits follow the
// "it's OK to be odd" rule, where features at even bit positions must be known
@@ -35,6 +41,16 @@ const (
// connection is established.
InitialRoutingSync FeatureBit = 3
// UpfrontShutdownScriptRequired is a feature bit which indicates that a
// peer *requires* that the remote peer accept an upfront shutdown script to
// which payout is enforced on cooperative closes.
UpfrontShutdownScriptRequired FeatureBit = 4
// UpfrontShutdownScriptOptional is an optional feature bit which indicates
// that the peer will accept an upfront shutdown script to which payout is
// enforced on cooperative closes.
UpfrontShutdownScriptOptional FeatureBit = 5
// GossipQueriesRequired is a feature bit that indicates that the
// receiving peer MUST know of the set of features that allows nodes to
// more efficiently query the network view of peers on the network for
@@ -65,6 +81,36 @@ const (
// party's non-delay output should not be tweaked.
StaticRemoteKeyOptional FeatureBit = 13
// PaymentAddrRequired is a required feature bit that signals that a
// node requires payment addresses, which are used to mitigate probing
// attacks on the receiver of a payment.
PaymentAddrRequired FeatureBit = 14
// PaymentAddrOptional is an optional feature bit that signals that a
// node supports payment addresses, which are used to mitigate probing
// attacks on the receiver of a payment.
PaymentAddrOptional FeatureBit = 15
// MPPRequired is a required feature bit that signals that the receiver
// of a payment requires settlement of an invoice with more than one
// HTLC.
MPPRequired FeatureBit = 16
// MPPOptional is an optional feature bit that signals that the receiver
// of a payment supports settlement of an invoice with more than one
// HTLC.
MPPOptional FeatureBit = 17
// AnchorsRequired is a required feature bit that signals that the node
// requires channels to be made using commitments having anchor
// outputs.
AnchorsRequired FeatureBit = 1336
// AnchorsOptional is an optional feature bit that signals that the
// node supports channels to be made using commitments having anchor
// outputs.
AnchorsOptional FeatureBit = 1337
// maxAllowedSize is a maximum allowed size of feature vector.
//
// NOTE: Within the protocol, the maximum allowed message size is 65535
@@ -78,28 +124,32 @@ const (
maxAllowedSize = 32764
)
// LocalFeatures is a mapping of known connection-local feature bits to a
// descriptive name. All known local feature bits must be assigned a name in
// this mapping. Local features are those which are only sent to the peer and
// not advertised to the entire network. A full description of these feature
// bits is provided in the BOLT-09 specification.
var LocalFeatures = map[FeatureBit]string{
DataLossProtectRequired: "data-loss-protect",
DataLossProtectOptional: "data-loss-protect",
InitialRoutingSync: "initial-routing-sync",
GossipQueriesRequired: "gossip-queries",
GossipQueriesOptional: "gossip-queries",
// IsRequired returns true if the feature bit is even, and false otherwise.
func (b FeatureBit) IsRequired() bool {
return b&0x01 == 0x00
}
// GlobalFeatures is a mapping of known global feature bits to a descriptive
// name. All known global feature bits must be assigned a name in this mapping.
// Global features are those which are advertised to the entire network. A full
// description of these feature bits is provided in the BOLT-09 specification.
var GlobalFeatures = map[FeatureBit]string{
TLVOnionPayloadRequired: "tlv-onion",
TLVOnionPayloadOptional: "tlv-onion",
StaticRemoteKeyOptional: "static-remote-key",
StaticRemoteKeyRequired: "static-remote-key",
// Features is a mapping of known feature bits to a descriptive name. All known
// feature bits must be assigned a name in this mapping, and feature bit pairs
// must be assigned together for correct behavior.
var Features = map[FeatureBit]string{
DataLossProtectRequired: "data-loss-protect",
DataLossProtectOptional: "data-loss-protect",
InitialRoutingSync: "initial-routing-sync",
UpfrontShutdownScriptRequired: "upfront-shutdown-script",
UpfrontShutdownScriptOptional: "upfront-shutdown-script",
GossipQueriesRequired: "gossip-queries",
GossipQueriesOptional: "gossip-queries",
TLVOnionPayloadRequired: "tlv-onion",
TLVOnionPayloadOptional: "tlv-onion",
StaticRemoteKeyOptional: "static-remote-key",
StaticRemoteKeyRequired: "static-remote-key",
PaymentAddrOptional: "payment-addr",
PaymentAddrRequired: "payment-addr",
MPPOptional: "multi-path-payments",
MPPRequired: "multi-path-payments",
AnchorsRequired: "anchor-commitments",
AnchorsOptional: "anchor-commitments",
}
// RawFeatureVector represents a set of feature bits as defined in BOLT-09. A
@@ -121,6 +171,26 @@ func NewRawFeatureVector(bits ...FeatureBit) *RawFeatureVector {
return fv
}
// Merge sets all feature bits from other on the receiver's feature vector.
func (fv *RawFeatureVector) Merge(other *RawFeatureVector) error {
for bit := range other.features {
err := fv.SafeSet(bit)
if err != nil {
return err
}
}
return nil
}
// Clone makes a copy of a feature vector.
func (fv *RawFeatureVector) Clone() *RawFeatureVector {
newFeatures := NewRawFeatureVector()
for bit := range fv.features {
newFeatures.Set(bit)
}
return newFeatures
}
// IsSet returns whether a particular feature bit is enabled in the vector.
func (fv *RawFeatureVector) IsSet(feature FeatureBit) bool {
return fv.features[feature]
@@ -131,6 +201,20 @@ func (fv *RawFeatureVector) Set(feature FeatureBit) {
fv.features[feature] = true
}
// SafeSet sets the chosen feature bit in the feature vector, but returns an
// error if the opposing feature bit is already set. This ensures both that we
// are creating properly structured feature vectors, and in some cases, that
// peers are sending properly encoded ones, i.e. it can't be both optional and
// required.
func (fv *RawFeatureVector) SafeSet(feature FeatureBit) error {
if _, ok := fv.features[feature^1]; ok {
return ErrFeaturePairExists
}
fv.Set(feature)
return nil
}
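A short usage sketch of the new pair check (the chosen bits are arbitrary): SafeSet refuses to set a bit whose optional/required twin is already present, and Merge applies the same rule while copying bits across vectors.

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	fv := lnwire.NewRawFeatureVector(lnwire.UpfrontShutdownScriptOptional)

	// Setting the opposing bit of an already-set pair is rejected.
	if err := fv.SafeSet(lnwire.UpfrontShutdownScriptRequired); err != nil {
		fmt.Println(err) // feature pair exists
	}

	// Merge applies the same pair check while copying bits across.
	other := lnwire.NewRawFeatureVector(lnwire.PaymentAddrOptional)
	if err := fv.Merge(other); err != nil {
		panic(err)
	}
	fmt.Println(fv.IsSet(lnwire.PaymentAddrOptional)) // true
}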
// Unset marks a feature as disabled in the vector.
func (fv *RawFeatureVector) Unset(feature FeatureBit) {
delete(fv.features, feature)
@@ -184,8 +268,16 @@ func (fv *RawFeatureVector) Encode(w io.Writer) error {
return fv.encode(w, length, 8)
}
// EncodeBase256 writes the feature vector in base256 representation. Every
// feature is encoded as a bit, and the bit vector is serialized using the least
// number of bytes.
func (fv *RawFeatureVector) EncodeBase256(w io.Writer) error {
length := fv.SerializeSize()
return fv.encode(w, length, 8)
}
// EncodeBase32 writes the feature vector in base32 representation. Every feature
// encoded as a bit, and the bit vector is serialized using the least number of
// is encoded as a bit, and the bit vector is serialized using the least number of
// bytes.
func (fv *RawFeatureVector) EncodeBase32(w io.Writer) error {
length := fv.SerializeSize32()
@@ -207,8 +299,8 @@ func (fv *RawFeatureVector) encode(w io.Writer, length, width int) error {
}
// Decode reads the feature vector from its byte representation. Every feature
// encoded as a bit, and the bit vector is serialized using the least number of
// bytes. Since the bit vector length is variable, the first two bytes of the
// is encoded as a bit, and the bit vector is serialized using the least number
// of bytes. Since the bit vector length is variable, the first two bytes of the
// serialization represent the length.
func (fv *RawFeatureVector) Decode(r io.Reader) error {
// Read the length of the feature vector.
@@ -221,6 +313,13 @@ func (fv *RawFeatureVector) Decode(r io.Reader) error {
return fv.decode(r, int(length), 8)
}
// DecodeBase256 reads the feature vector from its base256 representation. Every
// feature is encoded as a bit, and the bit vector is serialized using the least
// number of bytes.
func (fv *RawFeatureVector) DecodeBase256(r io.Reader, length int) error {
return fv.decode(r, length, 8)
}
// DecodeBase32 reads the feature vector from its base32 representation. Every
// feature encoded as a bit, and the bit vector is serialized using the least
// number of bytes.
@@ -274,6 +373,11 @@ func NewFeatureVector(featureVector *RawFeatureVector,
}
}
// EmptyFeatureVector returns a feature vector with no bits set.
func EmptyFeatureVector() *FeatureVector {
return NewFeatureVector(nil, Features)
}
// HasFeature returns whether a particular feature is included in the set. The
// feature can be seen as set either if the bit is set directly OR the queried
// bit has the same meaning as its corresponding even/odd bit, which is set
@@ -303,9 +407,9 @@ func (fv *FeatureVector) UnknownRequiredFeatures() []FeatureBit {
func (fv *FeatureVector) Name(bit FeatureBit) string {
name, known := fv.featureNames[bit]
if !known {
name = "unknown"
return "unknown"
}
return fmt.Sprintf("%s(%d)", name, bit)
return name
}
// IsKnown returns whether this feature bit represents a known feature.
@@ -324,3 +428,19 @@ func (fv *FeatureVector) isFeatureBitPair(bit FeatureBit) bool {
name2, known2 := fv.featureNames[bit^1]
return known1 && known2 && name1 == name2
}
// Features returns the set of raw features contained in the feature vector.
func (fv *FeatureVector) Features() map[FeatureBit]struct{} {
fs := make(map[FeatureBit]struct{}, len(fv.RawFeatureVector.features))
for b := range fv.RawFeatureVector.features {
fs[b] = struct{}{}
}
return fs
}
// Clone copies a feature vector, carrying over its feature bits. The feature
// names are not copied.
func (fv *FeatureVector) Clone() *FeatureVector {
features := fv.RawFeatureVector.Clone()
return NewFeatureVector(features, fv.featureNames)
}
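To illustrate how the unified Features map is consumed (the wrapping pattern below is ordinary lnwire usage, not part of this diff): HasFeature treats a set optional bit as satisfying a query for its required twin because both bits of the pair share a name, and Name now returns the bare label without the numeric suffix.

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	raw := lnwire.NewRawFeatureVector(lnwire.PaymentAddrOptional)
	fv := lnwire.NewFeatureVector(raw, lnwire.Features)

	// The optional bit satisfies a query for its required twin, since
	// both bits of the pair map to the same name.
	fmt.Println(fv.HasFeature(lnwire.PaymentAddrRequired)) // true

	// Name now returns the bare label for known bits.
	fmt.Println(fv.Name(lnwire.PaymentAddrOptional)) // payment-addr
}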

View File

@@ -7,20 +7,26 @@ import "io"
// diagnosis where features are incompatible. Each node MUST wait to receive
// init before sending any other messages.
type Init struct {
// GlobalFeatures is feature vector which affects HTLCs and thus are
// also advertised to other nodes.
// GlobalFeatures is a legacy feature vector used for backwards
// compatibility with older nodes. Any features defined here should be
// merged with those presented in Features.
GlobalFeatures *RawFeatureVector
// LocalFeatures is feature vector which only affect the protocol
// between two nodes.
LocalFeatures *RawFeatureVector
// Features is a feature vector containing the features supported by
// the remote node.
//
// NOTE: Older nodes may place some features in GlobalFeatures, but all
// new features are to be added in Features. When handling an Init
// message, any GlobalFeatures should be merged into the unified
// Features field.
Features *RawFeatureVector
}
// NewInitMessage creates new instance of init message object.
func NewInitMessage(gf *RawFeatureVector, lf *RawFeatureVector) *Init {
func NewInitMessage(gf *RawFeatureVector, f *RawFeatureVector) *Init {
return &Init{
GlobalFeatures: gf,
LocalFeatures: lf,
Features: f,
}
}
@@ -35,7 +41,7 @@ var _ Message = (*Init)(nil)
func (msg *Init) Decode(r io.Reader, pver uint32) error {
return ReadElements(r,
&msg.GlobalFeatures,
&msg.LocalFeatures,
&msg.Features,
)
}
@@ -46,7 +52,7 @@ func (msg *Init) Decode(r io.Reader, pver uint32) error {
func (msg *Init) Encode(w io.Writer, pver uint32) error {
return WriteElements(w,
msg.GlobalFeatures,
msg.LocalFeatures,
msg.Features,
)
}
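A hypothetical handler sketch for the NOTE above: when a legacy peer still advertises bits in GlobalFeatures, they can be folded into the unified vector with the new Merge helper. The helper name and its nil-guard are illustrative only:

package sketch

import "github.com/lightningnetwork/lnd/lnwire"

// unifyInitFeatures is a hypothetical helper that folds any legacy bits a
// peer still advertises in GlobalFeatures into the unified Features vector.
func unifyInitFeatures(msg *lnwire.Init) (*lnwire.RawFeatureVector, error) {
	features := msg.Features.Clone()

	// Older peers may leave GlobalFeatures unset entirely.
	if msg.GlobalFeatures != nil {
		if err := features.Merge(msg.GlobalFeatures); err != nil {
			return nil, err
		}
	}

	return features, nil
}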

View File

@@ -816,8 +816,8 @@ func ReadElement(r io.Reader, element interface{}) error {
}
length := binary.BigEndian.Uint16(addrLen[:])
var addrBytes [34]byte
if length > 34 {
var addrBytes [deliveryAddressMaxSize]byte
if length > deliveryAddressMaxSize {
return fmt.Errorf("Cannot read %d bytes into addrBytes", length)
}
if _, err = io.ReadFull(r, addrBytes[:length]); err != nil {

View File

@@ -6,9 +6,15 @@ import (
"github.com/btcsuite/btcutil"
)
// mSatScale is a value that's used to scale satoshis to milli-satoshis, and
// the other way around.
const mSatScale uint64 = 1000
const (
// mSatScale is a value that's used to scale satoshis to milli-satoshis, and
// the other way around.
mSatScale uint64 = 1000
// MaxMilliSatoshi is the maximum number of msats that can be expressed
// in this data type.
MaxMilliSatoshi = ^MilliSatoshi(0)
)
// MilliSatoshi are the native unit of the Lightning Network. A milli-satoshi
// is simply 1/1000th of a satoshi. There are 1000 milli-satoshis in a single

View File

@@ -101,14 +101,6 @@ type NodeAnnouncement struct {
ExtraOpaqueData []byte
}
// UpdateNodeAnnAddrs is a functional option that allows updating the addresses
// of the given node announcement.
func UpdateNodeAnnAddrs(addrs []net.Addr) func(*NodeAnnouncement) {
return func(nodeAnn *NodeAnnouncement) {
nodeAnn.Addresses = addrs
}
}
// A compile time check to ensure NodeAnnouncement implements the
// lnwire.Message interface.
var _ Message = (*NodeAnnouncement)(nil)

View File

@@ -11,6 +11,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/tlv"
)
// FailureMessage represents the onion failure object identified by its unique
@@ -78,6 +79,8 @@ const (
CodeFinalIncorrectCltvExpiry FailCode = 18
CodeFinalIncorrectHtlcAmount FailCode = 19
CodeExpiryTooFar FailCode = 21
CodeInvalidOnionPayload = FlagPerm | 22
CodeMPPTimeout FailCode = 23
)
// String returns the string representation of the failure code.
@@ -149,6 +152,12 @@ func (c FailCode) String() string {
case CodeExpiryTooFar:
return "ExpiryTooFar"
case CodeInvalidOnionPayload:
return "InvalidOnionPayload"
case CodeMPPTimeout:
return "MPPTimeout"
default:
return "<unknown>"
}
@@ -1117,6 +1126,86 @@ func (f *FailExpiryTooFar) Error() string {
return f.Code().String()
}
// InvalidOnionPayload is returned if the hop could not process the TLV payload
// enclosed in the onion.
type InvalidOnionPayload struct {
// Type is the TLV type that caused the specific failure.
Type uint64
// Offset is the byte offset within the payload where the failure
// occurred.
Offset uint16
}
// NewInvalidOnionPayload initializes a new InvalidOnionPayload failure.
func NewInvalidOnionPayload(typ uint64, offset uint16) *InvalidOnionPayload {
return &InvalidOnionPayload{
Type: typ,
Offset: offset,
}
}
// Code returns the failure unique code.
//
// NOTE: Part of the FailureMessage interface.
func (f *InvalidOnionPayload) Code() FailCode {
return CodeInvalidOnionPayload
}
// Returns a human readable string describing the target FailureMessage.
//
// NOTE: Implements the error interface.
func (f *InvalidOnionPayload) Error() string {
return fmt.Sprintf("%v(type=%v, offset=%d)",
f.Code(), f.Type, f.Offset)
}
// Decode decodes the failure from bytes stream.
//
// NOTE: Part of the Serializable interface.
func (f *InvalidOnionPayload) Decode(r io.Reader, pver uint32) error {
var buf [8]byte
typ, err := tlv.ReadVarInt(r, &buf)
if err != nil {
return err
}
f.Type = typ
return ReadElements(r, &f.Offset)
}
// Encode writes the failure in bytes stream.
//
// NOTE: Part of the Serializable interface.
func (f *InvalidOnionPayload) Encode(w io.Writer, pver uint32) error {
var buf [8]byte
if err := tlv.WriteVarInt(w, f.Type, &buf); err != nil {
return err
}
return WriteElements(w, f.Offset)
}
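A small round-trip sketch of the new failure (the type and offset values are arbitrary, and pver 0 is passed only as a placeholder): the TLV type is written as a BigSize varint followed by the two-byte offset.

package main

import (
	"bytes"
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	failure := lnwire.NewInvalidOnionPayload(10, 3)

	var buf bytes.Buffer
	if err := failure.Encode(&buf, 0); err != nil {
		panic(err)
	}

	var decoded lnwire.InvalidOnionPayload
	if err := decoded.Decode(&buf, 0); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Type, decoded.Offset) // 10 3
	fmt.Println(decoded.Error())              // InvalidOnionPayload(type=10, offset=3)
}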
// FailMPPTimeout is returned if the complete amount for a multi part payment
// was not received within a reasonable time.
//
// NOTE: May only be returned by the final node in the path.
type FailMPPTimeout struct{}
// Code returns the failure unique code.
//
// NOTE: Part of the FailureMessage interface.
func (f *FailMPPTimeout) Code() FailCode {
return CodeMPPTimeout
}
// Returns a human readable string describing the target FailureMessage.
//
// NOTE: Implements the error interface.
func (f *FailMPPTimeout) Error() string {
return f.Code().String()
}
// DecodeFailure decodes, validates, and parses the lnwire onion failure, for
// the provided protocol version.
func DecodeFailure(r io.Reader, pver uint32) (FailureMessage, error) {
@@ -1298,6 +1387,12 @@ func makeEmptyOnionError(code FailCode) (FailureMessage, error) {
case CodeExpiryTooFar:
return &FailExpiryTooFar{}, nil
case CodeInvalidOnionPayload:
return &InvalidOnionPayload{}, nil
case CodeMPPTimeout:
return &FailMPPTimeout{}, nil
default:
return nil, errors.Errorf("unknown error code: %v", code)
}

View File

@@ -122,6 +122,12 @@ type OpenChannel struct {
// Currently, the least significant bit of this bit field indicates the
// initiator of the channel wishes to advertise this channel publicly.
ChannelFlags FundingFlag
// UpfrontShutdownScript is the script to which the channel funds should
// be paid when mutually closing the channel. This field is optional and
// has a length prefix, so a zero will be written if it is not set
// and its length followed by the script will be written if it is set.
UpfrontShutdownScript DeliveryAddress
}
// A compile time check to ensure OpenChannel implements the lnwire.Message
@@ -153,6 +159,7 @@ func (o *OpenChannel) Encode(w io.Writer, pver uint32) error {
o.HtlcPoint,
o.FirstCommitmentPoint,
o.ChannelFlags,
o.UpfrontShutdownScript,
)
}
@@ -162,7 +169,7 @@ func (o *OpenChannel) Encode(w io.Writer, pver uint32) error {
//
// This is part of the lnwire.Message interface.
func (o *OpenChannel) Decode(r io.Reader, pver uint32) error {
return ReadElements(r,
if err := ReadElements(r,
o.ChainHash[:],
o.PendingChannelID[:],
&o.FundingAmount,
@@ -181,7 +188,18 @@ func (o *OpenChannel) Decode(r io.Reader, pver uint32) error {
&o.HtlcPoint,
&o.FirstCommitmentPoint,
&o.ChannelFlags,
)
); err != nil {
return err
}
// Check for the optional upfront shutdown script field. If it is not there,
// silence the EOF error.
err := ReadElement(r, &o.UpfrontShutdownScript)
if err != nil && err != io.EOF {
return err
}
return nil
}
// MsgType returns the MessageType code which uniquely identifies this message
@@ -198,5 +216,10 @@ func (o *OpenChannel) MsgType() MessageType {
// This is part of the lnwire.Message interface.
func (o *OpenChannel) MaxPayloadLength(uint32) uint32 {
// (32 * 2) + (8 * 6) + (4 * 1) + (2 * 2) + (33 * 6) + 1
return 319
var length uint32 = 319 // base length
// Upfront shutdown script max length.
length += 2 + deliveryAddressMaxSize
return length
}
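For reference, the optional script adds 2 + deliveryAddressMaxSize = 36 bytes on top of the 319-byte base, so the maximum OpenChannel payload becomes 355 bytes.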

View File

@@ -2,6 +2,7 @@ package lnwire
import (
"io"
"math"
"github.com/btcsuite/btcd/chaincfg/chainhash"
)
@@ -75,3 +76,14 @@ func (q *QueryChannelRange) MaxPayloadLength(uint32) uint32 {
// 32 + 4 + 4
return 40
}
// LastBlockHeight returns the last block height covered by the range of a
// QueryChannelRange message.
func (q *QueryChannelRange) LastBlockHeight() uint32 {
// Handle overflows by casting to uint64.
lastBlockHeight := uint64(q.FirstBlockHeight) + uint64(q.NumBlocks) - 1
if lastBlockHeight > math.MaxUint32 {
return math.MaxUint32
}
return uint32(lastBlockHeight)
}

View File

@@ -35,6 +35,19 @@ const (
maxZlibBufSize = 67413630
)
// ErrUnsortedSIDs is returned when decoding a QueryShortChanIDs request whose
// items were not sorted.
type ErrUnsortedSIDs struct {
prevSID ShortChannelID
curSID ShortChannelID
}
// Error returns a human-readable description of the error.
func (e ErrUnsortedSIDs) Error() string {
return fmt.Sprintf("current sid: %v isn't greater than last sid: %v",
e.curSID, e.prevSID)
}
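A hypothetical caller sketch showing why a typed error helps: a decode failure caused by unsorted IDs can now be told apart from plain I/O or parsing errors. The handler name and error wrapping are illustrative only:

package sketch

import (
	"fmt"
	"io"

	"github.com/lightningnetwork/lnd/lnwire"
)

// handleQuery is a hypothetical handler that reacts to the typed error.
func handleQuery(r io.Reader) error {
	var msg lnwire.QueryShortChanIDs
	if err := msg.Decode(r, 0); err != nil {
		if unsorted, ok := err.(lnwire.ErrUnsortedSIDs); ok {
			return fmt.Errorf("misbehaving peer: %v", unsorted)
		}
		return err
	}

	// ... act on msg.ShortChanIDs ...
	return nil
}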
// zlibDecodeMtx is a package level mutex that we'll use in order to ensure
// that we'll only attempt a single zlib decoding instance at a time. This
// allows us to also further bound our memory usage.
@@ -67,6 +80,12 @@ type QueryShortChanIDs struct {
// ShortChanIDs is a slice of decoded short channel ID's.
ShortChanIDs []ShortChannelID
// noSort indicates whether or not to sort the short channel ids before
// writing them out.
//
// NOTE: This should only be used during testing.
noSort bool
}
// NewQueryShortChanIDs creates a new QueryShortChanIDs message.
@@ -113,7 +132,7 @@ func decodeShortChanIDs(r io.Reader) (ShortChanIDEncoding, []ShortChannelID, err
}
if numBytesResp == 0 {
return 0, nil, fmt.Errorf("No encoding type specified")
return 0, nil, nil
}
queryBody := make([]byte, numBytesResp)
@@ -158,11 +177,18 @@ func decodeShortChanIDs(r io.Reader) (ShortChanIDEncoding, []ShortChannelID, err
// ID's to conclude our parsing.
shortChanIDs := make([]ShortChannelID, numShortChanIDs)
bodyReader := bytes.NewReader(queryBody)
var lastChanID ShortChannelID
for i := 0; i < numShortChanIDs; i++ {
if err := ReadElements(bodyReader, &shortChanIDs[i]); err != nil {
return 0, nil, fmt.Errorf("unable to parse "+
"short chan ID: %v", err)
}
cid := shortChanIDs[i]
if cid.ToUint64() <= lastChanID.ToUint64() {
return 0, nil, ErrUnsortedSIDs{lastChanID, cid}
}
lastChanID = cid
}
return encodingType, shortChanIDs, nil
@@ -177,6 +203,13 @@ func decodeShortChanIDs(r io.Reader) (ShortChanIDEncoding, []ShortChannelID, err
zlibDecodeMtx.Lock()
defer zlibDecodeMtx.Unlock()
// At this point, if there's no body remaining, then only the encoding
// type was specified, meaning that there're no further bytes to be
// parsed.
if len(queryBody) == 0 {
return encodingType, nil, nil
}
// Before we start to decode, we'll create a limit reader over
// the current reader. This will ensure that we can control how
// much memory we're allocating during the decoding process.
@@ -224,9 +257,7 @@ func decodeShortChanIDs(r io.Reader) (ShortChanIDEncoding, []ShortChannelID, err
// within the encoding, and if violated can aide us in
// detecting malicious payloads.
if cid.ToUint64() <= lastChanID.ToUint64() {
return 0, nil, fmt.Errorf("current sid of %v "+
"isn't greater than last sid of %v", cid,
lastChanID)
return 0, nil, ErrUnsortedSIDs{lastChanID, cid}
}
lastChanID = cid
@@ -253,20 +284,23 @@ func (q *QueryShortChanIDs) Encode(w io.Writer, pver uint32) error {
// Base on our encoding type, we'll write out the set of short channel
// ID's.
return encodeShortChanIDs(w, q.EncodingType, q.ShortChanIDs)
return encodeShortChanIDs(w, q.EncodingType, q.ShortChanIDs, q.noSort)
}
// encodeShortChanIDs encodes the passed short channel ID's into the passed
// io.Writer, respecting the specified encoding type.
func encodeShortChanIDs(w io.Writer, encodingType ShortChanIDEncoding,
shortChanIDs []ShortChannelID) error {
shortChanIDs []ShortChannelID, noSort bool) error {
// For both of the current encoding types, the channel ID's are to be
// sorted in place, so we'll do that now.
sort.Slice(shortChanIDs, func(i, j int) bool {
return shortChanIDs[i].ToUint64() <
shortChanIDs[j].ToUint64()
})
// sorted in place, so we'll do that now. The sorting is applied unless
// we were specifically requested not to for testing purposes.
if !noSort {
sort.Slice(shortChanIDs, func(i, j int) bool {
return shortChanIDs[i].ToUint64() <
shortChanIDs[j].ToUint64()
})
}
switch encodingType {
@@ -311,27 +345,43 @@ func encodeShortChanIDs(w io.Writer, encodingType ShortChanIDEncoding,
var buf bytes.Buffer
zlibWriter := zlib.NewWriter(&buf)
// Next, we'll write out all the channel ID's directly into the
// zlib writer, which will do compressing on the fly.
for _, chanID := range shortChanIDs {
err := WriteElements(zlibWriter, chanID)
if err != nil {
return fmt.Errorf("unable to write short chan "+
"ID: %v", err)
// If we don't have anything at all to write, then we'll write
// an empty payload so we don't include things like the zlib
// header when the remote party is expecting no actual short
// channel IDs.
var compressedPayload []byte
if len(shortChanIDs) > 0 {
// Next, we'll write out all the channel ID's directly
// into the zlib writer, which will do compressing on
// the fly.
for _, chanID := range shortChanIDs {
err := WriteElements(zlibWriter, chanID)
if err != nil {
return fmt.Errorf("unable to write short chan "+
"ID: %v", err)
}
}
}
// Now that we've written all the elements, we'll ensure the
// compressed stream is written to the underlying buffer.
if err := zlibWriter.Close(); err != nil {
return fmt.Errorf("unable to finalize "+
"compression: %v", err)
// Now that we've written all the elements, we'll
// ensure the compressed stream is written to the
// underlying buffer.
if err := zlibWriter.Close(); err != nil {
return fmt.Errorf("unable to finalize "+
"compression: %v", err)
}
compressedPayload = buf.Bytes()
}
// Now that we have all the items compressed, we can compute
// what the total payload size will be. We add one to account
// for the byte to encode the type.
compressedPayload := buf.Bytes()
//
// If we don't have any actual bytes to write, then we'll end
// up emitting one byte for the length, followed by the
// encoding type, and nothing more. The spec isn't 100% clear
// in this area, but we do this as this is what most of the
// other implementations do.
numBytesBody := len(compressedPayload) + 1
// Finally, we can write out the number of bytes, the

View File

@@ -21,6 +21,12 @@ type ReplyChannelRange struct {
// ShortChanIDs is a slice of decoded short channel ID's.
ShortChanIDs []ShortChannelID
// noSort indicates whether or not to sort the short channel ids before
// writing them out.
//
// NOTE: This should only be used for testing.
noSort bool
}
// NewReplyChannelRange creates a new empty ReplyChannelRange message.
@@ -64,7 +70,7 @@ func (c *ReplyChannelRange) Encode(w io.Writer, pver uint32) error {
return err
}
return encodeShortChanIDs(w, c.EncodingType, c.ShortChanIDs)
return encodeShortChanIDs(w, c.EncodingType, c.ShortChanIDs, c.noSort)
}
// MsgType returns the integer uniquely identifying this message type on the

View File

@@ -22,6 +22,15 @@ type Shutdown struct {
// p2wpkh.
type DeliveryAddress []byte
// deliveryAddressMaxSize is the maximum expected size in bytes of a
// DeliveryAddress based on the types of scripts we know.
// Following are the known scripts and their sizes in bytes.
// - pay to witness script hash: 34
// - pay to pubkey hash: 25
// - pay to script hash: 22
// - pay to witness pubkey hash: 22.
const deliveryAddressMaxSize = 34
// NewShutdown creates a new Shutdown message.
func NewShutdown(cid ChannelID, addr DeliveryAddress) *Shutdown {
return &Shutdown{
@@ -71,11 +80,8 @@ func (s *Shutdown) MaxPayloadLength(pver uint32) uint32 {
// Len - 2 bytes
length += 2
// ScriptPubKey - 34 bytes for pay to witness script hash
length += 34
// NOTE: pay to pubkey hash is 25 bytes, pay to script hash is 22
// bytes, and pay to witness pubkey hash is 22 bytes in length.
// ScriptPubKey - maximum delivery address size.
length += deliveryAddressMaxSize
return length
}
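As a usage sketch (all values are placeholders), a P2WPKH delivery address is two bytes of script header plus a 20-byte key hash, comfortably under deliveryAddressMaxSize, and the Shutdown maximum works out to 32 + 2 + 34 = 68 bytes as before:

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	// Placeholder channel ID and key hash; real values come from the
	// funding outpoint and the wallet.
	var chanID lnwire.ChannelID
	keyHash := make([]byte, 20)

	// OP_0 <20-byte key hash>: a 22-byte P2WPKH delivery address.
	addr := lnwire.DeliveryAddress(append([]byte{0x00, 0x14}, keyHash...))

	msg := lnwire.NewShutdown(chanID, addr)
	fmt.Println(len(msg.Address), msg.MaxPayloadLength(0)) // 22 68
}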

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/btcsuite/btcd/btcec"
"github.com/lightningnetwork/lnd/input"
)
// Sig is a fixed-sized ECDSA signature. Unlike Bitcoin, we use fixed sized
@@ -64,7 +65,7 @@ func NewSigFromRawSignature(sig []byte) (Sig, error) {
// NewSigFromSignature creates a new signature as used on the wire, from an
// existing btcec.Signature.
func NewSigFromSignature(e *btcec.Signature) (Sig, error) {
func NewSigFromSignature(e input.Signature) (Sig, error) {
if e == nil {
return Sig{}, fmt.Errorf("cannot decode empty signature")
}