Release v0.3.0

Manu Herrera
2020-11-09 10:05:29 -03:00
parent 4e9aa7a3c5
commit 8107c4478b
1265 changed files with 440488 additions and 107809 deletions

vendor/github.com/lightningnetwork/lnd/tlv/primitive.go generated vendored Normal file

@@ -0,0 +1,309 @@
package tlv
import (
"encoding/binary"
"fmt"
"io"
"github.com/btcsuite/btcd/btcec"
)
// ErrTypeForEncoding signals that an incorrect type was passed to an Encoder.
type ErrTypeForEncoding struct {
val interface{}
expType string
}
// NewTypeForEncodingErr creates a new ErrTypeForEncoding given the incorrect
// val and the expected type.
func NewTypeForEncodingErr(val interface{}, expType string) ErrTypeForEncoding {
return ErrTypeForEncoding{
val: val,
expType: expType,
}
}
// Error returns a human-readable description of the type mismatch.
func (e ErrTypeForEncoding) Error() string {
return fmt.Sprintf("ErrTypeForEncoding want (type: *%s), "+
"got (type: %T)", e.expType, e.val)
}
// ErrTypeForDecoding signals that an incorrect type was passed to a Decoder or
// that the expected length of the encoding is different from that required by
// the expected type.
type ErrTypeForDecoding struct {
val interface{}
expType string
valLength uint64
expLength uint64
}
// NewTypeForDecodingErr creates a new ErrTypeForDecoding given the incorrect
// val and expected type, or the mismatch in their expected lengths.
func NewTypeForDecodingErr(val interface{}, expType string,
valLength, expLength uint64) ErrTypeForDecoding {
return ErrTypeForDecoding{
val: val,
expType: expType,
valLength: valLength,
expLength: expLength,
}
}
// Error returns a human-readable description of the type mismatch.
func (e ErrTypeForDecoding) Error() string {
return fmt.Sprintf("ErrTypeForDecoding want (type: *%s, length: %v), "+
"got (type: %T, length: %v)", e.expType, e.expLength, e.val,
e.valLength)
}
var (
byteOrder = binary.BigEndian
)
// EUint8 is an Encoder for uint8 values. An error is returned if val is not a
// *uint8.
func EUint8(w io.Writer, val interface{}, buf *[8]byte) error {
if i, ok := val.(*uint8); ok {
buf[0] = *i
_, err := w.Write(buf[:1])
return err
}
return NewTypeForEncodingErr(val, "uint8")
}
// EUint8T encodes a uint8 val to the provided io.Writer. This method is exposed
// so that encodings for custom uint8-like types can be created without
// incurring an extra heap allocation.
func EUint8T(w io.Writer, val uint8, buf *[8]byte) error {
buf[0] = val
_, err := w.Write(buf[:1])
return err
}
// EUint16 is an Encoder for uint16 values. An error is returned if val is not a
// *uint16.
func EUint16(w io.Writer, val interface{}, buf *[8]byte) error {
if i, ok := val.(*uint16); ok {
byteOrder.PutUint16(buf[:2], *i)
_, err := w.Write(buf[:2])
return err
}
return NewTypeForEncodingErr(val, "uint16")
}
// EUint16T encodes a uint16 val to the provided io.Writer. This method is
// exposed so that encodings for custom uint16-like types can be created without
// incurring an extra heap allocation.
func EUint16T(w io.Writer, val uint16, buf *[8]byte) error {
byteOrder.PutUint16(buf[:2], val)
_, err := w.Write(buf[:2])
return err
}
// EUint32 is an Encoder for uint32 values. An error is returned if val is not a
// *uint32.
func EUint32(w io.Writer, val interface{}, buf *[8]byte) error {
if i, ok := val.(*uint32); ok {
byteOrder.PutUint32(buf[:4], *i)
_, err := w.Write(buf[:4])
return err
}
return NewTypeForEncodingErr(val, "uint32")
}
// EUint32T encodes a uint32 val to the provided io.Writer. This method is
// exposed so that encodings for custom uint32-like types can be created without
// incurring an extra heap allocation.
func EUint32T(w io.Writer, val uint32, buf *[8]byte) error {
byteOrder.PutUint32(buf[:4], val)
_, err := w.Write(buf[:4])
return err
}
// EUint64 is an Encoder for uint64 values. An error is returned if val is not a
// *uint64.
func EUint64(w io.Writer, val interface{}, buf *[8]byte) error {
if i, ok := val.(*uint64); ok {
byteOrder.PutUint64(buf[:], *i)
_, err := w.Write(buf[:])
return err
}
return NewTypeForEncodingErr(val, "uint64")
}
// EUint64T encodes a uint64 val to the provided io.Writer. This method is
// exposed so that encodings for custom uint64-like types can be created without
// incurring an extra heap allocation.
func EUint64T(w io.Writer, val uint64, buf *[8]byte) error {
byteOrder.PutUint64(buf[:], val)
_, err := w.Write(buf[:])
return err
}
// DUint8 is a Decoder for uint8 values. An error is returned if val is not a
// *uint8.
func DUint8(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
if i, ok := val.(*uint8); ok && l == 1 {
if _, err := io.ReadFull(r, buf[:1]); err != nil {
return err
}
*i = buf[0]
return nil
}
return NewTypeForDecodingErr(val, "uint8", l, 1)
}
// DUint16 is a Decoder for uint16 values. An error is returned if val is not a
// *uint16.
func DUint16(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
if i, ok := val.(*uint16); ok && l == 2 {
if _, err := io.ReadFull(r, buf[:2]); err != nil {
return err
}
*i = byteOrder.Uint16(buf[:2])
return nil
}
return NewTypeForDecodingErr(val, "uint16", l, 2)
}
// DUint32 is a Decoder for uint32 values. An error is returned if val is not a
// *uint32.
func DUint32(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
if i, ok := val.(*uint32); ok && l == 4 {
if _, err := io.ReadFull(r, buf[:4]); err != nil {
return err
}
*i = byteOrder.Uint32(buf[:4])
return nil
}
return NewTypeForDecodingErr(val, "uint32", l, 4)
}
// DUint64 is a Decoder for uint64 values. An error is returned if val is not a
// *uint64.
func DUint64(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
if i, ok := val.(*uint64); ok && l == 8 {
if _, err := io.ReadFull(r, buf[:]); err != nil {
return err
}
*i = byteOrder.Uint64(buf[:])
return nil
}
return NewTypeForDecodingErr(val, "uint64", l, 8)
}
// EBytes32 is an Encoder for 32-byte arrays. An error is returned if val is not
// a *[32]byte.
func EBytes32(w io.Writer, val interface{}, _ *[8]byte) error {
if b, ok := val.(*[32]byte); ok {
_, err := w.Write(b[:])
return err
}
return NewTypeForEncodingErr(val, "[32]byte")
}
// DBytes32 is a Decoder for 32-byte arrays. An error is returned if val is not
// a *[32]byte.
func DBytes32(r io.Reader, val interface{}, _ *[8]byte, l uint64) error {
if b, ok := val.(*[32]byte); ok && l == 32 {
_, err := io.ReadFull(r, b[:])
return err
}
return NewTypeForDecodingErr(val, "[32]byte", l, 32)
}
// EBytes33 is an Encoder for 33-byte arrays. An error is returned if val is not
// a *[33]byte.
func EBytes33(w io.Writer, val interface{}, _ *[8]byte) error {
if b, ok := val.(*[33]byte); ok {
_, err := w.Write(b[:])
return err
}
return NewTypeForEncodingErr(val, "[33]byte")
}
// DBytes33 is a Decoder for 33-byte arrays. An error is returned if val is not
// a *[33]byte.
func DBytes33(r io.Reader, val interface{}, _ *[8]byte, l uint64) error {
if b, ok := val.(*[33]byte); ok {
_, err := io.ReadFull(r, b[:])
return err
}
return NewTypeForDecodingErr(val, "[33]byte", l, 33)
}
// EBytes64 is an Encoder for 64-byte arrays. An error is returned if val is not
// a *[64]byte.
func EBytes64(w io.Writer, val interface{}, _ *[8]byte) error {
if b, ok := val.(*[64]byte); ok {
_, err := w.Write(b[:])
return err
}
return NewTypeForEncodingErr(val, "[64]byte")
}
// DBytes64 is a Decoder for 64-byte arrays. An error is returned if val is not
// a *[64]byte.
func DBytes64(r io.Reader, val interface{}, _ *[8]byte, l uint64) error {
if b, ok := val.(*[64]byte); ok && l == 64 {
_, err := io.ReadFull(r, b[:])
return err
}
return NewTypeForDecodingErr(val, "[64]byte", l, 64)
}
// EPubKey is an Encoder for *btcec.PublicKey values. An error is returned if
// val is not a **btcec.PublicKey.
func EPubKey(w io.Writer, val interface{}, _ *[8]byte) error {
if pk, ok := val.(**btcec.PublicKey); ok {
_, err := w.Write((*pk).SerializeCompressed())
return err
}
return NewTypeForEncodingErr(val, "*btcec.PublicKey")
}
// DPubKey is a Decoder for *btcec.PublicKey values. An error is returned if val
// is not a **btcec.PublicKey.
func DPubKey(r io.Reader, val interface{}, _ *[8]byte, l uint64) error {
if pk, ok := val.(**btcec.PublicKey); ok && l == 33 {
var b [33]byte
_, err := io.ReadFull(r, b[:])
if err != nil {
return err
}
p, err := btcec.ParsePubKey(b[:], btcec.S256())
if err != nil {
return err
}
*pk = p
return nil
}
return NewTypeForDecodingErr(val, "*btcec.PublicKey", l, 33)
}
// EVarBytes is an Encoder for variable byte slices. An error is returned if val
// is not *[]byte.
func EVarBytes(w io.Writer, val interface{}, _ *[8]byte) error {
if b, ok := val.(*[]byte); ok {
_, err := w.Write(*b)
return err
}
return NewTypeForEncodingErr(val, "[]byte")
}
// DVarBytes is a Decoder for variable byte slices. An error is returned if val
// is not *[]byte.
func DVarBytes(r io.Reader, val interface{}, _ *[8]byte, l uint64) error {
if b, ok := val.(*[]byte); ok {
*b = make([]byte, l)
_, err := io.ReadFull(r, *b)
return err
}
return NewTypeForDecodingErr(val, "[]byte", l, l)
}
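
As a usage sketch (not part of the vendored file; the values are illustrative only), the fixed-size primitive codecs above round-trip cleanly through a bytes.Buffer, with the shared 8-byte scratch array mirroring the *[8]byte parameter used throughout the package:

package main

import (
	"bytes"
	"fmt"

	"github.com/lightningnetwork/lnd/tlv"
)

func main() {
	var (
		b       bytes.Buffer
		scratch [8]byte
	)

	// Encode a uint32 using the 4-byte fixed-size encoder.
	in := uint32(0xdeadbeef)
	if err := tlv.EUint32(&b, &in, &scratch); err != nil {
		panic(err)
	}

	// Decode it back; the length argument must equal the 4-byte
	// static size, otherwise DUint32 returns an ErrTypeForDecoding.
	var out uint32
	if err := tlv.DUint32(&b, &out, &scratch, 4); err != nil {
		panic(err)
	}
	fmt.Printf("in=%#x out=%#x\n", in, out)
}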

vendor/github.com/lightningnetwork/lnd/tlv/record.go generated vendored Normal file

@@ -0,0 +1,251 @@
package tlv
import (
"bytes"
"fmt"
"io"
"sort"
"github.com/btcsuite/btcd/btcec"
)
// Type is a 64-bit identifier for a TLV Record.
type Type uint64
// TypeMap is a map of parsed Types. The map values are byte slices. If the byte
// slice is nil, the type was successfully parsed. Otherwise the value is a byte
// slice containing the encoded data.
type TypeMap map[Type][]byte
// Encoder is a signature for methods that can encode TLV values. An error
// should be returned if the Encoder cannot support the underlying type of val.
// The provided scratch buffer must be non-nil.
type Encoder func(w io.Writer, val interface{}, buf *[8]byte) error
// Decoder is a signature for methods that can decode TLV values. An error
// should be returned if the Decoder cannot support the underlying type of val.
// The provided scratch buffer must be non-nil.
type Decoder func(r io.Reader, val interface{}, buf *[8]byte, l uint64) error
// ENOP is an encoder that doesn't modify the io.Writer and never fails.
func ENOP(io.Writer, interface{}, *[8]byte) error { return nil }
// DNOP is a decoder that doesn't modify the io.Reader and never fails.
func DNOP(io.Reader, interface{}, *[8]byte, uint64) error { return nil }
// SizeFunc is a function that can compute the length of a given field. Since
// the size of the underlying field can change, this allows the size of the
// field to be evaluated at the time of encoding.
type SizeFunc func() uint64
// SizeVarBytes returns a SizeFunc that can compute the length of a byte slice.
func SizeVarBytes(e *[]byte) SizeFunc {
return func() uint64 {
return uint64(len(*e))
}
}
// RecordProducer is an interface for objects that can produce a Record object
// capable of encoding and/or decoding the RecordProducer as a Record.
type RecordProducer interface {
// Record returns a Record that can be used to encode or decode the
// backing object.
Record() Record
}
// Record holds the required information to encode or decode a TLV record.
type Record struct {
value interface{}
typ Type
staticSize uint64
sizeFunc SizeFunc
encoder Encoder
decoder Decoder
}
// Size returns the size of the Record's value. If no static size is known, the
// dynamic size will be evaluated.
func (f *Record) Size() uint64 {
if f.sizeFunc == nil {
return f.staticSize
}
return f.sizeFunc()
}
// Type returns the type of the underlying TLV record.
func (f *Record) Type() Type {
return f.typ
}
// Encode writes out the TLV record to the passed writer. This is useful when a
// caller wants to obtain the raw encoding of a *single* TLV record, outside
// the context of the Stream struct.
func (f *Record) Encode(w io.Writer) error {
var b [8]byte
return f.encoder(w, f.value, &b)
}
// Decode reads in the TLV record from the passed reader. This is useful when a
// caller wants to decode a *single* TLV record, outside the context of the
// Stream struct.
func (f *Record) Decode(r io.Reader, l uint64) error {
var b [8]byte
return f.decoder(r, f.value, &b, l)
}
// MakePrimitiveRecord creates a record for common types.
func MakePrimitiveRecord(typ Type, val interface{}) Record {
var (
staticSize uint64
sizeFunc SizeFunc
encoder Encoder
decoder Decoder
)
switch e := val.(type) {
case *uint8:
staticSize = 1
encoder = EUint8
decoder = DUint8
case *uint16:
staticSize = 2
encoder = EUint16
decoder = DUint16
case *uint32:
staticSize = 4
encoder = EUint32
decoder = DUint32
case *uint64:
staticSize = 8
encoder = EUint64
decoder = DUint64
case *[32]byte:
staticSize = 32
encoder = EBytes32
decoder = DBytes32
case *[33]byte:
staticSize = 33
encoder = EBytes33
decoder = DBytes33
case **btcec.PublicKey:
staticSize = 33
encoder = EPubKey
decoder = DPubKey
case *[64]byte:
staticSize = 64
encoder = EBytes64
decoder = DBytes64
case *[]byte:
sizeFunc = SizeVarBytes(e)
encoder = EVarBytes
decoder = DVarBytes
default:
panic(fmt.Sprintf("unknown primitive type: %T", val))
}
return Record{
value: val,
typ: typ,
staticSize: staticSize,
sizeFunc: sizeFunc,
encoder: encoder,
decoder: decoder,
}
}
// MakeStaticRecord creates a record for a fixed-size field.
func MakeStaticRecord(typ Type, val interface{}, size uint64, encoder Encoder,
decoder Decoder) Record {
return Record{
value: val,
typ: typ,
staticSize: size,
encoder: encoder,
decoder: decoder,
}
}
// MakeDynamicRecord creates a record whose size may vary, and will be
// determined at the time of encoding via sizeFunc.
func MakeDynamicRecord(typ Type, val interface{}, sizeFunc SizeFunc,
encoder Encoder, decoder Decoder) Record {
return Record{
value: val,
typ: typ,
sizeFunc: sizeFunc,
encoder: encoder,
decoder: decoder,
}
}
// RecordsToMap encodes a series of TLV records as raw key-value pairs in the
// form of a map.
func RecordsToMap(records []Record) (map[uint64][]byte, error) {
tlvMap := make(map[uint64][]byte, len(records))
for _, record := range records {
var b bytes.Buffer
if err := record.Encode(&b); err != nil {
return nil, err
}
tlvMap[uint64(record.Type())] = b.Bytes()
}
return tlvMap, nil
}
// StubEncoder is a factory function that makes a stub tlv.Encoder out of a raw
// value. We can use this to make a record that can be encoded when we don't
// actually know its true underlying value, and only its serialization.
func StubEncoder(v []byte) Encoder {
return func(w io.Writer, val interface{}, buf *[8]byte) error {
_, err := w.Write(v)
return err
}
}
// MapToRecords encodes the passed TLV map as a series of regular tlv.Record
// instances. The resulting set of records will be returned in sorted order by
// their type.
func MapToRecords(tlvMap map[uint64][]byte) []Record {
records := make([]Record, 0, len(tlvMap))
for k, v := range tlvMap {
// We don't pass in a decoder here since we don't actually know
// the type, and only expect this Record to be used for display
// and encoding purposes.
record := MakeStaticRecord(
Type(k), nil, uint64(len(v)), StubEncoder(v), nil,
)
records = append(records, record)
}
SortRecords(records)
return records
}
// SortRecords is a helper function that will sort a slice of records in place
// according to their type.
func SortRecords(records []Record) {
if len(records) == 0 {
return
}
sort.Slice(records, func(i, j int) bool {
return records[i].Type() < records[j].Type()
})
}
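
As a hedged sketch (not part of the vendored file; the types 1 and 3 are arbitrary), MakePrimitiveRecord, RecordsToMap and MapToRecords compose as follows:

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/tlv"
)

func main() {
	var (
		amt  uint64 = 1000
		hash [32]byte
	)

	records := []tlv.Record{
		tlv.MakePrimitiveRecord(1, &amt),
		tlv.MakePrimitiveRecord(3, &hash),
	}

	// Encode each record's value and key it by its numeric type.
	tlvMap, err := tlv.RecordsToMap(records)
	if err != nil {
		panic(err)
	}
	for typ, val := range tlvMap {
		fmt.Printf("type=%d encoded length=%d\n", typ, len(val))
	}

	// Rebuild encode-only stub records, sorted by type.
	rebuilt := tlv.MapToRecords(tlvMap)
	fmt.Println("rebuilt records:", len(rebuilt))
}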

vendor/github.com/lightningnetwork/lnd/tlv/stream.go generated vendored Normal file

@@ -0,0 +1,318 @@
package tlv
import (
"bytes"
"errors"
"io"
"io/ioutil"
"math"
)
// MaxRecordSize is the maximum size of a particular record that will be parsed
// by a stream decoder. This value is currently chosen to be equal to the
// maximum message size permitted by BOLT 1, as no record should be bigger than
// an entire message.
const MaxRecordSize = 65535 // 65KB
// ErrStreamNotCanonical signals that a decoded stream does not contain records
// sorted by monotonically increasing type.
var ErrStreamNotCanonical = errors.New("tlv stream is not canonical")
// ErrRecordTooLarge signals that a decoded record has a length that is too
// long to parse.
var ErrRecordTooLarge = errors.New("record is too large")
// Stream defines a TLV stream that can be used for encoding or decoding a set
// of TLV Records.
type Stream struct {
records []Record
buf [8]byte
}
// NewStream creates a new TLV Stream given a set of known records.
func NewStream(records ...Record) (*Stream, error) {
// Assert that the ordering of the Records is canonical and that they appear
// in ascending order of type.
var (
min Type
overflow bool
)
for _, record := range records {
if overflow || record.typ < min {
return nil, ErrStreamNotCanonical
}
if record.encoder == nil {
record.encoder = ENOP
}
if record.decoder == nil {
record.decoder = DNOP
}
if record.typ == math.MaxUint64 {
overflow = true
}
min = record.typ + 1
}
return &Stream{
records: records,
}, nil
}
// MustNewStream creates a new TLV Stream given a set of known records. If an
// error is encountered in creating the stream, this method will panic instead
// of returning the error.
func MustNewStream(records ...Record) *Stream {
stream, err := NewStream(records...)
if err != nil {
panic(err.Error())
}
return stream
}
// Encode writes a Stream to the passed io.Writer. Each of the Records known to
// the Stream is written in ascending order of its type so as to be canonical.
//
// The stream is constructed by concatenating the individual, serialized Records
// where each record has the following format:
// [varint: type]
// [varint: length]
// [length: value]
//
// An error is returned if the io.Writer fails to accept bytes from the
// encoding, and nothing else. The ordering of the Records is asserted upon the
// creation of a Stream, and thus the output will be by definition canonical.
func (s *Stream) Encode(w io.Writer) error {
// Iterate through all known records, if any, serializing each record's
// type, length and value.
for i := range s.records {
rec := &s.records[i]
// Write the record's type as a varint.
err := WriteVarInt(w, uint64(rec.typ), &s.buf)
if err != nil {
return err
}
// Write the record's length as a varint.
err = WriteVarInt(w, rec.Size(), &s.buf)
if err != nil {
return err
}
// Encode the current record's value using the stream's codec.
err = rec.encoder(w, rec.value, &s.buf)
if err != nil {
return err
}
}
return nil
}
// Decode deserializes a TLV Stream from the passed io.Reader. The Stream will
// inspect each record that is parsed and check to see if it has a corresponding
// Record to facilitate deserialization of that field. If the record is unknown,
// the Stream will discard the record's bytes and proceed to the subsequent
// record.
//
// Each record has the following format:
// [varint: type]
// [varint: length]
// [length: value]
//
// A series of (possibly zero) records are concatenated into a stream; this
// example contains two records:
//
// (t: 0x01, l: 0x04, v: 0xff, 0xff, 0xff, 0xff)
// (t: 0x02, l: 0x01, v: 0x01)
//
// This method asserts that the byte stream is canonical, namely that each
// record is unique and that all records are sorted in ascending order. An
// ErrStreamNotCanonical error is returned if the encoded TLV stream is not.
//
// We permit an io.EOF error only when reading the type byte which signals that
// the last record was read cleanly and we should stop parsing. All other io.EOF
// or io.ErrUnexpectedEOF errors are returned.
func (s *Stream) Decode(r io.Reader) error {
_, err := s.decode(r, nil)
return err
}
// DecodeWithParsedTypes is identical to Decode, but if successful, returns a
// TypeMap containing the types of all records that were decoded or ignored from
// the stream.
func (s *Stream) DecodeWithParsedTypes(r io.Reader) (TypeMap, error) {
return s.decode(r, make(TypeMap))
}
// decode is a helper function that performs the core of the stream decoding.
// If the caller needs the set of parsed types, it must provide an initialized
// parsedTypes, otherwise the returned TypeMap will be nil.
func (s *Stream) decode(r io.Reader, parsedTypes TypeMap) (TypeMap, error) {
var (
typ Type
min Type
recordIdx int
overflow bool
)
// Iterate through all possible type identifiers. As types are read from
// the io.Reader, min will skip forward to the last read type.
for {
// Read the next varint type.
t, err := ReadVarInt(r, &s.buf)
switch {
// We'll silence an EOF when zero bytes remain, meaning the
// stream was cleanly encoded.
case err == io.EOF:
return parsedTypes, nil
// Other unexpected errors.
case err != nil:
return nil, err
}
typ = Type(t)
// Assert that this type is greater than any previously read.
// If we've already overflowed and we parsed another type, the
// stream is not canonical. This check prevents us from accepting
// encodings that have duplicate records or from accepting an
// unsorted series.
if overflow || typ < min {
return nil, ErrStreamNotCanonical
}
// Read the varint length.
length, err := ReadVarInt(r, &s.buf)
switch {
// We'll convert any EOFs to ErrUnexpectedEOF, since this
// results in an invalid record.
case err == io.EOF:
return nil, io.ErrUnexpectedEOF
// Other unexpected errors.
case err != nil:
return nil, err
}
// Place a soft limit on the size of a sane record, which
// prevents malicious encoders from causing us to allocate an
// unbounded amount of memory when decoding variable-sized
// fields.
if length > MaxRecordSize {
return nil, ErrRecordTooLarge
}
// Search the records known to the stream for this type. We'll
// begin the search at recordIdx and walk forward until we find
// it or the next record's type is larger.
rec, newIdx, ok := s.getRecord(typ, recordIdx)
switch {
// We know of this record type, proceed to decode the value.
// This method asserts that length bytes are read in the
// process, and returns an error if the number of bytes is not
// exactly length.
case ok:
err := rec.decoder(r, rec.value, &s.buf, length)
switch {
// We'll convert any EOFs to ErrUnexpectedEOF, since this
// results in an invalid record.
case err == io.EOF:
return nil, io.ErrUnexpectedEOF
// Other unexpected errors.
case err != nil:
return nil, err
}
// Record the successfully decoded type if the caller
// provided an initialized TypeMap.
if parsedTypes != nil {
parsedTypes[typ] = nil
}
// Otherwise, the record type is unknown and odd, so discard the
// number of bytes specified by length.
default:
// If the caller provided an initialized TypeMap, record
// the encoded bytes.
var b *bytes.Buffer
writer := ioutil.Discard
if parsedTypes != nil {
b = bytes.NewBuffer(make([]byte, 0, length))
writer = b
}
_, err := io.CopyN(writer, r, int64(length))
switch {
// We'll convert any EOFs to ErrUnexpectedEOF, since this
// results in an invalid record.
case err == io.EOF:
return nil, io.ErrUnexpectedEOF
// Other unexpected errors.
case err != nil:
return nil, err
}
if parsedTypes != nil {
parsedTypes[typ] = b.Bytes()
}
}
// Update our record index so that we can begin our next search
// from where we left off.
recordIdx = newIdx
// If we've parsed the largest possible type, the next loop will
// overflow back to zero. However, we need to attempt parsing
// the next type to ensure that the stream is empty.
if typ == math.MaxUint64 {
overflow = true
}
// Finally, set our lower bound on the next accepted type.
min = typ + 1
}
}
// getRecord searches for a record matching typ known to the stream. The boolean
// return value indicates whether the record is known to the stream. The integer
// return value carries the index from where getRecord should be invoked on the
// subsequent call. The first call to getRecord should always use an idx of 0.
func (s *Stream) getRecord(typ Type, idx int) (Record, int, bool) {
for idx < len(s.records) {
record := s.records[idx]
switch {
// Found target record, return it to the caller. The next index
// returned points to the immediately following record.
case record.typ == typ:
return record, idx + 1, true
// This record's type is lower than the target. Advance our
// index and continue to the next record which will have a
// strictly higher type.
case record.typ < typ:
idx++
continue
// This record's type is larger than the target, hence we have
// no record matching the current type. Return the current index
// so that we can start our search from here when processing the
// next tlv record.
default:
return Record{}, idx, false
}
}
// All known records are exhausted.
return Record{}, idx, false
}
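
A minimal end-to-end sketch (not part of the vendored file; the record types and values are arbitrary) of how Stream.Encode and Stream.Decode pair up:

package main

import (
	"bytes"
	"fmt"

	"github.com/lightningnetwork/lnd/tlv"
)

func main() {
	var (
		chanID [32]byte
		amt    uint64 = 42
	)

	// Records must be passed in ascending type order, otherwise
	// NewStream returns ErrStreamNotCanonical.
	enc, err := tlv.NewStream(
		tlv.MakePrimitiveRecord(0, &chanID),
		tlv.MakePrimitiveRecord(2, &amt),
	)
	if err != nil {
		panic(err)
	}

	var b bytes.Buffer
	if err := enc.Encode(&b); err != nil {
		panic(err)
	}

	// Decode into fresh variables via a stream whose records point
	// at them.
	var (
		gotChanID [32]byte
		gotAmt    uint64
	)
	dec, err := tlv.NewStream(
		tlv.MakePrimitiveRecord(0, &gotChanID),
		tlv.MakePrimitiveRecord(2, &gotAmt),
	)
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(&b); err != nil {
		panic(err)
	}
	fmt.Println("decoded amount:", gotAmt)
}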

vendor/github.com/lightningnetwork/lnd/tlv/truncated.go generated vendored Normal file

@@ -0,0 +1,207 @@
package tlv
import (
"encoding/binary"
"errors"
"io"
)
// ErrTUintNotMinimal signals that decoding a truncated uint failed because the
// value was not minimally encoded.
var ErrTUintNotMinimal = errors.New("truncated uint not minimally encoded")
// numLeadingZeroBytes16 computes the number of leading zero bytes in a uint16.
func numLeadingZeroBytes16(v uint16) uint64 {
switch {
case v == 0:
return 2
case v&0xff00 == 0:
return 1
default:
return 0
}
}
// SizeTUint16 returns the number of bytes remaining in a uint16 after
// truncating the leading zeros.
func SizeTUint16(v uint16) uint64 {
return 2 - numLeadingZeroBytes16(v)
}
// ETUint16 is an Encoder for truncated uint16 values, where leading zeros will
// be omitted. An error is returned if val is not a *uint16.
func ETUint16(w io.Writer, val interface{}, buf *[8]byte) error {
if t, ok := val.(*uint16); ok {
binary.BigEndian.PutUint16(buf[:2], *t)
numZeros := numLeadingZeroBytes16(*t)
_, err := w.Write(buf[numZeros:2])
return err
}
return NewTypeForEncodingErr(val, "uint16")
}
// ETUint16T is an Encoder for truncated uint16 values, where leading zeros will
// be omitted. An error is returned if val is not a *uint16.
func ETUint16T(w io.Writer, val uint16, buf *[8]byte) error {
binary.BigEndian.PutUint16(buf[:2], val)
numZeros := numLeadingZeroBytes16(val)
_, err := w.Write(buf[numZeros:2])
return err
}
// DTUint16 is a Decoder for truncated uint16 values, where leading zeros will
// be resurrected. An error is returned if val is not a *uint16.
func DTUint16(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
if t, ok := val.(*uint16); ok && l <= 2 {
_, err := io.ReadFull(r, buf[2-l:2])
if err != nil {
return err
}
zero(buf[:2-l])
*t = binary.BigEndian.Uint16(buf[:2])
if 2-numLeadingZeroBytes16(*t) != l {
return ErrTUintNotMinimal
}
return nil
}
return NewTypeForDecodingErr(val, "uint16", l, 2)
}
// numLeadingZeroBytes32 computes the number of leading zero bytes in a uint32.
func numLeadingZeroBytes32(v uint32) uint64 {
switch {
case v == 0:
return 4
case v&0xffffff00 == 0:
return 3
case v&0xffff0000 == 0:
return 2
case v&0xff000000 == 0:
return 1
default:
return 0
}
}
// SizeTUint32 returns the number of bytes remaining in a uint32 after
// truncating the leading zeros.
func SizeTUint32(v uint32) uint64 {
return 4 - numLeadingZeroBytes32(v)
}
// ETUint32 is an Encoder for truncated uint32 values, where leading zeros will
// be omitted. An error is returned if val is not a *uint32.
func ETUint32(w io.Writer, val interface{}, buf *[8]byte) error {
if t, ok := val.(*uint32); ok {
binary.BigEndian.PutUint32(buf[:4], *t)
numZeros := numLeadingZeroBytes32(*t)
_, err := w.Write(buf[numZeros:4])
return err
}
return NewTypeForEncodingErr(val, "uint32")
}
// ETUint32T is an Encoder for truncated uint32 values, where leading zeros will
// be omitted. An error is returned if val is not a *uint32.
func ETUint32T(w io.Writer, val uint32, buf *[8]byte) error {
binary.BigEndian.PutUint32(buf[:4], val)
numZeros := numLeadingZeroBytes32(val)
_, err := w.Write(buf[numZeros:4])
return err
}
// DTUint32 is a Decoder for truncated uint32 values, where leading zeros will
// be resurrected. An error is returned if val is not a *uint32.
func DTUint32(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
if t, ok := val.(*uint32); ok && l <= 4 {
_, err := io.ReadFull(r, buf[4-l:4])
if err != nil {
return err
}
zero(buf[:4-l])
*t = binary.BigEndian.Uint32(buf[:4])
if 4-numLeadingZeroBytes32(*t) != l {
return ErrTUintNotMinimal
}
return nil
}
return NewTypeForDecodingErr(val, "uint32", l, 4)
}
// numLeadingZeroBytes64 computes the number of leading zero bytes in a uint64.
//
// TODO(conner): optimize using unrolled binary search
func numLeadingZeroBytes64(v uint64) uint64 {
switch {
case v == 0:
return 8
case v&0xffffffffffffff00 == 0:
return 7
case v&0xffffffffffff0000 == 0:
return 6
case v&0xffffffffff000000 == 0:
return 5
case v&0xffffffff00000000 == 0:
return 4
case v&0xffffff0000000000 == 0:
return 3
case v&0xffff000000000000 == 0:
return 2
case v&0xff00000000000000 == 0:
return 1
default:
return 0
}
}
// SizeTUint64 returns the number of bytes remaining in a uint64 after
// truncating the leading zeros.
func SizeTUint64(v uint64) uint64 {
return 8 - numLeadingZeroBytes64(v)
}
// ETUint64 is an Encoder for truncated uint64 values, where leading zeros will
// be omitted. An error is returned if val is not a *uint64.
func ETUint64(w io.Writer, val interface{}, buf *[8]byte) error {
if t, ok := val.(*uint64); ok {
binary.BigEndian.PutUint64(buf[:], *t)
numZeros := numLeadingZeroBytes64(*t)
_, err := w.Write(buf[numZeros:])
return err
}
return NewTypeForEncodingErr(val, "uint64")
}
// ETUint64T is an Encoder for truncated uint64 values, where leading zeros will
// be omitted. An error is returned if val is not a *uint64.
func ETUint64T(w io.Writer, val uint64, buf *[8]byte) error {
binary.BigEndian.PutUint64(buf[:], val)
numZeros := numLeadingZeroBytes64(val)
_, err := w.Write(buf[numZeros:])
return err
}
// DTUint64 is a Decoder for truncated uint64 values, where leading zeros will
// be resurrected. An error is returned if val is not a *uint64.
func DTUint64(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
if t, ok := val.(*uint64); ok && l <= 8 {
_, err := io.ReadFull(r, buf[8-l:])
if err != nil {
return err
}
zero(buf[:8-l])
*t = binary.BigEndian.Uint64(buf[:])
if 8-numLeadingZeroBytes64(*t) != l {
return ErrTUintNotMinimal
}
return nil
}
return NewTypeForDecodingErr(val, "uint64", l, 8)
}
// zero clears the passed byte slice.
func zero(b []byte) {
for i := range b {
b[i] = 0x00
}
}
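
Truncated codecs are usually wired up through MakeDynamicRecord from record.go, since the encoded width depends on the value. A hedged sketch (not part of the vendored file):

package main

import (
	"bytes"
	"fmt"

	"github.com/lightningnetwork/lnd/tlv"
)

func main() {
	// 42 fits in a single byte once the seven leading zero bytes of
	// its uint64 representation are dropped.
	val := uint64(42)

	rec := tlv.MakeDynamicRecord(
		1, &val,
		func() uint64 { return tlv.SizeTUint64(val) },
		tlv.ETUint64, tlv.DTUint64,
	)

	var b bytes.Buffer
	if err := rec.Encode(&b); err != nil {
		panic(err)
	}
	fmt.Println("encoded length:", b.Len()) // 1

	var decoded uint64
	out := tlv.MakeDynamicRecord(
		1, &decoded,
		func() uint64 { return tlv.SizeTUint64(decoded) },
		tlv.ETUint64, tlv.DTUint64,
	)
	if err := out.Decode(&b, uint64(b.Len())); err != nil {
		panic(err)
	}
	fmt.Println("decoded value:", decoded) // 42
}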

vendor/github.com/lightningnetwork/lnd/tlv/varint.go generated vendored Normal file

@@ -0,0 +1,116 @@
package tlv
import (
"encoding/binary"
"errors"
"io"
"github.com/btcsuite/btcd/wire"
)
// ErrVarIntNotCanonical signals that the decoded varint was not minimally encoded.
var ErrVarIntNotCanonical = errors.New("decoded varint is not canonical")
// ReadVarInt reads a variable length integer from r and returns it as a uint64.
func ReadVarInt(r io.Reader, buf *[8]byte) (uint64, error) {
_, err := io.ReadFull(r, buf[:1])
if err != nil {
return 0, err
}
discriminant := buf[0]
var rv uint64
switch {
case discriminant < 0xfd:
rv = uint64(discriminant)
case discriminant == 0xfd:
_, err := io.ReadFull(r, buf[:2])
switch {
case err == io.EOF:
return 0, io.ErrUnexpectedEOF
case err != nil:
return 0, err
}
rv = uint64(binary.BigEndian.Uint16(buf[:2]))
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
if rv < 0xfd {
return 0, ErrVarIntNotCanonical
}
case discriminant == 0xfe:
_, err := io.ReadFull(r, buf[:4])
switch {
case err == io.EOF:
return 0, io.ErrUnexpectedEOF
case err != nil:
return 0, err
}
rv = uint64(binary.BigEndian.Uint32(buf[:4]))
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
if rv <= 0xffff {
return 0, ErrVarIntNotCanonical
}
default:
_, err := io.ReadFull(r, buf[:])
switch {
case err == io.EOF:
return 0, io.ErrUnexpectedEOF
case err != nil:
return 0, err
}
rv = binary.BigEndian.Uint64(buf[:])
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
if rv <= 0xffffffff {
return 0, ErrVarIntNotCanonical
}
}
return rv, nil
}
// WriteVarInt serializes val to w using a variable number of bytes depending
// on its value.
func WriteVarInt(w io.Writer, val uint64, buf *[8]byte) error {
var length int
switch {
case val < 0xfd:
buf[0] = uint8(val)
length = 1
case val <= 0xffff:
buf[0] = uint8(0xfd)
binary.BigEndian.PutUint16(buf[1:3], uint16(val))
length = 3
case val <= 0xffffffff:
buf[0] = uint8(0xfe)
binary.BigEndian.PutUint32(buf[1:5], uint32(val))
length = 5
default:
buf[0] = uint8(0xff)
_, err := w.Write(buf[:1])
if err != nil {
return err
}
binary.BigEndian.PutUint64(buf[:], uint64(val))
length = 8
}
_, err := w.Write(buf[:length])
return err
}
// VarIntSize returns the required number of bytes to encode a var int.
func VarIntSize(val uint64) uint64 {
return uint64(wire.VarIntSerializeSize(val))
}
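
A short round-trip sketch (not part of the vendored file) showing the 1-, 3-, 5- and 9-byte BigSize encodings produced by WriteVarInt and accepted by ReadVarInt:

package main

import (
	"bytes"
	"fmt"

	"github.com/lightningnetwork/lnd/tlv"
)

func main() {
	var scratch [8]byte

	// Values chosen to exercise each discriminant range.
	for _, val := range []uint64{0xfc, 0xfd, 0x10000, 0x100000000} {
		var b bytes.Buffer
		if err := tlv.WriteVarInt(&b, val, &scratch); err != nil {
			panic(err)
		}

		got, err := tlv.ReadVarInt(&b, &scratch)
		if err != nil {
			panic(err)
		}
		fmt.Printf("val=%#x size=%d roundtrip=%#x\n",
			val, tlv.VarIntSize(val), got)
	}
}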