mirror of https://github.com/muun/recovery.git

Release v0.3.0
vendor/github.com/lightningnetwork/lnd/channeldb/README.md (generated, vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
channeldb
==========

[Build Status](https://travis-ci.org/lightningnetwork/lnd)
[MIT licensed](https://github.com/lightningnetwork/lnd/blob/master/LICENSE)
[GoDoc](http://godoc.org/github.com/lightningnetwork/lnd/channeldb)

The channeldb implements the persistent storage engine for `lnd` and,
more generically, a data storage layer for the required state within the
Lightning Network. The backing storage engine is
[boltdb](https://github.com/coreos/bbolt), an embedded pure-Go key-value
store based off of LMDB.

The package implements an object-oriented storage model with queries and
mutations flowing through a particular object instance rather than the
database itself. The storage implemented by the objects includes: open
channels, past commitment revocation states, the channel graph (which
includes authenticated node and channel announcements), outgoing payments,
and invoices.

## Installation and Updating

```bash
$ go get -u github.com/lightningnetwork/lnd/channeldb
```
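For orientation while reading this diff, here is a minimal sketch of opening the database this package manages. It assumes the `channeldb.Open` entry point from upstream `lnd` (defined in db.go, whose diff is suppressed below) and a writable data directory; it is illustrative only and not part of this commit.

```go
package main

import (
	"log"

	"github.com/lightningnetwork/lnd/channeldb"
)

func main() {
	// Open (or create) the channel database under the given directory.
	db, err := channeldb.Open("/tmp/lnd-data")
	if err != nil {
		log.Fatalf("unable to open channeldb: %v", err)
	}
	defer db.Close()

	log.Println("channeldb opened")
}
```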
vendor/github.com/lightningnetwork/lnd/channeldb/addr.go (generated, vendored, new file, 221 lines)
@@ -0,0 +1,221 @@
package channeldb

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"net"

	"github.com/lightningnetwork/lnd/tor"
)

// addressType specifies the network protocol and version that should be used
// when connecting to a node at a particular address.
type addressType uint8

const (
	// tcp4Addr denotes an IPv4 TCP address.
	tcp4Addr addressType = 0

	// tcp6Addr denotes an IPv6 TCP address.
	tcp6Addr addressType = 1

	// v2OnionAddr denotes a version 2 Tor onion service address.
	v2OnionAddr addressType = 2

	// v3OnionAddr denotes a version 3 Tor (prop224) onion service address.
	v3OnionAddr addressType = 3
)

// encodeTCPAddr serializes a TCP address into its compact raw bytes
// representation.
func encodeTCPAddr(w io.Writer, addr *net.TCPAddr) error {
	var (
		addrType byte
		ip       []byte
	)

	if addr.IP.To4() != nil {
		addrType = byte(tcp4Addr)
		ip = addr.IP.To4()
	} else {
		addrType = byte(tcp6Addr)
		ip = addr.IP.To16()
	}

	if ip == nil {
		return fmt.Errorf("unable to encode IP %v", addr.IP)
	}

	if _, err := w.Write([]byte{addrType}); err != nil {
		return err
	}

	if _, err := w.Write(ip); err != nil {
		return err
	}

	var port [2]byte
	byteOrder.PutUint16(port[:], uint16(addr.Port))
	if _, err := w.Write(port[:]); err != nil {
		return err
	}

	return nil
}

// encodeOnionAddr serializes an onion address into its compact raw bytes
// representation.
func encodeOnionAddr(w io.Writer, addr *tor.OnionAddr) error {
	var suffixIndex int
	hostLen := len(addr.OnionService)
	switch hostLen {
	case tor.V2Len:
		if _, err := w.Write([]byte{byte(v2OnionAddr)}); err != nil {
			return err
		}
		suffixIndex = tor.V2Len - tor.OnionSuffixLen
	case tor.V3Len:
		if _, err := w.Write([]byte{byte(v3OnionAddr)}); err != nil {
			return err
		}
		suffixIndex = tor.V3Len - tor.OnionSuffixLen
	default:
		return errors.New("unknown onion service length")
	}

	suffix := addr.OnionService[suffixIndex:]
	if suffix != tor.OnionSuffix {
		return fmt.Errorf("invalid suffix \"%v\"", suffix)
	}

	host, err := tor.Base32Encoding.DecodeString(
		addr.OnionService[:suffixIndex],
	)
	if err != nil {
		return err
	}

	// Sanity check the decoded length.
	switch {
	case hostLen == tor.V2Len && len(host) != tor.V2DecodedLen:
		return fmt.Errorf("onion service %v decoded to invalid host %x",
			addr.OnionService, host)

	case hostLen == tor.V3Len && len(host) != tor.V3DecodedLen:
		return fmt.Errorf("onion service %v decoded to invalid host %x",
			addr.OnionService, host)
	}

	if _, err := w.Write(host); err != nil {
		return err
	}

	var port [2]byte
	byteOrder.PutUint16(port[:], uint16(addr.Port))
	if _, err := w.Write(port[:]); err != nil {
		return err
	}

	return nil
}

// deserializeAddr reads the serialized raw representation of an address and
// deserializes it into the actual address. This allows us to avoid address
// resolution within the channeldb package.
func deserializeAddr(r io.Reader) (net.Addr, error) {
	var addrType [1]byte
	if _, err := r.Read(addrType[:]); err != nil {
		return nil, err
	}

	var address net.Addr
	switch addressType(addrType[0]) {
	case tcp4Addr:
		var ip [4]byte
		if _, err := r.Read(ip[:]); err != nil {
			return nil, err
		}

		var port [2]byte
		if _, err := r.Read(port[:]); err != nil {
			return nil, err
		}

		address = &net.TCPAddr{
			IP:   net.IP(ip[:]),
			Port: int(binary.BigEndian.Uint16(port[:])),
		}
	case tcp6Addr:
		var ip [16]byte
		if _, err := r.Read(ip[:]); err != nil {
			return nil, err
		}

		var port [2]byte
		if _, err := r.Read(port[:]); err != nil {
			return nil, err
		}

		address = &net.TCPAddr{
			IP:   net.IP(ip[:]),
			Port: int(binary.BigEndian.Uint16(port[:])),
		}
	case v2OnionAddr:
		var h [tor.V2DecodedLen]byte
		if _, err := r.Read(h[:]); err != nil {
			return nil, err
		}

		var p [2]byte
		if _, err := r.Read(p[:]); err != nil {
			return nil, err
		}

		onionService := tor.Base32Encoding.EncodeToString(h[:])
		onionService += tor.OnionSuffix
		port := int(binary.BigEndian.Uint16(p[:]))

		address = &tor.OnionAddr{
			OnionService: onionService,
			Port:         port,
		}
	case v3OnionAddr:
		var h [tor.V3DecodedLen]byte
		if _, err := r.Read(h[:]); err != nil {
			return nil, err
		}

		var p [2]byte
		if _, err := r.Read(p[:]); err != nil {
			return nil, err
		}

		onionService := tor.Base32Encoding.EncodeToString(h[:])
		onionService += tor.OnionSuffix
		port := int(binary.BigEndian.Uint16(p[:]))

		address = &tor.OnionAddr{
			OnionService: onionService,
			Port:         port,
		}
	default:
		return nil, ErrUnknownAddressType
	}

	return address, nil
}

// serializeAddr serializes an address into its raw bytes representation so that
// it can be deserialized without requiring address resolution.
func serializeAddr(w io.Writer, address net.Addr) error {
	switch addr := address.(type) {
	case *net.TCPAddr:
		return encodeTCPAddr(w, addr)
	case *tor.OnionAddr:
		return encodeOnionAddr(w, addr)
	default:
		return ErrUnknownAddressType
	}
}
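The wire layout above is simply a 1-byte address type, followed by the host bytes, followed by a 2-byte big-endian port. A minimal sketch of the round trip; the helpers are unexported, so this only compiles inside the channeldb package, and the function name is hypothetical:

```go
package channeldb

import (
	"bytes"
	"fmt"
	"net"
)

// exampleAddrRoundTrip is a hypothetical in-package sketch of the encoding
// above: 1-byte address type || host bytes || 2-byte big-endian port.
func exampleAddrRoundTrip() error {
	orig := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 9735}

	// 7 bytes total for IPv4: type (1) + IP (4) + port (2).
	var buf bytes.Buffer
	if err := serializeAddr(&buf, orig); err != nil {
		return err
	}

	decoded, err := deserializeAddr(&buf)
	if err != nil {
		return err
	}

	fmt.Println(decoded) // 127.0.0.1:9735
	return nil
}
```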
vendor/github.com/lightningnetwork/lnd/channeldb/channel.go (generated, vendored, new file, 3331 lines)
File diff suppressed because it is too large
vendor/github.com/lightningnetwork/lnd/channeldb/channel_cache.go (generated, vendored, new file, 50 lines)
@@ -0,0 +1,50 @@
package channeldb

// channelCache is an in-memory cache used to improve the performance of
// ChanUpdatesInHorizon. It caches the chan info and edge policies for a
// particular channel.
type channelCache struct {
	n        int
	channels map[uint64]ChannelEdge
}

// newChannelCache creates a new channelCache with maximum capacity of n
// channels.
func newChannelCache(n int) *channelCache {
	return &channelCache{
		n:        n,
		channels: make(map[uint64]ChannelEdge),
	}
}

// get returns the channel from the cache, if it exists.
func (c *channelCache) get(chanid uint64) (ChannelEdge, bool) {
	channel, ok := c.channels[chanid]
	return channel, ok
}

// insert adds the entry to the channel cache. If an entry for chanid already
// exists, it will be replaced with the new entry. If the entry doesn't exist,
// it will be inserted into the cache, performing a random eviction if the
// cache is at capacity.
func (c *channelCache) insert(chanid uint64, channel ChannelEdge) {
	// If entry exists, replace it.
	if _, ok := c.channels[chanid]; ok {
		c.channels[chanid] = channel
		return
	}

	// Otherwise, evict an entry at random and insert.
	if len(c.channels) == c.n {
		for id := range c.channels {
			delete(c.channels, id)
			break
		}
	}
	c.channels[chanid] = channel
}

// remove deletes an edge for chanid from the cache, if it exists.
func (c *channelCache) remove(chanid uint64) {
	delete(c.channels, chanid)
}
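A brief in-package sketch of the semantics above: inserting past capacity evicts one arbitrary entry (Go map iteration order is randomized), so the cache never holds more than n channels. The function name is hypothetical and for illustration only:

```go
package channeldb

// exampleChannelCache is a hypothetical in-package sketch of the cache's
// bounded-size, random-eviction behavior.
func exampleChannelCache() {
	cache := newChannelCache(2)

	cache.insert(1, ChannelEdge{})
	cache.insert(2, ChannelEdge{})

	// At capacity: this evicts either 1 or 2, then stores 3.
	cache.insert(3, ChannelEdge{})

	_, ok := cache.get(3) // ok == true: 3 was just inserted
	_ = ok

	cache.remove(3)
}
```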
vendor/github.com/lightningnetwork/lnd/channeldb/codec.go (generated, vendored, new file, 454 lines)
@@ -0,0 +1,454 @@
package channeldb

import (
	"encoding/binary"
	"fmt"
	"io"
	"net"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/keychain"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/shachain"
)

// writeOutpoint writes an outpoint to the passed writer using the minimal
// amount of bytes possible.
func writeOutpoint(w io.Writer, o *wire.OutPoint) error {
	if _, err := w.Write(o.Hash[:]); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, o.Index); err != nil {
		return err
	}

	return nil
}

// readOutpoint reads an outpoint from the passed reader that was previously
// written using the writeOutpoint function.
func readOutpoint(r io.Reader, o *wire.OutPoint) error {
	if _, err := io.ReadFull(r, o.Hash[:]); err != nil {
		return err
	}
	if err := binary.Read(r, byteOrder, &o.Index); err != nil {
		return err
	}

	return nil
}

// UnknownElementType is an error returned when the codec is unable to encode or
// decode a particular type.
type UnknownElementType struct {
	method  string
	element interface{}
}

// NewUnknownElementType creates a new UnknownElementType error from the passed
// method name and element.
func NewUnknownElementType(method string, el interface{}) UnknownElementType {
	return UnknownElementType{method: method, element: el}
}

// Error returns the name of the method that encountered the error, as well as
// the type that was unsupported.
func (e UnknownElementType) Error() string {
	return fmt.Sprintf("Unknown type in %s: %T", e.method, e.element)
}

// WriteElement is a one-stop shop to write the big endian representation of
// any element which is to be serialized for storage on disk. The passed
// io.Writer should be backed by an appropriately sized byte slice, or be able
// to dynamically expand to accommodate additional data.
func WriteElement(w io.Writer, element interface{}) error {
	switch e := element.(type) {
	case keychain.KeyDescriptor:
		if err := binary.Write(w, byteOrder, e.Family); err != nil {
			return err
		}
		if err := binary.Write(w, byteOrder, e.Index); err != nil {
			return err
		}

		if e.PubKey != nil {
			if err := binary.Write(w, byteOrder, true); err != nil {
				return fmt.Errorf("error writing serialized element: %s", err)
			}

			return WriteElement(w, e.PubKey)
		}

		return binary.Write(w, byteOrder, false)

	case ChannelType:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case chainhash.Hash:
		if _, err := w.Write(e[:]); err != nil {
			return err
		}

	case wire.OutPoint:
		return writeOutpoint(w, &e)

	case lnwire.ShortChannelID:
		if err := binary.Write(w, byteOrder, e.ToUint64()); err != nil {
			return err
		}

	case lnwire.ChannelID:
		if _, err := w.Write(e[:]); err != nil {
			return err
		}

	case int64, uint64:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case uint32:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case int32:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case uint16:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case uint8:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case bool:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case btcutil.Amount:
		if err := binary.Write(w, byteOrder, uint64(e)); err != nil {
			return err
		}

	case lnwire.MilliSatoshi:
		if err := binary.Write(w, byteOrder, uint64(e)); err != nil {
			return err
		}

	case *btcec.PrivateKey:
		b := e.Serialize()
		if _, err := w.Write(b); err != nil {
			return err
		}

	case *btcec.PublicKey:
		b := e.SerializeCompressed()
		if _, err := w.Write(b); err != nil {
			return err
		}

	case shachain.Producer:
		return e.Encode(w)

	case shachain.Store:
		return e.Encode(w)

	case *wire.MsgTx:
		return e.Serialize(w)

	case [32]byte:
		if _, err := w.Write(e[:]); err != nil {
			return err
		}

	case []byte:
		if err := wire.WriteVarBytes(w, 0, e); err != nil {
			return err
		}

	case lnwire.Message:
		if _, err := lnwire.WriteMessage(w, e, 0); err != nil {
			return err
		}

	case ChannelStatus:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case ClosureType:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case lnwire.FundingFlag:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case net.Addr:
		if err := serializeAddr(w, e); err != nil {
			return err
		}

	case []net.Addr:
		if err := WriteElement(w, uint32(len(e))); err != nil {
			return err
		}

		for _, addr := range e {
			if err := serializeAddr(w, addr); err != nil {
				return err
			}
		}

	default:
		return UnknownElementType{"WriteElement", e}
	}

	return nil
}

// WriteElements writes each element in the elements slice to the passed
// io.Writer using WriteElement.
func WriteElements(w io.Writer, elements ...interface{}) error {
	for _, element := range elements {
		err := WriteElement(w, element)
		if err != nil {
			return err
		}
	}
	return nil
}

// ReadElement is a one-stop utility function to deserialize any datastructure
// encoded using the serialization format of the database.
func ReadElement(r io.Reader, element interface{}) error {
	switch e := element.(type) {
	case *keychain.KeyDescriptor:
		if err := binary.Read(r, byteOrder, &e.Family); err != nil {
			return err
		}
		if err := binary.Read(r, byteOrder, &e.Index); err != nil {
			return err
		}

		var hasPubKey bool
		if err := binary.Read(r, byteOrder, &hasPubKey); err != nil {
			return err
		}

		if hasPubKey {
			return ReadElement(r, &e.PubKey)
		}

	case *ChannelType:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *chainhash.Hash:
		if _, err := io.ReadFull(r, e[:]); err != nil {
			return err
		}

	case *wire.OutPoint:
		return readOutpoint(r, e)

	case *lnwire.ShortChannelID:
		var a uint64
		if err := binary.Read(r, byteOrder, &a); err != nil {
			return err
		}
		*e = lnwire.NewShortChanIDFromInt(a)

	case *lnwire.ChannelID:
		if _, err := io.ReadFull(r, e[:]); err != nil {
			return err
		}

	case *int64, *uint64:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *uint32:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *int32:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *uint16:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *uint8:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *bool:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *btcutil.Amount:
		var a uint64
		if err := binary.Read(r, byteOrder, &a); err != nil {
			return err
		}

		*e = btcutil.Amount(a)

	case *lnwire.MilliSatoshi:
		var a uint64
		if err := binary.Read(r, byteOrder, &a); err != nil {
			return err
		}

		*e = lnwire.MilliSatoshi(a)

	case **btcec.PrivateKey:
		var b [btcec.PrivKeyBytesLen]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}

		priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), b[:])
		*e = priv

	case **btcec.PublicKey:
		var b [btcec.PubKeyBytesLenCompressed]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}

		pubKey, err := btcec.ParsePubKey(b[:], btcec.S256())
		if err != nil {
			return err
		}
		*e = pubKey

	case *shachain.Producer:
		var root [32]byte
		if _, err := io.ReadFull(r, root[:]); err != nil {
			return err
		}

		// TODO(roasbeef): remove
		producer, err := shachain.NewRevocationProducerFromBytes(root[:])
		if err != nil {
			return err
		}

		*e = producer

	case *shachain.Store:
		store, err := shachain.NewRevocationStoreFromBytes(r)
		if err != nil {
			return err
		}

		*e = store

	case **wire.MsgTx:
		tx := wire.NewMsgTx(2)
		if err := tx.Deserialize(r); err != nil {
			return err
		}

		*e = tx

	case *[32]byte:
		if _, err := io.ReadFull(r, e[:]); err != nil {
			return err
		}

	case *[]byte:
		bytes, err := wire.ReadVarBytes(r, 0, 66000, "[]byte")
		if err != nil {
			return err
		}

		*e = bytes

	case *lnwire.Message:
		msg, err := lnwire.ReadMessage(r, 0)
		if err != nil {
			return err
		}

		*e = msg

	case *ChannelStatus:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *ClosureType:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *lnwire.FundingFlag:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *net.Addr:
		addr, err := deserializeAddr(r)
		if err != nil {
			return err
		}
		*e = addr

	case *[]net.Addr:
		var numAddrs uint32
		if err := ReadElement(r, &numAddrs); err != nil {
			return err
		}

		*e = make([]net.Addr, numAddrs)
		for i := uint32(0); i < numAddrs; i++ {
			addr, err := deserializeAddr(r)
			if err != nil {
				return err
			}
			(*e)[i] = addr
		}

	default:
		return UnknownElementType{"ReadElement", e}
	}

	return nil
}

// ReadElements deserializes a variable number of elements from the passed
// io.Reader, with each element being deserialized according to the ReadElement
// function.
func ReadElements(r io.Reader, elements ...interface{}) error {
	for _, element := range elements {
		err := ReadElement(r, element)
		if err != nil {
			return err
		}
	}
	return nil
}
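The two type switches are symmetric, so values written with WriteElements can be read back with ReadElements given the same element order. A minimal in-package sketch (hypothetical function name):

```go
package channeldb

import "bytes"

// exampleCodecRoundTrip is a hypothetical in-package sketch: values written
// with WriteElements are read back with ReadElements in the same order.
func exampleCodecRoundTrip() error {
	var buf bytes.Buffer

	// Write a fixed-width integer, a bool, and a var-length byte slice.
	err := WriteElements(&buf, uint32(42), true, []byte{0xde, 0xad})
	if err != nil {
		return err
	}

	var (
		num  uint32
		flag bool
		blob []byte
	)

	// Read them back into pointers, in the same order.
	return ReadElements(&buf, &num, &flag, &blob)
}
```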
vendor/github.com/lightningnetwork/lnd/channeldb/db.go (generated, vendored, new file, 1323 lines)
File diff suppressed because it is too large
vendor/github.com/lightningnetwork/lnd/channeldb/doc.go (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
package channeldb
vendor/github.com/lightningnetwork/lnd/channeldb/duplicate_payments.go (generated, vendored, new file, 246 lines)
@@ -0,0 +1,246 @@
package channeldb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"time"

	"github.com/btcsuite/btcd/btcec"
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing/route"
)

var (
	// duplicatePaymentsBucket is the name of an optional sub-bucket within
	// the payment hash bucket that is used to hold duplicate payments to a
	// payment hash. This is needed to support information from earlier
	// versions of lnd, where it was possible to pay to a payment hash more
	// than once.
	duplicatePaymentsBucket = []byte("payment-duplicate-bucket")

	// duplicatePaymentSettleInfoKey is a key used in the payment's
	// sub-bucket to store the settle info of the payment.
	duplicatePaymentSettleInfoKey = []byte("payment-settle-info")

	// duplicatePaymentAttemptInfoKey is a key used in the payment's
	// sub-bucket to store the info about the latest attempt that was done
	// for the payment in question.
	duplicatePaymentAttemptInfoKey = []byte("payment-attempt-info")

	// duplicatePaymentCreationInfoKey is a key used in the payment's
	// sub-bucket to store the creation info of the payment.
	duplicatePaymentCreationInfoKey = []byte("payment-creation-info")

	// duplicatePaymentFailInfoKey is a key used in the payment's sub-bucket
	// to store information about the reason a payment failed.
	duplicatePaymentFailInfoKey = []byte("payment-fail-info")

	// duplicatePaymentSequenceKey is a key used in the payment's sub-bucket
	// to store the sequence number of the payment.
	duplicatePaymentSequenceKey = []byte("payment-sequence-key")
)

// duplicateHTLCAttemptInfo contains static information about a specific HTLC
// attempt for a payment. This information is used by the router to handle any
// errors coming back after an attempt is made, and to query the switch about
// the status of the attempt.
type duplicateHTLCAttemptInfo struct {
	// attemptID is the unique ID used for this attempt.
	attemptID uint64

	// sessionKey is the ephemeral key used for this attempt.
	sessionKey *btcec.PrivateKey

	// route is the route attempted to send the HTLC.
	route route.Route
}

// fetchDuplicatePaymentStatus fetches the payment status of the payment. If the
// payment isn't found, it will default to "StatusUnknown".
func fetchDuplicatePaymentStatus(bucket kvdb.ReadBucket) PaymentStatus {
	if bucket.Get(duplicatePaymentSettleInfoKey) != nil {
		return StatusSucceeded
	}

	if bucket.Get(duplicatePaymentFailInfoKey) != nil {
		return StatusFailed
	}

	if bucket.Get(duplicatePaymentCreationInfoKey) != nil {
		return StatusInFlight
	}

	return StatusUnknown
}

func deserializeDuplicateHTLCAttemptInfo(r io.Reader) (
	*duplicateHTLCAttemptInfo, error) {

	a := &duplicateHTLCAttemptInfo{}
	err := ReadElements(r, &a.attemptID, &a.sessionKey)
	if err != nil {
		return nil, err
	}
	a.route, err = DeserializeRoute(r)
	if err != nil {
		return nil, err
	}
	return a, nil
}

func deserializeDuplicatePaymentCreationInfo(r io.Reader) (
	*PaymentCreationInfo, error) {

	var scratch [8]byte

	c := &PaymentCreationInfo{}

	if _, err := io.ReadFull(r, c.PaymentHash[:]); err != nil {
		return nil, err
	}

	if _, err := io.ReadFull(r, scratch[:]); err != nil {
		return nil, err
	}
	c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))

	if _, err := io.ReadFull(r, scratch[:]); err != nil {
		return nil, err
	}
	c.CreationTime = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0)

	if _, err := io.ReadFull(r, scratch[:4]); err != nil {
		return nil, err
	}

	reqLen := byteOrder.Uint32(scratch[:4])
	payReq := make([]byte, reqLen)
	if reqLen > 0 {
		if _, err := io.ReadFull(r, payReq); err != nil {
			return nil, err
		}
	}
	c.PaymentRequest = payReq

	return c, nil
}

func fetchDuplicatePayment(bucket kvdb.ReadBucket) (*MPPayment, error) {
	seqBytes := bucket.Get(duplicatePaymentSequenceKey)
	if seqBytes == nil {
		return nil, fmt.Errorf("sequence number not found")
	}

	sequenceNum := binary.BigEndian.Uint64(seqBytes)

	// Get the payment status.
	paymentStatus := fetchDuplicatePaymentStatus(bucket)

	// Get the PaymentCreationInfo.
	b := bucket.Get(duplicatePaymentCreationInfoKey)
	if b == nil {
		return nil, fmt.Errorf("creation info not found")
	}

	r := bytes.NewReader(b)
	creationInfo, err := deserializeDuplicatePaymentCreationInfo(r)
	if err != nil {
		return nil, err
	}

	// Get failure reason if available.
	var failureReason *FailureReason
	b = bucket.Get(duplicatePaymentFailInfoKey)
	if b != nil {
		reason := FailureReason(b[0])
		failureReason = &reason
	}

	payment := &MPPayment{
		SequenceNum:   sequenceNum,
		Info:          creationInfo,
		FailureReason: failureReason,
		Status:        paymentStatus,
	}

	// Get the HTLCAttemptInfo. It can be absent.
	b = bucket.Get(duplicatePaymentAttemptInfoKey)
	if b != nil {
		r = bytes.NewReader(b)
		attempt, err := deserializeDuplicateHTLCAttemptInfo(r)
		if err != nil {
			return nil, err
		}

		htlc := HTLCAttempt{
			HTLCAttemptInfo: HTLCAttemptInfo{
				AttemptID:  attempt.attemptID,
				Route:      attempt.route,
				SessionKey: attempt.sessionKey,
			},
		}

		// Get the payment preimage. This is only found for
		// successful payments.
		b = bucket.Get(duplicatePaymentSettleInfoKey)
		if b != nil {
			var preimg lntypes.Preimage
			copy(preimg[:], b)

			htlc.Settle = &HTLCSettleInfo{
				Preimage:   preimg,
				SettleTime: time.Time{},
			}
		} else {
			// Otherwise the payment must have failed.
			htlc.Failure = &HTLCFailInfo{
				FailTime: time.Time{},
			}
		}

		payment.HTLCs = []HTLCAttempt{htlc}
	}

	return payment, nil
}

func fetchDuplicatePayments(paymentHashBucket kvdb.ReadBucket) ([]*MPPayment,
	error) {

	var payments []*MPPayment

	// For older versions of lnd, duplicate payments to a payment hash were
	// possible. These will be found in a sub-bucket indexed by their
	// sequence number if available.
	dup := paymentHashBucket.NestedReadBucket(duplicatePaymentsBucket)
	if dup == nil {
		return nil, nil
	}

	err := dup.ForEach(func(k, v []byte) error {
		subBucket := dup.NestedReadBucket(k)
		if subBucket == nil {
			// We expect one bucket for each duplicate to be found.
			return fmt.Errorf("non bucket element " +
				"in duplicate bucket")
		}

		p, err := fetchDuplicatePayment(subBucket)
		if err != nil {
			return err
		}

		payments = append(payments, p)
		return nil
	})
	if err != nil {
		return nil, err
	}

	return payments, nil
}
vendor/github.com/lightningnetwork/lnd/channeldb/error.go (generated, vendored, new file, 133 lines)
@@ -0,0 +1,133 @@
package channeldb

import (
	"errors"
	"fmt"
)

var (
	// ErrNoChanDBExists is returned when a channel bucket hasn't been
	// created.
	ErrNoChanDBExists = fmt.Errorf("channel db has not yet been created")

	// ErrNoHistoricalBucket is returned when the historical channel bucket
	// has not yet been created.
	ErrNoHistoricalBucket = fmt.Errorf("historical channel bucket has " +
		"not yet been created")

	// ErrDBReversion is returned when detecting an attempt to revert to a
	// prior database version.
	ErrDBReversion = fmt.Errorf("channel db cannot revert to prior version")

	// ErrLinkNodesNotFound is returned when the node info bucket hasn't
	// been created.
	ErrLinkNodesNotFound = fmt.Errorf("no link nodes exist")

	// ErrNoActiveChannels is returned when there are no active (open)
	// channels within the database.
	ErrNoActiveChannels = fmt.Errorf("no active channels exist")

	// ErrNoPastDeltas is returned when the channel delta bucket hasn't been
	// created.
	ErrNoPastDeltas = fmt.Errorf("channel has no recorded deltas")

	// ErrInvoiceNotFound is returned when a targeted invoice can't be
	// found.
	ErrInvoiceNotFound = fmt.Errorf("unable to locate invoice")

	// ErrNoInvoicesCreated is returned when we don't have invoices in
	// our database to return.
	ErrNoInvoicesCreated = fmt.Errorf("there are no existing invoices")

	// ErrDuplicateInvoice is returned when an invoice with the target
	// payment hash already exists.
	ErrDuplicateInvoice = fmt.Errorf("invoice with payment hash already exists")

	// ErrNoPaymentsCreated is returned when the bucket of payments hasn't
	// been created.
	ErrNoPaymentsCreated = fmt.Errorf("there are no existing payments")

	// ErrNodeNotFound is returned when the node bucket exists, but a node
	// with the specific identity can't be found.
	ErrNodeNotFound = fmt.Errorf("link node with target identity not found")

	// ErrChannelNotFound is returned when we attempt to locate a channel
	// for a specific chain, but it is not found.
	ErrChannelNotFound = fmt.Errorf("channel not found")

	// ErrMetaNotFound is returned when the meta bucket hasn't been
	// created.
	ErrMetaNotFound = fmt.Errorf("unable to locate meta information")

	// ErrGraphNotFound is returned when at least one of the components of
	// the graph doesn't exist.
	ErrGraphNotFound = fmt.Errorf("graph bucket not initialized")

	// ErrGraphNeverPruned is returned when the graph was never pruned.
	ErrGraphNeverPruned = fmt.Errorf("graph never pruned")

	// ErrSourceNodeNotSet is returned if the source node of the graph
	// hasn't been added. The source node is the center node within a
	// star-graph.
	ErrSourceNodeNotSet = fmt.Errorf("source node does not exist")

	// ErrGraphNodesNotFound is returned in case none of the nodes has
	// been added to the graph node bucket.
	ErrGraphNodesNotFound = fmt.Errorf("no graph nodes exist")

	// ErrGraphNoEdgesFound is returned in case none of the channels/edges
	// has been added to the graph edge bucket.
	ErrGraphNoEdgesFound = fmt.Errorf("no graph edges exist")

	// ErrGraphNodeNotFound is returned when we're unable to find the target
	// node.
	ErrGraphNodeNotFound = fmt.Errorf("unable to find node")

	// ErrEdgeNotFound is returned when an edge for the target chanID
	// can't be found.
	ErrEdgeNotFound = fmt.Errorf("edge not found")

	// ErrZombieEdge is an error returned when we attempt to look up an edge
	// but it is marked as a zombie within the zombie index.
	ErrZombieEdge = errors.New("edge marked as zombie")

	// ErrEdgeAlreadyExist is returned when an edge with the specific
	// channel id can't be added because it already exists.
	ErrEdgeAlreadyExist = fmt.Errorf("edge already exist")

	// ErrNodeAliasNotFound is returned when the alias for a node can't be
	// found.
	ErrNodeAliasNotFound = fmt.Errorf("alias for node not found")

	// ErrUnknownAddressType is returned when a node's addressType is not
	// an expected value.
	ErrUnknownAddressType = fmt.Errorf("address type cannot be resolved")

	// ErrNoClosedChannels is returned when a node is queried for all the
	// channels it has closed, but it hasn't yet closed any channels.
	ErrNoClosedChannels = fmt.Errorf("no channel have been closed yet")

	// ErrNoForwardingEvents is returned in the case that a query fails due
	// to the log not having any recorded events.
	ErrNoForwardingEvents = fmt.Errorf("no recorded forwarding events")

	// ErrEdgePolicyOptionalFieldNotFound is an error returned if a channel
	// policy field is not found in the db even though its message flags
	// indicate it should be.
	ErrEdgePolicyOptionalFieldNotFound = fmt.Errorf("optional field not " +
		"present")

	// ErrChanAlreadyExists is returned when the caller attempts to create a
	// channel with a channel point that is already present in the
	// database.
	ErrChanAlreadyExists = fmt.Errorf("channel already exists")
)

// ErrTooManyExtraOpaqueBytes creates an error which should be returned if the
// caller attempts to write an announcement message which bears too many extra
// opaque bytes. We limit this value in order to ensure that we don't waste
// disk space due to nodes unnecessarily padding out their announcements with
// garbage data.
func ErrTooManyExtraOpaqueBytes(numBytes int) error {
	return fmt.Errorf("max allowed number of opaque bytes is %v, received "+
		"%v bytes", MaxAllowedExtraOpaqueBytes, numBytes)
}
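These sentinels are plain error values, so callers branch on equality rather than parsing messages. A small hypothetical sketch of that pattern:

```go
package channeldb

import "fmt"

// handleForwardingQueryErr is a hypothetical sketch: the sentinel errors
// above are compared directly, as the forwarding log's Query does internally.
func handleForwardingQueryErr(err error) error {
	switch err {
	case nil:
		return nil
	case ErrNoForwardingEvents:
		// An empty forwarding log is not fatal; treat it as no results.
		return nil
	default:
		return fmt.Errorf("forwarding query failed: %v", err)
	}
}
```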
vendor/github.com/lightningnetwork/lnd/channeldb/fees.go (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
package channeldb
vendor/github.com/lightningnetwork/lnd/channeldb/forwarding_log.go (generated, vendored, new file, 274 lines)
@@ -0,0 +1,274 @@
package channeldb

import (
	"bytes"
	"io"
	"sort"
	"time"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lnwire"
)

var (
	// forwardingLogBucket is the bucket that we'll use to store the
	// forwarding log. The forwarding log contains a time series database
	// of the forwarding history of a lightning daemon. Each key within the
	// bucket is a timestamp (in nanoseconds since the unix epoch), and
	// the value is a slice of forwarding events for that timestamp.
	forwardingLogBucket = []byte("circuit-fwd-log")
)

const (
	// forwardingEventSize is the size of a forwarding event. The breakdown
	// is as follows:
	//
	//  * 8 byte incoming chan ID || 8 byte outgoing chan ID || 8 byte value in
	//    || 8 byte value out
	//
	// From the value in and value out, callers can easily compute the
	// total fee extracted from a forwarding event.
	forwardingEventSize = 32

	// MaxResponseEvents is the max number of forwarding events that will
	// be returned by a single query response. This size was selected to
	// safely remain under gRPC's 4MiB message size response limit. As each
	// full forwarding event (including the timestamp) is 40 bytes, we can
	// safely return 50k entries in a single response.
	MaxResponseEvents = 50000
)

// ForwardingLog returns an instance of the ForwardingLog object backed by the
// target database instance.
func (d *DB) ForwardingLog() *ForwardingLog {
	return &ForwardingLog{
		db: d,
	}
}

// ForwardingLog is a time series database that logs the fulfilment of payment
// circuits by a lightning network daemon. The log contains a series of
// forwarding events which map a timestamp to a forwarding event. A forwarding
// event describes which channels were used to create+settle a circuit, and the
// amount involved. Subtracting the outgoing amount from the incoming amount
// reveals the fee charged for the forwarding service.
type ForwardingLog struct {
	db *DB
}

// ForwardingEvent is an event in the forwarding log's time series. Each
// forwarding event logs the creation and tear-down of a payment circuit. A
// circuit is created once an incoming HTLC has been fully forwarded, and
// destroyed once the payment has been settled.
type ForwardingEvent struct {
	// Timestamp is the settlement time of this payment circuit.
	Timestamp time.Time

	// IncomingChanID is the incoming channel ID of the payment circuit.
	IncomingChanID lnwire.ShortChannelID

	// OutgoingChanID is the outgoing channel ID of the payment circuit.
	OutgoingChanID lnwire.ShortChannelID

	// AmtIn is the amount of the incoming HTLC. Subtracting the outgoing
	// amount from this gives the total fee of this payment circuit.
	AmtIn lnwire.MilliSatoshi

	// AmtOut is the amount of the outgoing HTLC. Subtracting this from the
	// incoming amount gives the total fee for this payment circuit.
	AmtOut lnwire.MilliSatoshi
}

// encodeForwardingEvent writes out the target forwarding event to the passed
// io.Writer, using the expected DB format. Note that the timestamp isn't
// serialized as this will be the key value within the bucket.
func encodeForwardingEvent(w io.Writer, f *ForwardingEvent) error {
	return WriteElements(
		w, f.IncomingChanID, f.OutgoingChanID, f.AmtIn, f.AmtOut,
	)
}

// decodeForwardingEvent attempts to decode the raw bytes of a serialized
// forwarding event into the target ForwardingEvent. Note that the timestamp
// won't be decoded, as the caller is expected to set this due to the bucket
// structure of the forwarding log.
func decodeForwardingEvent(r io.Reader, f *ForwardingEvent) error {
	return ReadElements(
		r, &f.IncomingChanID, &f.OutgoingChanID, &f.AmtIn, &f.AmtOut,
	)
}

// AddForwardingEvents adds a series of forwarding events to the database.
// Before inserting, the set of events will be sorted according to their
// timestamp. This ensures that all writes to disk are sequential.
func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error {
	// Before we create the database transaction, we'll ensure that the set
	// of forwarding events are properly sorted according to their
	// timestamp.
	sort.Slice(events, func(i, j int) bool {
		return events[i].Timestamp.Before(events[j].Timestamp)
	})

	var timestamp [8]byte

	return kvdb.Batch(f.db.Backend, func(tx kvdb.RwTx) error {
		// First, we'll fetch the bucket that stores our time series
		// log.
		logBucket, err := tx.CreateTopLevelBucket(
			forwardingLogBucket,
		)
		if err != nil {
			return err
		}

		// With the bucket obtained, we can now begin to write out the
		// series of events.
		for _, event := range events {
			var eventBytes [forwardingEventSize]byte
			eventBuf := bytes.NewBuffer(eventBytes[0:0:forwardingEventSize])

			// First, we'll serialize this timestamp into our
			// timestamp buffer.
			byteOrder.PutUint64(
				timestamp[:], uint64(event.Timestamp.UnixNano()),
			)

			// With the key encoded, we'll then encode the event
			// into our buffer, then write it out to disk.
			err := encodeForwardingEvent(eventBuf, &event)
			if err != nil {
				return err
			}
			err = logBucket.Put(timestamp[:], eventBuf.Bytes())
			if err != nil {
				return err
			}
		}

		return nil
	})
}

// ForwardingEventQuery represents a query to the forwarding log payment
// circuit time series database. The query allows a caller to retrieve all
// records for a particular time slice, starting at an offset within that
// slice, while limiting the total number of records returned.
type ForwardingEventQuery struct {
	// StartTime is the start time of the time slice.
	StartTime time.Time

	// EndTime is the end time of the time slice.
	EndTime time.Time

	// IndexOffset is the offset within the time slice to start at. This
	// can be used to start the response at a particular record.
	IndexOffset uint32

	// NumMaxEvents is the max number of events to return.
	NumMaxEvents uint32
}

// ForwardingLogTimeSlice is the response to a forwarding query. It includes
// the original query, the set of events that match the query, and an integer
// which represents the offset index of the last item in the set of returned
// events. This integer allows callers to resume their query using this offset
// in the event that the query's response exceeds the max number of returnable
// events.
type ForwardingLogTimeSlice struct {
	ForwardingEventQuery

	// ForwardingEvents is the set of events in our time series that answer
	// the query embedded above.
	ForwardingEvents []ForwardingEvent

	// LastIndexOffset is the index of the last element in the set of
	// returned ForwardingEvents above. Callers can use this to resume
	// their query in the event that the time slice has too many events to
	// fit into a single response.
	LastIndexOffset uint32
}

// Query allows a caller to query the forwarding event time series for a
// particular time slice. The caller can control the precise time as well as
// the number of events to be returned.
//
// TODO(roasbeef): rename?
func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, error) {
	resp := ForwardingLogTimeSlice{
		ForwardingEventQuery: q,
	}

	// If the user provided an index offset, then we'll know how many
	// records we need to skip. We'll also keep track of the record offset
	// as that's part of the final return value.
	recordsToSkip := q.IndexOffset
	recordOffset := q.IndexOffset

	err := kvdb.View(f.db, func(tx kvdb.ReadTx) error {
		// If the bucket wasn't found, then there aren't any events to
		// be returned.
		logBucket := tx.ReadBucket(forwardingLogBucket)
		if logBucket == nil {
			return ErrNoForwardingEvents
		}

		// We'll be using a cursor to seek into the database, so we'll
		// populate byte slices that represent the start of the key
		// space we're interested in, and the end.
		var startTime, endTime [8]byte
		byteOrder.PutUint64(startTime[:], uint64(q.StartTime.UnixNano()))
		byteOrder.PutUint64(endTime[:], uint64(q.EndTime.UnixNano()))

		// If we know that a set of log events exists, then we'll begin
		// our seek through the log in order to satisfy the query.
		// We'll continue until either we reach the end of the range,
		// or reach our max number of events.
		logCursor := logBucket.ReadCursor()
		timestamp, events := logCursor.Seek(startTime[:])
		for ; timestamp != nil && bytes.Compare(timestamp, endTime[:]) <= 0; timestamp, events = logCursor.Next() {
			// If our current return payload exceeds the max number
			// of events, then we'll exit now.
			if uint32(len(resp.ForwardingEvents)) >= q.NumMaxEvents {
				return nil
			}

			// If we're not yet past the user defined offset, then
			// we'll continue to seek forward.
			if recordsToSkip > 0 {
				recordsToSkip--
				continue
			}

			currentTime := time.Unix(
				0, int64(byteOrder.Uint64(timestamp)),
			)

			// At this point, we've skipped enough records to start
			// to collate our query. For each record, we'll
			// increment the final record offset so the querier can
			// utilize pagination to seek further.
			readBuf := bytes.NewReader(events)
			for readBuf.Len() != 0 {
				var event ForwardingEvent
				err := decodeForwardingEvent(readBuf, &event)
				if err != nil {
					return err
				}

				event.Timestamp = currentTime
				resp.ForwardingEvents = append(resp.ForwardingEvents, event)

				recordOffset++
			}
		}

		return nil
	})
	if err != nil && err != ErrNoForwardingEvents {
		return ForwardingLogTimeSlice{}, err
	}

	resp.LastIndexOffset = recordOffset

	return resp, nil
}
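Putting AddForwardingEvents and Query together, a minimal in-package sketch (hypothetical function name, assuming an already-open *DB):

```go
package channeldb

import (
	"fmt"
	"time"

	"github.com/lightningnetwork/lnd/lnwire"
)

// exampleForwardingLog is a hypothetical in-package sketch that records one
// forwarding event and queries it back over a time slice.
func exampleForwardingLog(db *DB) error {
	fwdLog := db.ForwardingLog()

	now := time.Now()
	event := ForwardingEvent{
		Timestamp:      now,
		IncomingChanID: lnwire.NewShortChanIDFromInt(1),
		OutgoingChanID: lnwire.NewShortChanIDFromInt(2),
		AmtIn:          1000, // mSAT in
		AmtOut:         990,  // mSAT out, i.e. a 10 mSAT fee
	}
	if err := fwdLog.AddForwardingEvents([]ForwardingEvent{event}); err != nil {
		return err
	}

	slice, err := fwdLog.Query(ForwardingEventQuery{
		StartTime:    now.Add(-time.Minute),
		EndTime:      now.Add(time.Minute),
		NumMaxEvents: 10,
	})
	if err != nil {
		return err
	}

	fmt.Printf("retrieved %d event(s)\n", len(slice.ForwardingEvents))
	return nil
}
```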
vendor/github.com/lightningnetwork/lnd/channeldb/forwarding_package.go (generated, vendored, new file, 928 lines)
@@ -0,0 +1,928 @@
package channeldb

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lnwire"
)

// ErrCorruptedFwdPkg signals that the on-disk structure of the forwarding
// package has potentially been mangled.
var ErrCorruptedFwdPkg = errors.New("fwding package db has been corrupted")

// FwdState is an enum used to describe the lifecycle of a FwdPkg.
type FwdState byte

const (
	// FwdStateLockedIn is the starting state for all forwarding packages.
	// Packages in this state have not yet committed to the exact set of
	// Adds to forward to the switch.
	FwdStateLockedIn FwdState = iota

	// FwdStateProcessed marks the state in which all Adds have been
	// locally processed and the forwarding decision to the switch has been
	// persisted.
	FwdStateProcessed

	// FwdStateCompleted signals that all Adds have been acked, and that all
	// settles and fails have been delivered to their sources. Packages in
	// this state can be removed permanently.
	FwdStateCompleted
)

var (
	// fwdPackagesKey is the root-level bucket to which all forwarding
	// packages are written. This bucket is further subdivided based on the
	// short channel ID of each channel.
	fwdPackagesKey = []byte("fwd-packages")

	// addBucketKey is the bucket to which all Add log updates are written.
	addBucketKey = []byte("add-updates")

	// failSettleBucketKey is the bucket to which all Settle/Fail log
	// updates are written.
	failSettleBucketKey = []byte("fail-settle-updates")

	// fwdFilterKey is a key used to write the set of Adds that passed
	// validation and are to be forwarded to the switch.
	// NOTE: The presence of this key within a forwarding package indicates
	// that the package has reached FwdStateProcessed.
	fwdFilterKey = []byte("fwd-filter-key")

	// ackFilterKey is a key used to access the PkgFilter indicating which
	// Adds have received a Settle/Fail. This response may come from a
	// number of sources, including: exitHop settle/fails, switch failures,
	// chain arbiter interjections, as well as settle/fails from the
	// next hop in the route.
	ackFilterKey = []byte("ack-filter-key")

	// settleFailFilterKey is a key used to access the PkgFilter indicating
	// which Settles/Fails have been received and processed by the link
	// that originally received the Add.
	settleFailFilterKey = []byte("settle-fail-filter-key")
)

// PkgFilter is used to compactly represent a particular subset of the Adds in a
// forwarding package. Each filter is represented as a simple, statically-sized
// bitvector, where the elements are intended to be the indices of the Adds as
// they are written in the FwdPkg.
type PkgFilter struct {
	count  uint16
	filter []byte
}

// NewPkgFilter initializes an empty PkgFilter supporting `count` elements.
func NewPkgFilter(count uint16) *PkgFilter {
	// We add 7 to ensure that the integer division yields properly rounded
	// values.
	filterLen := (count + 7) / 8

	return &PkgFilter{
		count:  count,
		filter: make([]byte, filterLen),
	}
}

// Count returns the number of elements represented by this PkgFilter.
func (f *PkgFilter) Count() uint16 {
	return f.count
}

// Set marks the `i`-th element as included by this filter.
// NOTE: It is assumed that i is always less than count.
func (f *PkgFilter) Set(i uint16) {
	byt := i / 8
	bit := i % 8

	// Set the i-th bit in the filter.
	// TODO(conner): ignore if > count to prevent panic?
	f.filter[byt] |= byte(1 << (7 - bit))
}

// Contains queries the filter for membership of index `i`.
// NOTE: It is assumed that i is always less than count.
func (f *PkgFilter) Contains(i uint16) bool {
	byt := i / 8
	bit := i % 8

	// Read the i-th bit in the filter.
	// TODO(conner): ignore if > count to prevent panic?
	return f.filter[byt]&(1<<(7-bit)) != 0
}

// Equal checks two PkgFilters for equality.
func (f *PkgFilter) Equal(f2 *PkgFilter) bool {
	if f == f2 {
		return true
	}
	if f.count != f2.count {
		return false
	}

	return bytes.Equal(f.filter, f2.filter)
}

// IsFull returns true if every element in the filter has been Set, and false
// otherwise.
func (f *PkgFilter) IsFull() bool {
	// Batch validate bytes that are fully used.
	for i := uint16(0); i < f.count/8; i++ {
		if f.filter[i] != 0xFF {
			return false
		}
	}

	// If the count is not a multiple of 8, check that the filter contains
	// all remaining bits.
	rem := f.count % 8
	for idx := f.count - rem; idx < f.count; idx++ {
		if !f.Contains(idx) {
			return false
		}
	}

	return true
}

// Size returns the number of bytes produced when the PkgFilter is serialized.
func (f *PkgFilter) Size() uint16 {
	// 2 bytes for uint16 `count`, then round up number of bytes required to
	// represent `count` bits.
	return 2 + (f.count+7)/8
}

// Encode writes the filter to the provided io.Writer.
func (f *PkgFilter) Encode(w io.Writer) error {
	if err := binary.Write(w, binary.BigEndian, f.count); err != nil {
		return err
	}

	_, err := w.Write(f.filter)

	return err
}

// Decode reads the filter from the provided io.Reader.
func (f *PkgFilter) Decode(r io.Reader) error {
	if err := binary.Read(r, binary.BigEndian, &f.count); err != nil {
		return err
	}

	f.filter = make([]byte, f.Size()-2)
	_, err := io.ReadFull(r, f.filter)

	return err
}
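A minimal in-package sketch of the PkgFilter API above, including an Encode/Decode round trip (hypothetical function name, for illustration only):

```go
package channeldb

import (
	"bytes"
	"fmt"
)

// examplePkgFilter is a hypothetical sketch: track acks for 10 Adds, then
// round-trip the filter through Encode/Decode.
func examplePkgFilter() error {
	f := NewPkgFilter(10)
	f.Set(0)
	f.Set(9)

	fmt.Println(f.Contains(9)) // true
	fmt.Println(f.IsFull())    // false: only 2 of 10 indices are set

	var buf bytes.Buffer
	if err := f.Encode(&buf); err != nil {
		return err
	}

	f2 := &PkgFilter{}
	if err := f2.Decode(&buf); err != nil {
		return err
	}

	fmt.Println(f.Equal(f2)) // true
	return nil
}
```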
|
||||
|
||||
// FwdPkg records all adds, settles, and fails that were locked in as a result
// of the remote peer sending us a revocation. Each package is identified by
// the short chanid and remote commitment height corresponding to the revocation
// that locked in the HTLCs. For everything except a locally initiated payment,
// settles and fails in a forwarding package must have a corresponding Add in
// another package, and can be removed individually once the source link has
// received the fail/settle.
//
// Adds cannot be removed, as we need to present the same batch of Adds to
// properly handle replay protection. Instead, we use a PkgFilter to mark that
// we have finished processing a particular Add. A FwdPkg should only be deleted
// after the AckFilter is full and all settles and fails have been persistently
// removed.
type FwdPkg struct {
	// Source identifies the channel that wrote this forwarding package.
	Source lnwire.ShortChannelID

	// Height is the height of the remote commitment chain that locked in
	// this forwarding package.
	Height uint64

	// State signals the persistent condition of the package and directs how
	// to reprocess the package in the event of failures.
	State FwdState

	// Adds contains all add messages which need to be processed and
	// forwarded to the switch. Adds does not change over the life of a
	// forwarding package.
	Adds []LogUpdate

	// FwdFilter is a filter containing the indices of all Adds that were
	// forwarded to the switch.
	FwdFilter *PkgFilter

	// AckFilter is a filter containing the indices of all Adds for which
	// the source has received a settle or fail and is reflected in the next
	// commitment txn. A package should not be removed until IsFull()
	// returns true.
	AckFilter *PkgFilter

	// SettleFails contains all settle and fail messages that should be
	// forwarded to the switch.
	SettleFails []LogUpdate

	// SettleFailFilter is a filter containing the indices of all Settles
	// or Fails originating in this package that have been received and
	// locked into the incoming link's commitment state.
	SettleFailFilter *PkgFilter
}

// NewFwdPkg initializes a new forwarding package in FwdStateLockedIn. This
// should be used to create a package at the time we receive a revocation.
func NewFwdPkg(source lnwire.ShortChannelID, height uint64,
	addUpdates, settleFailUpdates []LogUpdate) *FwdPkg {

	nAddUpdates := uint16(len(addUpdates))
	nSettleFailUpdates := uint16(len(settleFailUpdates))

	return &FwdPkg{
		Source:           source,
		Height:           height,
		State:            FwdStateLockedIn,
		Adds:             addUpdates,
		FwdFilter:        NewPkgFilter(nAddUpdates),
		AckFilter:        NewPkgFilter(nAddUpdates),
		SettleFails:      settleFailUpdates,
		SettleFailFilter: NewPkgFilter(nSettleFailUpdates),
	}
}

// ID returns a unique identifier for this package, used to ensure that sphinx
// replay processing of this batch is idempotent.
func (f *FwdPkg) ID() []byte {
	var id = make([]byte, 16)
	byteOrder.PutUint64(id[:8], f.Source.ToUint64())
	byteOrder.PutUint64(id[8:], f.Height)
	return id
}

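// The resulting identifier is a fixed 16-byte value laid out as:
//
//	bytes [0:8)  big-endian short channel ID of the source
//	bytes [8:16) big-endian remote commitment height
//
// which makes (source, height) pairs directly comparable as raw bytes.
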
// String returns a human-readable description of the forwarding package.
func (f *FwdPkg) String() string {
	return fmt.Sprintf("%T(src=%v, height=%v, nadds=%v, nfailsettles=%v)",
		f, f.Source, f.Height, len(f.Adds), len(f.SettleFails))
}

// AddRef is used to identify a particular Add in a FwdPkg. The short channel ID
// is assumed to be that of the packager.
type AddRef struct {
	// Height is the remote commitment height that locked in the Add.
	Height uint64

	// Index is the index of the Add within the fwd pkg's Adds.
	//
	// NOTE: This index is static over the lifetime of a forwarding package.
	Index uint16
}

// Encode serializes the AddRef to the given io.Writer.
func (a *AddRef) Encode(w io.Writer) error {
	if err := binary.Write(w, binary.BigEndian, a.Height); err != nil {
		return err
	}

	return binary.Write(w, binary.BigEndian, a.Index)
}

// Decode deserializes the AddRef from the given io.Reader.
func (a *AddRef) Decode(r io.Reader) error {
	if err := binary.Read(r, binary.BigEndian, &a.Height); err != nil {
		return err
	}

	return binary.Read(r, binary.BigEndian, &a.Index)
}

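// On the wire, an AddRef therefore occupies exactly 10 bytes: an 8-byte
// big-endian height followed by a 2-byte big-endian index.
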
// SettleFailRef is used to locate a Settle/Fail in another channel's FwdPkg. A
// channel does not remove its own Settle/Fail htlcs, so the source is provided
// to locate a db bucket belonging to another channel.
type SettleFailRef struct {
	// Source identifies the outgoing link that locked in the settle or
	// fail. This is then used by the *incoming* link to find the settle
	// fail in another link's forwarding packages.
	Source lnwire.ShortChannelID

	// Height is the remote commitment height that locked in this
	// Settle/Fail.
	Height uint64

	// Index is the index of the Settle/Fail within the fwd pkg's
	// SettleFails.
	//
	// NOTE: This index is static over the lifetime of a forwarding package.
	Index uint16
}

// SettleFailAcker is a generic interface providing the ability to acknowledge
// settle/fail HTLCs stored in forwarding packages.
type SettleFailAcker interface {
	// AckSettleFails atomically updates the settle-fail filters in *other*
	// channels' forwarding packages.
	AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error
}

// GlobalFwdPkgReader is an interface used to retrieve the forwarding packages
// of any active channel.
type GlobalFwdPkgReader interface {
	// LoadChannelFwdPkgs loads all known forwarding packages for the given
	// channel.
	LoadChannelFwdPkgs(tx kvdb.RwTx,
		source lnwire.ShortChannelID) ([]*FwdPkg, error)
}

// FwdOperator defines the interfaces for managing forwarding packages that are
// external to a particular channel. This interface is used by the switch to
// read forwarding packages from arbitrary channels, and acknowledge settles and
// fails for locally-sourced payments.
type FwdOperator interface {
	// GlobalFwdPkgReader provides read access to all known forwarding
	// packages.
	GlobalFwdPkgReader

	// SettleFailAcker grants the ability to acknowledge settles or fails
	// residing in arbitrary forwarding packages.
	SettleFailAcker
}

// SwitchPackager is a concrete implementation of the FwdOperator interface.
// A SwitchPackager offers the ability to read any forwarding package, and ack
// arbitrary settle and fail HTLCs.
type SwitchPackager struct{}

// NewSwitchPackager instantiates a new SwitchPackager.
func NewSwitchPackager() *SwitchPackager {
	return &SwitchPackager{}
}

// AckSettleFails atomically updates the settle-fail filters in *other*
// channels' forwarding packages, to mark that the switch has received a settle
// or fail residing in the forwarding package of a link.
func (*SwitchPackager) AckSettleFails(tx kvdb.RwTx,
	settleFailRefs ...SettleFailRef) error {

	return ackSettleFails(tx, settleFailRefs)
}

// LoadChannelFwdPkgs loads all forwarding packages for a particular channel.
func (*SwitchPackager) LoadChannelFwdPkgs(tx kvdb.RwTx,
	source lnwire.ShortChannelID) ([]*FwdPkg, error) {

	return loadChannelFwdPkgs(tx, source)
}

// FwdPackager supports all operations required to modify fwd packages, such as
// creation, updates, reading, and removal. The interfaces are broken down in
// this way to support future delegation of the subinterfaces.
type FwdPackager interface {
	// AddFwdPkg serializes and writes a FwdPkg for this channel at the
	// remote commitment height included in the forwarding package.
	AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) error

	// SetFwdFilter looks up the forwarding package at the remote `height`
	// and sets the `fwdFilter`, marking the Adds for which:
	// 1) We are not the exit node
	// 2) Passed all validation
	// 3) Should be forwarded to the switch immediately after a failure
	SetFwdFilter(tx kvdb.RwTx, height uint64, fwdFilter *PkgFilter) error

	// AckAddHtlcs atomically updates the add filters in this channel's
	// forwarding packages to mark the resolution of an Add that was
	// received from the remote party.
	AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) error

	// SettleFailAcker allows a link to acknowledge settle/fail HTLCs
	// belonging to other channels.
	SettleFailAcker

	// LoadFwdPkgs loads all known forwarding packages owned by this
	// channel.
	LoadFwdPkgs(tx kvdb.ReadTx) ([]*FwdPkg, error)

	// RemovePkg deletes a forwarding package owned by this channel at
	// the provided remote `height`.
	RemovePkg(tx kvdb.RwTx, height uint64) error
}

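// fwdPkgLifecycle is a minimal sketch, not part of the original file, walking
// a package through the FwdPackager interface in the order the comments above
// describe. It assumes an open kvdb.Backend `db` and a packager `p` built with
// NewChannelPackager; the refs and ordering are illustrative values only, as a
// real caller would only remove the package once every filter is full.
func fwdPkgLifecycle(db kvdb.Backend, p FwdPackager, pkg *FwdPkg) error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		// 1. Persist the package when the revocation locks it in.
		if err := p.AddFwdPkg(tx, pkg); err != nil {
			return err
		}

		// 2. Record which Adds we decided to forward to the switch.
		fwdFilter := NewPkgFilter(uint16(len(pkg.Adds)))
		if err := p.SetFwdFilter(tx, pkg.Height, fwdFilter); err != nil {
			return err
		}

		// 3. As responses come back, ack the individual Adds.
		ref := AddRef{Height: pkg.Height, Index: 0}
		if err := p.AckAddHtlcs(tx, ref); err != nil {
			return err
		}

		// 4. Once every filter is full, the package can be removed.
		return p.RemovePkg(tx, pkg.Height)
	})
}
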
// ChannelPackager is used by a channel to manage the lifecycle of its forwarding
// packages. The packager is tied to a particular source channel ID, allowing it
// to create and edit its own packages. Each packager also has the ability to
// remove fail/settle htlcs that correspond to an add contained in one of
// source's packages.
type ChannelPackager struct {
	source lnwire.ShortChannelID
}

// NewChannelPackager creates a new packager for a single channel.
func NewChannelPackager(source lnwire.ShortChannelID) *ChannelPackager {
	return &ChannelPackager{
		source: source,
	}
}

// AddFwdPkg writes a newly locked-in forwarding package to disk.
func (*ChannelPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) error {
	fwdPkgBkt, err := tx.CreateTopLevelBucket(fwdPackagesKey)
	if err != nil {
		return err
	}

	source := makeLogKey(fwdPkg.Source.ToUint64())
	sourceBkt, err := fwdPkgBkt.CreateBucketIfNotExists(source[:])
	if err != nil {
		return err
	}

	heightKey := makeLogKey(fwdPkg.Height)
	heightBkt, err := sourceBkt.CreateBucketIfNotExists(heightKey[:])
	if err != nil {
		return err
	}

	// Write ADD updates we received at this commit height.
	addBkt, err := heightBkt.CreateBucketIfNotExists(addBucketKey)
	if err != nil {
		return err
	}

	// Write SETTLE/FAIL updates we received at this commit height.
	failSettleBkt, err := heightBkt.CreateBucketIfNotExists(failSettleBucketKey)
	if err != nil {
		return err
	}

	for i := range fwdPkg.Adds {
		err = putLogUpdate(addBkt, uint16(i), &fwdPkg.Adds[i])
		if err != nil {
			return err
		}
	}

	// Persist the initialized pkg filter, which will be used to determine
	// when we can remove this forwarding package from disk.
	var ackFilterBuf bytes.Buffer
	if err := fwdPkg.AckFilter.Encode(&ackFilterBuf); err != nil {
		return err
	}

	if err := heightBkt.Put(ackFilterKey, ackFilterBuf.Bytes()); err != nil {
		return err
	}

	for i := range fwdPkg.SettleFails {
		err = putLogUpdate(failSettleBkt, uint16(i), &fwdPkg.SettleFails[i])
		if err != nil {
			return err
		}
	}

	var settleFailFilterBuf bytes.Buffer
	err = fwdPkg.SettleFailFilter.Encode(&settleFailFilterBuf)
	if err != nil {
		return err
	}

	return heightBkt.Put(settleFailFilterKey, settleFailFilterBuf.Bytes())
}

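// The resulting on-disk layout under the top-level fwdPackagesKey bucket is:
//
//	fwdPackagesKey/
//	    <source chanid>/
//	        <commit height>/
//	            addBucketKey/        -> indexed ADD LogUpdates
//	            failSettleBucketKey/ -> indexed SETTLE/FAIL LogUpdates
//	            ackFilterKey         -> serialized AckFilter
//	            settleFailFilterKey  -> serialized SettleFailFilter
//	            fwdFilterKey         -> serialized FwdFilter (written
//	                                    later by SetFwdFilter)
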
// putLogUpdate writes an htlc to the provided `bkt`, using `idx` as the key.
func putLogUpdate(bkt kvdb.RwBucket, idx uint16, htlc *LogUpdate) error {
	var b bytes.Buffer
	if err := htlc.Encode(&b); err != nil {
		return err
	}

	return bkt.Put(uint16Key(idx), b.Bytes())
}

// LoadFwdPkgs scans the forwarding log for any packages that haven't been
// processed, and returns the deserialized forwarding packages, ordered by the
// remote commitment height at which each package was locked in.
func (p *ChannelPackager) LoadFwdPkgs(tx kvdb.ReadTx) ([]*FwdPkg, error) {
	return loadChannelFwdPkgs(tx, p.source)
}

// loadChannelFwdPkgs loads all forwarding packages owned by `source`.
func loadChannelFwdPkgs(tx kvdb.ReadTx, source lnwire.ShortChannelID) ([]*FwdPkg, error) {
	fwdPkgBkt := tx.ReadBucket(fwdPackagesKey)
	if fwdPkgBkt == nil {
		return nil, nil
	}

	sourceKey := makeLogKey(source.ToUint64())
	sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
	if sourceBkt == nil {
		return nil, nil
	}

	var heights []uint64
	if err := sourceBkt.ForEach(func(k, _ []byte) error {
		if len(k) != 8 {
			return ErrCorruptedFwdPkg
		}

		heights = append(heights, byteOrder.Uint64(k))

		return nil
	}); err != nil {
		return nil, err
	}

	// Load the forwarding package for each retrieved height.
	fwdPkgs := make([]*FwdPkg, 0, len(heights))
	for _, height := range heights {
		fwdPkg, err := loadFwdPkg(fwdPkgBkt, source, height)
		if err != nil {
			return nil, err
		}

		fwdPkgs = append(fwdPkgs, fwdPkg)
	}

	return fwdPkgs, nil
}

// loadFwdPkg reads the packager's fwd pkg at a given height, and determines the
// appropriate FwdState.
func loadFwdPkg(fwdPkgBkt kvdb.ReadBucket, source lnwire.ShortChannelID,
	height uint64) (*FwdPkg, error) {

	sourceKey := makeLogKey(source.ToUint64())
	sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
	if sourceBkt == nil {
		return nil, ErrCorruptedFwdPkg
	}

	heightKey := makeLogKey(height)
	heightBkt := sourceBkt.NestedReadBucket(heightKey[:])
	if heightBkt == nil {
		return nil, ErrCorruptedFwdPkg
	}

	// Load ADDs from disk.
	addBkt := heightBkt.NestedReadBucket(addBucketKey)
	if addBkt == nil {
		return nil, ErrCorruptedFwdPkg
	}

	adds, err := loadHtlcs(addBkt)
	if err != nil {
		return nil, err
	}

	// Load ack filter from disk.
	ackFilterBytes := heightBkt.Get(ackFilterKey)
	if ackFilterBytes == nil {
		return nil, ErrCorruptedFwdPkg
	}
	ackFilterReader := bytes.NewReader(ackFilterBytes)

	ackFilter := &PkgFilter{}
	if err := ackFilter.Decode(ackFilterReader); err != nil {
		return nil, err
	}

	// Load SETTLE/FAILs from disk.
	failSettleBkt := heightBkt.NestedReadBucket(failSettleBucketKey)
	if failSettleBkt == nil {
		return nil, ErrCorruptedFwdPkg
	}

	failSettles, err := loadHtlcs(failSettleBkt)
	if err != nil {
		return nil, err
	}

	// Load settle fail filter from disk.
	settleFailFilterBytes := heightBkt.Get(settleFailFilterKey)
	if settleFailFilterBytes == nil {
		return nil, ErrCorruptedFwdPkg
	}
	settleFailFilterReader := bytes.NewReader(settleFailFilterBytes)

	settleFailFilter := &PkgFilter{}
	if err := settleFailFilter.Decode(settleFailFilterReader); err != nil {
		return nil, err
	}

	// Initialize the fwding package, which always starts in
	// FwdStateLockedIn. We can determine what state the package was left in
	// by examining constraints on the information loaded from disk.
	fwdPkg := &FwdPkg{
		Source:           source,
		State:            FwdStateLockedIn,
		Height:           height,
		Adds:             adds,
		AckFilter:        ackFilter,
		SettleFails:      failSettles,
		SettleFailFilter: settleFailFilter,
	}

	// Check to see if we have written the set of exported filter adds to
	// disk. If we haven't, processing of this package was never started, or
	// failed during the last attempt.
	fwdFilterBytes := heightBkt.Get(fwdFilterKey)
	if fwdFilterBytes == nil {
		nAdds := uint16(len(adds))
		fwdPkg.FwdFilter = NewPkgFilter(nAdds)
		return fwdPkg, nil
	}

	fwdFilterReader := bytes.NewReader(fwdFilterBytes)
	fwdPkg.FwdFilter = &PkgFilter{}
	if err := fwdPkg.FwdFilter.Decode(fwdFilterReader); err != nil {
		return nil, err
	}

	// Otherwise, a complete round of processing was finished, and we
	// advance the package to FwdStateProcessed.
	fwdPkg.State = FwdStateProcessed

	// If every add, settle, and fail has been fully acknowledged, we can
	// safely set the package's state to FwdStateCompleted, signalling that
	// it can be garbage collected.
	if fwdPkg.AckFilter.IsFull() && fwdPkg.SettleFailFilter.IsFull() {
		fwdPkg.State = FwdStateCompleted
	}

	return fwdPkg, nil
}

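// Summarizing the state reconstruction above:
//
//	fwd filter missing                  -> FwdStateLockedIn
//	fwd filter present                  -> FwdStateProcessed
//	ack + settle-fail filters both full -> FwdStateCompleted
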
// loadHtlcs retrieves all serialized htlcs in a bucket, returning
// them in order of the indexes they were written under.
func loadHtlcs(bkt kvdb.ReadBucket) ([]LogUpdate, error) {
	var htlcs []LogUpdate
	if err := bkt.ForEach(func(_, v []byte) error {
		var htlc LogUpdate
		if err := htlc.Decode(bytes.NewReader(v)); err != nil {
			return err
		}

		htlcs = append(htlcs, htlc)

		return nil
	}); err != nil {
		return nil, err
	}

	return htlcs, nil
}

// SetFwdFilter writes the set of indexes corresponding to Adds at the
// `height` that are to be forwarded to the switch. Calling this method causes
// the forwarding package at `height` to be in FwdStateProcessed. We write this
// forwarding decision so that we always arrive at the same behavior for HTLCs
// leaving this channel. After a restart, we skip validation of these Adds,
// since they are assumed to have already been validated, and make the switch or
// outgoing link responsible for handling replays.
func (p *ChannelPackager) SetFwdFilter(tx kvdb.RwTx, height uint64,
	fwdFilter *PkgFilter) error {

	fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
	if fwdPkgBkt == nil {
		return ErrCorruptedFwdPkg
	}

	source := makeLogKey(p.source.ToUint64())
	sourceBkt := fwdPkgBkt.NestedReadWriteBucket(source[:])
	if sourceBkt == nil {
		return ErrCorruptedFwdPkg
	}

	heightKey := makeLogKey(height)
	heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:])
	if heightBkt == nil {
		return ErrCorruptedFwdPkg
	}

	// If the fwd filter has already been written, we return early to avoid
	// modifying the persistent state.
	forwardedAddsBytes := heightBkt.Get(fwdFilterKey)
	if forwardedAddsBytes != nil {
		return nil
	}

	// Otherwise we serialize and write the provided fwd filter.
	var b bytes.Buffer
	if err := fwdFilter.Encode(&b); err != nil {
		return err
	}

	return heightBkt.Put(fwdFilterKey, b.Bytes())
}

// AckAddHtlcs accepts a list of references to add htlcs, and updates the
// AckFilter of those forwarding packages to indicate that a settle or fail
// has been received in response to the add.
func (p *ChannelPackager) AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) error {
	if len(addRefs) == 0 {
		return nil
	}

	fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
	if fwdPkgBkt == nil {
		return ErrCorruptedFwdPkg
	}

	sourceKey := makeLogKey(p.source.ToUint64())
	sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceKey[:])
	if sourceBkt == nil {
		return ErrCorruptedFwdPkg
	}

	// Organize the forward references such that we just get a single slice
	// of indexes for each unique height.
	heightDiffs := make(map[uint64][]uint16)
	for _, addRef := range addRefs {
		heightDiffs[addRef.Height] = append(
			heightDiffs[addRef.Height],
			addRef.Index,
		)
	}

	// Load each height bucket once and ack all htlcs referenced at that
	// height.
	for height, indexes := range heightDiffs {
		err := ackAddHtlcsAtHeight(sourceBkt, height, indexes)
		if err != nil {
			return err
		}
	}

	return nil
}

// ackAddHtlcsAtHeight updates the AckFilter of a single forwarding package
// with a list of indexes, writing the resulting filter back in its place.
func ackAddHtlcsAtHeight(sourceBkt kvdb.RwBucket, height uint64,
	indexes []uint16) error {

	heightKey := makeLogKey(height)
	heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:])
	if heightBkt == nil {
		// If the height bucket isn't found, this could be because the
		// forwarding package was already removed. We'll return nil to
		// signal that the operation is successful, as there is nothing
		// to ack.
		return nil
	}

	// Load ack filter from disk.
	ackFilterBytes := heightBkt.Get(ackFilterKey)
	if ackFilterBytes == nil {
		return ErrCorruptedFwdPkg
	}

	ackFilter := &PkgFilter{}
	ackFilterReader := bytes.NewReader(ackFilterBytes)
	if err := ackFilter.Decode(ackFilterReader); err != nil {
		return err
	}

	// Update the ack filter for this height.
	for _, index := range indexes {
		ackFilter.Set(index)
	}

	// Write the resulting filter to disk.
	var ackFilterBuf bytes.Buffer
	if err := ackFilter.Encode(&ackFilterBuf); err != nil {
		return err
	}

	return heightBkt.Put(ackFilterKey, ackFilterBuf.Bytes())
}

// AckSettleFails persistently acknowledges settles or fails from a remote
// forwarding package. This should only be called after the source of the Add
// has locked in the settle/fail, or it becomes otherwise safe to forgo
// retransmitting the settle/fail after a restart.
func (p *ChannelPackager) AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error {
	return ackSettleFails(tx, settleFailRefs)
}

// ackSettleFails persistently acknowledges a batch of settle fail references.
func ackSettleFails(tx kvdb.RwTx, settleFailRefs []SettleFailRef) error {
	if len(settleFailRefs) == 0 {
		return nil
	}

	fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
	if fwdPkgBkt == nil {
		return ErrCorruptedFwdPkg
	}

	// Organize the forward references such that we just get a single slice
	// of indexes for each unique destination-height pair.
	destHeightDiffs := make(map[lnwire.ShortChannelID]map[uint64][]uint16)
	for _, settleFailRef := range settleFailRefs {
		destHeights, ok := destHeightDiffs[settleFailRef.Source]
		if !ok {
			destHeights = make(map[uint64][]uint16)
			destHeightDiffs[settleFailRef.Source] = destHeights
		}

		destHeights[settleFailRef.Height] = append(
			destHeights[settleFailRef.Height],
			settleFailRef.Index,
		)
	}

	// With the references organized by destination and height, we now load
	// each remote bucket, and update the settle fail filter for any
	// settle/fail htlcs.
	for dest, destHeights := range destHeightDiffs {
		destKey := makeLogKey(dest.ToUint64())
		destBkt := fwdPkgBkt.NestedReadWriteBucket(destKey[:])
		if destBkt == nil {
			// If the destination bucket is not found, this is
			// likely the result of the destination channel being
			// closed and having its forwarding packages wiped. We
			// won't treat this as an error, because the response
			// will no longer be retransmitted internally.
			continue
		}

		for height, indexes := range destHeights {
			err := ackSettleFailsAtHeight(destBkt, height, indexes)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// ackSettleFailsAtHeight, given a destination bucket, acks the provided indexes
// at a particular height by updating the settle fail filter.
func ackSettleFailsAtHeight(destBkt kvdb.RwBucket, height uint64,
	indexes []uint16) error {

	heightKey := makeLogKey(height)
	heightBkt := destBkt.NestedReadWriteBucket(heightKey[:])
	if heightBkt == nil {
		// If the height bucket isn't found, this could be because the
		// forwarding package was already removed. We'll return nil to
		// signal that the operation is successful, as there is nothing
		// to ack.
		return nil
	}

	// Load the settle fail filter from disk.
	settleFailFilterBytes := heightBkt.Get(settleFailFilterKey)
	if settleFailFilterBytes == nil {
		return ErrCorruptedFwdPkg
	}

	settleFailFilter := &PkgFilter{}
	settleFailFilterReader := bytes.NewReader(settleFailFilterBytes)
	if err := settleFailFilter.Decode(settleFailFilterReader); err != nil {
		return err
	}

	// Update the settle fail filter for this height.
	for _, index := range indexes {
		settleFailFilter.Set(index)
	}

	// Write the resulting filter to disk.
	var settleFailFilterBuf bytes.Buffer
	if err := settleFailFilter.Encode(&settleFailFilterBuf); err != nil {
		return err
	}

	return heightBkt.Put(settleFailFilterKey, settleFailFilterBuf.Bytes())
}

// RemovePkg deletes the forwarding package at the given height from the
// packager's source bucket.
func (p *ChannelPackager) RemovePkg(tx kvdb.RwTx, height uint64) error {
	fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
	if fwdPkgBkt == nil {
		return nil
	}

	sourceBytes := makeLogKey(p.source.ToUint64())
	sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceBytes[:])
	if sourceBkt == nil {
		return ErrCorruptedFwdPkg
	}

	heightKey := makeLogKey(height)

	return sourceBkt.DeleteNestedBucket(heightKey[:])
}

// uint16Key writes the provided 16-bit unsigned integer to a 2-byte slice.
func uint16Key(i uint16) []byte {
	key := make([]byte, 2)
	byteOrder.PutUint16(key, i)
	return key
}

// Compile-time constraint to ensure that ChannelPackager implements the public
// FwdPackager interface.
var _ FwdPackager = (*ChannelPackager)(nil)

// Compile-time constraint to ensure that SwitchPackager implements the public
// FwdOperator interface.
var _ FwdOperator = (*SwitchPackager)(nil)

4077
vendor/github.com/lightningnetwork/lnd/channeldb/graph.go
generated
vendored
Normal file
File diff suppressed because it is too large
1597
vendor/github.com/lightningnetwork/lnd/channeldb/invoices.go
generated
vendored
Normal file
File diff suppressed because it is too large
10
vendor/github.com/lightningnetwork/lnd/channeldb/kvdb/bbolt.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
package kvdb

import (
	_ "github.com/btcsuite/btcwallet/walletdb/bdb" // Import to register backend.
)

// BoltBackendName is the name of the backend that should be passed into
// kvdb.Create to initialize a new instance of kvdb.Backend backed by a live
// instance of bbolt.
const BoltBackendName = "bdb"

90
vendor/github.com/lightningnetwork/lnd/channeldb/kvdb/interface.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
package kvdb

import (
	"github.com/btcsuite/btcwallet/walletdb"
	_ "github.com/btcsuite/btcwallet/walletdb/bdb" // Import to register backend.
)

// Update opens a database read/write transaction and executes the function f
// with the transaction passed as a parameter. After f exits, if f did not
// error, the transaction is committed. Otherwise, if f did error, the
// transaction is rolled back. If the rollback fails, the original error
// returned by f is still returned. If the commit fails, the commit error is
// returned.
var Update = walletdb.Update

// View opens a database read transaction and executes the function f with the
// transaction passed as a parameter. After f exits, the transaction is rolled
// back. If f errors, its error is returned, not a rollback error (if any
// occur).
var View = walletdb.View

// Batch is identical to the Update call, but it attempts to combine several
// individual Update transactions into a single write database transaction on
// an optimistic basis. This only has benefits if multiple goroutines call
// Batch.
var Batch = walletdb.Batch

// Create initializes and opens a database for the specified type. The
// arguments are specific to the database type driver. See the documentation
// for the database driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
var Create = walletdb.Create

// Backend represents an ACID database. All database access is performed
// through read or read+write transactions.
type Backend = walletdb.DB

// Open opens an existing database for the specified type. The arguments are
// specific to the database type driver. See the documentation for the database
// driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
var Open = walletdb.Open

// Driver defines a structure for backend drivers to use when they register
// themselves as a backend which implements the Backend interface.
type Driver = walletdb.Driver

// ReadBucket represents a bucket (a hierarchical structure within the
// database) that is only allowed to perform read operations.
type ReadBucket = walletdb.ReadBucket

// ReadCursor represents a bucket cursor that can be positioned at the start or
// end of the bucket's key/value pairs and iterate over pairs in the bucket.
// This type is only allowed to perform database read operations.
type ReadCursor = walletdb.ReadCursor

// ReadTx represents a database transaction that can only be used for reads. If
// a database update must occur, use a RwTx.
type ReadTx = walletdb.ReadTx

// RwBucket represents a bucket (a hierarchical structure within the database)
// that is allowed to perform both read and write operations.
type RwBucket = walletdb.ReadWriteBucket

// RwCursor represents a bucket cursor that can be positioned at the start or
// end of the bucket's key/value pairs and iterate over pairs in the bucket.
// This abstraction is allowed to perform both database read and write
// operations.
type RwCursor = walletdb.ReadWriteCursor

// RwTx represents a database transaction that can be used for both
// reads and writes. When only reads are necessary, consider using a ReadTx
// instead.
type RwTx = walletdb.ReadWriteTx

var (
	// ErrBucketNotFound is returned when trying to access a bucket that
	// has not been created yet.
	ErrBucketNotFound = walletdb.ErrBucketNotFound

	// ErrBucketExists is returned when creating a bucket that already
	// exists.
	ErrBucketExists = walletdb.ErrBucketExists

	// ErrDatabaseNotOpen is returned when a database instance is accessed
	// before it is opened or after it is closed.
	ErrDatabaseNotOpen = walletdb.ErrDbNotOpen
)

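// kvdbExample is a minimal sketch, not part of the original file, showing the
// intended use of the aliases above: one write transaction that creates a
// top-level bucket and stores a value, then one read transaction that fetches
// it. The bucket and key names are illustrative only.
func kvdbExample(db Backend) error {
	bucketKey := []byte("example-bucket")

	err := Update(db, func(tx RwTx) error {
		bkt, err := tx.CreateTopLevelBucket(bucketKey)
		if err != nil {
			return err
		}
		return bkt.Put([]byte("k"), []byte("v"))
	})
	if err != nil {
		return err
	}

	return View(db, func(tx ReadTx) error {
		bkt := tx.ReadBucket(bucketKey)
		if bkt == nil {
			return ErrBucketNotFound
		}
		_ = bkt.Get([]byte("k")) // "v"
		return nil
	})
}
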
55
vendor/github.com/lightningnetwork/lnd/channeldb/legacy_serialization.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
package channeldb

import (
	"io"
)

// deserializeCloseChannelSummaryV6 reads the v6 database format for
// ChannelCloseSummary.
//
// NOTE: deprecated, only for migration.
func deserializeCloseChannelSummaryV6(r io.Reader) (*ChannelCloseSummary, error) {
	c := &ChannelCloseSummary{}

	err := ReadElements(r,
		&c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
		&c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
		&c.TimeLockedBalance, &c.CloseType, &c.IsPending,
	)
	if err != nil {
		return nil, err
	}

	// We'll now check to see if the channel close summary was encoded with
	// any of the additional optional fields.
	err = ReadElements(r, &c.RemoteCurrentRevocation)
	switch {
	case err == io.EOF:
		return c, nil

	// If we got a non-EOF error, then we know there's an actual issue.
	// Otherwise, it may have been the case that this summary didn't have
	// the set of optional fields.
	case err != nil:
		return nil, err
	}

	if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
		return nil, err
	}

	// Finally, we'll attempt to read the next unrevoked commitment point
	// for the remote party. If we closed the channel before receiving a
	// funding locked message, then this can be nil. As a result, we'll use
	// the same technique to read the field, only if there's still data
	// left in the buffer.
	err = ReadElements(r, &c.RemoteNextRevocation)
	if err != nil && err != io.EOF {
		// If we got a non-EOF error, then we know there's an actual
		// issue. Otherwise, it may have been the case that this
		// summary didn't have the set of optional fields.
		return nil, err
	}

	return c, nil
}

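// The function above relies on a recurring pattern in this package: optional
// trailing fields are detected by attempting another read and treating a
// clean io.EOF as "field absent". A minimal sketch of the pattern, with
// readValue standing in as a hypothetical decoder:
//
//	err := readValue(r, &optionalField)
//	switch {
//	case err == io.EOF:
//		// Old serialization: the optional field was never written.
//		return summary, nil
//	case err != nil:
//		// A genuine decoding failure.
//		return nil, err
//	}
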
34
vendor/github.com/lightningnetwork/lnd/channeldb/log.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
package channeldb

import (
	"github.com/btcsuite/btclog"
	"github.com/lightningnetwork/lnd/build"
	"github.com/lightningnetwork/lnd/channeldb/migration12"
	"github.com/lightningnetwork/lnd/channeldb/migration13"
	"github.com/lightningnetwork/lnd/channeldb/migration_01_to_11"
)

// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger

func init() {
	UseLogger(build.NewSubLogger("CHDB", nil))
}

// DisableLog disables all library log output. Logging output is disabled
// by default until UseLogger is called.
func DisableLog() {
	UseLogger(btclog.Disabled)
}

// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
	log = logger
	migration_01_to_11.UseLogger(logger)
	migration12.UseLogger(logger)
	migration13.UseLogger(logger)
}

80
vendor/github.com/lightningnetwork/lnd/channeldb/meta.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
package channeldb

import (
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var (
	// metaBucket stores all the meta information concerning the state of
	// the database.
	metaBucket = []byte("metadata")

	// dbVersionKey is a boltdb key used for storing/retrieving the
	// current database version.
	dbVersionKey = []byte("dbp")
)

// Meta structure holds the database meta information.
type Meta struct {
	// DbVersionNumber is the current schema version of the database.
	DbVersionNumber uint32
}

// FetchMeta fetches the meta data from boltdb and returns a filled meta
// structure.
func (d *DB) FetchMeta(tx kvdb.ReadTx) (*Meta, error) {
	meta := &Meta{}

	err := kvdb.View(d, func(tx kvdb.ReadTx) error {
		return fetchMeta(meta, tx)
	})
	if err != nil {
		return nil, err
	}

	return meta, nil
}

// fetchMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported FetchMeta method
// for more information.
func fetchMeta(meta *Meta, tx kvdb.ReadTx) error {
	metaBucket := tx.ReadBucket(metaBucket)
	if metaBucket == nil {
		return ErrMetaNotFound
	}

	data := metaBucket.Get(dbVersionKey)
	if data == nil {
		meta.DbVersionNumber = getLatestDBVersion(dbVersions)
	} else {
		meta.DbVersionNumber = byteOrder.Uint32(data)
	}

	return nil
}

// PutMeta writes the passed instance of the database meta-data struct to disk.
func (d *DB) PutMeta(meta *Meta) error {
	return kvdb.Update(d, func(tx kvdb.RwTx) error {
		return putMeta(meta, tx)
	})
}

// putMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported PutMeta method for
// more information.
func putMeta(meta *Meta, tx kvdb.RwTx) error {
	metaBucket, err := tx.CreateTopLevelBucket(metaBucket)
	if err != nil {
		return err
	}

	return putDbVersion(metaBucket, meta)
}

func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) error {
	scratch := make([]byte, 4)
	byteOrder.PutUint32(scratch, meta.DbVersionNumber)
	return metaBucket.Put(dbVersionKey, scratch)
}

318
vendor/github.com/lightningnetwork/lnd/channeldb/migration12/invoices.go
generated
vendored
Normal file
@@ -0,0 +1,318 @@
package migration12

import (
	"bytes"
	"encoding/binary"
	"io"
	"time"

	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/tlv"
)

const (
	// MaxMemoSize is the maximum size of the memo field within invoices
	// stored in the database.
	MaxMemoSize = 1024

	// maxReceiptSize is the maximum size of the payment receipt stored
	// within the database alongside incoming/outgoing invoices.
	maxReceiptSize = 1024

	// MaxPaymentRequestSize is the max size of a payment request for
	// this invoice.
	// TODO(halseth): determine the max length payment request when field
	// lengths are final.
	MaxPaymentRequestSize = 4096

	memoType        tlv.Type = 0
	payReqType      tlv.Type = 1
	createTimeType  tlv.Type = 2
	settleTimeType  tlv.Type = 3
	addIndexType    tlv.Type = 4
	settleIndexType tlv.Type = 5
	preimageType    tlv.Type = 6
	valueType       tlv.Type = 7
	cltvDeltaType   tlv.Type = 8
	expiryType      tlv.Type = 9
	paymentAddrType tlv.Type = 10
	featuresType    tlv.Type = 11
	invStateType    tlv.Type = 12
	amtPaidType     tlv.Type = 13
)

var (
	// invoiceBucket is the name of the bucket within the database that
	// stores all data related to invoices no matter their final state.
	// Within the invoice bucket, each invoice is keyed by its invoice ID
	// which is a monotonically increasing uint32.
	invoiceBucket = []byte("invoices")

	// Big endian is the preferred byte order, due to cursor scans over
	// integer keys iterating in order.
	byteOrder = binary.BigEndian
)

// ContractState describes the state the invoice is in.
type ContractState uint8

// ContractTerm is a companion struct to the Invoice struct. This struct houses
// the necessary conditions required before the invoice can be considered fully
// settled by the payee.
type ContractTerm struct {
	// PaymentPreimage is the preimage which is to be revealed in the
	// occasion that an HTLC paying to the hash of this preimage is
	// extended.
	PaymentPreimage lntypes.Preimage

	// Value is the expected amount of milli-satoshis to be paid to an HTLC
	// which can be satisfied by the above preimage.
	Value lnwire.MilliSatoshi

	// State describes the state the invoice is in.
	State ContractState

	// PaymentAddr is a randomly generated value included in the MPP record
	// by the sender to prevent probing of the receiver.
	PaymentAddr [32]byte

	// Features is the feature vectors advertised on the payment request.
	Features *lnwire.FeatureVector
}

// Invoice is a payment invoice generated by a payee in order to request
// payment for some good or service. The inclusion of invoices within Lightning
// creates a payment workflow for merchants very similar to that of the
// existing financial system within PayPal, etc. Invoices are added to the
// database when a payment is requested, then can be settled manually once the
// payment is received at the upper layer. For record keeping purposes,
// invoices are never deleted from the database, instead a bit is toggled
// denoting the invoice has been fully settled. Within the database, all
// invoices must have a unique payment hash which is generated by taking the
// sha256 of the payment preimage.
type Invoice struct {
	// Memo is an optional memo to be stored alongside an invoice. The
	// memo may contain further details pertaining to the invoice itself,
	// or any other message which fits within the size constraints.
	Memo []byte

	// PaymentRequest is an optional field where a payment request created
	// for this invoice can be stored.
	PaymentRequest []byte

	// FinalCltvDelta is the minimum required number of blocks before htlc
	// expiry when the invoice is accepted.
	FinalCltvDelta int32

	// Expiry defines how long after creation this invoice should expire.
	Expiry time.Duration

	// CreationDate is the exact time the invoice was created.
	CreationDate time.Time

	// SettleDate is the exact time the invoice was settled.
	SettleDate time.Time

	// Terms are the contractual payment terms of the invoice. Once all the
	// terms have been satisfied by the payer, then the invoice can be
	// considered fully fulfilled.
	//
	// TODO(roasbeef): later allow for multiple terms to fulfill the final
	// invoice: payment fragmentation, etc.
	Terms ContractTerm

	// AddIndex is an auto-incrementing integer that acts as a
	// monotonically increasing sequence number for all invoices created.
	// Clients can then use this field as a "checkpoint" of sorts when
	// implementing a streaming RPC to notify consumers of instances where
	// an invoice has been added before they re-connected.
	//
	// NOTE: This index starts at 1.
	AddIndex uint64

	// SettleIndex is an auto-incrementing integer that acts as a
	// monotonically increasing sequence number for all settled invoices.
	// Clients can then use this field as a "checkpoint" of sorts when
	// implementing a streaming RPC to notify consumers of instances where
	// an invoice has been settled before they re-connected.
	//
	// NOTE: This index starts at 1.
	SettleIndex uint64

	// AmtPaid is the final amount that we ultimately accepted as payment
	// for this invoice. We specify this value independently as it's
	// possible that the invoice originally didn't specify an amount, or
	// the sender overpaid.
	AmtPaid lnwire.MilliSatoshi

	// Htlcs records all htlcs that paid to this invoice. Some of these
	// htlcs may have been marked as canceled.
	Htlcs []byte
}

// LegacyDeserializeInvoice decodes an invoice from the passed io.Reader using
// the pre-TLV serialization.
func LegacyDeserializeInvoice(r io.Reader) (Invoice, error) {
	var err error
	invoice := Invoice{}

	// TODO(roasbeef): use read full everywhere
	invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "")
	if err != nil {
		return invoice, err
	}
	_, err = wire.ReadVarBytes(r, 0, maxReceiptSize, "")
	if err != nil {
		return invoice, err
	}

	invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "")
	if err != nil {
		return invoice, err
	}

	if err := binary.Read(r, byteOrder, &invoice.FinalCltvDelta); err != nil {
		return invoice, err
	}

	var expiry int64
	if err := binary.Read(r, byteOrder, &expiry); err != nil {
		return invoice, err
	}
	invoice.Expiry = time.Duration(expiry)

	birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth")
	if err != nil {
		return invoice, err
	}
	if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil {
		return invoice, err
	}

	settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled")
	if err != nil {
		return invoice, err
	}
	if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil {
		return invoice, err
	}

	if _, err := io.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil {
		return invoice, err
	}
	var scratch [8]byte
	if _, err := io.ReadFull(r, scratch[:]); err != nil {
		return invoice, err
	}
	invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))

	if err := binary.Read(r, byteOrder, &invoice.Terms.State); err != nil {
		return invoice, err
	}

	if err := binary.Read(r, byteOrder, &invoice.AddIndex); err != nil {
		return invoice, err
	}
	if err := binary.Read(r, byteOrder, &invoice.SettleIndex); err != nil {
		return invoice, err
	}
	if err := binary.Read(r, byteOrder, &invoice.AmtPaid); err != nil {
		return invoice, err
	}

	invoice.Htlcs, err = deserializeHtlcs(r)
	if err != nil {
		return Invoice{}, err
	}

	return invoice, nil
}

// deserializeHtlcs reads a list of invoice htlcs from a reader and returns it
// as a flattened byte slice.
func deserializeHtlcs(r io.Reader) ([]byte, error) {
	var b bytes.Buffer
	_, err := io.Copy(&b, r)
	return b.Bytes(), err
}

// SerializeInvoice serializes an invoice to a writer.
//
// nolint: dupl
func SerializeInvoice(w io.Writer, i *Invoice) error {
	creationDateBytes, err := i.CreationDate.MarshalBinary()
	if err != nil {
		return err
	}

	settleDateBytes, err := i.SettleDate.MarshalBinary()
	if err != nil {
		return err
	}

	var fb bytes.Buffer
	err = i.Terms.Features.EncodeBase256(&fb)
	if err != nil {
		return err
	}
	featureBytes := fb.Bytes()

	preimage := [32]byte(i.Terms.PaymentPreimage)
	value := uint64(i.Terms.Value)
	cltvDelta := uint32(i.FinalCltvDelta)
	expiry := uint64(i.Expiry)

	amtPaid := uint64(i.AmtPaid)
	state := uint8(i.Terms.State)

	tlvStream, err := tlv.NewStream(
		// Memo and payreq.
		tlv.MakePrimitiveRecord(memoType, &i.Memo),
		tlv.MakePrimitiveRecord(payReqType, &i.PaymentRequest),

		// Add/settle metadata.
		tlv.MakePrimitiveRecord(createTimeType, &creationDateBytes),
		tlv.MakePrimitiveRecord(settleTimeType, &settleDateBytes),
		tlv.MakePrimitiveRecord(addIndexType, &i.AddIndex),
		tlv.MakePrimitiveRecord(settleIndexType, &i.SettleIndex),

		// Terms.
		tlv.MakePrimitiveRecord(preimageType, &preimage),
		tlv.MakePrimitiveRecord(valueType, &value),
		tlv.MakePrimitiveRecord(cltvDeltaType, &cltvDelta),
		tlv.MakePrimitiveRecord(expiryType, &expiry),
		tlv.MakePrimitiveRecord(paymentAddrType, &i.Terms.PaymentAddr),
		tlv.MakePrimitiveRecord(featuresType, &featureBytes),

		// Invoice state.
		tlv.MakePrimitiveRecord(invStateType, &state),
		tlv.MakePrimitiveRecord(amtPaidType, &amtPaid),
	)
	if err != nil {
		return err
	}

	var b bytes.Buffer
	if err = tlvStream.Encode(&b); err != nil {
		return err
	}

	err = binary.Write(w, byteOrder, uint64(b.Len()))
	if err != nil {
		return err
	}

	if _, err = w.Write(b.Bytes()); err != nil {
		return err
	}

	return serializeHtlcs(w, i.Htlcs)
}

// serializeHtlcs writes a serialized list of invoice htlcs into a writer.
func serializeHtlcs(w io.Writer, htlcs []byte) error {
	_, err := w.Write(htlcs)
	return err
}

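// The serialization above is a length-prefixed TLV stream followed by the raw
// htlc bytes. A minimal sketch of the matching read side, not part of the
// original file and assuming records mirroring those in SerializeInvoice:
//
//	var bodyLen uint64
//	if err := binary.Read(r, byteOrder, &bodyLen); err != nil {
//		return err
//	}
//
//	// Decode only the TLV body; everything after it is the htlc blob.
//	body := io.LimitReader(r, int64(bodyLen))
//	tlvStream, err := tlv.NewStream(
//		tlv.MakePrimitiveRecord(memoType, &memo),
//		// ... remaining records in the same order ...
//	)
//	if err != nil {
//		return err
//	}
//	if err := tlvStream.Decode(body); err != nil {
//		return err
//	}
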
14
vendor/github.com/lightningnetwork/lnd/channeldb/migration12/log.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
package migration12

import (
	"github.com/btcsuite/btclog"
)

// log is a logger that is initialized as disabled. This means the package will
// not perform any logging by default until a logger is set.
var log = btclog.Disabled

// UseLogger uses a specified Logger to output package logging info.
func UseLogger(logger btclog.Logger) {
	log = logger
}

74
vendor/github.com/lightningnetwork/lnd/channeldb/migration12/migration.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
package migration12

import (
	"bytes"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lnwire"
)

var emptyFeatures = lnwire.NewFeatureVector(nil, nil)

// MigrateInvoiceTLV migrates all existing invoice bodies over to be serialized
// in a single TLV stream. In the process, we drop the Receipt field and add
// PaymentAddr and Features to the invoice Terms.
func MigrateInvoiceTLV(tx kvdb.RwTx) error {
	log.Infof("Migrating invoice bodies to TLV, " +
		"adding payment addresses and feature vectors.")

	invoiceB := tx.ReadWriteBucket(invoiceBucket)
	if invoiceB == nil {
		return nil
	}

	type keyedInvoice struct {
		key     []byte
		invoice Invoice
	}

	// Read in all existing invoices using the old format.
	var invoices []keyedInvoice
	err := invoiceB.ForEach(func(k, v []byte) error {
		if v == nil {
			return nil
		}

		invoiceReader := bytes.NewReader(v)
		invoice, err := LegacyDeserializeInvoice(invoiceReader)
		if err != nil {
			return err
		}

		// Insert an empty feature vector on all old payments.
		invoice.Terms.Features = emptyFeatures

		invoices = append(invoices, keyedInvoice{
			key:     k,
			invoice: invoice,
		})

		return nil
	})
	if err != nil {
		return err
	}

	// Write out each one under its original key using TLV.
	for _, ki := range invoices {
		var b bytes.Buffer
		err = SerializeInvoice(&b, &ki.invoice)
		if err != nil {
			return err
		}

		err = invoiceB.Put(ki.key, b.Bytes())
		if err != nil {
			return err
		}
	}

	log.Infof("Migration to TLV invoice bodies, " +
		"payment address, and features complete!")

	return nil
}

14
vendor/github.com/lightningnetwork/lnd/channeldb/migration13/log.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
package migration13

import (
	"github.com/btcsuite/btclog"
)

// log is a logger that is initialized as disabled. This means the package will
// not perform any logging by default until a logger is set.
var log = btclog.Disabled

// UseLogger uses a specified Logger to output package logging info.
func UseLogger(logger btclog.Logger) {
	log = logger
}

202
vendor/github.com/lightningnetwork/lnd/channeldb/migration13/migration.go
generated
vendored
Normal file
@@ -0,0 +1,202 @@
package migration13

import (
	"encoding/binary"
	"fmt"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var (
	paymentsRootBucket = []byte("payments-root-bucket")

	// paymentCreationInfoKey is a key used in the payment's sub-bucket to
	// store the creation info of the payment.
	paymentCreationInfoKey = []byte("payment-creation-info")

	// paymentFailInfoKey is a key used in the payment's sub-bucket to
	// store information about the reason a payment failed.
	paymentFailInfoKey = []byte("payment-fail-info")

	// paymentAttemptInfoKey is a key used in the payment's sub-bucket to
	// store the info about the latest attempt that was done for the
	// payment in question.
	paymentAttemptInfoKey = []byte("payment-attempt-info")

	// paymentSettleInfoKey is a key used in the payment's sub-bucket to
	// store the settle info of the payment.
	paymentSettleInfoKey = []byte("payment-settle-info")

	// paymentHtlcsBucket is a bucket where we'll store the information
	// about the HTLCs that were attempted for a payment.
	paymentHtlcsBucket = []byte("payment-htlcs-bucket")

	// htlcAttemptInfoKey is a key used in an HTLC's sub-bucket to store
	// the info about the attempt that was done for the HTLC in question.
	htlcAttemptInfoKey = []byte("htlc-attempt-info")

	// htlcSettleInfoKey is a key used in an HTLC's sub-bucket to store the
	// settle info, if any.
	htlcSettleInfoKey = []byte("htlc-settle-info")

	// htlcFailInfoKey is a key used in an HTLC's sub-bucket to store
	// failure information, if any.
	htlcFailInfoKey = []byte("htlc-fail-info")

	byteOrder = binary.BigEndian
)

// MigrateMPP migrates the payments to a new structure that accommodates MPP
// payments.
func MigrateMPP(tx kvdb.RwTx) error {
	log.Infof("Migrating payments to mpp structure")

	// Iterate over all payments and store their indexing keys. This is
	// needed, because no modifications are allowed inside a Bucket.ForEach
	// loop.
	paymentsBucket := tx.ReadWriteBucket(paymentsRootBucket)
	if paymentsBucket == nil {
		return nil
	}

	var paymentKeys [][]byte
	err := paymentsBucket.ForEach(func(k, v []byte) error {
		paymentKeys = append(paymentKeys, k)
		return nil
	})
	if err != nil {
		return err
	}

	// With all keys retrieved, start the migration.
	for _, k := range paymentKeys {
		bucket := paymentsBucket.NestedReadWriteBucket(k)

		// We only expect sub-buckets to be found in
		// this top-level bucket.
		if bucket == nil {
			return fmt.Errorf("non bucket element in " +
				"payments bucket")
		}

		// Fetch old format creation info.
		creationInfo := bucket.Get(paymentCreationInfoKey)
		if creationInfo == nil {
			return fmt.Errorf("creation info not found")
		}

		// Make a copy because bbolt doesn't allow this value to be
		// changed in-place.
		newCreationInfo := make([]byte, len(creationInfo))
		copy(newCreationInfo, creationInfo)

// Convert to nano seconds.
|
||||
timeBytes := newCreationInfo[32+8 : 32+8+8]
|
||||
time := byteOrder.Uint64(timeBytes)
|
||||
timeNs := time * 1000000000
|
||||
byteOrder.PutUint64(timeBytes, timeNs)
|
||||
|
||||
// Write back new format creation info.
|
||||
err := bucket.Put(paymentCreationInfoKey, newCreationInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// No migration needed if there is no attempt stored.
|
||||
attemptInfo := bucket.Get(paymentAttemptInfoKey)
|
||||
if attemptInfo == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Delete attempt info on the payment level.
|
||||
if err := bucket.Delete(paymentAttemptInfoKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save attempt id for later use.
|
||||
attemptID := attemptInfo[:8]
|
||||
|
||||
// Discard attempt id. It will become a bucket key in the new
|
||||
// structure.
|
||||
attemptInfo = attemptInfo[8:]
|
||||
|
||||
// Append unknown (zero) attempt time.
|
||||
var zero [8]byte
|
||||
attemptInfo = append(attemptInfo, zero[:]...)
|
||||
|
||||
// Create bucket that contains all htlcs.
|
||||
htlcsBucket, err := bucket.CreateBucket(paymentHtlcsBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create an htlc for this attempt.
|
||||
htlcBucket, err := htlcsBucket.CreateBucket(attemptID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save migrated attempt info.
|
||||
err = htlcBucket.Put(htlcAttemptInfoKey, attemptInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Migrate settle info.
|
||||
settleInfo := bucket.Get(paymentSettleInfoKey)
|
||||
if settleInfo != nil {
|
||||
// Payment-level settle info can be deleted.
|
||||
err := bucket.Delete(paymentSettleInfoKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Append unknown (zero) settle time.
|
||||
settleInfo = append(settleInfo, zero[:]...)
|
||||
|
||||
// Save settle info.
|
||||
err = htlcBucket.Put(htlcSettleInfoKey, settleInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Migration for settled htlc completed.
|
||||
continue
|
||||
}
|
||||
|
||||
// If there is no payment-level failure reason, the payment is
|
||||
// still in flight and nothing else needs to be migrated.
|
||||
// Otherwise the payment-level failure reason can remain
|
||||
// unchanged.
|
||||
inFlight := bucket.Get(paymentFailInfoKey) == nil
|
||||
if inFlight {
|
||||
continue
|
||||
}
|
||||
|
||||
// The htlc failed. Add htlc fail info with reason unknown. We
|
||||
// don't have access to the original failure reason anymore.
|
||||
failInfo := []byte{
|
||||
// Fail time unknown.
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
|
||||
// Zero length wire message.
|
||||
0,
|
||||
|
||||
// Failure reason unknown.
|
||||
0,
|
||||
|
||||
// Failure source index zero.
|
||||
0, 0, 0, 0,
|
||||
}
|
||||
|
||||
// Save fail info.
|
||||
err = htlcBucket.Put(htlcFailInfoKey, failInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Migration of payments to mpp structure complete!")
|
||||
|
||||
return nil
|
||||
}
|
||||
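Migrations in this tree run inside a single read-write transaction, so a failure leaves the database untouched. A minimal sketch of driving MigrateMPP through the kvdb.Update wrapper used elsewhere in this vendor tree (db stands for an assumed, already-open kvdb.Backend):

	// Apply the MPP migration atomically: if MigrateMPP returns an
	// error, the transaction is rolled back and the old payment
	// structure remains intact.
	err := kvdb.Update(db, func(tx kvdb.RwTx) error {
		return migration13.MigrateMPP(tx)
	})
	if err != nil {
		log.Errorf("unable to migrate payments: %v", err)
	}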
221
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/addr.go
generated
vendored
Normal file
@@ -0,0 +1,221 @@
package migration_01_to_11

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"net"

	"github.com/lightningnetwork/lnd/tor"
)

// addressType specifies the network protocol and version that should be used
// when connecting to a node at a particular address.
type addressType uint8

const (
	// tcp4Addr denotes an IPv4 TCP address.
	tcp4Addr addressType = 0

	// tcp6Addr denotes an IPv6 TCP address.
	tcp6Addr addressType = 1

	// v2OnionAddr denotes a version 2 Tor onion service address.
	v2OnionAddr addressType = 2

	// v3OnionAddr denotes a version 3 Tor (prop224) onion service address.
	v3OnionAddr addressType = 3
)

// encodeTCPAddr serializes a TCP address into its compact raw bytes
// representation.
func encodeTCPAddr(w io.Writer, addr *net.TCPAddr) error {
	var (
		addrType byte
		ip       []byte
	)

	if addr.IP.To4() != nil {
		addrType = byte(tcp4Addr)
		ip = addr.IP.To4()
	} else {
		addrType = byte(tcp6Addr)
		ip = addr.IP.To16()
	}

	if ip == nil {
		return fmt.Errorf("unable to encode IP %v", addr.IP)
	}

	if _, err := w.Write([]byte{addrType}); err != nil {
		return err
	}

	if _, err := w.Write(ip); err != nil {
		return err
	}

	var port [2]byte
	byteOrder.PutUint16(port[:], uint16(addr.Port))
	if _, err := w.Write(port[:]); err != nil {
		return err
	}

	return nil
}

// encodeOnionAddr serializes an onion address into its compact raw bytes
// representation.
func encodeOnionAddr(w io.Writer, addr *tor.OnionAddr) error {
	var suffixIndex int
	hostLen := len(addr.OnionService)
	switch hostLen {
	case tor.V2Len:
		if _, err := w.Write([]byte{byte(v2OnionAddr)}); err != nil {
			return err
		}
		suffixIndex = tor.V2Len - tor.OnionSuffixLen
	case tor.V3Len:
		if _, err := w.Write([]byte{byte(v3OnionAddr)}); err != nil {
			return err
		}
		suffixIndex = tor.V3Len - tor.OnionSuffixLen
	default:
		return errors.New("unknown onion service length")
	}

	suffix := addr.OnionService[suffixIndex:]
	if suffix != tor.OnionSuffix {
		return fmt.Errorf("invalid suffix \"%v\"", suffix)
	}

	host, err := tor.Base32Encoding.DecodeString(
		addr.OnionService[:suffixIndex],
	)
	if err != nil {
		return err
	}

	// Sanity check the decoded length.
	switch {
	case hostLen == tor.V2Len && len(host) != tor.V2DecodedLen:
		return fmt.Errorf("onion service %v decoded to invalid host %x",
			addr.OnionService, host)

	case hostLen == tor.V3Len && len(host) != tor.V3DecodedLen:
		return fmt.Errorf("onion service %v decoded to invalid host %x",
			addr.OnionService, host)
	}

	if _, err := w.Write(host); err != nil {
		return err
	}

	var port [2]byte
	byteOrder.PutUint16(port[:], uint16(addr.Port))
	if _, err := w.Write(port[:]); err != nil {
		return err
	}

	return nil
}

// deserializeAddr reads the serialized raw representation of an address and
// deserializes it into the actual address. This allows us to avoid address
// resolution within the channeldb package.
func deserializeAddr(r io.Reader) (net.Addr, error) {
	var addrType [1]byte
	if _, err := r.Read(addrType[:]); err != nil {
		return nil, err
	}

	var address net.Addr
	switch addressType(addrType[0]) {
	case tcp4Addr:
		var ip [4]byte
		if _, err := r.Read(ip[:]); err != nil {
			return nil, err
		}

		var port [2]byte
		if _, err := r.Read(port[:]); err != nil {
			return nil, err
		}

		address = &net.TCPAddr{
			IP:   net.IP(ip[:]),
			Port: int(binary.BigEndian.Uint16(port[:])),
		}
	case tcp6Addr:
		var ip [16]byte
		if _, err := r.Read(ip[:]); err != nil {
			return nil, err
		}

		var port [2]byte
		if _, err := r.Read(port[:]); err != nil {
			return nil, err
		}

		address = &net.TCPAddr{
			IP:   net.IP(ip[:]),
			Port: int(binary.BigEndian.Uint16(port[:])),
		}
	case v2OnionAddr:
		var h [tor.V2DecodedLen]byte
		if _, err := r.Read(h[:]); err != nil {
			return nil, err
		}

		var p [2]byte
		if _, err := r.Read(p[:]); err != nil {
			return nil, err
		}

		onionService := tor.Base32Encoding.EncodeToString(h[:])
		onionService += tor.OnionSuffix
		port := int(binary.BigEndian.Uint16(p[:]))

		address = &tor.OnionAddr{
			OnionService: onionService,
			Port:         port,
		}
	case v3OnionAddr:
		var h [tor.V3DecodedLen]byte
		if _, err := r.Read(h[:]); err != nil {
			return nil, err
		}

		var p [2]byte
		if _, err := r.Read(p[:]); err != nil {
			return nil, err
		}

		onionService := tor.Base32Encoding.EncodeToString(h[:])
		onionService += tor.OnionSuffix
		port := int(binary.BigEndian.Uint16(p[:]))

		address = &tor.OnionAddr{
			OnionService: onionService,
			Port:         port,
		}
	default:
		return nil, ErrUnknownAddressType
	}

	return address, nil
}

// serializeAddr serializes an address into its raw bytes representation so that
// it can be deserialized without requiring address resolution.
func serializeAddr(w io.Writer, address net.Addr) error {
	switch addr := address.(type) {
	case *net.TCPAddr:
		return encodeTCPAddr(w, addr)
	case *tor.OnionAddr:
		return encodeOnionAddr(w, addr)
	default:
		return ErrUnknownAddressType
	}
}
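To illustrate the wire format, an IPv4 TCP address serializes to seven bytes: one address-type byte, four IP bytes, and a two-byte big-endian port. A minimal round-trip sketch, usable only within this package since both helpers are unexported (the address values are illustrative):

	// Encode 127.0.0.1:9735 and decode it back from the same buffer.
	var buf bytes.Buffer
	addr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 9735}
	if err := serializeAddr(&buf, addr); err != nil {
		return err
	}

	decoded, err := deserializeAddr(&buf)
	if err != nil {
		return err
	}
	// decoded is a *net.TCPAddr equivalent to addr.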
751
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/channel.go
generated
vendored
Normal file
@@ -0,0 +1,751 @@
package migration_01_to_11

import (
	"errors"
	"fmt"
	"io"
	"strconv"
	"strings"
	"sync"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/keychain"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/shachain"
)

var (
	// closedChannelBucket stores summarization information concerning
	// previously open, but now closed channels.
	closedChannelBucket = []byte("closed-chan-bucket")

	// openChanBucket stores all the currently open channels. This bucket
	// has a second, nested bucket which is keyed by a node's ID. Within
	// that node ID bucket, all attributes required to track, update, and
	// close a channel are stored.
	//
	// openChan -> nodeID -> chanPoint
	//
	// TODO(roasbeef): flesh out comment
	openChannelBucket = []byte("open-chan-bucket")
)

// ChannelType is an enum-like type that describes one of several possible
// channel types. Each open channel is associated with a particular type as the
// channel type may determine how higher level operations are conducted such as
// fee negotiation, channel closing, the format of HTLCs, etc.
// TODO(roasbeef): split up per-chain?
type ChannelType uint8

const (
	// NOTE: iota isn't used here as this enum needs to be stable
	// long-term, since it will be persisted to the database.

	// SingleFunder represents a channel wherein one party solely funds the
	// entire capacity of the channel.
	SingleFunder ChannelType = 0
)

// ChannelConstraints represents a set of constraints meant to allow a node to
// limit their exposure, enact flow control and ensure that all HTLCs are
// economically relevant. This struct will be mirrored for both sides of the
// channel, as each side will enforce various constraints that MUST be adhered
// to for the lifetime of the channel. The parameters for each of these
// constraints are static for the duration of the channel, meaning the channel
// must be torn down for them to change.
type ChannelConstraints struct {
	// DustLimit is the threshold (in satoshis) below which any outputs
	// should be trimmed. When an output is trimmed, it isn't materialized
	// as an actual output, but is instead burned to miner's fees.
	DustLimit btcutil.Amount

	// ChanReserve is an absolute reservation on the channel for the
	// owner of this set of constraints. This means that the current
	// settled balance for this node CANNOT dip below the reservation
	// amount. This acts as a defense against costless attacks when
	// either side no longer has any skin in the game.
	ChanReserve btcutil.Amount

	// MaxPendingAmount is the maximum pending HTLC value that the
	// owner of these constraints can offer the remote node at a
	// particular time.
	MaxPendingAmount lnwire.MilliSatoshi

	// MinHTLC is the minimum HTLC value that the owner of these
	// constraints can offer the remote node. If any HTLCs below this
	// amount are offered, then the HTLC will be rejected. This, in
	// tandem with the dust limit allows a node to regulate the
	// smallest HTLC that it deems economically relevant.
	MinHTLC lnwire.MilliSatoshi

	// MaxAcceptedHtlcs is the maximum number of HTLCs that the owner of
	// this set of constraints can offer the remote node. This allows each
	// node to limit their overall exposure to HTLCs that may need to be
	// acted upon in the case of a unilateral channel closure or a contract
	// breach.
	MaxAcceptedHtlcs uint16

	// CsvDelay is the relative time lock delay expressed in blocks. Any
	// settled outputs that pay to the owner of this channel configuration
	// MUST ensure that the delay branch uses this value as the relative
	// time lock. Similarly, any HTLC's offered by this node should use
	// this value as well.
	CsvDelay uint16
}

// ChannelConfig is a struct that houses the various configuration options for
// channels. Each side maintains an instance of this configuration as it
// governs: how the funding and commitment transactions are to be created, the
// nature of HTLC's allotted, the keys to be used for delivery, and relative
// time lock parameters.
type ChannelConfig struct {
	// ChannelConstraints is the set of constraints that must be upheld for
	// the duration of the channel for the owner of this channel
	// configuration. Constraints govern a number of flow control related
	// parameters, also including the smallest HTLC that will be accepted
	// by a participant.
	ChannelConstraints

	// MultiSigKey is the key to be used within the 2-of-2 output script
	// for the owner of this channel config.
	MultiSigKey keychain.KeyDescriptor

	// RevocationBasePoint is the base public key to be used when deriving
	// revocation keys for the remote node's commitment transaction. This
	// will be combined along with a per commitment secret to derive a
	// unique revocation key for each state.
	RevocationBasePoint keychain.KeyDescriptor

	// PaymentBasePoint is the base public key to be used when deriving
	// the key used within the non-delayed pay-to-self output on the
	// commitment transaction for a node. This will be combined with a
	// tweak derived from the per-commitment point to ensure unique keys
	// for each commitment transaction.
	PaymentBasePoint keychain.KeyDescriptor

	// DelayBasePoint is the base public key to be used when deriving the
	// key used within the delayed pay-to-self output on the commitment
	// transaction for a node. This will be combined with a tweak derived
	// from the per-commitment point to ensure unique keys for each
	// commitment transaction.
	DelayBasePoint keychain.KeyDescriptor

	// HtlcBasePoint is the base public key to be used when deriving the
	// local HTLC key. The derived key (combined with the tweak derived
	// from the per-commitment point) is used within the "to self" clause
	// within any HTLC output scripts.
	HtlcBasePoint keychain.KeyDescriptor
}

// ChannelCommitment is a snapshot of the commitment state at a particular
// point in the commitment chain. With each state transition, a snapshot of the
// current state along with all non-settled HTLCs are recorded. These snapshots
// detail the state of the _remote_ party's commitment at a particular state
// number. For ourselves (the local node) we ONLY store our most recent
// (unrevoked) state for safety purposes.
type ChannelCommitment struct {
	// CommitHeight is the update number that this ChannelDelta represents.
	// It is the total number of commitment updates to this point. This can
	// be viewed as sort of a "commitment height" as this number is
	// monotonically increasing.
	CommitHeight uint64

	// LocalLogIndex is the cumulative log index of the local node at
	// this point in the commitment chain. This value will be incremented
	// for each _update_ added to the local update log.
	LocalLogIndex uint64

	// LocalHtlcIndex is the current local running HTLC index. This value
	// will be incremented for each outgoing HTLC the local node offers.
	LocalHtlcIndex uint64

	// RemoteLogIndex is the cumulative log index of the remote node
	// at this point in the commitment chain. This value will be
	// incremented for each _update_ added to the remote update log.
	RemoteLogIndex uint64

	// RemoteHtlcIndex is the current remote running HTLC index. This value
	// will be incremented for each outgoing HTLC the remote node offers.
	RemoteHtlcIndex uint64

	// LocalBalance is the current available settled balance within the
	// channel directly spendable by us.
	LocalBalance lnwire.MilliSatoshi

	// RemoteBalance is the current available settled balance within the
	// channel directly spendable by the remote node.
	RemoteBalance lnwire.MilliSatoshi

	// CommitFee is the amount calculated to be paid in fees for the
	// current set of commitment transactions. The fee amount is persisted
	// with the channel in order to allow the fee amount to be removed and
	// recalculated with each channel state update, including updates that
	// happen after a system restart.
	CommitFee btcutil.Amount

	// FeePerKw is the min satoshis/kilo-weight that should be paid within
	// the commitment transaction for the entire duration of the channel's
	// lifetime. This field may be updated during normal operation of the
	// channel as on-chain conditions change.
	//
	// TODO(halseth): make this SatPerKWeight. Cannot be done atm because
	// this will cause the import cycle lnwallet<->channeldb. Fee
	// estimation stuff should be in its own package.
	FeePerKw btcutil.Amount

	// CommitTx is the latest version of the commitment state,
	// broadcastable by us.
	CommitTx *wire.MsgTx

	// CommitSig is one half of the signature required to fully complete
	// the script for the commitment transaction above. This is the
	// signature signed by the remote party for our version of the
	// commitment transactions.
	CommitSig []byte

	// Htlcs is the set of HTLC's that are pending at this particular
	// commitment height.
	Htlcs []HTLC

	// TODO(roasbeef): pending commit pointer?
	//  * lets just walk through
}

// ChannelStatus is a bit vector used to indicate whether an OpenChannel is in
// the default usable state, or a state where it shouldn't be used.
type ChannelStatus uint8

var (
	// ChanStatusDefault is the normal state of an open channel.
	ChanStatusDefault ChannelStatus

	// ChanStatusBorked indicates that the channel has entered an
	// irreconcilable state, triggered by a state desynchronization or
	// channel breach. Channels in this state should never be added to the
	// htlc switch.
	ChanStatusBorked ChannelStatus = 1

	// ChanStatusCommitBroadcasted indicates that a commitment for this
	// channel has been broadcasted.
	ChanStatusCommitBroadcasted ChannelStatus = 1 << 1

	// ChanStatusLocalDataLoss indicates that we have lost channel state
	// for this channel, and broadcasting our latest commitment might be
	// considered a breach.
	//
	// TODO(halseth): actually enforce that we are not force closing such
	// a channel.
	ChanStatusLocalDataLoss ChannelStatus = 1 << 2

	// ChanStatusRestored is a status flag that signals that the channel
	// has been restored, and doesn't have all the fields a typical channel
	// will have.
	ChanStatusRestored ChannelStatus = 1 << 3
)

// chanStatusStrings maps a ChannelStatus to a human friendly string that
// describes that status.
var chanStatusStrings = map[ChannelStatus]string{
	ChanStatusDefault:           "ChanStatusDefault",
	ChanStatusBorked:            "ChanStatusBorked",
	ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted",
	ChanStatusLocalDataLoss:     "ChanStatusLocalDataLoss",
	ChanStatusRestored:          "ChanStatusRestored",
}

// orderedChanStatusFlags is an in-order list of all channel status flags.
var orderedChanStatusFlags = []ChannelStatus{
	ChanStatusDefault,
	ChanStatusBorked,
	ChanStatusCommitBroadcasted,
	ChanStatusLocalDataLoss,
	ChanStatusRestored,
}

// String returns a human-readable representation of the ChannelStatus.
func (c ChannelStatus) String() string {
	// If no flags are set, then this is the default case.
	if c == 0 {
		return chanStatusStrings[ChanStatusDefault]
	}

	// Add individual bit flags.
	statusStr := ""
	for _, flag := range orderedChanStatusFlags {
		if c&flag == flag {
			statusStr += chanStatusStrings[flag] + "|"
			c -= flag
		}
	}

	// Remove anything to the right of the final bar, including it as well.
	statusStr = strings.TrimRight(statusStr, "|")

	// Add any remaining flags which aren't accounted for as hex.
	if c != 0 {
		statusStr += "|0x" + strconv.FormatUint(uint64(c), 16)
	}

	// If this was purely an unknown flag, then remove the extra bar at the
	// start of the string.
	statusStr = strings.TrimLeft(statusStr, "|")

	return statusStr
}
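One subtlety worth noting when reading this method: because ChanStatusDefault is the zero value, the check c&flag == flag always matches it, so every non-zero status renders with a ChanStatusDefault prefix, and any bits without a string mapping are appended in hex. A small sketch of the resulting output (the flag combination is illustrative):

	// Two known flags combined:
	status := ChanStatusBorked | ChanStatusLocalDataLoss
	fmt.Println(status.String())
	// Prints: ChanStatusDefault|ChanStatusBorked|ChanStatusLocalDataLoss

An undefined bit such as 1 << 6 would add a trailing |0x40 to the string.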

// OpenChannel encapsulates the persistent and dynamic state of an open channel
// with a remote node. An open channel supports several options for on-disk
// serialization depending on the exact context. Full (upon channel creation)
// state commitments, and partial (due to a commitment update) writes are
// supported. Each partial write due to a state update appends the new update
// to an on-disk log, which can then subsequently be queried in order to
// "time-travel" to a prior state.
type OpenChannel struct {
	// ChanType denotes which type of channel this is.
	ChanType ChannelType

	// ChainHash is a hash which represents the blockchain that this
	// channel will be opened within. This value is typically the genesis
	// hash. In the case that the original chain went through a contentious
	// hard-fork, then this value will be tweaked using the unique fork
	// point on each branch.
	ChainHash chainhash.Hash

	// FundingOutpoint is the outpoint of the final funding transaction.
	// This value uniquely and globally identifies the channel within the
	// target blockchain as specified by the chain hash parameter.
	FundingOutpoint wire.OutPoint

	// ShortChannelID encodes the exact location in the chain in which the
	// channel was initially confirmed. This includes: the block height,
	// transaction index, and the output within the target transaction.
	ShortChannelID lnwire.ShortChannelID

	// IsPending indicates whether a channel's funding transaction has been
	// confirmed.
	IsPending bool

	// IsInitiator is a bool which indicates if we were the original
	// initiator for the channel. This value may affect how higher levels
	// negotiate fees, or close the channel.
	IsInitiator bool

	// FundingBroadcastHeight is the height in which the funding
	// transaction was broadcast. This value can be used by higher level
	// sub-systems to determine if a channel is stale and/or should have
	// been confirmed before a certain height.
	FundingBroadcastHeight uint32

	// NumConfsRequired is the number of confirmations a channel's funding
	// transaction must have received in order to be considered available
	// for normal transactional use.
	NumConfsRequired uint16

	// ChannelFlags holds the flags that were sent as part of the
	// open_channel message.
	ChannelFlags lnwire.FundingFlag

	// IdentityPub is the identity public key of the remote node this
	// channel has been established with.
	IdentityPub *btcec.PublicKey

	// Capacity is the total capacity of this channel.
	Capacity btcutil.Amount

	// TotalMSatSent is the total number of milli-satoshis we've sent
	// within this channel.
	TotalMSatSent lnwire.MilliSatoshi

	// TotalMSatReceived is the total number of milli-satoshis we've
	// received within this channel.
	TotalMSatReceived lnwire.MilliSatoshi

	// LocalChanCfg is the channel configuration for the local node.
	LocalChanCfg ChannelConfig

	// RemoteChanCfg is the channel configuration for the remote node.
	RemoteChanCfg ChannelConfig

	// LocalCommitment is the current local commitment state for the local
	// party. This is stored distinct from the state of the remote party
	// as there are certain asymmetric parameters which affect the
	// structure of each commitment.
	LocalCommitment ChannelCommitment

	// RemoteCommitment is the current remote commitment state for the
	// remote party. This is stored distinct from the state of the local
	// party as there are certain asymmetric parameters which affect the
	// structure of each commitment.
	RemoteCommitment ChannelCommitment

	// RemoteCurrentRevocation is the current revocation for their
	// commitment transaction. However, since this is the derived public
	// key, we don't yet have the private key so we aren't yet able to
	// verify that it's actually in the hash chain.
	RemoteCurrentRevocation *btcec.PublicKey

	// RemoteNextRevocation is the revocation key to be used for the *next*
	// commitment transaction we create for the local node. Within the
	// specification, this value is referred to as the
	// per-commitment-point.
	RemoteNextRevocation *btcec.PublicKey

	// RevocationProducer is used to generate the revocation in such a way
	// that the remote side might store it efficiently and have the ability
	// to restore the revocation by index if needed. The current
	// implementation of the secret producer is the shachain producer.
	RevocationProducer shachain.Producer

	// RevocationStore is used to efficiently store the revocations for
	// previous channel states sent to us by the remote side. The current
	// implementation of the secret store is the shachain store.
	RevocationStore shachain.Store

	// FundingTxn is the transaction containing this channel's funding
	// outpoint. Upon restarts, this txn will be rebroadcast if the channel
	// is found to be pending.
	//
	// NOTE: This value will only be populated for single-funder channels
	// for which we are the initiator.
	FundingTxn *wire.MsgTx

	// TODO(roasbeef): eww
	Db *DB

	// TODO(roasbeef): just need to store local and remote HTLC's?

	sync.RWMutex
}

// ShortChanID returns the current ShortChannelID of this channel.
func (c *OpenChannel) ShortChanID() lnwire.ShortChannelID {
	c.RLock()
	defer c.RUnlock()

	return c.ShortChannelID
}

// HTLC is the on-disk representation of a hash time-locked contract. HTLCs are
// contained within ChannelDeltas which encode the current state of the
// commitment between state updates.
//
// TODO(roasbeef): save space by using smaller ints at tail end?
type HTLC struct {
	// Signature is the signature for the second level covenant transaction
	// for this HTLC. The second level transaction is a timeout tx in the
	// case that this is an outgoing HTLC, and a success tx in the case
	// that this is an incoming HTLC.
	//
	// TODO(roasbeef): make [64]byte instead?
	Signature []byte

	// RHash is the payment hash of the HTLC.
	RHash [32]byte

	// Amt is the amount of milli-satoshis this HTLC escrows.
	Amt lnwire.MilliSatoshi

	// RefundTimeout is the absolute timeout on the HTLC that the sender
	// must wait before reclaiming the funds in limbo.
	RefundTimeout uint32

	// OutputIndex is the output index for this particular HTLC output
	// within the commitment transaction.
	OutputIndex int32

	// Incoming denotes whether we're the receiver or the sender of this
	// HTLC.
	Incoming bool

	// OnionBlob is an opaque blob which is used to complete multi-hop
	// routing.
	OnionBlob []byte

	// HtlcIndex is the HTLC counter index of this active, outstanding
	// HTLC. This differs from the LogIndex, as the HtlcIndex is only
	// incremented for each offered HTLC, while the LogIndex is
	// incremented for each update (includes settle+fail).
	HtlcIndex uint64

	// LogIndex is the cumulative log index of this HTLC. This differs
	// from the HtlcIndex as this will be incremented for each new log
	// update added.
	LogIndex uint64
}

// CircuitKey is used by a channel to uniquely identify the HTLCs it receives
// from the switch, and is used to purge our in-memory state of HTLCs that have
// already been processed by a link. Two lists of CircuitKeys are included in
// each CommitDiff to allow a link to determine which in-memory htlcs directed
// the opening and closing of circuits in the switch's circuit map.
type CircuitKey struct {
	// ChanID is the short chanid indicating the HTLC's origin.
	//
	// NOTE: It is fine for this value to be blank, as this indicates a
	// locally-sourced payment.
	ChanID lnwire.ShortChannelID

	// HtlcID is the unique htlc index predominantly assigned by links,
	// though it can also be assigned by the switch in the case of
	// locally-sourced payments.
	HtlcID uint64
}

// String returns a string representation of the CircuitKey.
func (k CircuitKey) String() string {
	return fmt.Sprintf("(Chan ID=%s, HTLC ID=%d)", k.ChanID, k.HtlcID)
}

// ClosureType is an enum-like structure that details exactly _how_ a channel
// was closed. Five closure types are currently possible: none, cooperative,
// local force close, remote force close, and (remote) breach.
type ClosureType uint8

const (
	// RemoteForceClose indicates that the remote peer has unilaterally
	// broadcast their current commitment state on-chain.
	RemoteForceClose ClosureType = 4
)

// ChannelCloseSummary contains the final state of a channel at the point it
// was closed. Once a channel is closed, all the information pertaining to that
// channel within the openChannelBucket is deleted, and a compact summary is
// put in place instead.
type ChannelCloseSummary struct {
	// ChanPoint is the outpoint for this channel's funding transaction,
	// and is used as a unique identifier for the channel.
	ChanPoint wire.OutPoint

	// ShortChanID encodes the exact location in the chain in which the
	// channel was initially confirmed. This includes: the block height,
	// transaction index, and the output within the target transaction.
	ShortChanID lnwire.ShortChannelID

	// ChainHash is the hash of the genesis block that this channel resides
	// within.
	ChainHash chainhash.Hash

	// ClosingTXID is the txid of the transaction which ultimately closed
	// this channel.
	ClosingTXID chainhash.Hash

	// RemotePub is the public key of the remote peer that we formerly had
	// a channel with.
	RemotePub *btcec.PublicKey

	// Capacity was the total capacity of the channel.
	Capacity btcutil.Amount

	// CloseHeight is the height at which the funding transaction was
	// spent.
	CloseHeight uint32

	// SettledBalance is our total settled balance at the time of channel
	// closure. This _does not_ include the sum of any outputs that have
	// been time-locked as a result of the unilateral channel closure.
	SettledBalance btcutil.Amount

	// TimeLockedBalance is the sum of all the time-locked outputs at the
	// time of channel closure. If we triggered the force closure of this
	// channel, then this value will be non-zero if our settled output is
	// above the dust limit. If we were on the receiving side of a channel
	// force closure, then this value will be non-zero if we had any
	// outstanding outgoing HTLC's at the time of channel closure.
	TimeLockedBalance btcutil.Amount

	// CloseType details exactly _how_ the channel was closed. Five closure
	// types are possible: cooperative, local force, remote force, breach
	// and funding canceled.
	CloseType ClosureType

	// IsPending indicates whether this channel is in the 'pending close'
	// state, which means the channel closing transaction has been
	// confirmed, but not yet been fully resolved. In the case of a channel
	// that has been cooperatively closed, it will go straight into the
	// fully resolved state as soon as the closing transaction has been
	// confirmed. However, for channels that have been force closed, they'll
	// stay marked as "pending" until _all_ the pending funds have been
	// swept.
	IsPending bool

	// RemoteCurrentRevocation is the current revocation for their
	// commitment transaction. However, since this is the derived public
	// key, we don't yet have the private key so we aren't yet able to
	// verify that it's actually in the hash chain.
	RemoteCurrentRevocation *btcec.PublicKey

	// RemoteNextRevocation is the revocation key to be used for the *next*
	// commitment transaction we create for the local node. Within the
	// specification, this value is referred to as the
	// per-commitment-point.
	RemoteNextRevocation *btcec.PublicKey

	// LocalChanConfig is the channel configuration for the local node.
	LocalChanConfig ChannelConfig

	// LastChanSyncMsg is the ChannelReestablish message for this channel
	// for the state at the point where it was closed.
	LastChanSyncMsg *lnwire.ChannelReestablish
}

func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
	err := WriteElements(w,
		cs.ChanPoint, cs.ShortChanID, cs.ChainHash, cs.ClosingTXID,
		cs.CloseHeight, cs.RemotePub, cs.Capacity, cs.SettledBalance,
		cs.TimeLockedBalance, cs.CloseType, cs.IsPending,
	)
	if err != nil {
		return err
	}

	// If this is a close channel summary created before the addition of
	// the new fields, then we can exit here.
	if cs.RemoteCurrentRevocation == nil {
		return WriteElements(w, false)
	}

	// If fields are present, write boolean to indicate this, and continue.
	if err := WriteElements(w, true); err != nil {
		return err
	}

	if err := WriteElements(w, cs.RemoteCurrentRevocation); err != nil {
		return err
	}

	if err := writeChanConfig(w, &cs.LocalChanConfig); err != nil {
		return err
	}

	// The RemoteNextRevocation field is optional, as it's possible for a
	// channel to be closed before we learn of the next unrevoked
	// revocation point for the remote party. Write a boolean indicating
	// whether this field is present or not.
	if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
		return err
	}

	// Write the field, if present.
	if cs.RemoteNextRevocation != nil {
		if err = WriteElements(w, cs.RemoteNextRevocation); err != nil {
			return err
		}
	}

	// Write whether the channel sync message is present.
	if err := WriteElements(w, cs.LastChanSyncMsg != nil); err != nil {
		return err
	}

	// Write the channel sync message, if present.
	if cs.LastChanSyncMsg != nil {
		if err := WriteElements(w, cs.LastChanSyncMsg); err != nil {
			return err
		}
	}

	return nil
}

func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, error) {
	c := &ChannelCloseSummary{}

	err := ReadElements(r,
		&c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
		&c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
		&c.TimeLockedBalance, &c.CloseType, &c.IsPending,
	)
	if err != nil {
		return nil, err
	}

	// We'll now check to see if the channel close summary was encoded with
	// any of the additional optional fields.
	var hasNewFields bool
	err = ReadElements(r, &hasNewFields)
	if err != nil {
		return nil, err
	}

	// If fields are not present, we can return.
	if !hasNewFields {
		return c, nil
	}

	// Otherwise read the new fields.
	if err := ReadElements(r, &c.RemoteCurrentRevocation); err != nil {
		return nil, err
	}

	if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
		return nil, err
	}

	// Finally, we'll attempt to read the next unrevoked commitment point
	// for the remote party. If we closed the channel before receiving a
	// funding locked message then this might not be present. A boolean
	// indicating whether the field is present will come first.
	var hasRemoteNextRevocation bool
	err = ReadElements(r, &hasRemoteNextRevocation)
	if err != nil {
		return nil, err
	}

	// If this field was written, read it.
	if hasRemoteNextRevocation {
		err = ReadElements(r, &c.RemoteNextRevocation)
		if err != nil {
			return nil, err
		}
	}

	// Check if we have a channel sync message to read.
	var hasChanSyncMsg bool
	err = ReadElements(r, &hasChanSyncMsg)
	if err == io.EOF {
		return c, nil
	} else if err != nil {
		return nil, err
	}

	// If a chan sync message is present, read it.
	if hasChanSyncMsg {
		// We must pass in a reference to a lnwire.Message for the
		// codec to support it.
		var msg lnwire.Message
		if err := ReadElements(r, &msg); err != nil {
			return nil, err
		}

		chanSync, ok := msg.(*lnwire.ChannelReestablish)
		if !ok {
			return nil, errors.New("unable to cast db Message to " +
				"ChannelReestablish")
		}
		c.LastChanSyncMsg = chanSync
	}

	return c, nil
}
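The serializer and deserializer above are symmetric, so a summary round-trips through a plain buffer; a minimal sketch (summary stands for an assumed, already-populated *ChannelCloseSummary):

	// Persist the close summary to a buffer, then restore it. Optional
	// fields that were nil at write time simply come back as nil.
	var buf bytes.Buffer
	if err := serializeChannelCloseSummary(&buf, summary); err != nil {
		return err
	}

	restored, err := deserializeCloseChannelSummary(&buf)
	if err != nil {
		return err
	}
	// restored now mirrors summary field for field.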

func writeChanConfig(b io.Writer, c *ChannelConfig) error {
	return WriteElements(b,
		c.DustLimit, c.MaxPendingAmount, c.ChanReserve, c.MinHTLC,
		c.MaxAcceptedHtlcs, c.CsvDelay, c.MultiSigKey,
		c.RevocationBasePoint, c.PaymentBasePoint, c.DelayBasePoint,
		c.HtlcBasePoint,
	)
}

func readChanConfig(b io.Reader, c *ChannelConfig) error {
	return ReadElements(b,
		&c.DustLimit, &c.MaxPendingAmount, &c.ChanReserve,
		&c.MinHTLC, &c.MaxAcceptedHtlcs, &c.CsvDelay,
		&c.MultiSigKey, &c.RevocationBasePoint,
		&c.PaymentBasePoint, &c.DelayBasePoint,
		&c.HtlcBasePoint,
	)
}
448
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/codec.go
generated
vendored
Normal file
@@ -0,0 +1,448 @@
package migration_01_to_11

import (
	"encoding/binary"
	"fmt"
	"io"
	"net"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/keychain"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/shachain"
)

// writeOutpoint writes an outpoint to the passed writer using the minimal
// amount of bytes possible.
func writeOutpoint(w io.Writer, o *wire.OutPoint) error {
	if _, err := w.Write(o.Hash[:]); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, o.Index); err != nil {
		return err
	}

	return nil
}

// readOutpoint reads an outpoint from the passed reader that was previously
// written using the writeOutpoint function.
func readOutpoint(r io.Reader, o *wire.OutPoint) error {
	if _, err := io.ReadFull(r, o.Hash[:]); err != nil {
		return err
	}
	if err := binary.Read(r, byteOrder, &o.Index); err != nil {
		return err
	}

	return nil
}

// UnknownElementType is an error returned when the codec is unable to encode
// or decode a particular type.
type UnknownElementType struct {
	method  string
	element interface{}
}

// Error returns the name of the method that encountered the error, as well as
// the type that was unsupported.
func (e UnknownElementType) Error() string {
	return fmt.Sprintf("Unknown type in %s: %T", e.method, e.element)
}

// WriteElement is a one-stop shop to write the big endian representation of
// any element which is to be serialized for storage on disk. The passed
// io.Writer should be backed by an appropriately sized byte slice, or be able
// to dynamically expand to accommodate additional data.
func WriteElement(w io.Writer, element interface{}) error {
	switch e := element.(type) {
	case keychain.KeyDescriptor:
		if err := binary.Write(w, byteOrder, e.Family); err != nil {
			return err
		}
		if err := binary.Write(w, byteOrder, e.Index); err != nil {
			return err
		}

		if e.PubKey != nil {
			if err := binary.Write(w, byteOrder, true); err != nil {
				return fmt.Errorf("error writing serialized element: %s", err)
			}

			return WriteElement(w, e.PubKey)
		}

		return binary.Write(w, byteOrder, false)
	case ChannelType:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case chainhash.Hash:
		if _, err := w.Write(e[:]); err != nil {
			return err
		}

	case wire.OutPoint:
		return writeOutpoint(w, &e)

	case lnwire.ShortChannelID:
		if err := binary.Write(w, byteOrder, e.ToUint64()); err != nil {
			return err
		}

	case lnwire.ChannelID:
		if _, err := w.Write(e[:]); err != nil {
			return err
		}

	case int64, uint64:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case uint32:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case int32:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case uint16:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case uint8:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case bool:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case btcutil.Amount:
		if err := binary.Write(w, byteOrder, uint64(e)); err != nil {
			return err
		}

	case lnwire.MilliSatoshi:
		if err := binary.Write(w, byteOrder, uint64(e)); err != nil {
			return err
		}

	case *btcec.PrivateKey:
		b := e.Serialize()
		if _, err := w.Write(b); err != nil {
			return err
		}

	case *btcec.PublicKey:
		b := e.SerializeCompressed()
		if _, err := w.Write(b); err != nil {
			return err
		}

	case shachain.Producer:
		return e.Encode(w)

	case shachain.Store:
		return e.Encode(w)

	case *wire.MsgTx:
		return e.Serialize(w)

	case [32]byte:
		if _, err := w.Write(e[:]); err != nil {
			return err
		}

	case []byte:
		if err := wire.WriteVarBytes(w, 0, e); err != nil {
			return err
		}

	case lnwire.Message:
		if _, err := lnwire.WriteMessage(w, e, 0); err != nil {
			return err
		}

	case ChannelStatus:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case ClosureType:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case lnwire.FundingFlag:
		if err := binary.Write(w, byteOrder, e); err != nil {
			return err
		}

	case net.Addr:
		if err := serializeAddr(w, e); err != nil {
			return err
		}

	case []net.Addr:
		if err := WriteElement(w, uint32(len(e))); err != nil {
			return err
		}

		for _, addr := range e {
			if err := serializeAddr(w, addr); err != nil {
				return err
			}
		}

	default:
		return UnknownElementType{"WriteElement", e}
	}

	return nil
}

// WriteElements writes each element in the elements slice to the passed
// io.Writer using WriteElement.
func WriteElements(w io.Writer, elements ...interface{}) error {
	for _, element := range elements {
		err := WriteElement(w, element)
		if err != nil {
			return err
		}
	}
	return nil
}
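The []net.Addr case above shows the codec's convention for collections: a uint32 count followed by each element's own encoding. A minimal sketch of writing a one-element address list (the values are illustrative):

	// Encodes a 4-byte big-endian count of 1, then the address bytes.
	addrs := []net.Addr{
		&net.TCPAddr{IP: net.ParseIP("10.0.0.1"), Port: 9735},
	}

	var buf bytes.Buffer
	if err := WriteElements(&buf, addrs); err != nil {
		return err
	}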

// ReadElement is a one-stop utility function to deserialize any data
// structure encoded using the serialization format of the database.
func ReadElement(r io.Reader, element interface{}) error {
	switch e := element.(type) {
	case *keychain.KeyDescriptor:
		if err := binary.Read(r, byteOrder, &e.Family); err != nil {
			return err
		}
		if err := binary.Read(r, byteOrder, &e.Index); err != nil {
			return err
		}

		var hasPubKey bool
		if err := binary.Read(r, byteOrder, &hasPubKey); err != nil {
			return err
		}

		if hasPubKey {
			return ReadElement(r, &e.PubKey)
		}

	case *ChannelType:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *chainhash.Hash:
		if _, err := io.ReadFull(r, e[:]); err != nil {
			return err
		}

	case *wire.OutPoint:
		return readOutpoint(r, e)

	case *lnwire.ShortChannelID:
		var a uint64
		if err := binary.Read(r, byteOrder, &a); err != nil {
			return err
		}
		*e = lnwire.NewShortChanIDFromInt(a)

	case *lnwire.ChannelID:
		if _, err := io.ReadFull(r, e[:]); err != nil {
			return err
		}

	case *int64, *uint64:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *uint32:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *int32:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *uint16:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *uint8:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *bool:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *btcutil.Amount:
		var a uint64
		if err := binary.Read(r, byteOrder, &a); err != nil {
			return err
		}

		*e = btcutil.Amount(a)

	case *lnwire.MilliSatoshi:
		var a uint64
		if err := binary.Read(r, byteOrder, &a); err != nil {
			return err
		}

		*e = lnwire.MilliSatoshi(a)

	case **btcec.PrivateKey:
		var b [btcec.PrivKeyBytesLen]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}

		priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), b[:])
		*e = priv

	case **btcec.PublicKey:
		var b [btcec.PubKeyBytesLenCompressed]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}

		pubKey, err := btcec.ParsePubKey(b[:], btcec.S256())
		if err != nil {
			return err
		}
		*e = pubKey

	case *shachain.Producer:
		var root [32]byte
		if _, err := io.ReadFull(r, root[:]); err != nil {
			return err
		}

		// TODO(roasbeef): remove
		producer, err := shachain.NewRevocationProducerFromBytes(root[:])
		if err != nil {
			return err
		}

		*e = producer

	case *shachain.Store:
		store, err := shachain.NewRevocationStoreFromBytes(r)
		if err != nil {
			return err
		}

		*e = store

	case **wire.MsgTx:
		tx := wire.NewMsgTx(2)
		if err := tx.Deserialize(r); err != nil {
			return err
		}

		*e = tx

	case *[32]byte:
		if _, err := io.ReadFull(r, e[:]); err != nil {
			return err
		}

	case *[]byte:
		bytes, err := wire.ReadVarBytes(r, 0, 66000, "[]byte")
		if err != nil {
			return err
		}

		*e = bytes

	case *lnwire.Message:
		msg, err := lnwire.ReadMessage(r, 0)
		if err != nil {
			return err
		}

		*e = msg

	case *ChannelStatus:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *ClosureType:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *lnwire.FundingFlag:
		if err := binary.Read(r, byteOrder, e); err != nil {
			return err
		}

	case *net.Addr:
		addr, err := deserializeAddr(r)
		if err != nil {
			return err
		}
		*e = addr

	case *[]net.Addr:
		var numAddrs uint32
		if err := ReadElement(r, &numAddrs); err != nil {
			return err
		}

		*e = make([]net.Addr, numAddrs)
		for i := uint32(0); i < numAddrs; i++ {
			addr, err := deserializeAddr(r)
			if err != nil {
				return err
			}
			(*e)[i] = addr
		}

	default:
		return UnknownElementType{"ReadElement", e}
	}

	return nil
}

// ReadElements deserializes a variable number of elements from the passed
// io.Reader, with each element being deserialized according to the ReadElement
// function.
func ReadElements(r io.Reader, elements ...interface{}) error {
	for _, element := range elements {
		err := ReadElement(r, element)
		if err != nil {
			return err
		}
	}
	return nil
}
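Since ReadElements mirrors WriteElements positionally, values written with one can be recovered with the other by passing pointers to matching types in the same order; a minimal round-trip sketch over primitive element types (the values are illustrative):

	// Write a height, an amount and a flag, then read them back.
	var buf bytes.Buffer
	err := WriteElements(&buf, uint32(500000), btcutil.Amount(25000), true)
	if err != nil {
		return err
	}

	var (
		height  uint32
		balance btcutil.Amount
		flag    bool
	)
	err = ReadElements(&buf, &height, &balance, &flag)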
216
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/db.go
generated
vendored
Normal file
@@ -0,0 +1,216 @@
package migration_01_to_11

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

const (
	dbName           = "channel.db"
	dbFilePermission = 0600
)

// migration is a function which takes a prior, outdated version of the
// database and mutates the key/bucket structure to arrive at a more
// up-to-date version of the database.
type migration func(tx kvdb.RwTx) error

var (
	// Big endian is the preferred byte order, due to cursor scans over
	// integer keys iterating in order.
	byteOrder = binary.BigEndian
)

// DB is the primary datastore for the lnd daemon. The database stores
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
	kvdb.Backend
	dbPath string
	graph  *ChannelGraph
	now    func() time.Time
}

// Open opens an existing channeldb. Any necessary schema migrations due to
// updates will take place as required.
func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
	path := filepath.Join(dbPath, dbName)

	if !fileExists(path) {
		if err := createChannelDB(dbPath); err != nil {
			return nil, err
		}
	}

	opts := DefaultOptions()
	for _, modifier := range modifiers {
		modifier(&opts)
	}

	// Specify bbolt freelist options to reduce heap pressure in case the
	// freelist grows to be very large.
	bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync)
	if err != nil {
		return nil, err
	}

	chanDB := &DB{
		Backend: bdb,
		dbPath:  dbPath,
		now:     time.Now,
	}
	chanDB.graph = newChannelGraph(
		chanDB, opts.RejectCacheSize, opts.ChannelCacheSize,
	)

	return chanDB, nil
}
|
||||
// createChannelDB creates and initializes a fresh version of channeldb. In
|
||||
// the case that the target path has not yet been created or doesn't yet exist,
|
||||
// then the path is created. Additionally, all required top-level buckets used
|
||||
// within the database are created.
|
||||
func createChannelDB(dbPath string) error {
|
||||
if !fileExists(dbPath) {
|
||||
if err := os.MkdirAll(dbPath, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
path := filepath.Join(dbPath, dbName)
|
||||
bdb, err := kvdb.Create(kvdb.BoltBackendName, path, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = kvdb.Update(bdb, func(tx kvdb.RwTx) error {
|
||||
if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := tx.CreateTopLevelBucket(paymentBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = nodes.CreateBucket(aliasIndexBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = nodes.CreateBucket(nodeUpdateIndexBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
edges, err := tx.CreateTopLevelBucket(edgeBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := edges.CreateBucket(edgeIndexBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := edges.CreateBucket(edgeUpdateIndexBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := edges.CreateBucket(channelPointBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := edges.CreateBucket(zombieBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
graphMeta, err := tx.CreateTopLevelBucket(graphMetaBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = graphMeta.CreateBucket(pruneLogBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
meta := &Meta{
|
||||
DbVersionNumber: 0,
|
||||
}
|
||||
return putMeta(meta, tx)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create new channeldb")
|
||||
}
|
||||
|
||||
return bdb.Close()
|
||||
}
|
||||
|
||||
// fileExists returns true if the file exists, and false otherwise.
|
||||
func fileExists(path string) bool {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// FetchClosedChannels attempts to fetch all closed channels from the database.
|
||||
// The pendingOnly bool toggles if channels that aren't yet fully closed should
|
||||
// be returned in the response or not. When a channel was cooperatively closed,
|
||||
// it becomes fully closed after a single confirmation. When a channel was
|
||||
// forcibly closed, it will become fully closed after _all_ the pending funds
|
||||
// (if any) have been swept.
|
||||
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
|
||||
var chanSummaries []*ChannelCloseSummary
|
||||
|
||||
if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
|
||||
closeBucket := tx.ReadBucket(closedChannelBucket)
|
||||
if closeBucket == nil {
|
||||
return ErrNoClosedChannels
|
||||
}
|
||||
|
||||
return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) error {
|
||||
summaryReader := bytes.NewReader(summaryBytes)
|
||||
chanSummary, err := deserializeCloseChannelSummary(summaryReader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the query specified to only include pending
|
||||
// channels, then we'll skip any channels which aren't
|
||||
// currently pending.
|
||||
if !chanSummary.IsPending && pendingOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
chanSummaries = append(chanSummaries, chanSummary)
|
||||
return nil
|
||||
})
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return chanSummaries, nil
|
||||
}
|
||||
|
||||
// ChannelGraph returns a new instance of the directed channel graph.
|
||||
func (d *DB) ChannelGraph() *ChannelGraph {
|
||||
return d.graph
|
||||
}
|
||||
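
// Example: opening the database and listing closed channels. An
// illustrative sketch, not part of the original file; it assumes the
// OptionModifier defaults defined elsewhere in this package:
//
//	db, err := Open("/path/to/lnd/data/graph/mainnet")
//	if err != nil {
//		// handle err
//	}
//	defer db.Close()
//
//	// Passing false returns pending and fully closed channels alike.
//	summaries, err := db.FetchClosedChannels(false)
//	if err != nil && err != ErrNoClosedChannels {
//		// handle err
//	}
//	_ = summaries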
56
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/error.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
package migration_01_to_11

import (
	"fmt"
)

var (
	// ErrNoInvoicesCreated is returned when we don't have invoices in
	// our database to return.
	ErrNoInvoicesCreated = fmt.Errorf("there are no existing invoices")

	// ErrNoPaymentsCreated is returned when the bucket of payments hasn't
	// been created.
	ErrNoPaymentsCreated = fmt.Errorf("there are no existing payments")

	// ErrGraphNotFound is returned when at least one of the components of
	// the graph doesn't exist.
	ErrGraphNotFound = fmt.Errorf("graph bucket not initialized")

	// ErrSourceNodeNotSet is returned if the source node of the graph
	// hasn't been added. The source node is the center node within a
	// star-graph.
	ErrSourceNodeNotSet = fmt.Errorf("source node does not exist")

	// ErrGraphNodeNotFound is returned when we're unable to find the target
	// node.
	ErrGraphNodeNotFound = fmt.Errorf("unable to find node")

	// ErrEdgeNotFound is returned when an edge for the target chanID
	// can't be found.
	ErrEdgeNotFound = fmt.Errorf("edge not found")

	// ErrUnknownAddressType is returned when a node's addressType is not
	// an expected value.
	ErrUnknownAddressType = fmt.Errorf("address type cannot be resolved")

	// ErrNoClosedChannels is returned when a node is queried for all the
	// channels it has closed, but it hasn't yet closed any channels.
	ErrNoClosedChannels = fmt.Errorf("no channel have been closed yet")

	// ErrEdgePolicyOptionalFieldNotFound is an error returned if a channel
	// policy field is not found in the db even though its message flags
	// indicate it should be.
	ErrEdgePolicyOptionalFieldNotFound = fmt.Errorf("optional field not " +
		"present")
)

// ErrTooManyExtraOpaqueBytes creates an error which should be returned if the
// caller attempts to write an announcement message which bears too many extra
// opaque bytes. We limit this value in order to ensure that we don't waste
// disk space due to nodes unnecessarily padding out their announcements with
// garbage data.
func ErrTooManyExtraOpaqueBytes(numBytes int) error {
	return fmt.Errorf("max allowed number of opaque bytes is %v, received "+
		"%v bytes", MaxAllowedExtraOpaqueBytes, numBytes)
}
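
// Example: the values above are plain sentinel errors created with
// fmt.Errorf, so callers in this package compare them by identity. An
// illustrative sketch, not part of the original file:
//
//	invoices, err := db.FetchAllInvoices(false)
//	switch {
//	case err == ErrNoInvoicesCreated:
//		// Empty database: treat as zero invoices.
//	case err != nil:
//		// handle err
//	}
//	_ = invoices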
1179
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/graph.go
generated
vendored
Normal file
File diff suppressed because it is too large
550
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/invoices.go
generated
vendored
Normal file
@@ -0,0 +1,550 @@
package migration_01_to_11

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"time"

	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/tlv"
)

var (

	// invoiceBucket is the name of the bucket within the database that
	// stores all data related to invoices no matter their final state.
	// Within the invoice bucket, each invoice is keyed by its invoice ID
	// which is a monotonically increasing uint32.
	invoiceBucket = []byte("invoices")

	// addIndexBucket is an index bucket that we'll use to create a
	// monotonically increasing set of add indexes. Each time we add a new
	// invoice, this sequence number will be incremented and then populated
	// within the new invoice.
	//
	// In addition to this sequence number, we map:
	//
	//   addIndexNo => invoiceKey
	addIndexBucket = []byte("invoice-add-index")

	// settleIndexBucket is an index bucket that we'll use to create a
	// monotonically increasing integer for tracking a "settle index". Each
	// time an invoice is settled, this sequence number will be incremented
	// and populated within the newly settled invoice.
	//
	// In addition to this sequence number, we map:
	//
	//   settleIndexNo => invoiceKey
	settleIndexBucket = []byte("invoice-settle-index")
)

const (
	// MaxMemoSize is the maximum size of the memo field within invoices
	// stored in the database.
	MaxMemoSize = 1024

	// MaxReceiptSize is the maximum size of the payment receipt stored
	// within the database alongside incoming/outgoing invoices.
	MaxReceiptSize = 1024

	// MaxPaymentRequestSize is the max size of a payment request for
	// this invoice.
	// TODO(halseth): determine the max length payment request when field
	// lengths are final.
	MaxPaymentRequestSize = 4096

	// A set of tlv type definitions used to serialize invoice htlcs to the
	// database.
	chanIDType       tlv.Type = 1
	htlcIDType       tlv.Type = 3
	amtType          tlv.Type = 5
	acceptHeightType tlv.Type = 7
	acceptTimeType   tlv.Type = 9
	resolveTimeType  tlv.Type = 11
	expiryHeightType tlv.Type = 13
	stateType        tlv.Type = 15
)
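
// Example: resolving an add index back to its invoice key. An
// illustrative sketch, not part of the original file; it assumes the
// add index bucket is nested under the invoice bucket and keyed by an
// 8-byte big-endian sequence number:
//
//	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
//		invoices := tx.ReadBucket(invoiceBucket)
//		if invoices == nil {
//			return ErrNoInvoicesCreated
//		}
//		addIndex := invoices.NestedReadBucket(addIndexBucket)
//		if addIndex == nil {
//			return nil
//		}
//
//		var seqNo [8]byte
//		byteOrder.PutUint64(seqNo[:], 1) // first invoice added
//		invoiceKey := addIndex.Get(seqNo[:])
//		_ = invoiceKey
//		return nil
//	})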

// ContractState describes the state the invoice is in.
type ContractState uint8

const (
	// ContractOpen means the invoice has only been created.
	ContractOpen ContractState = 0

	// ContractSettled means the htlc is settled and the invoice has been
	// paid.
	ContractSettled ContractState = 1

	// ContractCanceled means the invoice has been canceled.
	ContractCanceled ContractState = 2

	// ContractAccepted means the HTLC has been accepted but not settled
	// yet.
	ContractAccepted ContractState = 3
)

// String returns a human readable identifier for the ContractState type.
func (c ContractState) String() string {
	switch c {
	case ContractOpen:
		return "Open"
	case ContractSettled:
		return "Settled"
	case ContractCanceled:
		return "Canceled"
	case ContractAccepted:
		return "Accepted"
	}

	return "Unknown"
}

// ContractTerm is a companion struct to the Invoice struct. This struct houses
// the necessary conditions required before the invoice can be considered fully
// settled by the payee.
type ContractTerm struct {
	// PaymentPreimage is the preimage which is to be revealed in the
	// occasion that an HTLC paying to the hash of this preimage is
	// extended.
	PaymentPreimage lntypes.Preimage

	// Value is the expected amount of milli-satoshis to be paid to an HTLC
	// which can be satisfied by the above preimage.
	Value lnwire.MilliSatoshi

	// State describes the state the invoice is in.
	State ContractState
}

// Invoice is a payment invoice generated by a payee in order to request
// payment for some good or service. The inclusion of invoices within Lightning
// creates a payment work flow for merchants very similar to that of the
// existing financial system within PayPal, etc. Invoices are added to the
// database when a payment is requested, then can be settled manually once the
// payment is received at the upper layer. For record keeping purposes,
// invoices are never deleted from the database, instead a bit is toggled
// denoting the invoice has been fully settled. Within the database, all
// invoices must have a unique payment hash which is generated by taking the
// sha256 of the payment preimage.
type Invoice struct {
	// Memo is an optional memo to be stored along side an invoice. The
	// memo may contain further details pertaining to the invoice itself,
	// or any other message which fits within the size constraints.
	Memo []byte

	// Receipt is an optional field dedicated for storing a
	// cryptographically binding receipt of payment.
	//
	// TODO(roasbeef): document scheme.
	Receipt []byte

	// PaymentRequest is an optional field where a payment request created
	// for this invoice can be stored.
	PaymentRequest []byte

	// FinalCltvDelta is the minimum required number of blocks before htlc
	// expiry when the invoice is accepted.
	FinalCltvDelta int32

	// Expiry defines how long after creation this invoice should expire.
	Expiry time.Duration

	// CreationDate is the exact time the invoice was created.
	CreationDate time.Time

	// SettleDate is the exact time the invoice was settled.
	SettleDate time.Time

	// Terms are the contractual payment terms of the invoice. Once all the
	// terms have been satisfied by the payer, then the invoice can be
	// considered fully fulfilled.
	//
	// TODO(roasbeef): later allow for multiple terms to fulfill the final
	// invoice: payment fragmentation, etc.
	Terms ContractTerm

	// AddIndex is an auto-incrementing integer that acts as a
	// monotonically increasing sequence number for all invoices created.
	// Clients can then use this field as a "checkpoint" of sorts when
	// implementing a streaming RPC to notify consumers of instances where
	// an invoice has been added before they re-connected.
	//
	// NOTE: This index starts at 1.
	AddIndex uint64

	// SettleIndex is an auto-incrementing integer that acts as a
	// monotonically increasing sequence number for all settled invoices.
	// Clients can then use this field as a "checkpoint" of sorts when
	// implementing a streaming RPC to notify consumers of instances where
	// an invoice has been settled before they re-connected.
	//
	// NOTE: This index starts at 1.
	SettleIndex uint64

	// AmtPaid is the final amount that we ultimately accepted in payment
	// for this invoice. We specify this value independently as it's
	// possible that the invoice originally didn't specify an amount, or
	// the sender overpaid.
	AmtPaid lnwire.MilliSatoshi

	// Htlcs records all htlcs that paid to this invoice. Some of these
	// htlcs may have been marked as canceled.
	Htlcs map[CircuitKey]*InvoiceHTLC
}

// HtlcState defines the states an htlc paying to an invoice can be in.
type HtlcState uint8

// InvoiceHTLC contains details about an htlc paying to this invoice.
type InvoiceHTLC struct {
	// Amt is the amount that is carried by this htlc.
	Amt lnwire.MilliSatoshi

	// AcceptHeight is the block height at which the invoice registry
	// decided to accept this htlc as a payment to the invoice. At this
	// height, the invoice cltv delay must have been met.
	AcceptHeight uint32

	// AcceptTime is the wall clock time at which the invoice registry
	// decided to accept the htlc.
	AcceptTime time.Time

	// ResolveTime is the wall clock time at which the invoice registry
	// decided to settle the htlc.
	ResolveTime time.Time

	// Expiry is the expiry height of this htlc.
	Expiry uint32

	// State indicates the state the invoice htlc is currently in. A
	// canceled htlc isn't just removed from the invoice htlcs map, because
	// we need AcceptHeight to properly cancel the htlc back.
	State HtlcState
}

func validateInvoice(i *Invoice) error {
	if len(i.Memo) > MaxMemoSize {
		return fmt.Errorf("max length a memo is %v, and invoice "+
			"of length %v was provided", MaxMemoSize, len(i.Memo))
	}
	if len(i.Receipt) > MaxReceiptSize {
		return fmt.Errorf("max length a receipt is %v, and invoice "+
			"of length %v was provided", MaxReceiptSize,
			len(i.Receipt))
	}
	if len(i.PaymentRequest) > MaxPaymentRequestSize {
		return fmt.Errorf("max length of payment request is %v, length "+
			"provided was %v", MaxPaymentRequestSize,
			len(i.PaymentRequest))
	}
	return nil
}

// FetchAllInvoices returns all invoices currently stored within the database.
// If the pendingOnly param is true, then only unsettled invoices will be
// returned, skipping all invoices that are fully settled.
func (d *DB) FetchAllInvoices(pendingOnly bool) ([]Invoice, error) {
	var invoices []Invoice

	err := kvdb.View(d, func(tx kvdb.ReadTx) error {
		invoiceB := tx.ReadBucket(invoiceBucket)
		if invoiceB == nil {
			return ErrNoInvoicesCreated
		}

		// Iterate through the entire key space of the top-level
		// invoice bucket. Each key with a non-nil value stores the
		// next invoice ID, which maps to the corresponding invoice.
		return invoiceB.ForEach(func(k, v []byte) error {
			if v == nil {
				return nil
			}

			invoiceReader := bytes.NewReader(v)
			invoice, err := deserializeInvoice(invoiceReader)
			if err != nil {
				return err
			}

			if pendingOnly &&
				invoice.Terms.State == ContractSettled {

				return nil
			}

			invoices = append(invoices, invoice)

			return nil
		})
	})
	if err != nil {
		return nil, err
	}

	return invoices, nil
}

// serializeInvoice serializes an invoice to a writer.
//
// Note: this function is in use for a migration. Before making changes that
// would modify the on disk format, make a copy of the original code and store
// it with the migration.
func serializeInvoice(w io.Writer, i *Invoice) error {
	if err := wire.WriteVarBytes(w, 0, i.Memo[:]); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(w, 0, i.Receipt[:]); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(w, 0, i.PaymentRequest[:]); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, i.FinalCltvDelta); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, int64(i.Expiry)); err != nil {
		return err
	}

	birthBytes, err := i.CreationDate.MarshalBinary()
	if err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, birthBytes); err != nil {
		return err
	}

	settleBytes, err := i.SettleDate.MarshalBinary()
	if err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, settleBytes); err != nil {
		return err
	}

	if _, err := w.Write(i.Terms.PaymentPreimage[:]); err != nil {
		return err
	}

	var scratch [8]byte
	byteOrder.PutUint64(scratch[:], uint64(i.Terms.Value))
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, i.Terms.State); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, i.AddIndex); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, i.SettleIndex); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, int64(i.AmtPaid)); err != nil {
		return err
	}

	if err := serializeHtlcs(w, i.Htlcs); err != nil {
		return err
	}

	return nil
}

// serializeHtlcs serializes a map containing circuit keys and invoice htlcs to
// a writer.
func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) error {
	for key, htlc := range htlcs {
		// Encode the htlc in a tlv stream.
		chanID := key.ChanID.ToUint64()
		amt := uint64(htlc.Amt)
		acceptTime := uint64(htlc.AcceptTime.UnixNano())
		resolveTime := uint64(htlc.ResolveTime.UnixNano())
		state := uint8(htlc.State)

		tlvStream, err := tlv.NewStream(
			tlv.MakePrimitiveRecord(chanIDType, &chanID),
			tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID),
			tlv.MakePrimitiveRecord(amtType, &amt),
			tlv.MakePrimitiveRecord(
				acceptHeightType, &htlc.AcceptHeight,
			),
			tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime),
			tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime),
			tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry),
			tlv.MakePrimitiveRecord(stateType, &state),
		)
		if err != nil {
			return err
		}

		var b bytes.Buffer
		if err := tlvStream.Encode(&b); err != nil {
			return err
		}

		// Write the length of the tlv stream followed by the stream
		// bytes.
		err = binary.Write(w, byteOrder, uint64(b.Len()))
		if err != nil {
			return err
		}

		if _, err := w.Write(b.Bytes()); err != nil {
			return err
		}
	}

	return nil
}

func deserializeInvoice(r io.Reader) (Invoice, error) {
	var err error
	invoice := Invoice{}

	// TODO(roasbeef): use read full everywhere
	invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "")
	if err != nil {
		return invoice, err
	}
	invoice.Receipt, err = wire.ReadVarBytes(r, 0, MaxReceiptSize, "")
	if err != nil {
		return invoice, err
	}

	invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "")
	if err != nil {
		return invoice, err
	}

	if err := binary.Read(r, byteOrder, &invoice.FinalCltvDelta); err != nil {
		return invoice, err
	}

	var expiry int64
	if err := binary.Read(r, byteOrder, &expiry); err != nil {
		return invoice, err
	}
	invoice.Expiry = time.Duration(expiry)

	birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth")
	if err != nil {
		return invoice, err
	}
	if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil {
		return invoice, err
	}

	settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled")
	if err != nil {
		return invoice, err
	}
	if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil {
		return invoice, err
	}

	if _, err := io.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil {
		return invoice, err
	}
	var scratch [8]byte
	if _, err := io.ReadFull(r, scratch[:]); err != nil {
		return invoice, err
	}
	invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))

	if err := binary.Read(r, byteOrder, &invoice.Terms.State); err != nil {
		return invoice, err
	}

	if err := binary.Read(r, byteOrder, &invoice.AddIndex); err != nil {
		return invoice, err
	}
	if err := binary.Read(r, byteOrder, &invoice.SettleIndex); err != nil {
		return invoice, err
	}
	if err := binary.Read(r, byteOrder, &invoice.AmtPaid); err != nil {
		return invoice, err
	}

	invoice.Htlcs, err = deserializeHtlcs(r)
	if err != nil {
		return Invoice{}, err
	}

	return invoice, nil
}

// deserializeHtlcs reads a list of invoice htlcs from a reader and returns it
// as a map.
func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, error) {
	htlcs := make(map[CircuitKey]*InvoiceHTLC, 0)

	for {
		// Read the length of the tlv stream for this htlc.
		var streamLen uint64
		if err := binary.Read(r, byteOrder, &streamLen); err != nil {
			if err == io.EOF {
				break
			}

			return nil, err
		}

		streamBytes := make([]byte, streamLen)
		if _, err := r.Read(streamBytes); err != nil {
			return nil, err
		}
		streamReader := bytes.NewReader(streamBytes)

		// Decode the contents into the htlc fields.
		var (
			htlc                    InvoiceHTLC
			key                     CircuitKey
			chanID                  uint64
			state                   uint8
			acceptTime, resolveTime uint64
			amt                     uint64
		)
		tlvStream, err := tlv.NewStream(
			tlv.MakePrimitiveRecord(chanIDType, &chanID),
			tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID),
			tlv.MakePrimitiveRecord(amtType, &amt),
			tlv.MakePrimitiveRecord(
				acceptHeightType, &htlc.AcceptHeight,
			),
			tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime),
			tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime),
			tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry),
			tlv.MakePrimitiveRecord(stateType, &state),
		)
		if err != nil {
			return nil, err
		}

		if err := tlvStream.Decode(streamReader); err != nil {
			return nil, err
		}

		key.ChanID = lnwire.NewShortChanIDFromInt(chanID)
		htlc.AcceptTime = time.Unix(0, int64(acceptTime))
		htlc.ResolveTime = time.Unix(0, int64(resolveTime))
		htlc.State = HtlcState(state)
		htlc.Amt = lnwire.MilliSatoshi(amt)

		htlcs[key] = &htlc
	}

	return htlcs, nil
}
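
// Example: a serialize/deserialize round trip for an invoice. An
// illustrative sketch, not part of the original file:
//
//	inv := Invoice{
//		Memo:         []byte("coffee"),
//		CreationDate: time.Now(),
//		Terms: ContractTerm{
//			Value: lnwire.MilliSatoshi(1000),
//			State: ContractOpen,
//		},
//		Htlcs: make(map[CircuitKey]*InvoiceHTLC),
//	}
//
//	var buf bytes.Buffer
//	if err := serializeInvoice(&buf, &inv); err != nil {
//		// handle err
//	}
//	decoded, err := deserializeInvoice(&buf)
//	if err != nil {
//		// handle err
//	}
//	_ = decoded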
55
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/legacy_serialization.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
package migration_01_to_11

import (
	"io"
)

// deserializeCloseChannelSummaryV6 reads the v6 database format for
// ChannelCloseSummary.
//
// NOTE: deprecated, only for migration.
func deserializeCloseChannelSummaryV6(r io.Reader) (*ChannelCloseSummary, error) {
	c := &ChannelCloseSummary{}

	err := ReadElements(r,
		&c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
		&c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
		&c.TimeLockedBalance, &c.CloseType, &c.IsPending,
	)
	if err != nil {
		return nil, err
	}

	// We'll now check to see if the channel close summary was encoded with
	// any of the additional optional fields.
	err = ReadElements(r, &c.RemoteCurrentRevocation)
	switch {
	case err == io.EOF:
		return c, nil

	// If we got a non-eof error, then we know there's an actual issue.
	// Otherwise, it may have been the case that this summary didn't have
	// the set of optional fields.
	case err != nil:
		return nil, err
	}

	if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
		return nil, err
	}

	// Finally, we'll attempt to read the next unrevoked commitment point
	// for the remote party. If we closed the channel before receiving a
	// funding locked message, then this can be nil. As a result, we'll use
	// the same technique to read the field, only if there's still data
	// left in the buffer.
	err = ReadElements(r, &c.RemoteNextRevocation)
	if err != nil && err != io.EOF {
		// If we got a non-eof error, then we know there's an actual
		// issue. Otherwise, it may have been the case that this
		// summary didn't have the set of optional fields.
		return nil, err
	}

	return c, nil
}
14
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/log.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
package migration_01_to_11

import (
	"github.com/btcsuite/btclog"
)

// log is a logger that is initialized as disabled. This means the package will
// not perform any logging by default until a logger is set.
var log = btclog.Disabled

// UseLogger uses a specified Logger to output package logging info.
func UseLogger(logger btclog.Logger) {
	log = logger
}
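
// Example: wiring this package's logger into a host application. An
// illustrative sketch, not part of the original file; the backend
// setup follows the btclog API:
//
//	backend := btclog.NewBackend(os.Stdout)
//	logger := backend.Logger("CHDB")
//	UseLogger(logger)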
39
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/meta.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
package migration_01_to_11

import (
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var (
	// metaBucket stores all the meta information concerning the state of
	// the database.
	metaBucket = []byte("metadata")

	// dbVersionKey is a boltdb key and it's used for storing/retrieving
	// the current database version.
	dbVersionKey = []byte("dbp")
)

// Meta structure holds the database meta information.
type Meta struct {
	// DbVersionNumber is the current schema version of the database.
	DbVersionNumber uint32
}

// putMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported PutMeta method for
// more information.
func putMeta(meta *Meta, tx kvdb.RwTx) error {
	metaBucket, err := tx.CreateTopLevelBucket(metaBucket)
	if err != nil {
		return err
	}

	return putDbVersion(metaBucket, meta)
}

func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) error {
	scratch := make([]byte, 4)
	byteOrder.PutUint32(scratch, meta.DbVersionNumber)
	return metaBucket.Put(dbVersionKey, scratch)
}
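
// Example: reading the stored version back. An illustrative sketch,
// not part of the original file; it simply mirrors putDbVersion:
//
//	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
//		bucket := tx.ReadBucket(metaBucket)
//		if bucket == nil {
//			return fmt.Errorf("metadata bucket not found")
//		}
//		versionBytes := bucket.Get(dbVersionKey)
//		if len(versionBytes) != 4 {
//			return fmt.Errorf("malformed db version")
//		}
//		version := byteOrder.Uint32(versionBytes)
//		_ = version
//		return nil
//	})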
496
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/migration_09_legacy_serialization.go
generated
vendored
Normal file
@@ -0,0 +1,496 @@
package migration_01_to_11

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"sort"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnwire"
)

var (
	// paymentBucket is the name of the bucket within the database that
	// stores all data related to payments.
	//
	// Within the payments bucket, each invoice is keyed by its invoice ID
	// which is a monotonically increasing uint64. BoltDB's sequence
	// feature is used for generating monotonically increasing ids.
	//
	// NOTE: Deprecated. Kept around for migration purposes.
	paymentBucket = []byte("payments")

	// paymentStatusBucket is the name of the bucket within the database
	// that stores the status of a payment indexed by the payment's
	// preimage.
	//
	// NOTE: Deprecated. Kept around for migration purposes.
	paymentStatusBucket = []byte("payment-status")
)

// outgoingPayment represents a successful payment between the daemon and a
// remote node. Details such as the total fee paid, and the time of the payment
// are stored.
//
// NOTE: Deprecated. Kept around for migration purposes.
type outgoingPayment struct {
	Invoice

	// Fee is the total fee paid for the payment in milli-satoshis.
	Fee lnwire.MilliSatoshi

	// TimeLockLength is the total cumulative time-lock in the HTLC
	// extended from the second-to-last hop to the destination.
	TimeLockLength uint32

	// Path encodes the path the payment took through the network. The path
	// excludes the outgoing node and consists of the hex-encoded
	// compressed public key of each of the nodes involved in the payment.
	Path [][33]byte

	// PaymentPreimage is the preImage of a successful payment. This is used
	// to calculate the PaymentHash as well as serve as a proof of payment.
	PaymentPreimage [32]byte
}

// addPayment saves a successful payment to the database. It is assumed that
// all payments are sent using unique payment hashes.
//
// NOTE: Deprecated. Kept around for migration purposes.
func (db *DB) addPayment(payment *outgoingPayment) error {
	// Validate the fields of the inner invoice within the outgoing
	// payment; these must also adhere to the same constraints as regular
	// invoices.
	if err := validateInvoice(&payment.Invoice); err != nil {
		return err
	}

	// We first serialize the payment before starting the database
	// transaction so we can avoid creating a DB payment in the case of a
	// serialization error.
	var b bytes.Buffer
	if err := serializeOutgoingPayment(&b, payment); err != nil {
		return err
	}
	paymentBytes := b.Bytes()

	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		payments, err := tx.CreateTopLevelBucket(paymentBucket)
		if err != nil {
			return err
		}

		// Obtain the new unique sequence number for this payment.
		paymentID, err := payments.NextSequence()
		if err != nil {
			return err
		}

		// We use BigEndian for keys as it orders keys in
		// ascending order. This allows bucket scans to order payments
		// in the order in which they were created.
		paymentIDBytes := make([]byte, 8)
		binary.BigEndian.PutUint64(paymentIDBytes, paymentID)

		return payments.Put(paymentIDBytes, paymentBytes)
	})
}

// fetchAllPayments returns all outgoing payments in DB.
//
// NOTE: Deprecated. Kept around for migration purposes.
func (db *DB) fetchAllPayments() ([]*outgoingPayment, error) {
	var payments []*outgoingPayment

	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		bucket := tx.ReadBucket(paymentBucket)
		if bucket == nil {
			return ErrNoPaymentsCreated
		}

		return bucket.ForEach(func(k, v []byte) error {
			// If the value is nil, then we ignore it as it may be
			// a sub-bucket.
			if v == nil {
				return nil
			}

			r := bytes.NewReader(v)
			payment, err := deserializeOutgoingPayment(r)
			if err != nil {
				return err
			}

			payments = append(payments, payment)
			return nil
		})
	})
	if err != nil {
		return nil, err
	}

	return payments, nil
}

// fetchPaymentStatus returns the payment status for outgoing payment.
// If status of the payment isn't found, it will default to "StatusUnknown".
//
// NOTE: Deprecated. Kept around for migration purposes.
func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, error) {
	var paymentStatus = StatusUnknown
	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		var err error
		paymentStatus, err = fetchPaymentStatusTx(tx, paymentHash)
		return err
	})
	if err != nil {
		return StatusUnknown, err
	}

	return paymentStatus, nil
}

// fetchPaymentStatusTx is a helper method that returns the payment status for
// outgoing payment. If status of the payment isn't found, it will default to
// "StatusUnknown". It accepts the boltdb transactions such that this method
// can be composed into other atomic operations.
//
// NOTE: Deprecated. Kept around for migration purposes.
func fetchPaymentStatusTx(tx kvdb.ReadTx, paymentHash [32]byte) (PaymentStatus, error) {
	// The default status for all payments that aren't recorded in the
	// database.
	var paymentStatus = StatusUnknown

	bucket := tx.ReadBucket(paymentStatusBucket)
	if bucket == nil {
		return paymentStatus, nil
	}

	paymentStatusBytes := bucket.Get(paymentHash[:])
	if paymentStatusBytes == nil {
		return paymentStatus, nil
	}

	paymentStatus.FromBytes(paymentStatusBytes)

	return paymentStatus, nil
}

func serializeOutgoingPayment(w io.Writer, p *outgoingPayment) error {
	var scratch [8]byte

	if err := serializeInvoiceLegacy(w, &p.Invoice); err != nil {
		return err
	}

	byteOrder.PutUint64(scratch[:], uint64(p.Fee))
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	// First write out the length of the bytes to prefix the value.
	pathLen := uint32(len(p.Path))
	byteOrder.PutUint32(scratch[:4], pathLen)
	if _, err := w.Write(scratch[:4]); err != nil {
		return err
	}

	// Then with the path written, we write out the series of public keys
	// involved in the path.
	for _, hop := range p.Path {
		if _, err := w.Write(hop[:]); err != nil {
			return err
		}
	}

	byteOrder.PutUint32(scratch[:4], p.TimeLockLength)
	if _, err := w.Write(scratch[:4]); err != nil {
		return err
	}

	if _, err := w.Write(p.PaymentPreimage[:]); err != nil {
		return err
	}

	return nil
}

func deserializeOutgoingPayment(r io.Reader) (*outgoingPayment, error) {
	var scratch [8]byte

	p := &outgoingPayment{}

	inv, err := deserializeInvoiceLegacy(r)
	if err != nil {
		return nil, err
	}
	p.Invoice = inv

	if _, err := r.Read(scratch[:]); err != nil {
		return nil, err
	}
	p.Fee = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))

	if _, err = r.Read(scratch[:4]); err != nil {
		return nil, err
	}
	pathLen := byteOrder.Uint32(scratch[:4])

	path := make([][33]byte, pathLen)
	for i := uint32(0); i < pathLen; i++ {
		if _, err := r.Read(path[i][:]); err != nil {
			return nil, err
		}
	}
	p.Path = path

	if _, err = r.Read(scratch[:4]); err != nil {
		return nil, err
	}
	p.TimeLockLength = byteOrder.Uint32(scratch[:4])

	if _, err := r.Read(p.PaymentPreimage[:]); err != nil {
		return nil, err
	}

	return p, nil
}
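
// Example: a round trip through the legacy outgoing payment encoding.
// An illustrative sketch, not part of the original file; it assumes
// the serializeInvoiceLegacy/deserializeInvoiceLegacy helpers
// referenced above:
//
//	p := &outgoingPayment{
//		Invoice: Invoice{
//			CreationDate: time.Now(),
//			Htlcs:        make(map[CircuitKey]*InvoiceHTLC),
//		},
//		Fee:            lnwire.MilliSatoshi(10),
//		TimeLockLength: 144,
//	}
//
//	var buf bytes.Buffer
//	if err := serializeOutgoingPayment(&buf, p); err != nil {
//		// handle err
//	}
//	decoded, err := deserializeOutgoingPayment(&buf)
//	if err != nil {
//		// handle err
//	}
//	_ = decoded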

// serializePaymentAttemptInfoMigration9 is the serializePaymentAttemptInfo
// version as existed when migration #9 was created. We keep this around, along
// with the methods below to ensure that clients that upgrade will use the
// correct version of this method.
func serializePaymentAttemptInfoMigration9(w io.Writer, a *PaymentAttemptInfo) error {
	if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil {
		return err
	}

	if err := serializeRouteMigration9(w, a.Route); err != nil {
		return err
	}

	return nil
}

func serializeHopMigration9(w io.Writer, h *Hop) error {
	if err := WriteElements(w,
		h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock,
		h.AmtToForward,
	); err != nil {
		return err
	}

	return nil
}

func serializeRouteMigration9(w io.Writer, r Route) error {
	if err := WriteElements(w,
		r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:],
	); err != nil {
		return err
	}

	if err := WriteElements(w, uint32(len(r.Hops))); err != nil {
		return err
	}

	for _, h := range r.Hops {
		if err := serializeHopMigration9(w, h); err != nil {
			return err
		}
	}

	return nil
}

func deserializePaymentAttemptInfoMigration9(r io.Reader) (*PaymentAttemptInfo, error) {
	a := &PaymentAttemptInfo{}
	err := ReadElements(r, &a.PaymentID, &a.SessionKey)
	if err != nil {
		return nil, err
	}
	a.Route, err = deserializeRouteMigration9(r)
	if err != nil {
		return nil, err
	}
	return a, nil
}

func deserializeRouteMigration9(r io.Reader) (Route, error) {
	rt := Route{}
	if err := ReadElements(r,
		&rt.TotalTimeLock, &rt.TotalAmount,
	); err != nil {
		return rt, err
	}

	var pub []byte
	if err := ReadElements(r, &pub); err != nil {
		return rt, err
	}
	copy(rt.SourcePubKey[:], pub)

	var numHops uint32
	if err := ReadElements(r, &numHops); err != nil {
		return rt, err
	}

	var hops []*Hop
	for i := uint32(0); i < numHops; i++ {
		hop, err := deserializeHopMigration9(r)
		if err != nil {
			return rt, err
		}
		hops = append(hops, hop)
	}
	rt.Hops = hops

	return rt, nil
}

func deserializeHopMigration9(r io.Reader) (*Hop, error) {
	h := &Hop{}

	var pub []byte
	if err := ReadElements(r, &pub); err != nil {
		return nil, err
	}
	copy(h.PubKeyBytes[:], pub)

	if err := ReadElements(r,
		&h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward,
	); err != nil {
		return nil, err
	}

	return h, nil
}

// fetchPaymentsMigration9 returns all sent payments found in the DB using the
// payment attempt info format that was present as of migration #9. We need
// this as otherwise, the current FetchPayments version will use the latest
// decoding format. Note that we only need this for the
// TestOutgoingPaymentsMigration migration test case.
func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) {
	var payments []*Payment

	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		paymentsBucket := tx.ReadBucket(paymentsRootBucket)
		if paymentsBucket == nil {
			return nil
		}

		return paymentsBucket.ForEach(func(k, v []byte) error {
			bucket := paymentsBucket.NestedReadBucket(k)
			if bucket == nil {
				// We only expect sub-buckets to be found in
				// this top-level bucket.
				return fmt.Errorf("non bucket element in " +
					"payments bucket")
			}

			p, err := fetchPaymentMigration9(bucket)
			if err != nil {
				return err
			}

			payments = append(payments, p)

			// For older versions of lnd, duplicate payments to a
			// payment hash were possible. These will be found in a
			// sub-bucket indexed by their sequence number if
			// available.
			dup := bucket.NestedReadBucket(paymentDuplicateBucket)
			if dup == nil {
				return nil
			}

			return dup.ForEach(func(k, v []byte) error {
				subBucket := dup.NestedReadBucket(k)
				if subBucket == nil {
					// We expect one bucket for each
					// duplicate to be found.
					return fmt.Errorf("non bucket element " +
						"in duplicate bucket")
				}

				p, err := fetchPaymentMigration9(subBucket)
				if err != nil {
					return err
				}

				payments = append(payments, p)
				return nil
			})
		})
	})
	if err != nil {
		return nil, err
	}

	// Before returning, sort the payments by their sequence number.
	sort.Slice(payments, func(i, j int) bool {
		return payments[i].sequenceNum < payments[j].sequenceNum
	})

	return payments, nil
}

func fetchPaymentMigration9(bucket kvdb.ReadBucket) (*Payment, error) {
	var (
		err error
		p   = &Payment{}
	)

	seqBytes := bucket.Get(paymentSequenceKey)
	if seqBytes == nil {
		return nil, fmt.Errorf("sequence number not found")
	}

	p.sequenceNum = binary.BigEndian.Uint64(seqBytes)

	// Get the payment status.
	p.Status = fetchPaymentStatus(bucket)

	// Get the PaymentCreationInfo.
	b := bucket.Get(paymentCreationInfoKey)
	if b == nil {
		return nil, fmt.Errorf("creation info not found")
	}

	r := bytes.NewReader(b)
	p.Info, err = deserializePaymentCreationInfo(r)
	if err != nil {
		return nil, err
	}

	// Get the PaymentAttemptInfo. This can be unset.
	b = bucket.Get(paymentAttemptInfoKey)
	if b != nil {
		r = bytes.NewReader(b)
		p.Attempt, err = deserializePaymentAttemptInfoMigration9(r)
		if err != nil {
			return nil, err
		}
	}

	// Get the payment preimage. This is only found for
	// completed payments.
	b = bucket.Get(paymentSettleInfoKey)
	if b != nil {
		var preimg lntypes.Preimage
		copy(preimg[:], b[:])
		p.PaymentPreimage = &preimg
	}

	// Get failure reason if available.
	b = bucket.Get(paymentFailInfoKey)
	if b != nil {
		reason := FailureReason(b[0])
		p.Failure = &reason
	}

	return p, nil
}
235
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/migration_10_route_tlv_records.go
generated
vendored
Normal file
@@ -0,0 +1,235 @@
package migration_01_to_11

import (
	"bytes"
	"io"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// MigrateRouteSerialization migrates the way we serialize routes across the
// entire database. At the time of writing of this migration, this includes our
// payment attempts, as well as the payment results in mission control.
func MigrateRouteSerialization(tx kvdb.RwTx) error {
	// First, we'll do all the payment attempts.
	rootPaymentBucket := tx.ReadWriteBucket(paymentsRootBucket)
	if rootPaymentBucket == nil {
		return nil
	}

	// As we can't mutate a bucket while we're iterating over it with
	// ForEach, we'll need to collect all the known payment hashes in
	// memory first.
	var payHashes [][]byte
	err := rootPaymentBucket.ForEach(func(k, v []byte) error {
		if v != nil {
			return nil
		}

		payHashes = append(payHashes, k)
		return nil
	})
	if err != nil {
		return err
	}

	// Now that we have all the payment hashes, we can carry out the
	// migration itself.
	for _, payHash := range payHashes {
		payHashBucket := rootPaymentBucket.NestedReadWriteBucket(payHash)

		// First, we'll migrate the main (non duplicate) payment to
		// this hash.
		err := migrateAttemptEncoding(tx, payHashBucket)
		if err != nil {
			return err
		}

		// Now that we've migrated the main payment, we'll also check
		// for any duplicate payments to the same payment hash.
		dupBucket := payHashBucket.NestedReadWriteBucket(paymentDuplicateBucket)

		// If there's no dup bucket, then we can move on to the next
		// payment.
		if dupBucket == nil {
			continue
		}

		// Otherwise, we'll now iterate through all the duplicate pay
		// hashes and migrate those.
		var dupSeqNos [][]byte
		err = dupBucket.ForEach(func(k, v []byte) error {
			dupSeqNos = append(dupSeqNos, k)
			return nil
		})
		if err != nil {
			return err
		}

		// Now in this second pass, we'll re-serialize their duplicate
		// payment attempts under the new encoding.
		for _, seqNo := range dupSeqNos {
			dupPayHashBucket := dupBucket.NestedReadWriteBucket(seqNo)
			err := migrateAttemptEncoding(tx, dupPayHashBucket)
			if err != nil {
				return err
			}
		}
	}

	log.Infof("Migration of route/hop serialization complete!")

	log.Infof("Migrating to new mission control store by clearing " +
		"existing data")

	resultsKey := []byte("missioncontrol-results")
	err = tx.DeleteTopLevelBucket(resultsKey)
	if err != nil && err != kvdb.ErrBucketNotFound {
		return err
	}

	log.Infof("Migration to new mission control completed!")

	return nil
}
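
// Example: migrations like the one above run inside a single
// read-write transaction, so a failure leaves the database untouched.
// An illustrative sketch, not part of the original file:
//
//	err := kvdb.Update(db, func(tx kvdb.RwTx) error {
//		return MigrateRouteSerialization(tx)
//	})
//	if err != nil {
//		// handle err: the transaction was rolled back
//	}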

// migrateAttemptEncoding migrates payment attempts using the legacy format to
// the new format.
func migrateAttemptEncoding(tx kvdb.RwTx, payHashBucket kvdb.RwBucket) error {
	payAttemptBytes := payHashBucket.Get(paymentAttemptInfoKey)
	if payAttemptBytes == nil {
		return nil
	}

	// For our migration, we'll first read out the existing payment attempt
	// using the legacy serialization of the attempt.
	payAttemptReader := bytes.NewReader(payAttemptBytes)
	payAttempt, err := deserializePaymentAttemptInfoLegacy(
		payAttemptReader,
	)
	if err != nil {
		return err
	}

	// Now that we have the old attempts, we'll explicitly mark this as
	// needing a legacy payload, since after this migration, the modern
	// payload will be the default if signalled.
	for _, hop := range payAttempt.Route.Hops {
		hop.LegacyPayload = true
	}

	// Finally, we'll write out the payment attempt using the new encoding.
	var b bytes.Buffer
	err = serializePaymentAttemptInfo(&b, payAttempt)
	if err != nil {
		return err
	}

	return payHashBucket.Put(paymentAttemptInfoKey, b.Bytes())
}

func deserializePaymentAttemptInfoLegacy(r io.Reader) (*PaymentAttemptInfo, error) {
	a := &PaymentAttemptInfo{}
	err := ReadElements(r, &a.PaymentID, &a.SessionKey)
	if err != nil {
		return nil, err
	}
	a.Route, err = deserializeRouteLegacy(r)
	if err != nil {
		return nil, err
	}
	return a, nil
}

func serializePaymentAttemptInfoLegacy(w io.Writer, a *PaymentAttemptInfo) error {
	if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil {
		return err
	}

	if err := serializeRouteLegacy(w, a.Route); err != nil {
		return err
	}

	return nil
}

func deserializeHopLegacy(r io.Reader) (*Hop, error) {
	h := &Hop{}

	var pub []byte
	if err := ReadElements(r, &pub); err != nil {
		return nil, err
	}
	copy(h.PubKeyBytes[:], pub)

	if err := ReadElements(r,
		&h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward,
	); err != nil {
		return nil, err
	}

	return h, nil
}

func serializeHopLegacy(w io.Writer, h *Hop) error {
	if err := WriteElements(w,
		h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock,
		h.AmtToForward,
	); err != nil {
		return err
	}

	return nil
}

func deserializeRouteLegacy(r io.Reader) (Route, error) {
	rt := Route{}
	if err := ReadElements(r,
		&rt.TotalTimeLock, &rt.TotalAmount,
	); err != nil {
		return rt, err
	}

	var pub []byte
	if err := ReadElements(r, &pub); err != nil {
		return rt, err
	}
	copy(rt.SourcePubKey[:], pub)

	var numHops uint32
	if err := ReadElements(r, &numHops); err != nil {
		return rt, err
	}

	var hops []*Hop
	for i := uint32(0); i < numHops; i++ {
		hop, err := deserializeHopLegacy(r)
		if err != nil {
			return rt, err
		}
		hops = append(hops, hop)
	}
	rt.Hops = hops

	return rt, nil
}

func serializeRouteLegacy(w io.Writer, r Route) error {
	if err := WriteElements(w,
		r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:],
	); err != nil {
		return err
	}

	if err := WriteElements(w, uint32(len(r.Hops))); err != nil {
		return err
	}

	for _, h := range r.Hops {
		if err := serializeHopLegacy(w, h); err != nil {
			return err
		}
	}

	return nil
}
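
// Example: a round trip through the legacy route encoding above. An
// illustrative sketch, not part of the original file:
//
//	route := Route{
//		TotalTimeLock: 640000,
//		TotalAmount:   lnwire.MilliSatoshi(1000),
//		Hops: []*Hop{{
//			ChannelID:        1234,
//			OutgoingTimeLock: 639000,
//			AmtToForward:     lnwire.MilliSatoshi(1000),
//		}},
//	}
//
//	var buf bytes.Buffer
//	if err := serializeRouteLegacy(&buf, route); err != nil {
//		// handle err
//	}
//	decoded, err := deserializeRouteLegacy(&buf)
//	if err != nil {
//		// handle err
//	}
//	_ = decoded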
230
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/migration_11_invoices.go
generated
vendored
Normal file
@@ -0,0 +1,230 @@
|
||||
package migration_01_to_11
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
bitcoinCfg "github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightningnetwork/lnd/channeldb/kvdb"
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
"github.com/lightningnetwork/lnd/zpay32"
|
||||
litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
|
||||
)
|
||||
|
||||

// MigrateInvoices adds invoice htlcs and a separate cltv delta field to the
// invoices.
func MigrateInvoices(tx kvdb.RwTx) error {
	log.Infof("Migrating invoices to new invoice format")

	invoiceB := tx.ReadWriteBucket(invoiceBucket)
	if invoiceB == nil {
		return nil
	}

	// Iterate through the entire key space of the top-level invoice
	// bucket. Each key with a non-nil value stores an invoice ID that maps
	// to the corresponding invoice. Store those keys first, because it
	// isn't safe to modify the bucket inside a ForEach loop.
	var invoiceKeys [][]byte
	err := invoiceB.ForEach(func(k, v []byte) error {
		if v == nil {
			return nil
		}

		invoiceKeys = append(invoiceKeys, k)

		return nil
	})
	if err != nil {
		return err
	}

	nets := []*bitcoinCfg.Params{
		&bitcoinCfg.MainNetParams, &bitcoinCfg.SimNetParams,
		&bitcoinCfg.RegressionNetParams, &bitcoinCfg.TestNet3Params,
	}

	ltcNets := []*litecoinCfg.Params{
		&litecoinCfg.MainNetParams, &litecoinCfg.SimNetParams,
		&litecoinCfg.RegressionNetParams, &litecoinCfg.TestNet4Params,
	}
	for _, net := range ltcNets {
		var convertedNet bitcoinCfg.Params
		convertedNet.Bech32HRPSegwit = net.Bech32HRPSegwit
		nets = append(nets, &convertedNet)
	}

	// Iterate over all stored keys and migrate the invoices.
	for _, k := range invoiceKeys {
		v := invoiceB.Get(k)

		// Deserialize the invoice with the deserializing function that
		// was in use for this version of the database.
		invoiceReader := bytes.NewReader(v)
		invoice, err := deserializeInvoiceLegacy(invoiceReader)
		if err != nil {
			return err
		}

		if invoice.Terms.State == ContractAccepted {
			return fmt.Errorf("cannot upgrade with invoice(s) " +
				"in accepted state, see release notes")
		}

		// Try to decode the payment request for every possible net to
		// avoid passing the active network to channeldb. That would be
		// a layering violation, while this migration only runs once
		// and will likely be removed in the future.
		var payReq *zpay32.Invoice
		for _, net := range nets {
			payReq, err = zpay32.Decode(
				string(invoice.PaymentRequest), net,
			)
			if err == nil {
				break
			}
		}
		if payReq == nil {
			return fmt.Errorf("cannot decode payreq")
		}
		invoice.FinalCltvDelta = int32(payReq.MinFinalCLTVExpiry())
		invoice.Expiry = payReq.Expiry()

		// Serialize the invoice in the new format and use it to
		// replace the old invoice in the database.
		var buf bytes.Buffer
		if err := serializeInvoice(&buf, &invoice); err != nil {
			return err
		}

		err = invoiceB.Put(k, buf.Bytes())
		if err != nil {
			return err
		}
	}

	log.Infof("Migration of invoices completed!")
	return nil
}
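
// A minimal driving sketch, assuming a kvdb backend handle (db) as used
// elsewhere in this package: migrations receive an open read-write
// transaction, so a failure rolls the whole invoice bucket back atomically.
//
//	err := kvdb.Update(db, func(tx kvdb.RwTx) error {
//		return MigrateInvoices(tx)
//	})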

func deserializeInvoiceLegacy(r io.Reader) (Invoice, error) {
	var err error
	invoice := Invoice{}

	// TODO(roasbeef): use read full everywhere
	invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "")
	if err != nil {
		return invoice, err
	}
	invoice.Receipt, err = wire.ReadVarBytes(r, 0, MaxReceiptSize, "")
	if err != nil {
		return invoice, err
	}

	invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "")
	if err != nil {
		return invoice, err
	}

	birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth")
	if err != nil {
		return invoice, err
	}
	if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil {
		return invoice, err
	}

	settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled")
	if err != nil {
		return invoice, err
	}
	if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil {
		return invoice, err
	}

	if _, err := io.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil {
		return invoice, err
	}
	var scratch [8]byte
	if _, err := io.ReadFull(r, scratch[:]); err != nil {
		return invoice, err
	}
	invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))

	if err := binary.Read(r, byteOrder, &invoice.Terms.State); err != nil {
		return invoice, err
	}

	if err := binary.Read(r, byteOrder, &invoice.AddIndex); err != nil {
		return invoice, err
	}
	if err := binary.Read(r, byteOrder, &invoice.SettleIndex); err != nil {
		return invoice, err
	}
	if err := binary.Read(r, byteOrder, &invoice.AmtPaid); err != nil {
		return invoice, err
	}

	return invoice, nil
}

// serializeInvoiceLegacy serializes an invoice in the format of the previous
// db version.
func serializeInvoiceLegacy(w io.Writer, i *Invoice) error {
	if err := wire.WriteVarBytes(w, 0, i.Memo[:]); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(w, 0, i.Receipt[:]); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(w, 0, i.PaymentRequest[:]); err != nil {
		return err
	}

	birthBytes, err := i.CreationDate.MarshalBinary()
	if err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, birthBytes); err != nil {
		return err
	}

	settleBytes, err := i.SettleDate.MarshalBinary()
	if err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, settleBytes); err != nil {
		return err
	}

	if _, err := w.Write(i.Terms.PaymentPreimage[:]); err != nil {
		return err
	}

	var scratch [8]byte
	byteOrder.PutUint64(scratch[:], uint64(i.Terms.Value))
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, i.Terms.State); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, i.AddIndex); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, i.SettleIndex); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, int64(i.AmtPaid)); err != nil {
		return err
	}

	return nil
}
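
// For orientation, a sketch of the trailing layout implied by the two
// functions above: the invoice encoding ends with three fixed-width fields,
// which is why other migrations in this package pad legacy records with 24
// zero bytes before re-decoding them.
//
//	AddIndex    uint64 // 8 bytes
//	SettleIndex uint64 // 8 bytes
//	AmtPaid     int64  // 8 bytes -> 24 bytes total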
938
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/migrations.go
generated
vendored
Normal file
938
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/migrations.go
generated
vendored
Normal file
@@ -0,0 +1,938 @@
package migration_01_to_11

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"

	"github.com/btcsuite/btcd/btcec"
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lnwire"
)

// MigrateNodeAndEdgeUpdateIndex is a migration function that will update the
// database from version 0 to version 1. In version 1, we add two new indexes
// (one for nodes and one for edges) to keep track of the last time a node or
// edge was updated on the network. These new indexes allow us to implement
// the newly added graph sync protocol.
func MigrateNodeAndEdgeUpdateIndex(tx kvdb.RwTx) error {
	// First, we'll populate the node portion of the new index. Before we
	// can add new values to the index, we'll first create the new bucket
	// where these items will be housed.
	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
	if err != nil {
		return fmt.Errorf("unable to create node bucket: %v", err)
	}
	nodeUpdateIndex, err := nodes.CreateBucketIfNotExists(
		nodeUpdateIndexBucket,
	)
	if err != nil {
		return fmt.Errorf("unable to create node update index: %v", err)
	}

	log.Infof("Populating new node update index bucket")

	// Now that we know the bucket has been created, we'll iterate over the
	// entire node bucket so we can add the (updateTime || nodePub) key
	// into the node update index.
	err = nodes.ForEach(func(nodePub, nodeInfo []byte) error {
		if len(nodePub) != 33 {
			return nil
		}

		log.Tracef("Adding %x to node update index", nodePub)

		// The first 8 bytes of a node's serialized data are the update
		// time, so we can extract that without decoding the entire
		// structure.
		updateTime := nodeInfo[:8]

		// Now that we have the update time, we can construct the key
		// to insert into the index.
		var indexKey [8 + 33]byte
		copy(indexKey[:8], updateTime)
		copy(indexKey[8:], nodePub)

		return nodeUpdateIndex.Put(indexKey[:], nil)
	})
	if err != nil {
		return fmt.Errorf("unable to update node indexes: %v", err)
	}

	log.Infof("Populating new edge update index bucket")

	// With the set of nodes updated, we'll now update all edges to have a
	// corresponding entry in the edge update index.
	edges, err := tx.CreateTopLevelBucket(edgeBucket)
	if err != nil {
		return fmt.Errorf("unable to create edge bucket: %v", err)
	}
	edgeUpdateIndex, err := edges.CreateBucketIfNotExists(
		edgeUpdateIndexBucket,
	)
	if err != nil {
		return fmt.Errorf("unable to create edge update index: %v", err)
	}

	// We'll now run through each edge policy in the database, and update
	// the index to ensure each edge has the proper record.
	err = edges.ForEach(func(edgeKey, edgePolicyBytes []byte) error {
		if len(edgeKey) != 41 {
			return nil
		}

		// Now that we know this is the proper record, we'll grab the
		// channel ID (last 8 bytes of the key), and then decode the
		// edge policy so we can access the update time.
		chanID := edgeKey[33:]
		edgePolicyReader := bytes.NewReader(edgePolicyBytes)

		edgePolicy, err := deserializeChanEdgePolicy(
			edgePolicyReader, nodes,
		)
		if err != nil {
			return err
		}

		log.Tracef("Adding chan_id=%v to edge update index",
			edgePolicy.ChannelID)

		// We'll now construct the index key using the channel ID, and
		// the last time it was updated: (updateTime || chanID).
		var indexKey [8 + 8]byte
		byteOrder.PutUint64(
			indexKey[:], uint64(edgePolicy.LastUpdate.Unix()),
		)
		copy(indexKey[8:], chanID)

		return edgeUpdateIndex.Put(indexKey[:], nil)
	})
	if err != nil {
		return fmt.Errorf("unable to update edge indexes: %v", err)
	}

	log.Infof("Migration to node and edge update indexes complete!")

	return nil
}
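
// A sketch of the index layout produced above: entries are key-only (values
// are nil), so a time-ordered scan of either index is a plain forward cursor
// walk.
//
//	node update index key: updateTime (8 bytes, big endian) || nodePub (33 bytes)
//	edge update index key: updateTime (8 bytes, big endian) || chanID  (8 bytes)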

// MigrateInvoiceTimeSeries is a database migration that assigns all existing
// invoices an index in the add and/or the settle index. Additionally, all
// existing invoices will have their bytes padded out in order to encode the
// add+settle index as well as the amount paid.
func MigrateInvoiceTimeSeries(tx kvdb.RwTx) error {
	invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
	if err != nil {
		return err
	}

	addIndex, err := invoices.CreateBucketIfNotExists(
		addIndexBucket,
	)
	if err != nil {
		return err
	}
	settleIndex, err := invoices.CreateBucketIfNotExists(
		settleIndexBucket,
	)
	if err != nil {
		return err
	}

	log.Infof("Migrating invoice database to new time series format")

	// Now that we have all the buckets we need, we'll run through each
	// invoice in the database, and update it to reflect the new format
	// expected post migration.
	// NOTE: we store the converted invoices and put them back into the
	// database after the loop, since modifying the bucket within the
	// ForEach loop is not safe.
	var invoicesKeys [][]byte
	var invoicesValues [][]byte
	err = invoices.ForEach(func(invoiceNum, invoiceBytes []byte) error {
		// If this is a sub bucket, then we'll skip it.
		if invoiceBytes == nil {
			return nil
		}

		// First, we'll make a copy of the encoded invoice bytes.
		invoiceBytesCopy := make([]byte, len(invoiceBytes))
		copy(invoiceBytesCopy, invoiceBytes)

		// With the bytes copied over, we'll append 24 additional
		// bytes. We do this so we can decode the invoice under the new
		// serialization format.
		padding := bytes.Repeat([]byte{0}, 24)
		invoiceBytesCopy = append(invoiceBytesCopy, padding...)

		invoiceReader := bytes.NewReader(invoiceBytesCopy)
		invoice, err := deserializeInvoiceLegacy(invoiceReader)
		if err != nil {
			return fmt.Errorf("unable to decode invoice: %v", err)
		}

		// Now that we have the fully decoded invoice, we can update
		// the various indexes that we've added, and finally the
		// invoice itself before re-inserting it.

		// First, we'll get the new sequence in the addIndex in order
		// to create the proper mapping.
		nextAddSeqNo, err := addIndex.NextSequence()
		if err != nil {
			return err
		}
		var seqNoBytes [8]byte
		byteOrder.PutUint64(seqNoBytes[:], nextAddSeqNo)
		err = addIndex.Put(seqNoBytes[:], invoiceNum[:])
		if err != nil {
			return err
		}

		log.Tracef("Adding invoice (preimage=%x, add_index=%v) to add "+
			"time series", invoice.Terms.PaymentPreimage[:],
			nextAddSeqNo)

		// Next, we'll check if the invoice has been settled or not. If
		// so, then we'll also add it to the settle index.
		var nextSettleSeqNo uint64
		if invoice.Terms.State == ContractSettled {
			nextSettleSeqNo, err = settleIndex.NextSequence()
			if err != nil {
				return err
			}

			var seqNoBytes [8]byte
			byteOrder.PutUint64(seqNoBytes[:], nextSettleSeqNo)
			err := settleIndex.Put(seqNoBytes[:], invoiceNum)
			if err != nil {
				return err
			}

			invoice.AmtPaid = invoice.Terms.Value

			log.Tracef("Adding invoice (preimage=%x, "+
				"settle_index=%v) to settle time series",
				invoice.Terms.PaymentPreimage[:],
				nextSettleSeqNo)
		}

		// Finally, we'll update the invoice itself with the new
		// indexing information as well as the amount paid if it has
		// been settled or not.
		invoice.AddIndex = nextAddSeqNo
		invoice.SettleIndex = nextSettleSeqNo

		// We've fully migrated an invoice, so we'll now update the
		// invoice in-place.
		var b bytes.Buffer
		if err := serializeInvoiceLegacy(&b, &invoice); err != nil {
			return err
		}

		// Save the key and value pending update for after the ForEach
		// is done.
		invoicesKeys = append(invoicesKeys, invoiceNum)
		invoicesValues = append(invoicesValues, b.Bytes())
		return nil
	})
	if err != nil {
		return err
	}

	// Now put the converted invoices into the DB.
	for i := range invoicesKeys {
		key := invoicesKeys[i]
		value := invoicesValues[i]
		if err := invoices.Put(key, value); err != nil {
			return err
		}
	}

	log.Infof("Migration to invoice time series index complete!")

	return nil
}
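
// Illustrative sketch, assuming the bucket layout created above: the add
// index maps a monotonically increasing sequence number to an invoice
// number, so "invoices since add_index=N" becomes a cursor seek.
//
//	var start [8]byte
//	byteOrder.PutUint64(start[:], sinceAddIndex+1) // sinceAddIndex is hypothetical
//	c := addIndex.ReadCursor()
//	for k, invoiceNum := c.Seek(start[:]); k != nil; k, invoiceNum = c.Next() {
//		// invoiceNum keys back into the invoice bucket.
//	}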

// MigrateInvoiceTimeSeriesOutgoingPayments is a follow up to the
// migrateInvoiceTimeSeries migration. At the time of writing, the
// OutgoingPayment struct embeds an instance of the Invoice struct. As a
// result, we also need to migrate the internal invoice to the new format.
func MigrateInvoiceTimeSeriesOutgoingPayments(tx kvdb.RwTx) error {
	payBucket := tx.ReadWriteBucket(paymentBucket)
	if payBucket == nil {
		return nil
	}

	log.Infof("Migrating invoice database to new outgoing payment format")

	// We store the keys and values we want to modify since it is not safe
	// to modify them directly within the ForEach loop.
	var paymentKeys [][]byte
	var paymentValues [][]byte
	err := payBucket.ForEach(func(payID, paymentBytes []byte) error {
		log.Tracef("Migrating payment %x", payID[:])

		// The internal invoice for each payment only contains a
		// populated contract term and creation date; as a result,
		// most of the bytes will be "empty".

		// We'll calculate the end of the invoice index assuming a
		// "minimal" index that's embedded within the greater
		// OutgoingPayment. The breakdown is:
		// 3 bytes empty var bytes, 16 bytes creation date, 16 bytes
		// settled date, 32 bytes payment pre-image, 8 bytes value, 1
		// byte settled.
		endOfInvoiceIndex := 1 + 1 + 1 + 16 + 16 + 32 + 8 + 1

		// We'll now extract the prefix of the pure invoice embedded
		// within.
		invoiceBytes := paymentBytes[:endOfInvoiceIndex]

		// With the prefix extracted, we'll copy over the invoice, add
		// padding for the new 24 bytes of fields, and finally append
		// the remainder of the outgoing payment.
		paymentCopy := make([]byte, len(invoiceBytes))
		copy(paymentCopy[:], invoiceBytes)

		padding := bytes.Repeat([]byte{0}, 24)
		paymentCopy = append(paymentCopy, padding...)
		paymentCopy = append(
			paymentCopy, paymentBytes[endOfInvoiceIndex:]...,
		)

		// At this point, we now have the new format of the outgoing
		// payment, so we'll attempt to deserialize it to ensure the
		// bytes are properly formatted.
		paymentReader := bytes.NewReader(paymentCopy)
		_, err := deserializeOutgoingPayment(paymentReader)
		if err != nil {
			return fmt.Errorf("unable to deserialize payment: %v", err)
		}

		// Now that we know the modification was successful, we'll
		// store it to our slice of keys and values, and write it back
		// to disk in the new format after the ForEach loop is over.
		paymentKeys = append(paymentKeys, payID)
		paymentValues = append(paymentValues, paymentCopy)
		return nil
	})
	if err != nil {
		return err
	}

	// Finally store the updated payments to the bucket.
	for i := range paymentKeys {
		key := paymentKeys[i]
		value := paymentValues[i]
		if err := payBucket.Put(key, value); err != nil {
			return err
		}
	}

	log.Infof("Migration to outgoing payment invoices complete!")

	return nil
}

// MigrateEdgePolicies is a migration function that will update the edges
// bucket. It ensures that edges with unknown policies will also have an entry
// in the bucket. After the migration, there will be two edge entries for
// every channel, regardless of whether the policies are known.
func MigrateEdgePolicies(tx kvdb.RwTx) error {
	nodes := tx.ReadWriteBucket(nodeBucket)
	if nodes == nil {
		return nil
	}

	edges := tx.ReadWriteBucket(edgeBucket)
	if edges == nil {
		return nil
	}

	edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
	if edgeIndex == nil {
		return nil
	}

	// checkKey gets the policy from the database with a low-level call
	// so that it is still possible to distinguish between unknown and
	// not present.
	checkKey := func(channelId uint64, keyBytes []byte) error {
		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], channelId)

		_, err := fetchChanEdgePolicy(edges,
			channelID[:], keyBytes, nodes)

		if err == ErrEdgeNotFound {
			log.Tracef("Adding unknown edge policy for node %x, channel %v",
				keyBytes, channelId)

			err := putChanEdgePolicyUnknown(edges, channelId, keyBytes)
			if err != nil {
				return err
			}

			return nil
		}

		return err
	}

	// Iterate over all channels and check both edge policies.
	err := edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
		infoReader := bytes.NewReader(edgeInfoBytes)
		edgeInfo, err := deserializeChanEdgeInfo(infoReader)
		if err != nil {
			return err
		}

		for _, key := range [][]byte{edgeInfo.NodeKey1Bytes[:],
			edgeInfo.NodeKey2Bytes[:]} {

			if err := checkKey(edgeInfo.ChannelID, key); err != nil {
				return err
			}
		}

		return nil
	})

	if err != nil {
		return fmt.Errorf("unable to update edge policies: %v", err)
	}

	log.Infof("Migration of edge policies complete!")

	return nil
}

// PaymentStatusesMigration is a database migration intended to add a payment
// status for each existing payment entity in the bucket, making it possible
// to control status transitions and prevent cases such as double payment.
func PaymentStatusesMigration(tx kvdb.RwTx) error {
	// Get the bucket dedicated to storing statuses of payments,
	// where a key is payment hash, value is payment status.
	paymentStatuses, err := tx.CreateTopLevelBucket(paymentStatusBucket)
	if err != nil {
		return err
	}

	log.Infof("Migrating database to support payment statuses")

	circuitAddKey := []byte("circuit-adds")
	circuits := tx.ReadWriteBucket(circuitAddKey)
	if circuits != nil {
		log.Infof("Marking all known circuits with status InFlight")

		err = circuits.ForEach(func(k, v []byte) error {
			// Parse the first 8 bytes as the short chan ID for the
			// circuit. We'll skip all circuits that were not
			// locally initiated, which are those with non-zero
			// short chan IDs.
			chanID := binary.BigEndian.Uint64(k[:8])
			if chanID != 0 {
				return nil
			}

			// The payment hash is the third item in the serialized
			// payment circuit. The first two items are an AddRef
			// (10 bytes) and the incoming circuit key (16 bytes).
			const payHashOffset = 10 + 16

			paymentHash := v[payHashOffset : payHashOffset+32]

			return paymentStatuses.Put(
				paymentHash[:], StatusInFlight.Bytes(),
			)
		})
		if err != nil {
			return err
		}
	}

	log.Infof("Marking all existing payments with status Completed")

	// Get the bucket dedicated to storing payments.
	bucket := tx.ReadWriteBucket(paymentBucket)
	if bucket == nil {
		return nil
	}

	// For each payment in the bucket, deserialize the payment and mark it
	// as completed.
	err = bucket.ForEach(func(k, v []byte) error {
		// Ignore it if it is a sub-bucket.
		if v == nil {
			return nil
		}

		r := bytes.NewReader(v)
		payment, err := deserializeOutgoingPayment(r)
		if err != nil {
			return err
		}

		// Calculate payment hash for current payment.
		paymentHash := sha256.Sum256(payment.PaymentPreimage[:])

		// Update status for current payment to completed. If it fails,
		// the migration is aborted and the payment bucket is returned
		// to its previous state.
		return paymentStatuses.Put(paymentHash[:], StatusSucceeded.Bytes())
	})
	if err != nil {
		return err
	}

	log.Infof("Migration of payment statuses complete!")

	return nil
}
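
// A sketch of the circuit-add value layout assumed by the offset arithmetic
// above (taken from the comments, not re-derived here):
//
//	AddRef                10 bytes  offset 0
//	incoming circuit key  16 bytes  offset 10
//	payment hash          32 bytes  offset 26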

// MigratePruneEdgeUpdateIndex is a database migration that attempts to resolve
// some lingering bugs with regards to edge policies and their update index.
// Stale entries within the edge update index were not being properly pruned due
// to a miscalculation on the offset of an edge's policy last update. This
// migration also fixes the case where the public keys within edge policies were
// being serialized with an extra byte, causing an even greater error when
// attempting to perform the offset calculation described earlier.
func MigratePruneEdgeUpdateIndex(tx kvdb.RwTx) error {
	// To begin the migration, we'll retrieve the update index bucket. If it
	// does not exist, we have nothing left to do so we can simply exit.
	edges := tx.ReadWriteBucket(edgeBucket)
	if edges == nil {
		return nil
	}
	edgeUpdateIndex := edges.NestedReadWriteBucket(edgeUpdateIndexBucket)
	if edgeUpdateIndex == nil {
		return nil
	}

	// Retrieve some buckets that will be needed later on. These should
	// already exist given the assumption that the buckets above do as
	// well.
	edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
	if err != nil {
		return fmt.Errorf("error creating edge index bucket: %s", err)
	}
	if edgeIndex == nil {
		return fmt.Errorf("unable to create/fetch edge index " +
			"bucket")
	}
	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
	if err != nil {
		return fmt.Errorf("unable to make node bucket")
	}

	log.Info("Migrating database to properly prune edge update index")

	// We'll need to properly prune all the outdated entries within the edge
	// update index. To do so, we'll gather all of the existing policies
	// within the graph to re-populate them later on.
	var edgeKeys [][]byte
	err = edges.ForEach(func(edgeKey, edgePolicyBytes []byte) error {
		// All valid entries are indexed by a public key (33 bytes)
		// followed by a channel ID (8 bytes), so we'll skip any entries
		// with keys that do not match this.
		if len(edgeKey) != 33+8 {
			return nil
		}

		edgeKeys = append(edgeKeys, edgeKey)

		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to gather existing edge policies: %v",
			err)
	}

	log.Info("Constructing set of edge update entries to purge.")

	// Build the set of keys that we will remove from the edge update index.
	// This will include all keys contained within the bucket.
	var updateKeysToRemove [][]byte
	err = edgeUpdateIndex.ForEach(func(updKey, _ []byte) error {
		updateKeysToRemove = append(updateKeysToRemove, updKey)
		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to gather existing edge updates: %v",
			err)
	}

	log.Infof("Removing %d entries from edge update index.",
		len(updateKeysToRemove))

	// With the set of keys contained in the edge update index constructed,
	// we'll proceed in purging all of them from the index.
	for _, updKey := range updateKeysToRemove {
		if err := edgeUpdateIndex.Delete(updKey); err != nil {
			return err
		}
	}

	log.Infof("Repopulating edge update index with %d valid entries.",
		len(edgeKeys))

	// For each edge key, we'll retrieve the policy, deserialize it, and
	// re-add it to the different buckets. By doing so, we'll ensure that
	// all existing edge policies are serialized correctly within their
	// respective buckets and that the correct entries are populated within
	// the edge update index.
	for _, edgeKey := range edgeKeys {
		edgePolicyBytes := edges.Get(edgeKey)

		// Skip any entries with unknown policies as there will not be
		// any entries for them in the edge update index.
		if bytes.Equal(edgePolicyBytes[:], unknownPolicy) {
			continue
		}

		edgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgePolicyBytes), nodes,
		)
		if err != nil {
			return err
		}

		_, err = updateEdgePolicy(tx, edgePolicy)
		if err != nil {
			return err
		}
	}

	log.Info("Migration to properly prune edge update index complete!")

	return nil
}

// MigrateOptionalChannelCloseSummaryFields migrates the serialized format of
// ChannelCloseSummary to a format where optional fields' presence is indicated
// with boolean markers.
func MigrateOptionalChannelCloseSummaryFields(tx kvdb.RwTx) error {
	closedChanBucket := tx.ReadWriteBucket(closedChannelBucket)
	if closedChanBucket == nil {
		return nil
	}

	log.Info("Migrating to new closed channel format...")

	// We store the converted keys and values and put them back into the
	// database after the loop, since modifying the bucket within the
	// ForEach loop is not safe.
	var closedChansKeys [][]byte
	var closedChansValues [][]byte
	err := closedChanBucket.ForEach(func(chanID, summary []byte) error {
		r := bytes.NewReader(summary)

		// Read the old (v6) format from the database.
		c, err := deserializeCloseChannelSummaryV6(r)
		if err != nil {
			return err
		}

		// Serialize using the new format, and put back into the
		// bucket.
		var b bytes.Buffer
		if err := serializeChannelCloseSummary(&b, c); err != nil {
			return err
		}

		// Now that we know the modification was successful, we'll
		// store the key and value in our slices, and write them back
		// to disk in the new format after the ForEach loop is over.
		closedChansKeys = append(closedChansKeys, chanID)
		closedChansValues = append(closedChansValues, b.Bytes())
		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to update closed channels: %v", err)
	}

	// Now put the new format back into the DB.
	for i := range closedChansKeys {
		key := closedChansKeys[i]
		value := closedChansValues[i]
		if err := closedChanBucket.Put(key, value); err != nil {
			return err
		}
	}

	log.Info("Migration to new closed channel format complete!")

	return nil
}

var messageStoreBucket = []byte("message-store")

// MigrateGossipMessageStoreKeys migrates the key format for gossip messages
// found in the message store to a new one that takes into consideration the
// type of the message being stored.
func MigrateGossipMessageStoreKeys(tx kvdb.RwTx) error {
	// We'll start by retrieving the bucket in which these messages are
	// stored. If there isn't one, there's nothing left for us to do, so we
	// can avoid the migration.
	messageStore := tx.ReadWriteBucket(messageStoreBucket)
	if messageStore == nil {
		return nil
	}

	log.Info("Migrating to the gossip message store new key format")

	// Otherwise we'll proceed with the migration. We'll start by coalescing
	// all the current messages within the store, which are indexed by the
	// public key of the peer they should be sent to, followed by the short
	// channel ID of the channel the message belongs to. We should only
	// expect to find channel announcement signatures, as that was the only
	// supported message type previously.
	msgs := make(map[[33 + 8]byte]*lnwire.AnnounceSignatures)
	err := messageStore.ForEach(func(k, v []byte) error {
		var msgKey [33 + 8]byte
		copy(msgKey[:], k)

		msg := &lnwire.AnnounceSignatures{}
		if err := msg.Decode(bytes.NewReader(v), 0); err != nil {
			return err
		}

		msgs[msgKey] = msg

		return nil
	})
	if err != nil {
		return err
	}

	// Then, we'll go over all of our messages, remove their previous entry,
	// and add another with the new key format. Once we've done this for
	// every message, we can consider the migration complete.
	for oldMsgKey, msg := range msgs {
		if err := messageStore.Delete(oldMsgKey[:]); err != nil {
			return err
		}

		// Construct the new key under which we'll find this message in
		// the store. It'll be the same as the old, but we'll also
		// include the message type.
		var msgType [2]byte
		binary.BigEndian.PutUint16(msgType[:], uint16(msg.MsgType()))
		newMsgKey := append(oldMsgKey[:], msgType[:]...)

		// Serialize the message with its wire encoding.
		var b bytes.Buffer
		if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil {
			return err
		}

		if err := messageStore.Put(newMsgKey, b.Bytes()); err != nil {
			return err
		}
	}

	log.Info("Migration to the gossip message store new key format complete!")

	return nil
}
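
// A sketch of the key change performed above:
//
//	old key: peerPubKey (33 bytes) || shortChanID (8 bytes)
//	new key: peerPubKey (33 bytes) || shortChanID (8 bytes) || msgType (2 bytes, big endian)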

// MigrateOutgoingPayments moves the OutgoingPayments into a new bucket format
// where they all reside in a top-level bucket indexed by the payment hash. In
// this sub-bucket we store information relevant to this payment, such as the
// payment status.
//
// Since the router cannot handle resumed payments that have the status
// InFlight (we have no PaymentAttemptInfo available for pre-migration
// payments) we delete those statuses, so only Completed payments remain in the
// new bucket structure.
func MigrateOutgoingPayments(tx kvdb.RwTx) error {
	log.Infof("Migrating outgoing payments to new bucket structure")

	oldPayments := tx.ReadWriteBucket(paymentBucket)

	// Return early if there are no payments to migrate.
	if oldPayments == nil {
		log.Infof("No outgoing payments found, nothing to migrate.")
		return nil
	}

	newPayments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
	if err != nil {
		return err
	}

	// Helper method to get the source pubkey. We define it such that we
	// only attempt to fetch it if needed.
	sourcePub := func() ([33]byte, error) {
		var pub [33]byte
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return pub, ErrGraphNotFound
		}

		selfPub := nodes.Get(sourceKey)
		if selfPub == nil {
			return pub, ErrSourceNodeNotSet
		}
		copy(pub[:], selfPub[:])
		return pub, nil
	}

	err = oldPayments.ForEach(func(k, v []byte) error {
		// Ignore it if it is a sub-bucket.
		if v == nil {
			return nil
		}

		// Read the old payment format.
		r := bytes.NewReader(v)
		payment, err := deserializeOutgoingPayment(r)
		if err != nil {
			return err
		}

		// Calculate payment hash from the payment preimage.
		paymentHash := sha256.Sum256(payment.PaymentPreimage[:])

		// Now create and add a PaymentCreationInfo to the bucket.
		c := &PaymentCreationInfo{
			PaymentHash:    paymentHash,
			Value:          payment.Terms.Value,
			CreationDate:   payment.CreationDate,
			PaymentRequest: payment.PaymentRequest,
		}

		var infoBuf bytes.Buffer
		if err := serializePaymentCreationInfo(&infoBuf, c); err != nil {
			return err
		}

		sourcePubKey, err := sourcePub()
		if err != nil {
			return err
		}

		// Do the same for the PaymentAttemptInfo.
		totalAmt := payment.Terms.Value + payment.Fee
		rt := Route{
			TotalTimeLock: payment.TimeLockLength,
			TotalAmount:   totalAmt,
			SourcePubKey:  sourcePubKey,
			Hops:          []*Hop{},
		}
		for _, hop := range payment.Path {
			rt.Hops = append(rt.Hops, &Hop{
				PubKeyBytes:  hop,
				AmtToForward: totalAmt,
			})
		}

		// Since the old format didn't store the fee for individual
		// hops, we let the last hop eat the whole fee so that the
		// total adds up.
		if len(rt.Hops) > 0 {
			rt.Hops[len(rt.Hops)-1].AmtToForward = payment.Terms.Value
		}

		// Since we don't have the session key for old payments, we
		// create a random one to be able to serialize the attempt
		// info.
		priv, _ := btcec.NewPrivateKey(btcec.S256())
		s := &PaymentAttemptInfo{
			PaymentID:  0,    // unknown.
			SessionKey: priv, // unknown.
			Route:      rt,
		}

		var attemptBuf bytes.Buffer
		if err := serializePaymentAttemptInfoMigration9(&attemptBuf, s); err != nil {
			return err
		}

		// Reuse the existing payment sequence number.
		var seqNum [8]byte
		copy(seqNum[:], k)

		// Create a bucket indexed by the payment hash.
		bucket, err := newPayments.CreateBucket(paymentHash[:])

		// If the bucket already exists, it means that we are migrating
		// from a database containing duplicate payments to a payment
		// hash. To keep this information, we store such duplicate
		// payments in a sub-bucket.
		if err == kvdb.ErrBucketExists {
			pHashBucket := newPayments.NestedReadWriteBucket(paymentHash[:])

			// Create a bucket for duplicate payments within this
			// payment hash's bucket.
			dup, err := pHashBucket.CreateBucketIfNotExists(
				paymentDuplicateBucket,
			)
			if err != nil {
				return err
			}

			// Each duplicate will get its own sub-bucket within
			// this bucket, so use their sequence number to index
			// them by.
			bucket, err = dup.CreateBucket(seqNum[:])
			if err != nil {
				return err
			}

		} else if err != nil {
			return err
		}

		// Store the payment's information to the bucket.
		err = bucket.Put(paymentSequenceKey, seqNum[:])
		if err != nil {
			return err
		}

		err = bucket.Put(paymentCreationInfoKey, infoBuf.Bytes())
		if err != nil {
			return err
		}

		err = bucket.Put(paymentAttemptInfoKey, attemptBuf.Bytes())
		if err != nil {
			return err
		}

		err = bucket.Put(paymentSettleInfoKey, payment.PaymentPreimage[:])
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return err
	}

	// To continue producing unique sequence numbers, we set the sequence
	// of the new bucket to that of the old one.
	seq := oldPayments.Sequence()
	if err := newPayments.SetSequence(seq); err != nil {
		return err
	}

	// Now we delete the old buckets. Deleting the payment status buckets
	// deletes all payment statuses other than Complete.
	err = tx.DeleteTopLevelBucket(paymentStatusBucket)
	if err != nil && err != kvdb.ErrBucketNotFound {
		return err
	}

	// Finally delete the old payment bucket.
	err = tx.DeleteTopLevelBucket(paymentBucket)
	if err != nil && err != kvdb.ErrBucketNotFound {
		return err
	}

	log.Infof("Migration of outgoing payment bucket structure completed!")
	return nil
}
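
// Illustrative sketch (a hypothetical driver, not part of this file): the
// migrations above are version-ordered, and the surrounding package applies
// each one inside its own read-write transaction, roughly:
//
//	for _, m := range []func(kvdb.RwTx) error{
//		MigrateNodeAndEdgeUpdateIndex,
//		MigrateInvoiceTimeSeries,
//		MigrateInvoiceTimeSeriesOutgoingPayments,
//		MigrateEdgePolicies,
//		PaymentStatusesMigration,
//		MigrateOutgoingPayments,
//	} {
//		if err := kvdb.Update(db, m); err != nil {
//			return err
//		}
//	}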
41
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/options.go
generated
vendored
Normal file
41
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/options.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
package migration_01_to_11

const (
	// DefaultRejectCacheSize is the default number of rejectCacheEntries to
	// cache for use in the rejection cache of incoming gossip traffic. This
	// produces a cache size of around 1MB.
	DefaultRejectCacheSize = 50000

	// DefaultChannelCacheSize is the default number of ChannelEdges cached
	// in order to reply to gossip queries. This produces a cache size of
	// around 40MB.
	DefaultChannelCacheSize = 20000
)

// Options holds parameters for tuning and customizing a channeldb.DB.
type Options struct {
	// RejectCacheSize is the maximum number of rejectCacheEntries to hold
	// in the rejection cache.
	RejectCacheSize int

	// ChannelCacheSize is the maximum number of ChannelEdges to hold in the
	// channel cache.
	ChannelCacheSize int

	// NoFreelistSync, if true, prevents the database from syncing its
	// freelist to disk, resulting in improved performance at the expense of
	// increased startup time.
	NoFreelistSync bool
}

// DefaultOptions returns an Options populated with default values.
func DefaultOptions() Options {
	return Options{
		RejectCacheSize:  DefaultRejectCacheSize,
		ChannelCacheSize: DefaultChannelCacheSize,
		NoFreelistSync:   true,
	}
}

// OptionModifier is a function signature for modifying the default Options.
type OptionModifier func(*Options)
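
// Illustrative sketch, assuming the functional-options pattern above; the
// WithRejectCacheSize helper is hypothetical and not defined in this file:
//
//	func WithRejectCacheSize(n int) OptionModifier {
//		return func(o *Options) {
//			o.RejectCacheSize = n
//		}
//	}
//
//	opts := DefaultOptions()
//	WithRejectCacheSize(100000)(&opts)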
21
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/payment_control.go
generated
vendored
Normal file
21
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/payment_control.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
package migration_01_to_11

import "github.com/lightningnetwork/lnd/channeldb/kvdb"

// fetchPaymentStatus fetches the payment status of the payment. If the payment
// isn't found, it will default to "StatusUnknown".
func fetchPaymentStatus(bucket kvdb.ReadBucket) PaymentStatus {
	if bucket.Get(paymentSettleInfoKey) != nil {
		return StatusSucceeded
	}

	if bucket.Get(paymentFailInfoKey) != nil {
		return StatusFailed
	}

	if bucket.Get(paymentCreationInfoKey) != nil {
		return StatusInFlight
	}

	return StatusUnknown
}
621
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/payments.go
generated
vendored
Normal file
621
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/payments.go
generated
vendored
Normal file
@@ -0,0 +1,621 @@
package migration_01_to_11

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"sort"
	"time"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/tlv"
)

var (
	// paymentsRootBucket is the name of the top-level bucket within the
	// database that stores all data related to payments. Within this
	// bucket, each payment has its own sub-bucket keyed by its payment
	// hash.
	//
	// Bucket hierarchy:
	//
	// root-bucket
	//      |
	//      |-- <paymenthash>
	//      |        |--sequence-key: <sequence number>
	//      |        |--creation-info-key: <creation info>
	//      |        |--attempt-info-key: <attempt info>
	//      |        |--settle-info-key: <settle info>
	//      |        |--fail-info-key: <fail info>
	//      |        |
	//      |        |--duplicate-bucket (only for old, completed payments)
	//      |                 |
	//      |                 |-- <seq-num>
	//      |                 |       |--sequence-key: <sequence number>
	//      |                 |       |--creation-info-key: <creation info>
	//      |                 |       |--attempt-info-key: <attempt info>
	//      |                 |       |--settle-info-key: <settle info>
	//      |                 |       |--fail-info-key: <fail info>
	//      |                 |
	//      |                 |-- <seq-num>
	//      |                 |       |
	//      |                ...     ...
	//      |
	//      |-- <paymenthash>
	//      |        |
	//      |       ...
	//     ...
	//
	paymentsRootBucket = []byte("payments-root-bucket")

	// paymentDuplicateBucket is the name of an optional sub-bucket within
	// the payment hash bucket that is used to hold duplicate payments to a
	// payment hash. This is needed to support information from earlier
	// versions of lnd, where it was possible to pay to a payment hash more
	// than once.
	paymentDuplicateBucket = []byte("payment-duplicate-bucket")

	// paymentSequenceKey is a key used in the payment's sub-bucket to
	// store the sequence number of the payment.
	paymentSequenceKey = []byte("payment-sequence-key")

	// paymentCreationInfoKey is a key used in the payment's sub-bucket to
	// store the creation info of the payment.
	paymentCreationInfoKey = []byte("payment-creation-info")

	// paymentAttemptInfoKey is a key used in the payment's sub-bucket to
	// store the info about the latest attempt that was done for the
	// payment in question.
	paymentAttemptInfoKey = []byte("payment-attempt-info")

	// paymentSettleInfoKey is a key used in the payment's sub-bucket to
	// store the settle info of the payment.
	paymentSettleInfoKey = []byte("payment-settle-info")

	// paymentFailInfoKey is a key used in the payment's sub-bucket to
	// store information about the reason a payment failed.
	paymentFailInfoKey = []byte("payment-fail-info")
)

// FailureReason encodes the reason a payment ultimately failed.
type FailureReason byte

const (
	// FailureReasonTimeout indicates that the payment timed out before a
	// successful payment attempt was made.
	FailureReasonTimeout FailureReason = 0

	// FailureReasonNoRoute indicates no successful route to the
	// destination was found during path finding.
	FailureReasonNoRoute FailureReason = 1

	// FailureReasonError indicates that an unexpected error happened during
	// payment.
	FailureReasonError FailureReason = 2

	// FailureReasonIncorrectPaymentDetails indicates that either the hash
	// is unknown or the final cltv delta or amount is incorrect.
	FailureReasonIncorrectPaymentDetails FailureReason = 3

	// TODO(halseth): cancel state.

	// TODO(joostjager): Add failure reasons for:
	// LocalLiquidityInsufficient, RemoteCapacityInsufficient.
)

// String returns a human-readable FailureReason.
func (r FailureReason) String() string {
	switch r {
	case FailureReasonTimeout:
		return "timeout"
	case FailureReasonNoRoute:
		return "no_route"
	case FailureReasonError:
		return "error"
	case FailureReasonIncorrectPaymentDetails:
		return "incorrect_payment_details"
	}

	return "unknown"
}

// PaymentStatus represents the current status of a payment.
type PaymentStatus byte

const (
	// StatusUnknown is the status where a payment has never been initiated
	// and hence is unknown.
	StatusUnknown PaymentStatus = 0

	// StatusInFlight is the status where a payment has been initiated, but
	// a response has not been received.
	StatusInFlight PaymentStatus = 1

	// StatusSucceeded is the status where a payment has been initiated and
	// the payment was completed successfully.
	StatusSucceeded PaymentStatus = 2

	// StatusFailed is the status where a payment has been initiated and a
	// failure result has come back.
	StatusFailed PaymentStatus = 3
)

// Bytes returns the status as a slice of bytes.
func (ps PaymentStatus) Bytes() []byte {
	return []byte{byte(ps)}
}

// FromBytes sets the status from a slice of bytes.
func (ps *PaymentStatus) FromBytes(status []byte) error {
	if len(status) != 1 {
		return errors.New("payment status is empty")
	}

	switch PaymentStatus(status[0]) {
	case StatusUnknown, StatusInFlight, StatusSucceeded, StatusFailed:
		*ps = PaymentStatus(status[0])
	default:
		return errors.New("unknown payment status")
	}

	return nil
}

// String returns a readable representation of the payment status.
func (ps PaymentStatus) String() string {
	switch ps {
	case StatusUnknown:
		return "Unknown"
	case StatusInFlight:
		return "In Flight"
	case StatusSucceeded:
		return "Succeeded"
	case StatusFailed:
		return "Failed"
	default:
		return "Unknown"
	}
}
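
// Illustrative round-trip sketch using the helpers above:
//
//	var ps PaymentStatus
//	if err := ps.FromBytes(StatusSucceeded.Bytes()); err != nil {
//		// handle the error
//	}
//	fmt.Println(ps) // prints "Succeeded"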

// PaymentCreationInfo is the information necessary to have ready when
// initiating a payment, moving it into state InFlight.
type PaymentCreationInfo struct {
	// PaymentHash is the hash this payment is paying to.
	PaymentHash lntypes.Hash

	// Value is the amount we are paying.
	Value lnwire.MilliSatoshi

	// CreationDate is the time when this payment was initiated.
	CreationDate time.Time

	// PaymentRequest is the full payment request, if any.
	PaymentRequest []byte
}

// PaymentAttemptInfo contains information about a specific payment attempt for
// a given payment. This information is used by the router to handle any errors
// coming back after an attempt is made, and to query the switch about the
// status of a payment. For settled payments this will be the information for
// the successful payment attempt.
type PaymentAttemptInfo struct {
	// PaymentID is the unique ID used for this attempt.
	PaymentID uint64

	// SessionKey is the ephemeral key used for this payment attempt.
	SessionKey *btcec.PrivateKey

	// Route is the route attempted to send the HTLC.
	Route Route
}

// Payment is a wrapper around a payment's PaymentCreationInfo,
// PaymentAttemptInfo, and preimage. All payments will have the
// PaymentCreationInfo set, the PaymentAttemptInfo will be set only if at least
// one payment attempt has been made, while only completed payments will have a
// non-zero payment preimage.
type Payment struct {
	// sequenceNum is a unique identifier used to sort the payments in
	// order of creation.
	sequenceNum uint64

	// Status is the current PaymentStatus of this payment.
	Status PaymentStatus

	// Info holds all static information about this payment, and is
	// populated when the payment is initiated.
	Info *PaymentCreationInfo

	// Attempt is the information about the last payment attempt made.
	//
	// NOTE: Can be nil if no attempt has been made yet.
	Attempt *PaymentAttemptInfo

	// PaymentPreimage is the preimage of a successful payment. This serves
	// as a proof of payment. It will only be non-nil for settled payments.
	//
	// NOTE: Can be nil if the payment is not settled.
	PaymentPreimage *lntypes.Preimage

	// Failure is a failure reason code indicating the reason the payment
	// failed. It is only non-nil for failed payments.
	//
	// NOTE: Can be nil if the payment has not failed.
	Failure *FailureReason
}

// FetchPayments returns all sent payments found in the DB.
func (db *DB) FetchPayments() ([]*Payment, error) {
	var payments []*Payment

	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		paymentsBucket := tx.ReadBucket(paymentsRootBucket)
		if paymentsBucket == nil {
			return nil
		}

		return paymentsBucket.ForEach(func(k, v []byte) error {
			bucket := paymentsBucket.NestedReadBucket(k)
			if bucket == nil {
				// We only expect sub-buckets to be found in
				// this top-level bucket.
				return fmt.Errorf("non bucket element in " +
					"payments bucket")
			}

			p, err := fetchPayment(bucket)
			if err != nil {
				return err
			}

			payments = append(payments, p)

			// For older versions of lnd, duplicate payments to a
			// payment hash were possible. These will be found in a
			// sub-bucket indexed by their sequence number if
			// available.
			dup := bucket.NestedReadBucket(paymentDuplicateBucket)
			if dup == nil {
				return nil
			}

			return dup.ForEach(func(k, v []byte) error {
				subBucket := dup.NestedReadBucket(k)
				if subBucket == nil {
					// We expect one bucket for each
					// duplicate to be found.
					return fmt.Errorf("non bucket element " +
						"in duplicate bucket")
				}

				p, err := fetchPayment(subBucket)
				if err != nil {
					return err
				}

				payments = append(payments, p)
				return nil
			})
		})
	})
	if err != nil {
		return nil, err
	}

	// Before returning, sort the payments by their sequence number.
	sort.Slice(payments, func(i, j int) bool {
		return payments[i].sequenceNum < payments[j].sequenceNum
	})

	return payments, nil
}

func fetchPayment(bucket kvdb.ReadBucket) (*Payment, error) {
	var (
		err error
		p   = &Payment{}
	)

	seqBytes := bucket.Get(paymentSequenceKey)
	if seqBytes == nil {
		return nil, fmt.Errorf("sequence number not found")
	}

	p.sequenceNum = binary.BigEndian.Uint64(seqBytes)

	// Get the payment status.
	p.Status = fetchPaymentStatus(bucket)

	// Get the PaymentCreationInfo.
	b := bucket.Get(paymentCreationInfoKey)
	if b == nil {
		return nil, fmt.Errorf("creation info not found")
	}

	r := bytes.NewReader(b)
	p.Info, err = deserializePaymentCreationInfo(r)
	if err != nil {
		return nil, err
	}

	// Get the PaymentAttemptInfo. This can be unset.
	b = bucket.Get(paymentAttemptInfoKey)
	if b != nil {
		r = bytes.NewReader(b)
		p.Attempt, err = deserializePaymentAttemptInfo(r)
		if err != nil {
			return nil, err
		}
	}

	// Get the payment preimage. This is only found for completed payments.
	b = bucket.Get(paymentSettleInfoKey)
	if b != nil {
		var preimg lntypes.Preimage
		copy(preimg[:], b[:])
		p.PaymentPreimage = &preimg
	}

	// Get failure reason if available.
	b = bucket.Get(paymentFailInfoKey)
	if b != nil {
		reason := FailureReason(b[0])
		p.Failure = &reason
	}

	return p, nil
}

func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) error {
	var scratch [8]byte

	if _, err := w.Write(c.PaymentHash[:]); err != nil {
		return err
	}

	byteOrder.PutUint64(scratch[:], uint64(c.Value))
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	byteOrder.PutUint64(scratch[:], uint64(c.CreationDate.Unix()))
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest)))
	if _, err := w.Write(scratch[:4]); err != nil {
		return err
	}

	if _, err := w.Write(c.PaymentRequest[:]); err != nil {
		return err
	}

	return nil
}

func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, error) {
	var scratch [8]byte

	c := &PaymentCreationInfo{}

	if _, err := io.ReadFull(r, c.PaymentHash[:]); err != nil {
		return nil, err
	}

	if _, err := io.ReadFull(r, scratch[:]); err != nil {
		return nil, err
	}
	c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))

	if _, err := io.ReadFull(r, scratch[:]); err != nil {
		return nil, err
	}
	c.CreationDate = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0)

	if _, err := io.ReadFull(r, scratch[:4]); err != nil {
		return nil, err
	}

	reqLen := uint32(byteOrder.Uint32(scratch[:4]))
	payReq := make([]byte, reqLen)
	if reqLen > 0 {
		if _, err := io.ReadFull(r, payReq[:]); err != nil {
			return nil, err
		}
	}
	c.PaymentRequest = payReq

	return c, nil
}
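
// A sketch of the on-disk layout implied by the two functions above:
//
//	PaymentHash          32 bytes
//	Value                 8 bytes (uint64, byteOrder)
//	CreationDate          8 bytes (unix seconds, uint64)
//	len(PaymentRequest)   4 bytes (uint32)
//	PaymentRequest        variable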

func serializePaymentAttemptInfo(w io.Writer, a *PaymentAttemptInfo) error {
	if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil {
		return err
	}

	if err := SerializeRoute(w, a.Route); err != nil {
		return err
	}

	return nil
}

func deserializePaymentAttemptInfo(r io.Reader) (*PaymentAttemptInfo, error) {
	a := &PaymentAttemptInfo{}
	err := ReadElements(r, &a.PaymentID, &a.SessionKey)
	if err != nil {
		return nil, err
	}
	a.Route, err = DeserializeRoute(r)
	if err != nil {
		return nil, err
	}
	return a, nil
}
func serializeHop(w io.Writer, h *Hop) error {
|
||||
if err := WriteElements(w,
|
||||
h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock,
|
||||
h.AmtToForward,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := binary.Write(w, byteOrder, h.LegacyPayload); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For legacy payloads, we don't need to write any TLV records, so
|
||||
// we'll write a zero indicating the our serialized TLV map has no
|
||||
// records.
|
||||
if h.LegacyPayload {
|
||||
return WriteElements(w, uint32(0))
|
||||
}
|
||||
|
||||
// Otherwise, we'll transform our slice of records into a map of the
|
||||
// raw bytes, then serialize them in-line with a length (number of
|
||||
// elements) prefix.
|
||||
mapRecords, err := tlv.RecordsToMap(h.TLVRecords)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numRecords := uint32(len(mapRecords))
|
||||
if err := WriteElements(w, numRecords); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for recordType, rawBytes := range mapRecords {
|
||||
if err := WriteElements(w, recordType); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := wire.WriteVarBytes(w, 0, rawBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// maxOnionPayloadSize is the largest Sphinx payload possible, so we don't need
|
||||
// to read/write a TLV stream larger than this.
|
||||
const maxOnionPayloadSize = 1300
|
||||
|
||||
func deserializeHop(r io.Reader) (*Hop, error) {
|
||||
h := &Hop{}
|
||||
|
||||
var pub []byte
|
||||
if err := ReadElements(r, &pub); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(h.PubKeyBytes[:], pub)
|
||||
|
||||
if err := ReadElements(r,
|
||||
&h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(roasbeef): change field to allow LegacyPayload false to be the
|
||||
// legacy default?
|
||||
err := binary.Read(r, byteOrder, &h.LegacyPayload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var numElements uint32
|
||||
if err := ReadElements(r, &numElements); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If there're no elements, then we can return early.
|
||||
if numElements == 0 {
|
||||
return h, nil
|
||||
}
|
||||
|
||||
tlvMap := make(map[uint64][]byte)
|
||||
for i := uint32(0); i < numElements; i++ {
|
||||
var tlvType uint64
|
||||
if err := ReadElements(r, &tlvType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rawRecordBytes, err := wire.ReadVarBytes(
|
||||
r, 0, maxOnionPayloadSize, "tlv",
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tlvMap[tlvType] = rawRecordBytes
|
||||
}
|
||||
|
||||
h.TLVRecords = tlv.MapToRecords(tlvMap)
|
||||
|
||||
return h, nil
|
||||
}
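
// Illustrative sketch (not part of the upstream source): the hop codec above
// round-trips, which is easiest to see with a legacy payload where only a
// zero TLV-record count is written. Field values are hypothetical.
func exampleHopRoundTrip() (*Hop, error) {
	in := &Hop{
		ChannelID:        123456,
		OutgoingTimeLock: 600000,
		AmtToForward:     lnwire.MilliSatoshi(50000),
		LegacyPayload:    true,
	}

	var buf bytes.Buffer
	if err := serializeHop(&buf, in); err != nil {
		return nil, err
	}

	// deserializeHop consumes the pubkey, routing fields, payload flag
	// and the (empty) TLV map written above.
	return deserializeHop(bytes.NewReader(buf.Bytes()))
}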

// SerializeRoute serializes a route.
func SerializeRoute(w io.Writer, r Route) error {
	if err := WriteElements(w,
		r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:],
	); err != nil {
		return err
	}

	if err := WriteElements(w, uint32(len(r.Hops))); err != nil {
		return err
	}

	for _, h := range r.Hops {
		if err := serializeHop(w, h); err != nil {
			return err
		}
	}

	return nil
}

// DeserializeRoute deserializes a route.
func DeserializeRoute(r io.Reader) (Route, error) {
	rt := Route{}
	if err := ReadElements(r,
		&rt.TotalTimeLock, &rt.TotalAmount,
	); err != nil {
		return rt, err
	}

	var pub []byte
	if err := ReadElements(r, &pub); err != nil {
		return rt, err
	}
	copy(rt.SourcePubKey[:], pub)

	var numHops uint32
	if err := ReadElements(r, &numHops); err != nil {
		return rt, err
	}

	var hops []*Hop
	for i := uint32(0); i < numHops; i++ {
		hop, err := deserializeHop(r)
		if err != nil {
			return rt, err
		}
		hops = append(hops, hop)
	}
	rt.Hops = hops

	return rt, nil
}
330
vendor/github.com/lightningnetwork/lnd/channeldb/migration_01_to_11/route.go
generated
vendored
Normal file
@@ -0,0 +1,330 @@
package migration_01_to_11

import (
	"bytes"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"
	"strconv"
	"strings"

	"github.com/btcsuite/btcd/btcec"
	sphinx "github.com/lightningnetwork/lightning-onion"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/record"
	"github.com/lightningnetwork/lnd/tlv"
)

// VertexSize is the size of the array to store a vertex.
const VertexSize = 33

// ErrNoRouteHopsProvided is returned when a caller attempts to construct a new
// sphinx packet, but provides an empty set of hops for each route.
var ErrNoRouteHopsProvided = fmt.Errorf("empty route hops provided")

// Vertex is a simple alias for the serialization of a compressed Bitcoin
// public key.
type Vertex [VertexSize]byte

// NewVertex returns a new Vertex given a public key.
func NewVertex(pub *btcec.PublicKey) Vertex {
	var v Vertex
	copy(v[:], pub.SerializeCompressed())
	return v
}

// NewVertexFromBytes returns a new Vertex based on a serialized pubkey in a
// byte slice.
func NewVertexFromBytes(b []byte) (Vertex, error) {
	vertexLen := len(b)
	if vertexLen != VertexSize {
		return Vertex{}, fmt.Errorf("invalid vertex length of %v, "+
			"want %v", vertexLen, VertexSize)
	}

	var v Vertex
	copy(v[:], b)
	return v, nil
}

// NewVertexFromStr returns a new Vertex given its hex-encoded string format.
func NewVertexFromStr(v string) (Vertex, error) {
	// Return an error if the hex string is of incorrect length.
	if len(v) != VertexSize*2 {
		return Vertex{}, fmt.Errorf("invalid vertex string length of "+
			"%v, want %v", len(v), VertexSize*2)
	}

	vertex, err := hex.DecodeString(v)
	if err != nil {
		return Vertex{}, err
	}

	return NewVertexFromBytes(vertex)
}

// String returns a human-readable version of the Vertex, which is the
// hex-encoding of the serialized compressed public key.
func (v Vertex) String() string {
	return fmt.Sprintf("%x", v[:])
}
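
// Illustrative sketch (not part of the upstream source): parsing a vertex
// from its 66-character hex form. The key below is hypothetical; note that
// NewVertexFromStr only validates the length and hex encoding, not that the
// bytes form a valid curve point.
func exampleParseVertex() (Vertex, error) {
	const hexKey = "02" +
		"0000000000000000000000000000000000000000000000000000000000000001"
	return NewVertexFromStr(hexKey)
}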

// Hop represents an intermediate or final node of the route. This naming
// is in line with the definition given in BOLT #4: Onion Routing Protocol.
// The struct houses the channel along which this hop can be reached and
// the values necessary to create the HTLC that needs to be sent to the
// next hop. It is also used to encode the per-hop payload included within
// the Sphinx packet.
type Hop struct {
	// PubKeyBytes is the raw bytes of the public key of the target node.
	PubKeyBytes Vertex

	// ChannelID is the unique channel ID for the channel. The first 3
	// bytes are the block height, the next 3 the index within the block,
	// and the last 2 bytes are the output index for the channel.
	ChannelID uint64

	// OutgoingTimeLock is the timelock value that should be used when
	// crafting the _outgoing_ HTLC from this hop.
	OutgoingTimeLock uint32

	// AmtToForward is the amount that this hop will forward to the next
	// hop. This value is less than the value that the incoming HTLC
	// carries as a fee will be subtracted by the hop.
	AmtToForward lnwire.MilliSatoshi

	// TLVRecords, if non-nil, is a set of additional TLV records that
	// should be included in the forwarding instructions for this node.
	TLVRecords []tlv.Record

	// LegacyPayload, if true, signals that this node doesn't understand
	// the new TLV payload, so we must instead use the legacy payload.
	LegacyPayload bool
}

// PackHopPayload writes to the passed io.Writer the series of bytes that can
// be placed directly into the per-hop payload (EOB) for this hop. This will
// include the required routing fields, as well as serializing any of the
// passed optional TLVRecords. nextChanID is the unique channel ID that
// references the _outgoing_ channel ID that follows this hop. This field
// follows the same semantics as the NextAddress field in the onion: it should
// be set to zero to indicate the terminal hop.
func (h *Hop) PackHopPayload(w io.Writer, nextChanID uint64) error {
	// If this is a legacy payload, then we'll exit here as this method
	// shouldn't be called.
	if h.LegacyPayload {
		return fmt.Errorf("cannot pack hop payloads for legacy " +
			"payloads")
	}

	// Otherwise, we'll need to make a new stream that includes our
	// required routing fields, as well as these optional values.
	var records []tlv.Record

	// Every hop must have an amount to forward and CLTV expiry.
	amt := uint64(h.AmtToForward)
	records = append(records,
		record.NewAmtToFwdRecord(&amt),
		record.NewLockTimeRecord(&h.OutgoingTimeLock),
	)

	// BOLT 04 says the next_hop_id should be omitted for the final hop,
	// but present for all others.
	//
	// TODO(conner): test using hop.Exit once available
	if nextChanID != 0 {
		records = append(records,
			record.NewNextHopIDRecord(&nextChanID),
		)
	}

	// Append any custom types destined for this hop.
	records = append(records, h.TLVRecords...)

	// To ensure we produce a canonical stream, we'll sort the records
	// before encoding them as a stream in the hop payload.
	tlv.SortRecords(records)

	tlvStream, err := tlv.NewStream(records...)
	if err != nil {
		return err
	}

	return tlvStream.Encode(w)
}
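
// A minimal usage sketch (not part of the upstream source): encoding the TLV
// payload for a non-final hop. The channel ID below is hypothetical; passing
// a zero nextChanID would instead mark the terminal hop.
func examplePackHopPayload(h *Hop) ([]byte, error) {
	var buf bytes.Buffer
	if err := h.PackHopPayload(&buf, 123456789); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}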

// Route represents a path through the channel graph which runs over one or
// more channels in succession. This struct carries all the information
// required to craft the Sphinx onion packet, and send the payment along the
// first hop in the path. A route is only selected as valid if all the channels
// have sufficient capacity to carry the initial payment amount after fees are
// accounted for.
type Route struct {
	// TotalTimeLock is the cumulative (final) time lock across the entire
	// route. This is the CLTV value that should be extended to the first
	// hop in the route. All other hops will decrement the time-lock as
	// advertised, leaving enough time for all hops to wait for or present
	// the payment preimage to complete the payment.
	TotalTimeLock uint32

	// TotalAmount is the total amount of funds required to complete a
	// payment over this route. This value includes the cumulative fees at
	// each hop. As a result, the HTLC extended to the first hop in the
	// route will need to have at least this many satoshis, otherwise the
	// route will fail at an intermediate node due to an insufficient
	// amount of fees.
	TotalAmount lnwire.MilliSatoshi

	// SourcePubKey is the pubkey of the node where this route originates
	// from.
	SourcePubKey Vertex

	// Hops contains the forwarding details at each hop.
	Hops []*Hop
}

// HopFee returns the fee charged by the route hop indicated by hopIndex.
func (r *Route) HopFee(hopIndex int) lnwire.MilliSatoshi {
	var incomingAmt lnwire.MilliSatoshi
	if hopIndex == 0 {
		incomingAmt = r.TotalAmount
	} else {
		incomingAmt = r.Hops[hopIndex-1].AmtToForward
	}

	// The fee is calculated as the difference between the incoming and
	// outgoing amount.
	return incomingAmt - r.Hops[hopIndex].AmtToForward
}

// TotalFees is the sum of the fees paid at each hop within the final route. In
// the case of a one-hop payment, this value will be zero as we don't need to
// pay a fee to ourselves.
func (r *Route) TotalFees() lnwire.MilliSatoshi {
	if len(r.Hops) == 0 {
		return 0
	}

	return r.TotalAmount - r.Hops[len(r.Hops)-1].AmtToForward
}
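
// Worked example (not part of the upstream source, hypothetical amounts):
// for a two-hop route the sender extends TotalAmount, each hop forwards its
// AmtToForward, and the per-hop fee is the difference between what comes in
// and what goes out.
func exampleRouteFees() (lnwire.MilliSatoshi, lnwire.MilliSatoshi) {
	r := &Route{
		// 1,000,000 msat delivered plus 1,000 msat of total fees.
		TotalAmount: 1001000,
		Hops: []*Hop{
			{AmtToForward: 1000500}, // HopFee(0) = 1001000 - 1000500 = 500
			{AmtToForward: 1000000}, // HopFee(1) = 1000500 - 1000000 = 500
		},
	}

	// TotalFees() = 1001000 - 1000000 = 1000 msat.
	return r.HopFee(0), r.TotalFees()
}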

// NewRouteFromHops creates a new Route structure from the minimally required
// information to perform the payment. It infers fee amounts and populates the
// node, chan and prev/next hop maps.
func NewRouteFromHops(amtToSend lnwire.MilliSatoshi, timeLock uint32,
	sourceVertex Vertex, hops []*Hop) (*Route, error) {

	if len(hops) == 0 {
		return nil, ErrNoRouteHopsProvided
	}

	// First, we'll create a route struct and populate it with the fields
	// for which the values are provided as arguments of this function.
	// TotalFees is determined based on the difference between the amount
	// that is sent from the source and the final amount that is received
	// by the destination.
	route := &Route{
		SourcePubKey:  sourceVertex,
		Hops:          hops,
		TotalTimeLock: timeLock,
		TotalAmount:   amtToSend,
	}

	return route, nil
}

// ToSphinxPath converts a complete route into a sphinx PaymentPath that
// contains the per-hop payloads used to encode the HTLC routing data for each
// hop in the route.
func (r *Route) ToSphinxPath() (*sphinx.PaymentPath, error) {
	var path sphinx.PaymentPath

	// For each hop encoded within the route, we'll convert the hop struct
	// to an OnionHop with matching per-hop payload within the path as used
	// by the sphinx package.
	for i, hop := range r.Hops {
		pub, err := btcec.ParsePubKey(
			hop.PubKeyBytes[:], btcec.S256(),
		)
		if err != nil {
			return nil, err
		}

		// As a base case, the next hop is set to all zeroes in order
		// to indicate that the "last hop" has no further hops after it.
		nextHop := uint64(0)

		// If we aren't on the last hop, then we set the "next address"
		// field to be the channel that directly follows it.
		if i != len(r.Hops)-1 {
			nextHop = r.Hops[i+1].ChannelID
		}

		var payload sphinx.HopPayload

		// If this is the legacy payload, then we can just include the
		// hop data as normal.
		if hop.LegacyPayload {
			// Before we encode this value, we'll pack the next hop
			// into the NextAddress field of the hop info to ensure
			// we point to the right node.
			hopData := sphinx.HopData{
				ForwardAmount: uint64(hop.AmtToForward),
				OutgoingCltv:  hop.OutgoingTimeLock,
			}
			binary.BigEndian.PutUint64(
				hopData.NextAddress[:], nextHop,
			)

			payload, err = sphinx.NewHopPayload(&hopData, nil)
			if err != nil {
				return nil, err
			}
		} else {
			// For non-legacy payloads, we'll need to pack the
			// routing information, along with any extra TLV
			// information, into the new per-hop payload format.
			// We'll also pass in the chan ID of the hop this
			// channel should be forwarded to so we can construct a
			// valid payload.
			var b bytes.Buffer
			err := hop.PackHopPayload(&b, nextHop)
			if err != nil {
				return nil, err
			}

			// TODO(roasbeef): make better API for NewHopPayload?
			payload, err = sphinx.NewHopPayload(nil, b.Bytes())
			if err != nil {
				return nil, err
			}
		}

		path[i] = sphinx.OnionHop{
			NodePub:    *pub,
			HopPayload: payload,
		}
	}

	return &path, nil
}

// String returns a human-readable representation of the route.
func (r *Route) String() string {
	var b strings.Builder

	for i, hop := range r.Hops {
		if i > 0 {
			b.WriteString(",")
		}
		b.WriteString(strconv.FormatUint(hop.ChannelID, 10))
	}

	return fmt.Sprintf("amt=%v, fees=%v, tl=%v, chans=%v",
		r.TotalAmount-r.TotalFees(), r.TotalFees(), r.TotalTimeLock,
		b.String(),
	)
}
299
vendor/github.com/lightningnetwork/lnd/channeldb/mp_payment.go
generated
vendored
Normal file
@@ -0,0 +1,299 @@
package channeldb

import (
	"bytes"
	"io"
	"time"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing/route"
)

// HTLCAttemptInfo contains static information about a specific HTLC attempt
// for a payment. This information is used by the router to handle any errors
// coming back after an attempt is made, and to query the switch about the
// status of the attempt.
type HTLCAttemptInfo struct {
	// AttemptID is the unique ID used for this attempt.
	AttemptID uint64

	// SessionKey is the ephemeral key used for this attempt.
	SessionKey *btcec.PrivateKey

	// Route is the route attempted to send the HTLC.
	Route route.Route

	// AttemptTime is the time at which this HTLC was attempted.
	AttemptTime time.Time
}

// HTLCAttempt contains information about a specific HTLC attempt for a given
// payment. It contains the HTLCAttemptInfo used to send the HTLC, as well
// as a timestamp and any known outcome of the attempt.
type HTLCAttempt struct {
	HTLCAttemptInfo

	// Settle is the preimage of a successful payment. This serves as a
	// proof of payment. It will only be non-nil for settled payments.
	//
	// NOTE: Can be nil if payment is not settled.
	Settle *HTLCSettleInfo

	// Failure is a failure reason code indicating the reason the payment
	// failed. It is only non-nil for failed payments.
	//
	// NOTE: Can be nil if payment is not failed.
	Failure *HTLCFailInfo
}

// HTLCSettleInfo encapsulates the information that augments an HTLCAttempt in
// the event that the HTLC is successful.
type HTLCSettleInfo struct {
	// Preimage is the preimage of a successful HTLC. This serves as a proof
	// of payment.
	Preimage lntypes.Preimage

	// SettleTime is the time at which this HTLC was settled.
	SettleTime time.Time
}

// HTLCFailReason is the reason an htlc failed.
type HTLCFailReason byte

const (
	// HTLCFailUnknown is recorded for htlcs that failed with an unknown
	// reason.
	HTLCFailUnknown HTLCFailReason = 0

	// HTLCFailUnreadable is recorded for htlcs that had a failure message
	// that couldn't be decrypted.
	HTLCFailUnreadable HTLCFailReason = 1

	// HTLCFailInternal is recorded for htlcs that failed because of an
	// internal error.
	HTLCFailInternal HTLCFailReason = 2

	// HTLCFailMessage is recorded for htlcs that failed with a network
	// failure message.
	HTLCFailMessage HTLCFailReason = 3
)

// HTLCFailInfo encapsulates the information that augments an HTLCAttempt in the
// event that the HTLC fails.
type HTLCFailInfo struct {
	// FailTime is the time at which this HTLC was failed.
	FailTime time.Time

	// Message is the wire message that failed this HTLC. This field will be
	// populated when the failure reason is HTLCFailMessage.
	Message lnwire.FailureMessage

	// Reason is the failure reason for this HTLC.
	Reason HTLCFailReason

	// FailureSourceIndex is the position in the path of the intermediate
	// or final node that generated the failure message. Position zero is
	// the sender node. This field will be populated when the failure
	// reason is either HTLCFailMessage or HTLCFailUnknown.
	FailureSourceIndex uint32
}

// MPPayment is a wrapper around a payment's PaymentCreationInfo and
// HTLCAttempts. All payments will have the PaymentCreationInfo set; any HTLCs
// made in attempts to complete the payment will be populated in the HTLCs
// slice. Each populated HTLCAttempt represents an attempted HTLC, each of
// which may have the associated Settle or Fail struct populated if the HTLC
// is no longer in-flight.
type MPPayment struct {
	// SequenceNum is a unique identifier used to sort the payments in
	// order of creation.
	SequenceNum uint64

	// Info holds all static information about this payment, and is
	// populated when the payment is initiated.
	Info *PaymentCreationInfo

	// HTLCs holds the information about individual HTLCs that we send in
	// order to make the payment.
	HTLCs []HTLCAttempt

	// FailureReason is the failure reason code indicating the reason the
	// payment failed.
	//
	// NOTE: Will only be set once the daemon has given up on the payment
	// altogether.
	FailureReason *FailureReason

	// Status is the current PaymentStatus of this payment.
	Status PaymentStatus
}

// TerminalInfo returns any HTLC settle info recorded. If no settle info is
// recorded, any payment level failure will be returned. If neither a settle
// nor a failure is recorded, both return values will be nil.
func (m *MPPayment) TerminalInfo() (*HTLCSettleInfo, *FailureReason) {
	for _, h := range m.HTLCs {
		if h.Settle != nil {
			return h.Settle, nil
		}
	}

	return nil, m.FailureReason
}

// SentAmt returns the sum of sent amount and fees for HTLCs that are either
// settled or still in flight.
func (m *MPPayment) SentAmt() (lnwire.MilliSatoshi, lnwire.MilliSatoshi) {
	var sent, fees lnwire.MilliSatoshi
	for _, h := range m.HTLCs {
		if h.Failure != nil {
			continue
		}

		// The attempt was not failed, meaning the amount was
		// potentially sent to the receiver.
		sent += h.Route.ReceiverAmt()
		fees += h.Route.TotalFees()
	}

	return sent, fees
}

// InFlightHTLCs returns the HTLCs that are still in-flight, meaning they have
// not been settled or failed.
func (m *MPPayment) InFlightHTLCs() []HTLCAttempt {
	var inflights []HTLCAttempt
	for _, h := range m.HTLCs {
		if h.Settle != nil || h.Failure != nil {
			continue
		}

		inflights = append(inflights, h)
	}

	return inflights
}
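
// Illustrative sketch (not part of the upstream source): summarizing a
// payment's progress from its recorded HTLC attempts using the accessors
// above.
func examplePaymentProgress(m *MPPayment) (settled bool, inFlight int,
	sent lnwire.MilliSatoshi) {

	// A non-nil settle info means at least one HTLC delivered the payment.
	settleInfo, _ := m.TerminalInfo()

	// Amount potentially delivered by settled or still-pending HTLCs.
	sentAmt, _ := m.SentAmt()

	return settleInfo != nil, len(m.InFlightHTLCs()), sentAmt
}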

// serializeHTLCSettleInfo serializes the details of a settled htlc.
func serializeHTLCSettleInfo(w io.Writer, s *HTLCSettleInfo) error {
	if _, err := w.Write(s.Preimage[:]); err != nil {
		return err
	}

	if err := serializeTime(w, s.SettleTime); err != nil {
		return err
	}

	return nil
}

// deserializeHTLCSettleInfo deserializes the details of a settled htlc.
func deserializeHTLCSettleInfo(r io.Reader) (*HTLCSettleInfo, error) {
	s := &HTLCSettleInfo{}
	if _, err := io.ReadFull(r, s.Preimage[:]); err != nil {
		return nil, err
	}

	var err error
	s.SettleTime, err = deserializeTime(r)
	if err != nil {
		return nil, err
	}

	return s, nil
}

// serializeHTLCFailInfo serializes the details of a failed htlc including the
// wire failure.
func serializeHTLCFailInfo(w io.Writer, f *HTLCFailInfo) error {
	if err := serializeTime(w, f.FailTime); err != nil {
		return err
	}

	// Write the failure. If there is no failure message, write an empty
	// byte slice.
	var messageBytes bytes.Buffer
	if f.Message != nil {
		err := lnwire.EncodeFailureMessage(&messageBytes, f.Message, 0)
		if err != nil {
			return err
		}
	}
	if err := wire.WriteVarBytes(w, 0, messageBytes.Bytes()); err != nil {
		return err
	}

	return WriteElements(w, byte(f.Reason), f.FailureSourceIndex)
}

// deserializeHTLCFailInfo deserializes the details of a failed htlc including
// the wire failure.
func deserializeHTLCFailInfo(r io.Reader) (*HTLCFailInfo, error) {
	f := &HTLCFailInfo{}
	var err error
	f.FailTime, err = deserializeTime(r)
	if err != nil {
		return nil, err
	}

	// Read the failure.
	failureBytes, err := wire.ReadVarBytes(
		r, 0, lnwire.FailureMessageLength, "failure",
	)
	if err != nil {
		return nil, err
	}
	if len(failureBytes) > 0 {
		f.Message, err = lnwire.DecodeFailureMessage(
			bytes.NewReader(failureBytes), 0,
		)
		if err != nil {
			return nil, err
		}
	}

	var reason byte
	err = ReadElements(r, &reason, &f.FailureSourceIndex)
	if err != nil {
		return nil, err
	}
	f.Reason = HTLCFailReason(reason)

	return f, nil
}

// deserializeTime deserializes time as unix nanoseconds.
func deserializeTime(r io.Reader) (time.Time, error) {
	var scratch [8]byte
	if _, err := io.ReadFull(r, scratch[:]); err != nil {
		return time.Time{}, err
	}

	// Convert to time.Time. Interpret unix nano time zero as a zero
	// time.Time value.
	unixNano := byteOrder.Uint64(scratch[:])
	if unixNano == 0 {
		return time.Time{}, nil
	}

	return time.Unix(0, int64(unixNano)), nil
}

// serializeTime serializes time as unix nanoseconds.
func serializeTime(w io.Writer, t time.Time) error {
	var scratch [8]byte

	// Convert to unix nanoseconds, but only if the time is non-zero.
	// Calling UnixNano() on a zero time yields an undefined result.
	var unixNano int64
	if !t.IsZero() {
		unixNano = t.UnixNano()
	}

	byteOrder.PutUint64(scratch[:], uint64(unixNano))
	_, err := w.Write(scratch[:])
	return err
}
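
// Illustrative sketch (not part of the upstream source): the zero-time
// convention of the helpers above. A zero time.Time is stored as the
// sentinel value 0 and decodes back to a zero time, so "unset" timestamps
// round-trip losslessly.
func exampleZeroTimeRoundTrip() (bool, error) {
	var buf bytes.Buffer
	if err := serializeTime(&buf, time.Time{}); err != nil {
		return false, err
	}

	decoded, err := deserializeTime(&buf)
	if err != nil {
		return false, err
	}

	// Reports true: the sentinel decodes back to a zero time.
	return decoded.IsZero(), nil
}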
316
vendor/github.com/lightningnetwork/lnd/channeldb/nodes.go
generated
vendored
Normal file
@@ -0,0 +1,316 @@
package channeldb

import (
	"bytes"
	"io"
	"net"
	"time"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var (
	// nodeInfoBucket stores metadata pertaining to nodes that we've had
	// direct channel-based correspondence with. This bucket allows one to
	// query for all open channels pertaining to the node by exploring each
	// node's sub-bucket within the openChanBucket.
	nodeInfoBucket = []byte("nib")
)

// LinkNode stores metadata related to nodes that we have/had a direct
// channel open with. Information such as the Bitcoin network the node
// advertised, and its identity public key are also stored. Additionally, this
// struct and the bucket it's stored within store data similar to that of
// Bitcoin's addrmanager. The TCP address information stored within the struct
// can be used to establish persistent connections with all channel
// counterparties on daemon startup.
//
// TODO(roasbeef): also add current OnionKey plus rotation schedule?
// TODO(roasbeef): add bitfield for supported services
//  * possibly add a wire.NetAddress type, type
type LinkNode struct {
	// Network indicates the Bitcoin network that the LinkNode advertises
	// for incoming channel creation.
	Network wire.BitcoinNet

	// IdentityPub is the node's current identity public key. Any
	// channel/topology related information received by this node MUST be
	// signed by this public key.
	IdentityPub *btcec.PublicKey

	// LastSeen tracks the last time this node was seen within the network.
	// A node should be marked as seen if the daemon either is able to
	// establish an outgoing connection to the node or receives a new
	// incoming connection from the node. This timestamp (stored in unix
	// epoch) may be used within a heuristic which aims to determine when a
	// channel should be unilaterally closed due to inactivity.
	//
	// TODO(roasbeef): replace with block hash/height?
	//  * possibly add a time-value metric into the heuristic?
	LastSeen time.Time

	// Addresses is a list of IP addresses over which we either were able
	// to reach the node in the past, OR we received an incoming
	// authenticated connection for the stored identity public key.
	Addresses []net.Addr

	db *DB
}

// NewLinkNode creates a new LinkNode from the provided parameters, which is
// backed by an instance of channeldb.
func (db *DB) NewLinkNode(bitNet wire.BitcoinNet, pub *btcec.PublicKey,
	addrs ...net.Addr) *LinkNode {

	return &LinkNode{
		Network:     bitNet,
		IdentityPub: pub,
		LastSeen:    time.Now(),
		Addresses:   addrs,
		db:          db,
	}
}

// UpdateLastSeen updates the last time this node was directly encountered on
// the Lightning Network.
func (l *LinkNode) UpdateLastSeen(lastSeen time.Time) error {
	l.LastSeen = lastSeen

	return l.Sync()
}

// AddAddress appends the specified TCP address to the list of known addresses
// this node is/was known to be reachable at.
func (l *LinkNode) AddAddress(addr net.Addr) error {
	for _, a := range l.Addresses {
		if a.String() == addr.String() {
			return nil
		}
	}

	l.Addresses = append(l.Addresses, addr)

	return l.Sync()
}

// Sync performs a full database sync which writes the current up-to-date data
// within the struct to the database.
func (l *LinkNode) Sync() error {
	// Finally update the database by storing the link node and updating
	// any relevant indexes.
	return kvdb.Update(l.db, func(tx kvdb.RwTx) error {
		nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket)
		if nodeMetaBucket == nil {
			return ErrLinkNodesNotFound
		}

		return putLinkNode(nodeMetaBucket, l)
	})
}

// putLinkNode serializes then writes the encoded version of the passed link
// node into the nodeMetaBucket. This function is provided in order to allow
// the ability to re-use a database transaction across many operations.
func putLinkNode(nodeMetaBucket kvdb.RwBucket, l *LinkNode) error {
	// First serialize the LinkNode into its raw-bytes encoding.
	var b bytes.Buffer
	if err := serializeLinkNode(&b, l); err != nil {
		return err
	}

	// Finally insert the link-node into the node metadata bucket keyed
	// according to its pubkey serialized in compressed form.
	nodePub := l.IdentityPub.SerializeCompressed()
	return nodeMetaBucket.Put(nodePub, b.Bytes())
}

// DeleteLinkNode removes the link node with the given identity from the
// database.
func (db *DB) DeleteLinkNode(identity *btcec.PublicKey) error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		return db.deleteLinkNode(tx, identity)
	})
}

func (db *DB) deleteLinkNode(tx kvdb.RwTx, identity *btcec.PublicKey) error {
	nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket)
	if nodeMetaBucket == nil {
		return ErrLinkNodesNotFound
	}

	pubKey := identity.SerializeCompressed()
	return nodeMetaBucket.Delete(pubKey)
}

// FetchLinkNode attempts to look up the data for a LinkNode based on a target
// identity public key. If a particular LinkNode for the passed identity public
// key cannot be found, then ErrNodeNotFound is returned.
func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) {
	var linkNode *LinkNode
	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		node, err := fetchLinkNode(tx, identity)
		if err != nil {
			return err
		}

		linkNode = node
		return nil
	})

	return linkNode, err
}

func fetchLinkNode(tx kvdb.ReadTx, targetPub *btcec.PublicKey) (*LinkNode, error) {
	// First fetch the bucket for storing node metadata, bailing out early
	// if it hasn't been created yet.
	nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
	if nodeMetaBucket == nil {
		return nil, ErrLinkNodesNotFound
	}

	// If a link node for that particular public key cannot be located,
	// then exit early with an ErrNodeNotFound.
	pubKey := targetPub.SerializeCompressed()
	nodeBytes := nodeMetaBucket.Get(pubKey)
	if nodeBytes == nil {
		return nil, ErrNodeNotFound
	}

	// Finally, decode and allocate a fresh LinkNode object to be returned
	// to the caller.
	nodeReader := bytes.NewReader(nodeBytes)
	return deserializeLinkNode(nodeReader)
}

// TODO(roasbeef): update link node addrs in server upon connection

// FetchAllLinkNodes starts a new database transaction to fetch all nodes with
// whom we have active channels.
func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) {
	var linkNodes []*LinkNode
	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		nodes, err := db.fetchAllLinkNodes(tx)
		if err != nil {
			return err
		}

		linkNodes = nodes
		return nil
	})
	if err != nil {
		return nil, err
	}

	return linkNodes, nil
}

// fetchAllLinkNodes uses an existing database transaction to fetch all nodes
// with whom we have active channels.
func (db *DB) fetchAllLinkNodes(tx kvdb.ReadTx) ([]*LinkNode, error) {
	nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
	if nodeMetaBucket == nil {
		return nil, ErrLinkNodesNotFound
	}

	var linkNodes []*LinkNode
	err := nodeMetaBucket.ForEach(func(k, v []byte) error {
		if v == nil {
			return nil
		}

		nodeReader := bytes.NewReader(v)
		linkNode, err := deserializeLinkNode(nodeReader)
		if err != nil {
			return err
		}

		linkNodes = append(linkNodes, linkNode)
		return nil
	})
	if err != nil {
		return nil, err
	}

	return linkNodes, nil
}

func serializeLinkNode(w io.Writer, l *LinkNode) error {
	var buf [8]byte

	byteOrder.PutUint32(buf[:4], uint32(l.Network))
	if _, err := w.Write(buf[:4]); err != nil {
		return err
	}

	serializedID := l.IdentityPub.SerializeCompressed()
	if _, err := w.Write(serializedID); err != nil {
		return err
	}

	seenUnix := uint64(l.LastSeen.Unix())
	byteOrder.PutUint64(buf[:], seenUnix)
	if _, err := w.Write(buf[:]); err != nil {
		return err
	}

	numAddrs := uint32(len(l.Addresses))
	byteOrder.PutUint32(buf[:4], numAddrs)
	if _, err := w.Write(buf[:4]); err != nil {
		return err
	}

	for _, addr := range l.Addresses {
		if err := serializeAddr(w, addr); err != nil {
			return err
		}
	}

	return nil
}

func deserializeLinkNode(r io.Reader) (*LinkNode, error) {
	var (
		err error
		buf [8]byte
	)

	node := &LinkNode{}

	if _, err := io.ReadFull(r, buf[:4]); err != nil {
		return nil, err
	}
	node.Network = wire.BitcoinNet(byteOrder.Uint32(buf[:4]))

	var pub [33]byte
	if _, err := io.ReadFull(r, pub[:]); err != nil {
		return nil, err
	}
	node.IdentityPub, err = btcec.ParsePubKey(pub[:], btcec.S256())
	if err != nil {
		return nil, err
	}

	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return nil, err
	}
	node.LastSeen = time.Unix(int64(byteOrder.Uint64(buf[:])), 0)

	if _, err := io.ReadFull(r, buf[:4]); err != nil {
		return nil, err
	}
	numAddrs := byteOrder.Uint32(buf[:4])

	node.Addresses = make([]net.Addr, numAddrs)
	for i := uint32(0); i < numAddrs; i++ {
		addr, err := deserializeAddr(r)
		if err != nil {
			return nil, err
		}
		node.Addresses[i] = addr
	}

	return node, nil
}
87
vendor/github.com/lightningnetwork/lnd/channeldb/options.go
generated
vendored
Normal file
@@ -0,0 +1,87 @@
package channeldb

import "github.com/lightningnetwork/lnd/clock"

const (
	// DefaultRejectCacheSize is the default number of rejectCacheEntries to
	// cache for use in the rejection cache of incoming gossip traffic. This
	// produces a cache size of around 1MB.
	DefaultRejectCacheSize = 50000

	// DefaultChannelCacheSize is the default number of ChannelEdges cached
	// in order to reply to gossip queries. This produces a cache size of
	// around 40MB.
	DefaultChannelCacheSize = 20000
)

// Options holds parameters for tuning and customizing a channeldb.DB.
type Options struct {
	// RejectCacheSize is the maximum number of rejectCacheEntries to hold
	// in the rejection cache.
	RejectCacheSize int

	// ChannelCacheSize is the maximum number of ChannelEdges to hold in the
	// channel cache.
	ChannelCacheSize int

	// NoFreelistSync, if true, prevents the database from syncing its
	// freelist to disk, resulting in improved performance at the expense of
	// increased startup time.
	NoFreelistSync bool

	// clock is the time source used by the database.
	clock clock.Clock

	// dryRun will fail to commit a successful migration when opening the
	// database if set to true.
	dryRun bool
}

// DefaultOptions returns an Options populated with default values.
func DefaultOptions() Options {
	return Options{
		RejectCacheSize:  DefaultRejectCacheSize,
		ChannelCacheSize: DefaultChannelCacheSize,
		NoFreelistSync:   true,
		clock:            clock.NewDefaultClock(),
	}
}

// OptionModifier is a function signature for modifying the default Options.
type OptionModifier func(*Options)

// OptionSetRejectCacheSize sets the RejectCacheSize to n.
func OptionSetRejectCacheSize(n int) OptionModifier {
	return func(o *Options) {
		o.RejectCacheSize = n
	}
}

// OptionSetChannelCacheSize sets the ChannelCacheSize to n.
func OptionSetChannelCacheSize(n int) OptionModifier {
	return func(o *Options) {
		o.ChannelCacheSize = n
	}
}

// OptionSetSyncFreelist allows the database to sync its freelist.
func OptionSetSyncFreelist(b bool) OptionModifier {
	return func(o *Options) {
		o.NoFreelistSync = !b
	}
}

// OptionClock sets a non-default clock dependency.
func OptionClock(clock clock.Clock) OptionModifier {
	return func(o *Options) {
		o.clock = clock
	}
}

// OptionDryRunMigration controls whether or not to intentionally fail to commit a
// successful migration that occurs when opening the database.
func OptionDryRunMigration(dryRun bool) OptionModifier {
	return func(o *Options) {
		o.dryRun = dryRun
	}
}
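
// Illustrative sketch (not part of the upstream source): combining
// OptionModifiers on top of the defaults, mirroring the functional-options
// pattern callers use when opening the database.
func exampleApplyOptions(modifiers ...OptionModifier) Options {
	opts := DefaultOptions()
	for _, modifier := range modifiers {
		modifier(&opts)
	}
	return opts
}

// For instance, exampleApplyOptions(OptionSetChannelCacheSize(30000),
// OptionDryRunMigration(true)) would yield the defaults with a larger channel
// cache and dry-run migrations enabled.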
678
vendor/github.com/lightningnetwork/lnd/channeldb/payment_control.go
generated
vendored
Normal file
@@ -0,0 +1,678 @@
package channeldb

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lntypes"
)

var (
	// ErrAlreadyPaid signals we have already paid this payment hash.
	ErrAlreadyPaid = errors.New("invoice is already paid")

	// ErrPaymentInFlight signals that payment for this payment hash is
	// already "in flight" on the network.
	ErrPaymentInFlight = errors.New("payment is in transition")

	// ErrPaymentNotInitiated is returned if the payment wasn't initiated.
	ErrPaymentNotInitiated = errors.New("payment isn't initiated")

	// ErrPaymentAlreadySucceeded is returned in the event we attempt to
	// change the status of a payment that has already succeeded.
	ErrPaymentAlreadySucceeded = errors.New("payment is already succeeded")

	// ErrPaymentAlreadyFailed is returned in the event we attempt to alter
	// a failed payment.
	ErrPaymentAlreadyFailed = errors.New("payment has already failed")

	// ErrUnknownPaymentStatus is returned when we do not recognize the
	// existing state of a payment.
	ErrUnknownPaymentStatus = errors.New("unknown payment status")

	// ErrPaymentTerminal is returned if we attempt to alter a payment that
	// has already reached a terminal condition.
	ErrPaymentTerminal = errors.New("payment has reached terminal condition")

	// ErrAttemptAlreadySettled is returned if we try to alter an already
	// settled HTLC attempt.
	ErrAttemptAlreadySettled = errors.New("attempt already settled")

	// ErrAttemptAlreadyFailed is returned if we try to alter an already
	// failed HTLC attempt.
	ErrAttemptAlreadyFailed = errors.New("attempt already failed")

	// ErrValueMismatch is returned if we try to register a non-MPP attempt
	// with an amount that doesn't match the payment amount.
	ErrValueMismatch = errors.New("attempted value doesn't match payment " +
		"amount")

	// ErrValueExceedsAmt is returned if we try to register an attempt that
	// would take the total sent amount above the payment amount.
	ErrValueExceedsAmt = errors.New("attempted value exceeds payment " +
		"amount")

	// ErrNonMPPayment is returned if we try to register an MPP attempt for
	// a payment that already has a non-MPP attempt registered.
	ErrNonMPPayment = errors.New("payment has non-MPP attempts")

	// ErrMPPayment is returned if we try to register a non-MPP attempt for
	// a payment that already has an MPP attempt registered.
	ErrMPPayment = errors.New("payment has MPP attempts")

	// ErrMPPPaymentAddrMismatch is returned if we try to register an MPP
	// shard where the payment address doesn't match existing shards.
	ErrMPPPaymentAddrMismatch = errors.New("payment address mismatch")

	// ErrMPPTotalAmountMismatch is returned if we try to register an MPP
	// shard where the total amount doesn't match existing shards.
	ErrMPPTotalAmountMismatch = errors.New("mp payment total amount mismatch")

	// errNoAttemptInfo is returned when no attempt info is stored yet.
	errNoAttemptInfo = errors.New("unable to find attempt info for " +
		"inflight payment")
)

// PaymentControl implements persistence for payments and payment attempts.
type PaymentControl struct {
	db *DB
}

// NewPaymentControl creates a new instance of the PaymentControl.
func NewPaymentControl(db *DB) *PaymentControl {
	return &PaymentControl{
		db: db,
	}
}

// InitPayment checks or records the given PaymentCreationInfo with the DB,
// making sure it does not already exist as an in-flight payment. When this
// method returns successfully, the payment is guaranteed to be in the InFlight
// state.
func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash,
	info *PaymentCreationInfo) error {

	var b bytes.Buffer
	if err := serializePaymentCreationInfo(&b, info); err != nil {
		return err
	}
	infoBytes := b.Bytes()

	var updateErr error
	err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
		// Reset the update error, to avoid carrying over an error
		// from a previous execution of the batched db transaction.
		updateErr = nil

		bucket, err := createPaymentBucket(tx, paymentHash)
		if err != nil {
			return err
		}

		// Get the existing status of this payment, if any.
		paymentStatus, err := fetchPaymentStatus(bucket)
		if err != nil {
			return err
		}

		switch paymentStatus {

		// We allow retrying failed payments.
		case StatusFailed:

		// This is a new payment that is being initialized for the
		// first time.
		case StatusUnknown:

		// We already have an InFlight payment on the network. We will
		// disallow any new payments.
		case StatusInFlight:
			updateErr = ErrPaymentInFlight
			return nil

		// We've already succeeded a payment to this payment hash,
		// forbid the switch from sending another.
		case StatusSucceeded:
			updateErr = ErrAlreadyPaid
			return nil

		default:
			updateErr = ErrUnknownPaymentStatus
			return nil
		}

		// Obtain a new sequence number for this payment. This is used
		// to sort the payments in order of creation, and also acts as
		// a unique identifier for each payment.
		sequenceNum, err := nextPaymentSequence(tx)
		if err != nil {
			return err
		}

		err = bucket.Put(paymentSequenceKey, sequenceNum)
		if err != nil {
			return err
		}

		// Add the payment info to the bucket, which contains the
		// static information for this payment.
		err = bucket.Put(paymentCreationInfoKey, infoBytes)
		if err != nil {
			return err
		}

		// We'll delete any lingering HTLCs to start with, in case we
		// are initializing a payment that was attempted earlier, but
		// left in a state where we could retry.
		err = bucket.DeleteNestedBucket(paymentHtlcsBucket)
		if err != nil && err != kvdb.ErrBucketNotFound {
			return err
		}

		// Also delete any lingering failure info now that we are
		// re-attempting.
		return bucket.Delete(paymentFailInfoKey)
	})
	if err != nil {
		return err
	}

	return updateErr
}

// RegisterAttempt atomically records the provided HTLCAttemptInfo in the
// DB.
func (p *PaymentControl) RegisterAttempt(paymentHash lntypes.Hash,
	attempt *HTLCAttemptInfo) (*MPPayment, error) {

	// Serialize the information before opening the db transaction.
	var a bytes.Buffer
	err := serializeHTLCAttemptInfo(&a, attempt)
	if err != nil {
		return nil, err
	}
	htlcInfoBytes := a.Bytes()

	htlcIDBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(htlcIDBytes, attempt.AttemptID)

	var payment *MPPayment
	err = kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
		bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
		if err != nil {
			return err
		}

		p, err := fetchPayment(bucket)
		if err != nil {
			return err
		}

		// Ensure the payment is in-flight.
		if err := ensureInFlight(p); err != nil {
			return err
		}

		// We cannot register a new attempt if the payment already has
		// reached a terminal condition:
		settle, fail := p.TerminalInfo()
		if settle != nil || fail != nil {
			return ErrPaymentTerminal
		}

		// Make sure any existing shards match the new one with regards
		// to MPP options.
		mpp := attempt.Route.FinalHop().MPP
		for _, h := range p.InFlightHTLCs() {
			hMpp := h.Route.FinalHop().MPP

			switch {

			// We tried to register a non-MPP attempt for a MPP
			// payment.
			case mpp == nil && hMpp != nil:
				return ErrMPPayment

			// We tried to register a MPP shard for a non-MPP
			// payment.
			case mpp != nil && hMpp == nil:
				return ErrNonMPPayment

			// Non-MPP payment, nothing more to validate.
			case mpp == nil:
				continue
			}

			// Check that MPP options match.
			if mpp.PaymentAddr() != hMpp.PaymentAddr() {
				return ErrMPPPaymentAddrMismatch
			}

			if mpp.TotalMsat() != hMpp.TotalMsat() {
				return ErrMPPTotalAmountMismatch
			}
		}

		// If this is a non-MPP attempt, it must match the total amount
		// exactly.
		amt := attempt.Route.ReceiverAmt()
		if mpp == nil && amt != p.Info.Value {
			return ErrValueMismatch
		}

		// Ensure we aren't sending more than the total payment amount.
		sentAmt, _ := p.SentAmt()
		if sentAmt+amt > p.Info.Value {
			return ErrValueExceedsAmt
		}

		htlcsBucket, err := bucket.CreateBucketIfNotExists(
			paymentHtlcsBucket,
		)
		if err != nil {
			return err
		}

		// Create a bucket for this attempt. Fail if the bucket already
		// exists.
		htlcBucket, err := htlcsBucket.CreateBucket(htlcIDBytes)
		if err != nil {
			return err
		}

		err = htlcBucket.Put(htlcAttemptInfoKey, htlcInfoBytes)
		if err != nil {
			return err
		}

		// Retrieve attempt info for the notification.
		payment, err = fetchPayment(bucket)
		return err
	})
	if err != nil {
		return nil, err
	}

	return payment, err
}

// SettleAttempt marks the given attempt settled with the preimage. If this is
// a multi-shard payment, this might implicitly mean that the full payment
// succeeded.
//
// After invoking this method, InitPayment should always return an error to
// prevent us from making duplicate payments to the same payment hash. The
// provided preimage is atomically saved to the DB for record keeping.
func (p *PaymentControl) SettleAttempt(hash lntypes.Hash,
	attemptID uint64, settleInfo *HTLCSettleInfo) (*MPPayment, error) {

	var b bytes.Buffer
	if err := serializeHTLCSettleInfo(&b, settleInfo); err != nil {
		return nil, err
	}
	settleBytes := b.Bytes()

	return p.updateHtlcKey(hash, attemptID, htlcSettleInfoKey, settleBytes)
}

// FailAttempt marks the given payment attempt as failed.
func (p *PaymentControl) FailAttempt(hash lntypes.Hash,
	attemptID uint64, failInfo *HTLCFailInfo) (*MPPayment, error) {

	var b bytes.Buffer
	if err := serializeHTLCFailInfo(&b, failInfo); err != nil {
		return nil, err
	}
	failBytes := b.Bytes()

	return p.updateHtlcKey(hash, attemptID, htlcFailInfoKey, failBytes)
}
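
// Illustrative lifecycle sketch (not part of the upstream source): the happy
// path through the PaymentControl API above. The hash, creation info, attempt
// and settle info are assumed to be supplied by the caller.
func examplePaymentLifecycle(pc *PaymentControl, hash lntypes.Hash,
	info *PaymentCreationInfo, attempt *HTLCAttemptInfo,
	settle *HTLCSettleInfo) error {

	// Move the payment into the InFlight state (or fail if a payment to
	// this hash is already in flight or settled).
	if err := pc.InitPayment(hash, info); err != nil {
		return err
	}

	// Record the HTLC we are about to send out.
	if _, err := pc.RegisterAttempt(hash, attempt); err != nil {
		return err
	}

	// Mark the attempt settled once the preimage is learned; for a
	// single-shard payment this settles the payment as a whole.
	_, err := pc.SettleAttempt(hash, attempt.AttemptID, settle)
	return err
}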

// updateHtlcKey updates a database key for the specified htlc.
func (p *PaymentControl) updateHtlcKey(paymentHash lntypes.Hash,
	attemptID uint64, key, value []byte) (*MPPayment, error) {

	htlcIDBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(htlcIDBytes, attemptID)

	var payment *MPPayment
	err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
		payment = nil

		bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
		if err != nil {
			return err
		}

		p, err := fetchPayment(bucket)
		if err != nil {
			return err
		}

		// We can only update keys of in-flight payments. We allow
		// updating keys even if the payment has reached a terminal
		// condition, since the HTLC outcomes must still be updated.
		if err := ensureInFlight(p); err != nil {
			return err
		}

		htlcsBucket := bucket.NestedReadWriteBucket(paymentHtlcsBucket)
		if htlcsBucket == nil {
			return fmt.Errorf("htlcs bucket not found")
		}

		htlcBucket := htlcsBucket.NestedReadWriteBucket(htlcIDBytes)
		if htlcBucket == nil {
			return fmt.Errorf("HTLC with ID %v not registered",
				attemptID)
		}

		// Make sure the shard is not already failed or settled.
		if htlcBucket.Get(htlcFailInfoKey) != nil {
			return ErrAttemptAlreadyFailed
		}

		if htlcBucket.Get(htlcSettleInfoKey) != nil {
			return ErrAttemptAlreadySettled
		}

		// Add or update the key for this htlc.
		err = htlcBucket.Put(key, value)
		if err != nil {
			return err
		}

		// Retrieve attempt info for the notification.
		payment, err = fetchPayment(bucket)
		return err
	})
	if err != nil {
		return nil, err
	}

	return payment, err
}

// Fail transitions a payment into the Failed state, and records the reason the
// payment failed. After invoking this method, InitPayment should return nil on
// its next call for this payment hash, allowing the switch to make a
// subsequent payment.
func (p *PaymentControl) Fail(paymentHash lntypes.Hash,
	reason FailureReason) (*MPPayment, error) {

	var (
		updateErr error
		payment   *MPPayment
	)
	err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
		// Reset the update error, to avoid carrying over an error
		// from a previous execution of the batched db transaction.
		updateErr = nil
		payment = nil

		bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
		if err == ErrPaymentNotInitiated {
			updateErr = ErrPaymentNotInitiated
			return nil
		} else if err != nil {
			return err
		}

		// We mark the payment as failed as long as it is known. This
		// lets the last attempt to fail with a terminal write its
		// failure to the PaymentControl without synchronizing with
		// other attempts.
		paymentStatus, err := fetchPaymentStatus(bucket)
		if err != nil {
			return err
		}

		if paymentStatus == StatusUnknown {
			updateErr = ErrPaymentNotInitiated
			return nil
		}

		// Put the failure reason in the bucket for record keeping.
		v := []byte{byte(reason)}
		err = bucket.Put(paymentFailInfoKey, v)
		if err != nil {
			return err
		}

		// Retrieve attempt info for the notification, if available.
		payment, err = fetchPayment(bucket)
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	return payment, updateErr
}
|
||||
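
To make the failure flow concrete, here is a hedged caller-side sketch. It assumes an already-opened `*channeldb.DB` and a hash previously registered through `InitPayment`; the names `failExample`, `db`, and `hash` are illustrative and not part of this file, while `NewPaymentControl`, `Fail`, and `FailureReasonTimeout` are the exported API shown above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/lntypes"
)

// failExample marks a known payment as failed and inspects the result.
// db and hash are assumed to come from the surrounding application.
func failExample(db *channeldb.DB, hash lntypes.Hash) {
	pControl := channeldb.NewPaymentControl(db)

	payment, err := pControl.Fail(hash, channeldb.FailureReasonTimeout)
	if err != nil {
		// ErrPaymentNotInitiated is returned for hashes that
		// InitPayment never saw.
		log.Printf("unable to fail payment: %v", err)
		return
	}

	// The returned MPPayment reflects the new terminal state.
	fmt.Println(payment.Status, payment.FailureReason)
}
```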

// FetchPayment returns information about a payment from the database.
func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) (
	*MPPayment, error) {

	var payment *MPPayment
	err := kvdb.View(p.db, func(tx kvdb.ReadTx) error {
		bucket, err := fetchPaymentBucket(tx, paymentHash)
		if err != nil {
			return err
		}

		payment, err = fetchPayment(bucket)

		return err
	})
	if err != nil {
		return nil, err
	}

	return payment, nil
}

// createPaymentBucket creates or fetches the sub-bucket assigned to this
// payment hash.
func createPaymentBucket(tx kvdb.RwTx, paymentHash lntypes.Hash) (
	kvdb.RwBucket, error) {

	payments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
	if err != nil {
		return nil, err
	}

	return payments.CreateBucketIfNotExists(paymentHash[:])
}

// fetchPaymentBucket fetches the sub-bucket assigned to this payment hash. If
// the bucket does not exist, it returns ErrPaymentNotInitiated.
func fetchPaymentBucket(tx kvdb.ReadTx, paymentHash lntypes.Hash) (
	kvdb.ReadBucket, error) {

	payments := tx.ReadBucket(paymentsRootBucket)
	if payments == nil {
		return nil, ErrPaymentNotInitiated
	}

	bucket := payments.NestedReadBucket(paymentHash[:])
	if bucket == nil {
		return nil, ErrPaymentNotInitiated
	}

	return bucket, nil
}

// fetchPaymentBucketUpdate is identical to fetchPaymentBucket, but it returns a
// bucket that can be written to.
func fetchPaymentBucketUpdate(tx kvdb.RwTx, paymentHash lntypes.Hash) (
	kvdb.RwBucket, error) {

	payments := tx.ReadWriteBucket(paymentsRootBucket)
	if payments == nil {
		return nil, ErrPaymentNotInitiated
	}

	bucket := payments.NestedReadWriteBucket(paymentHash[:])
	if bucket == nil {
		return nil, ErrPaymentNotInitiated
	}

	return bucket, nil
}

// nextPaymentSequence returns the next sequence number to store for a new
// payment.
func nextPaymentSequence(tx kvdb.RwTx) ([]byte, error) {
	payments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
	if err != nil {
		return nil, err
	}

	seq, err := payments.NextSequence()
	if err != nil {
		return nil, err
	}

	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, seq)
	return b, nil
}

// fetchPaymentStatus fetches the payment status of the payment. If the payment
// isn't found, it will default to "StatusUnknown".
func fetchPaymentStatus(bucket kvdb.ReadBucket) (PaymentStatus, error) {
	// Creation info should be set for all payments, regardless of state.
	// If not, it is unknown.
	if bucket.Get(paymentCreationInfoKey) == nil {
		return StatusUnknown, nil
	}

	payment, err := fetchPayment(bucket)
	if err != nil {
		return 0, err
	}

	return payment.Status, nil
}

// ensureInFlight checks whether the payment found in the given bucket has
// status InFlight, and returns an error otherwise. This should be used to
// ensure we only mark in-flight payments as succeeded or failed.
func ensureInFlight(payment *MPPayment) error {
	paymentStatus := payment.Status

	switch {

	// The payment was indeed InFlight.
	case paymentStatus == StatusInFlight:
		return nil

	// Our records show the payment as unknown, meaning it never
	// should have left the switch.
	case paymentStatus == StatusUnknown:
		return ErrPaymentNotInitiated

	// The payment succeeded previously.
	case paymentStatus == StatusSucceeded:
		return ErrPaymentAlreadySucceeded

	// The payment was already failed.
	case paymentStatus == StatusFailed:
		return ErrPaymentAlreadyFailed

	default:
		return ErrUnknownPaymentStatus
	}
}

// InFlightPayment is a wrapper around a payment that has status InFlight.
type InFlightPayment struct {
	// Info is the PaymentCreationInfo of the in-flight payment.
	Info *PaymentCreationInfo

	// Attempts is the set of payment attempts that were made to this
	// payment hash.
	//
	// NOTE: Might be empty.
	Attempts []HTLCAttemptInfo
}

// FetchInFlightPayments returns all payments with status InFlight.
func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, error) {
	var inFlights []*InFlightPayment
	err := kvdb.View(p.db, func(tx kvdb.ReadTx) error {
		payments := tx.ReadBucket(paymentsRootBucket)
		if payments == nil {
			return nil
		}

		return payments.ForEach(func(k, _ []byte) error {
			bucket := payments.NestedReadBucket(k)
			if bucket == nil {
				return fmt.Errorf("non bucket element")
			}

			// If the status is not InFlight, we can return early.
			paymentStatus, err := fetchPaymentStatus(bucket)
			if err != nil {
				return err
			}

			if paymentStatus != StatusInFlight {
				return nil
			}

			inFlight := &InFlightPayment{}

			// Get the CreationInfo.
			inFlight.Info, err = fetchCreationInfo(bucket)
			if err != nil {
				return err
			}

			htlcsBucket := bucket.NestedReadBucket(
				paymentHtlcsBucket,
			)
			if htlcsBucket == nil {
				return nil
			}

			// Fetch all HTLCs attempted for this payment.
			htlcs, err := fetchHtlcAttempts(htlcsBucket)
			if err != nil {
				return err
			}

			// We only care about the static info for the HTLCs
			// still in flight, so convert the result to a slice of
			// HTLCAttemptInfos.
			for _, h := range htlcs {
				// Skip HTLCs not in flight.
				if h.Settle != nil || h.Failure != nil {
					continue
				}

				inFlight.Attempts = append(
					inFlight.Attempts, h.HTLCAttemptInfo,
				)
			}

			inFlights = append(inFlights, inFlight)
			return nil
		})
	})
	if err != nil {
		return nil, err
	}

	return inFlights, nil
}
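
A sketch of how a caller might use FetchInFlightPayments on startup; `resumePayments` and the `resume` callback are hypothetical names for the surrounding application's logic, while the types and methods used are the real API above.

```go
package main

import (
	"log"

	"github.com/lightningnetwork/lnd/channeldb"
)

// resumePayments is a startup-path sketch: fetch every payment still
// marked InFlight and hand its unresolved attempts back to a resume
// function supplied by the caller.
func resumePayments(db *channeldb.DB,
	resume func(*channeldb.InFlightPayment)) error {

	pControl := channeldb.NewPaymentControl(db)

	inFlights, err := pControl.FetchInFlightPayments()
	if err != nil {
		return err
	}

	for _, inFlight := range inFlights {
		log.Printf("resuming payment %v with %d unresolved attempts",
			inFlight.Info.PaymentHash, len(inFlight.Attempts))
		resume(inFlight)
	}

	return nil
}
```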
909
vendor/github.com/lightningnetwork/lnd/channeldb/payments.go
generated
vendored
Normal file
@@ -0,0 +1,909 @@

package channeldb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"sort"
	"time"

	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/record"
	"github.com/lightningnetwork/lnd/routing/route"
	"github.com/lightningnetwork/lnd/tlv"
)

var (
	// paymentsRootBucket is the name of the top-level bucket within the
	// database that stores all data related to payments. Within this
	// bucket, each payment has its own sub-bucket keyed by its payment
	// hash.
	//
	// Bucket hierarchy:
	//
	// root-bucket
	//      |
	//      |-- <paymenthash>
	//      |        |--sequence-key: <sequence number>
	//      |        |--creation-info-key: <creation info>
	//      |        |--fail-info-key: <(optional) fail info>
	//      |        |
	//      |        |--payment-htlcs-bucket (shard-bucket)
	//      |        |        |
	//      |        |        |-- <htlc attempt ID>
	//      |        |        |       |--htlc-attempt-info-key: <htlc attempt info>
	//      |        |        |       |--htlc-settle-info-key: <(optional) settle info>
	//      |        |        |       |--htlc-fail-info-key: <(optional) fail info>
	//      |        |        |
	//      |        |        |-- <htlc attempt ID>
	//      |        |        |       |
	//      |        |       ...     ...
	//      |        |
	//      |        |
	//      |        |--duplicate-bucket (only for old, completed payments)
	//      |                 |
	//      |                 |-- <seq-num>
	//      |                 |       |--sequence-key: <sequence number>
	//      |                 |       |--creation-info-key: <creation info>
	//      |                 |       |--attempt-info-key: <attempt info>
	//      |                 |       |--settle-info-key: <settle info>
	//      |                 |       |--fail-info-key: <fail info>
	//      |                 |
	//      |                 |-- <seq-num>
	//      |                 |       |
	//      |                ...     ...
	//      |
	//      |-- <paymenthash>
	//      |        |
	//      |       ...
	//     ...
	//
	paymentsRootBucket = []byte("payments-root-bucket")

	// paymentSequenceKey is a key used in the payment's sub-bucket to
	// store the sequence number of the payment.
	paymentSequenceKey = []byte("payment-sequence-key")

	// paymentCreationInfoKey is a key used in the payment's sub-bucket to
	// store the creation info of the payment.
	paymentCreationInfoKey = []byte("payment-creation-info")

	// paymentHtlcsBucket is a bucket where we'll store the information
	// about the HTLCs that were attempted for a payment.
	paymentHtlcsBucket = []byte("payment-htlcs-bucket")

	// htlcAttemptInfoKey is a key used in a HTLC's sub-bucket to store the
	// info about the attempt that was done for the HTLC in question.
	htlcAttemptInfoKey = []byte("htlc-attempt-info")

	// htlcSettleInfoKey is a key used in a HTLC's sub-bucket to store the
	// settle info, if any.
	htlcSettleInfoKey = []byte("htlc-settle-info")

	// htlcFailInfoKey is a key used in a HTLC's sub-bucket to store
	// failure information, if any.
	htlcFailInfoKey = []byte("htlc-fail-info")

	// paymentFailInfoKey is a key used in the payment's sub-bucket to
	// store information about the reason a payment failed.
	paymentFailInfoKey = []byte("payment-fail-info")
)
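
To make the bucket hierarchy above concrete, here is a small package-internal sketch (it uses the unexported keys defined in this var block, so it could only live inside channeldb) that walks one payment's shard buckets and counts settled attempts; `countSettledShards` is an illustrative name, not an existing function.

```go
// countSettledShards walks <paymenthash>/payment-htlcs-bucket/<attempt ID>
// exactly as laid out in the diagram above and counts shards that carry a
// settle-info key. Sketch only; assumes it is compiled inside channeldb.
func countSettledShards(tx kvdb.ReadTx, paymentHash lntypes.Hash) (int, error) {
	payments := tx.ReadBucket(paymentsRootBucket)
	if payments == nil {
		return 0, ErrPaymentNotInitiated
	}

	bucket := payments.NestedReadBucket(paymentHash[:])
	if bucket == nil {
		return 0, ErrPaymentNotInitiated
	}

	htlcs := bucket.NestedReadBucket(paymentHtlcsBucket)
	if htlcs == nil {
		return 0, nil
	}

	var settled int
	err := htlcs.ForEach(func(attemptID, _ []byte) error {
		shard := htlcs.NestedReadBucket(attemptID)
		if shard != nil && shard.Get(htlcSettleInfoKey) != nil {
			settled++
		}
		return nil
	})
	return settled, err
}
```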

// FailureReason encodes the reason a payment ultimately failed.
type FailureReason byte

const (
	// FailureReasonTimeout indicates that the payment timed out before a
	// successful payment attempt was made.
	FailureReasonTimeout FailureReason = 0

	// FailureReasonNoRoute indicates no successful route to the
	// destination was found during path finding.
	FailureReasonNoRoute FailureReason = 1

	// FailureReasonError indicates that an unexpected error happened during
	// payment.
	FailureReasonError FailureReason = 2

	// FailureReasonPaymentDetails indicates that either the hash is unknown
	// or the final cltv delta or amount is incorrect.
	FailureReasonPaymentDetails FailureReason = 3

	// FailureReasonInsufficientBalance indicates that we didn't have enough
	// balance to complete the payment.
	FailureReasonInsufficientBalance FailureReason = 4

	// TODO(halseth): cancel state.

	// TODO(joostjager): Add failure reasons for:
	// LocalLiquidityInsufficient, RemoteCapacityInsufficient.
)

// Error returns a human readable error string for the FailureReason.
func (r FailureReason) Error() string {
	return r.String()
}

// String returns a human readable FailureReason.
func (r FailureReason) String() string {
	switch r {
	case FailureReasonTimeout:
		return "timeout"
	case FailureReasonNoRoute:
		return "no_route"
	case FailureReasonError:
		return "error"
	case FailureReasonPaymentDetails:
		return "incorrect_payment_details"
	case FailureReasonInsufficientBalance:
		return "insufficient_balance"
	}

	return "unknown"
}
// PaymentStatus represents the current status of a payment.
type PaymentStatus byte

const (
	// StatusUnknown is the status where a payment has never been initiated
	// and hence is unknown.
	StatusUnknown PaymentStatus = 0

	// StatusInFlight is the status where a payment has been initiated, but
	// a response has not been received.
	StatusInFlight PaymentStatus = 1

	// StatusSucceeded is the status where a payment has been initiated and
	// the payment was completed successfully.
	StatusSucceeded PaymentStatus = 2

	// StatusFailed is the status where a payment has been initiated and a
	// failure result has come back.
	StatusFailed PaymentStatus = 3
)

// String returns a readable representation of the payment status.
func (ps PaymentStatus) String() string {
	switch ps {
	case StatusUnknown:
		return "Unknown"
	case StatusInFlight:
		return "In Flight"
	case StatusSucceeded:
		return "Succeeded"
	case StatusFailed:
		return "Failed"
	default:
		return "Unknown"
	}
}
// PaymentCreationInfo is the information necessary to have ready when
// initiating a payment, moving it into state InFlight.
type PaymentCreationInfo struct {
	// PaymentHash is the hash this payment is paying to.
	PaymentHash lntypes.Hash

	// Value is the amount we are paying.
	Value lnwire.MilliSatoshi

	// CreationTime is the time when this payment was initiated.
	CreationTime time.Time

	// PaymentRequest is the full payment request, if any.
	PaymentRequest []byte
}

// FetchPayments returns all sent payments found in the DB.
//
// nolint: dupl
func (db *DB) FetchPayments() ([]*MPPayment, error) {
	var payments []*MPPayment

	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		paymentsBucket := tx.ReadBucket(paymentsRootBucket)
		if paymentsBucket == nil {
			return nil
		}

		return paymentsBucket.ForEach(func(k, v []byte) error {
			bucket := paymentsBucket.NestedReadBucket(k)
			if bucket == nil {
				// We only expect sub-buckets to be found in
				// this top-level bucket.
				return fmt.Errorf("non bucket element in " +
					"payments bucket")
			}

			p, err := fetchPayment(bucket)
			if err != nil {
				return err
			}

			payments = append(payments, p)

			// For older versions of lnd, duplicate payments to a
			// payment hash were possible. These will be found in a
			// sub-bucket indexed by their sequence number if
			// available.
			duplicatePayments, err := fetchDuplicatePayments(bucket)
			if err != nil {
				return err
			}

			payments = append(payments, duplicatePayments...)
			return nil
		})
	})
	if err != nil {
		return nil, err
	}

	// Before returning, sort the payments by their sequence number.
	sort.Slice(payments, func(i, j int) bool {
		return payments[i].SequenceNum < payments[j].SequenceNum
	})

	return payments, nil
}

func fetchCreationInfo(bucket kvdb.ReadBucket) (*PaymentCreationInfo, error) {
	b := bucket.Get(paymentCreationInfoKey)
	if b == nil {
		return nil, fmt.Errorf("creation info not found")
	}

	r := bytes.NewReader(b)
	return deserializePaymentCreationInfo(r)
}

func fetchPayment(bucket kvdb.ReadBucket) (*MPPayment, error) {
	seqBytes := bucket.Get(paymentSequenceKey)
	if seqBytes == nil {
		return nil, fmt.Errorf("sequence number not found")
	}

	sequenceNum := binary.BigEndian.Uint64(seqBytes)

	// Get the PaymentCreationInfo.
	creationInfo, err := fetchCreationInfo(bucket)
	if err != nil {
		return nil, err
	}

	var htlcs []HTLCAttempt
	htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)
	if htlcsBucket != nil {
		// Get the payment attempts. This can be empty.
		htlcs, err = fetchHtlcAttempts(htlcsBucket)
		if err != nil {
			return nil, err
		}
	}

	// Get failure reason if available.
	var failureReason *FailureReason
	b := bucket.Get(paymentFailInfoKey)
	if b != nil {
		reason := FailureReason(b[0])
		failureReason = &reason
	}

	// Go through all HTLCs for this payment, noting whether we have any
	// settled HTLC, and any still in-flight.
	var inflight, settled bool
	for _, h := range htlcs {
		if h.Failure != nil {
			continue
		}

		if h.Settle != nil {
			settled = true
			continue
		}

		// If any of the HTLCs are not failed nor settled, we
		// still have inflight HTLCs.
		inflight = true
	}

	// Use the DB state to determine the status of the payment.
	var paymentStatus PaymentStatus

	switch {

	// If any of the HTLCs did succeed and there are no HTLCs in
	// flight, the payment succeeded.
	case !inflight && settled:
		paymentStatus = StatusSucceeded

	// If we have no in-flight HTLCs, and the payment failure is set, the
	// payment is considered failed.
	case !inflight && failureReason != nil:
		paymentStatus = StatusFailed

	// Otherwise it is still in flight.
	default:
		paymentStatus = StatusInFlight
	}

	return &MPPayment{
		SequenceNum:   sequenceNum,
		Info:          creationInfo,
		HTLCs:         htlcs,
		FailureReason: failureReason,
		Status:        paymentStatus,
	}, nil
}
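
The status logic at the end of fetchPayment reduces to a pure function of two flags plus the presence of a failure reason. A hypothetical package-internal restatement, convenient for a table-driven test:

```go
// deriveStatus restates fetchPayment's switch as a standalone helper:
// settled wins once nothing is in flight, a recorded failure reason makes
// the payment failed, and anything else is still in flight. Sketch only.
func deriveStatus(inflight, settled, haveFailureReason bool) PaymentStatus {
	switch {
	case !inflight && settled:
		return StatusSucceeded
	case !inflight && haveFailureReason:
		return StatusFailed
	default:
		return StatusInFlight
	}
}
```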

// fetchHtlcAttempts retrieves all htlc attempts made for the payment found in
// the given bucket.
func fetchHtlcAttempts(bucket kvdb.ReadBucket) ([]HTLCAttempt, error) {
	htlcs := make([]HTLCAttempt, 0)

	err := bucket.ForEach(func(k, _ []byte) error {
		aid := byteOrder.Uint64(k)
		htlcBucket := bucket.NestedReadBucket(k)

		attemptInfo, err := fetchHtlcAttemptInfo(
			htlcBucket,
		)
		if err != nil {
			return err
		}
		attemptInfo.AttemptID = aid

		htlc := HTLCAttempt{
			HTLCAttemptInfo: *attemptInfo,
		}

		// Settle info might be nil.
		htlc.Settle, err = fetchHtlcSettleInfo(htlcBucket)
		if err != nil {
			return err
		}

		// Failure info might be nil.
		htlc.Failure, err = fetchHtlcFailInfo(htlcBucket)
		if err != nil {
			return err
		}

		htlcs = append(htlcs, htlc)
		return nil
	})
	if err != nil {
		return nil, err
	}

	return htlcs, nil
}

// fetchHtlcAttemptInfo fetches the payment attempt info for this htlc from the
// bucket.
func fetchHtlcAttemptInfo(bucket kvdb.ReadBucket) (*HTLCAttemptInfo, error) {
	b := bucket.Get(htlcAttemptInfoKey)
	if b == nil {
		return nil, errNoAttemptInfo
	}

	r := bytes.NewReader(b)
	return deserializeHTLCAttemptInfo(r)
}

// fetchHtlcSettleInfo retrieves the settle info for the htlc. If the htlc isn't
// settled, nil is returned.
func fetchHtlcSettleInfo(bucket kvdb.ReadBucket) (*HTLCSettleInfo, error) {
	b := bucket.Get(htlcSettleInfoKey)
	if b == nil {
		// Settle info is optional.
		return nil, nil
	}

	r := bytes.NewReader(b)
	return deserializeHTLCSettleInfo(r)
}

// fetchHtlcFailInfo retrieves the failure info for the htlc. If the htlc hasn't
// failed, nil is returned.
func fetchHtlcFailInfo(bucket kvdb.ReadBucket) (*HTLCFailInfo, error) {
	b := bucket.Get(htlcFailInfoKey)
	if b == nil {
		// Fail info is optional.
		return nil, nil
	}

	r := bytes.NewReader(b)
	return deserializeHTLCFailInfo(r)
}

// PaymentsQuery represents a query to the payments database starting or ending
// at a certain offset index. The number of retrieved records can be limited.
type PaymentsQuery struct {
	// IndexOffset determines the starting point of the payments query and
	// is always exclusive. In normal order, the query starts at the next
	// higher (available) index compared to IndexOffset. In reversed order,
	// the query ends at the next lower (available) index compared to the
	// IndexOffset. In the case of a zero index_offset, the query will start
	// with the oldest payment when paginating forwards, or will end with
	// the most recent payment when paginating backwards.
	IndexOffset uint64

	// MaxPayments is the maximal number of payments returned in the
	// payments query.
	MaxPayments uint64

	// Reversed gives a meaning to the IndexOffset. If reversed is set to
	// true, the query will fetch payments with indices lower than the
	// IndexOffset, otherwise, it will return payments with indices greater
	// than the IndexOffset.
	Reversed bool

	// If IncludeIncomplete is true, then return payments that have not yet
	// fully completed. This means that pending payments, as well as failed
	// payments will show up if this field is set to true.
	IncludeIncomplete bool
}

// PaymentsResponse contains the result of a query to the payments database.
// It includes the set of payments that match the query and integers which
// represent the index of the first and last item returned in the series of
// payments. These integers allow callers to resume their query in the event
// that the query's response exceeds the max number of returnable events.
type PaymentsResponse struct {
	// Payments is the set of payments returned from the database for the
	// PaymentsQuery.
	Payments []*MPPayment

	// FirstIndexOffset is the index of the first element in the set of
	// returned MPPayments. Callers can use this to resume their query
	// in the event that the slice has too many events to fit into a single
	// response. The offset can be used to continue reverse pagination.
	FirstIndexOffset uint64

	// LastIndexOffset is the index of the last element in the set of
	// returned MPPayments. Callers can use this to resume their query
	// in the event that the slice has too many events to fit into a single
	// response. The offset can be used to continue forward pagination.
	LastIndexOffset uint64
}

// QueryPayments is a query to the payments database which is restricted
// to a subset of payments by the payments query, containing an offset
// index and a maximum number of returned payments.
func (db *DB) QueryPayments(query PaymentsQuery) (PaymentsResponse, error) {
	var resp PaymentsResponse

	allPayments, err := db.FetchPayments()
	if err != nil {
		return resp, err
	}

	if len(allPayments) == 0 {
		return resp, nil
	}

	indexExclusiveLimit := query.IndexOffset
	// In backward pagination, if the index limit is the default 0 value,
	// we set our limit to maxint to include all payments from the highest
	// sequence number on.
	if query.Reversed && indexExclusiveLimit == 0 {
		indexExclusiveLimit = math.MaxInt64
	}

	for i := range allPayments {
		var payment *MPPayment

		// If we have the max number of payments we want, exit.
		if uint64(len(resp.Payments)) == query.MaxPayments {
			break
		}

		if query.Reversed {
			payment = allPayments[len(allPayments)-1-i]

			// In the reversed direction, skip over all payments
			// that have sequence numbers greater than or equal to
			// the index offset. We skip payments with equal index
			// because the offset is exclusive.
			if payment.SequenceNum >= indexExclusiveLimit {
				continue
			}
		} else {
			payment = allPayments[i]

			// In the forward direction, skip over all payments that
			// have sequence numbers less than or equal to the index
			// offset. We skip payments with equal indexes because
			// the index offset is exclusive.
			if payment.SequenceNum <= indexExclusiveLimit {
				continue
			}
		}

		// To keep compatibility with the old API, we only return
		// non-succeeded payments if requested.
		if payment.Status != StatusSucceeded &&
			!query.IncludeIncomplete {

			continue
		}

		resp.Payments = append(resp.Payments, payment)
	}

	// Need to swap the payments slice order if reversed order.
	if query.Reversed {
		for l, r := 0, len(resp.Payments)-1; l < r; l, r = l+1, r-1 {
			resp.Payments[l], resp.Payments[r] =
				resp.Payments[r], resp.Payments[l]
		}
	}

	// Set the first and last index of the returned payments so that the
	// caller can resume from this point later on.
	if len(resp.Payments) > 0 {
		resp.FirstIndexOffset = resp.Payments[0].SequenceNum
		resp.LastIndexOffset =
			resp.Payments[len(resp.Payments)-1].SequenceNum
	}

	return resp, err
}
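
As a usage sketch, the pagination fields combine like this when walking the payment log newest-first; `listRecentPayments` is an illustrative caller, while `PaymentsQuery`, `QueryPayments`, and the offset fields are the real API described above.

```go
package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/channeldb"
)

// listRecentPayments pages backwards through the payment log, 50 payments
// at a time, using FirstIndexOffset exactly as the field comments above
// describe. db is assumed to be an opened *channeldb.DB.
func listRecentPayments(db *channeldb.DB) error {
	query := channeldb.PaymentsQuery{
		IndexOffset:       0, // zero + Reversed: start at the newest
		MaxPayments:       50,
		Reversed:          true,
		IncludeIncomplete: true,
	}

	for {
		resp, err := db.QueryPayments(query)
		if err != nil {
			return err
		}
		if len(resp.Payments) == 0 {
			return nil
		}

		for _, p := range resp.Payments {
			fmt.Println(p.SequenceNum, p.Status)
		}

		// Continue reverse pagination below the oldest payment
		// seen so far.
		query.IndexOffset = resp.FirstIndexOffset
	}
}
```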

// DeletePayments deletes all completed and failed payments from the DB.
func (db *DB) DeletePayments() error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		payments := tx.ReadWriteBucket(paymentsRootBucket)
		if payments == nil {
			return nil
		}

		var deleteBuckets [][]byte
		err := payments.ForEach(func(k, _ []byte) error {
			bucket := payments.NestedReadWriteBucket(k)
			if bucket == nil {
				// We only expect sub-buckets to be found in
				// this top-level bucket.
				return fmt.Errorf("non bucket element in " +
					"payments bucket")
			}

			// Fetch the payment status so we can decide whether
			// this payment is safe to delete.
			paymentStatus, err := fetchPaymentStatus(bucket)
			if err != nil {
				return err
			}

			// If the status is InFlight, we cannot safely delete
			// the payment information, so we return early.
			if paymentStatus == StatusInFlight {
				return nil
			}

			deleteBuckets = append(deleteBuckets, k)
			return nil
		})
		if err != nil {
			return err
		}

		for _, k := range deleteBuckets {
			if err := payments.DeleteNestedBucket(k); err != nil {
				return err
			}
		}

		return nil
	})
}

// nolint: dupl
func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) error {
	var scratch [8]byte

	if _, err := w.Write(c.PaymentHash[:]); err != nil {
		return err
	}

	byteOrder.PutUint64(scratch[:], uint64(c.Value))
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	if err := serializeTime(w, c.CreationTime); err != nil {
		return err
	}

	byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest)))
	if _, err := w.Write(scratch[:4]); err != nil {
		return err
	}

	if _, err := w.Write(c.PaymentRequest[:]); err != nil {
		return err
	}

	return nil
}

func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, error) {
	var scratch [8]byte

	c := &PaymentCreationInfo{}

	if _, err := io.ReadFull(r, c.PaymentHash[:]); err != nil {
		return nil, err
	}

	if _, err := io.ReadFull(r, scratch[:]); err != nil {
		return nil, err
	}
	c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))

	creationTime, err := deserializeTime(r)
	if err != nil {
		return nil, err
	}
	c.CreationTime = creationTime

	if _, err := io.ReadFull(r, scratch[:4]); err != nil {
		return nil, err
	}

	reqLen := uint32(byteOrder.Uint32(scratch[:4]))
	payReq := make([]byte, reqLen)
	if reqLen > 0 {
		if _, err := io.ReadFull(r, payReq); err != nil {
			return nil, err
		}
	}
	c.PaymentRequest = payReq

	return c, nil
}
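
A minimal round-trip sketch for the two functions above, written as if it lived inside the channeldb package (both functions are unexported); the field values and the `roundTripCreationInfo` name are placeholders.

```go
// roundTripCreationInfo serializes a PaymentCreationInfo into a buffer and
// reads it back, expecting a field-for-field match. Sketch only.
func roundTripCreationInfo() error {
	info := &PaymentCreationInfo{
		Value:          lnwire.MilliSatoshi(100000),
		CreationTime:   time.Unix(1600000000, 0),
		PaymentRequest: []byte("lnbc..."), // placeholder invoice
	}

	var b bytes.Buffer
	if err := serializePaymentCreationInfo(&b, info); err != nil {
		return err
	}

	got, err := deserializePaymentCreationInfo(&b)
	if err != nil {
		return err
	}

	if got.Value != info.Value || !got.CreationTime.Equal(info.CreationTime) {
		return fmt.Errorf("round trip mismatch: %v != %v", got, info)
	}
	return nil
}
```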

func serializeHTLCAttemptInfo(w io.Writer, a *HTLCAttemptInfo) error {
	if err := WriteElements(w, a.SessionKey); err != nil {
		return err
	}

	if err := SerializeRoute(w, a.Route); err != nil {
		return err
	}

	return serializeTime(w, a.AttemptTime)
}

func deserializeHTLCAttemptInfo(r io.Reader) (*HTLCAttemptInfo, error) {
	a := &HTLCAttemptInfo{}
	err := ReadElements(r, &a.SessionKey)
	if err != nil {
		return nil, err
	}
	a.Route, err = DeserializeRoute(r)
	if err != nil {
		return nil, err
	}

	a.AttemptTime, err = deserializeTime(r)
	if err != nil {
		return nil, err
	}

	return a, nil
}

func serializeHop(w io.Writer, h *route.Hop) error {
	if err := WriteElements(w,
		h.PubKeyBytes[:],
		h.ChannelID,
		h.OutgoingTimeLock,
		h.AmtToForward,
	); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, h.LegacyPayload); err != nil {
		return err
	}

	// For legacy payloads, we don't need to write any TLV records, so
	// we'll write a zero indicating that our serialized TLV map has no
	// records.
	if h.LegacyPayload {
		return WriteElements(w, uint32(0))
	}

	// Gather all non-primitive TLV records so that they can be serialized
	// as a single blob.
	//
	// TODO(conner): add migration to unify all fields in a single TLV
	// blob. The split approach will cause headaches down the road as more
	// fields are added, which we can avoid by having a single TLV stream
	// for all payload fields.
	var records []tlv.Record
	if h.MPP != nil {
		records = append(records, h.MPP.Record())
	}

	// Final sanity check to absolutely rule out custom records that are not
	// custom and write into the standard range.
	if err := h.CustomRecords.Validate(); err != nil {
		return err
	}

	// Convert custom records to tlv and add to the record list.
	// MapToRecords sorts the list, so adding it here will keep the list
	// canonical.
	tlvRecords := tlv.MapToRecords(h.CustomRecords)
	records = append(records, tlvRecords...)

	// Now we'll transform our slice of records into a map of the
	// raw bytes, then serialize them in-line with a length (number of
	// elements) prefix.
	mapRecords, err := tlv.RecordsToMap(records)
	if err != nil {
		return err
	}

	numRecords := uint32(len(mapRecords))
	if err := WriteElements(w, numRecords); err != nil {
		return err
	}

	for recordType, rawBytes := range mapRecords {
		if err := WriteElements(w, recordType); err != nil {
			return err
		}

		if err := wire.WriteVarBytes(w, 0, rawBytes); err != nil {
			return err
		}
	}

	return nil
}

// maxOnionPayloadSize is the largest Sphinx payload possible, so we don't need
// to read/write a TLV stream larger than this.
const maxOnionPayloadSize = 1300

func deserializeHop(r io.Reader) (*route.Hop, error) {
	h := &route.Hop{}

	var pub []byte
	if err := ReadElements(r, &pub); err != nil {
		return nil, err
	}
	copy(h.PubKeyBytes[:], pub)

	if err := ReadElements(r,
		&h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward,
	); err != nil {
		return nil, err
	}

	// TODO(roasbeef): change field to allow LegacyPayload false to be the
	// legacy default?
	err := binary.Read(r, byteOrder, &h.LegacyPayload)
	if err != nil {
		return nil, err
	}

	var numElements uint32
	if err := ReadElements(r, &numElements); err != nil {
		return nil, err
	}

	// If there are no elements, then we can return early.
	if numElements == 0 {
		return h, nil
	}

	tlvMap := make(map[uint64][]byte)
	for i := uint32(0); i < numElements; i++ {
		var tlvType uint64
		if err := ReadElements(r, &tlvType); err != nil {
			return nil, err
		}

		rawRecordBytes, err := wire.ReadVarBytes(
			r, 0, maxOnionPayloadSize, "tlv",
		)
		if err != nil {
			return nil, err
		}

		tlvMap[tlvType] = rawRecordBytes
	}

	// If the MPP type is present, remove it from the generic TLV map and
	// parse it back into a proper MPP struct.
	//
	// TODO(conner): add migration to unify all fields in a single TLV
	// blob. The split approach will cause headaches down the road as more
	// fields are added, which we can avoid by having a single TLV stream
	// for all payload fields.
	mppType := uint64(record.MPPOnionType)
	if mppBytes, ok := tlvMap[mppType]; ok {
		delete(tlvMap, mppType)

		var (
			mpp    = &record.MPP{}
			mppRec = mpp.Record()
			r      = bytes.NewReader(mppBytes)
		)
		err := mppRec.Decode(r, uint64(len(mppBytes)))
		if err != nil {
			return nil, err
		}
		h.MPP = mpp
	}

	h.CustomRecords = tlvMap

	return h, nil
}

// SerializeRoute serializes a route.
func SerializeRoute(w io.Writer, r route.Route) error {
	if err := WriteElements(w,
		r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:],
	); err != nil {
		return err
	}

	if err := WriteElements(w, uint32(len(r.Hops))); err != nil {
		return err
	}

	for _, h := range r.Hops {
		if err := serializeHop(w, h); err != nil {
			return err
		}
	}

	return nil
}

// DeserializeRoute deserializes a route.
func DeserializeRoute(r io.Reader) (route.Route, error) {
	rt := route.Route{}
	if err := ReadElements(r,
		&rt.TotalTimeLock, &rt.TotalAmount,
	); err != nil {
		return rt, err
	}

	var pub []byte
	if err := ReadElements(r, &pub); err != nil {
		return rt, err
	}
	copy(rt.SourcePubKey[:], pub)

	var numHops uint32
	if err := ReadElements(r, &numHops); err != nil {
		return rt, err
	}

	var hops []*route.Hop
	for i := uint32(0); i < numHops; i++ {
		hop, err := deserializeHop(r)
		if err != nil {
			return rt, err
		}
		hops = append(hops, hop)
	}
	rt.Hops = hops

	return rt, nil
}
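
Since SerializeRoute and DeserializeRoute are exported, a round trip can be sketched from outside the package. The hop below uses LegacyPayload so no TLV records are involved; all numeric values and the `routeRoundTrip` name are placeholders.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing/route"
)

// routeRoundTrip serializes a one-hop legacy route and reads it back.
func routeRoundTrip() error {
	hop := &route.Hop{
		ChannelID:        12345,
		OutgoingTimeLock: 600000,
		AmtToForward:     lnwire.MilliSatoshi(1000),
		LegacyPayload:    true, // writes a zero-length TLV map
	}
	rt := route.Route{
		TotalTimeLock: 600144,
		TotalAmount:   lnwire.MilliSatoshi(1001),
		Hops:          []*route.Hop{hop},
	}

	var b bytes.Buffer
	if err := channeldb.SerializeRoute(&b, rt); err != nil {
		return err
	}

	got, err := channeldb.DeserializeRoute(&b)
	if err != nil {
		return err
	}
	if len(got.Hops) != 1 || got.Hops[0].ChannelID != hop.ChannelID {
		return fmt.Errorf("unexpected route after round trip")
	}
	return nil
}
```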
95
vendor/github.com/lightningnetwork/lnd/channeldb/reject_cache.go
generated
vendored
Normal file
@@ -0,0 +1,95 @@

package channeldb

// rejectFlags is a compact representation of various metadata stored by the
// reject cache about a particular channel.
type rejectFlags uint8

const (
	// rejectFlagExists is a flag indicating whether the channel exists,
	// i.e. the channel is open and has a recent channel update. If this
	// flag is not set, the channel is either a zombie or unknown.
	rejectFlagExists rejectFlags = 1 << iota

	// rejectFlagZombie is a flag indicating whether the channel is a
	// zombie, i.e. the channel is open but has no recent channel updates.
	rejectFlagZombie
)

// packRejectFlags computes the rejectFlags corresponding to the passed boolean
// values indicating whether the edge exists or is a zombie.
func packRejectFlags(exists, isZombie bool) rejectFlags {
	var flags rejectFlags
	if exists {
		flags |= rejectFlagExists
	}
	if isZombie {
		flags |= rejectFlagZombie
	}

	return flags
}

// unpack returns the booleans packed into the rejectFlags. The first indicates
// if the edge exists in our graph, the second indicates if the edge is a
// zombie.
func (f rejectFlags) unpack() (bool, bool) {
	return f&rejectFlagExists == rejectFlagExists,
		f&rejectFlagZombie == rejectFlagZombie
}
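
A quick package-internal sanity sketch of the pack/unpack pair: for each of the four flag combinations, packing and then unpacking must return the inputs. `checkRejectFlags` is a hypothetical name.

```go
// checkRejectFlags verifies that packRejectFlags and unpack are inverses
// for all boolean combinations. Sketch only; lives inside channeldb.
func checkRejectFlags() bool {
	for _, exists := range []bool{false, true} {
		for _, zombie := range []bool{false, true} {
			gotExists, gotZombie := packRejectFlags(exists, zombie).unpack()
			if gotExists != exists || gotZombie != zombie {
				return false
			}
		}
	}
	return true
}
```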

// rejectCacheEntry caches frequently accessed information about a channel,
// including the timestamps of its latest edge policies and whether or not the
// channel exists in the graph.
type rejectCacheEntry struct {
	upd1Time int64
	upd2Time int64
	flags    rejectFlags
}

// rejectCache is an in-memory cache used to improve the performance of
// HasChannelEdge. It caches information about whether the channel exists, as
// well as the most recent timestamps for each policy (if they exist).
type rejectCache struct {
	n     int
	edges map[uint64]rejectCacheEntry
}

// newRejectCache creates a new rejectCache with maximum capacity of n entries.
func newRejectCache(n int) *rejectCache {
	return &rejectCache{
		n:     n,
		edges: make(map[uint64]rejectCacheEntry, n),
	}
}

// get returns the entry from the cache for chanid, if it exists.
func (c *rejectCache) get(chanid uint64) (rejectCacheEntry, bool) {
	entry, ok := c.edges[chanid]
	return entry, ok
}

// insert adds the entry to the reject cache. If an entry for chanid already
// exists, it will be replaced with the new entry. If the entry doesn't exist,
// it will be inserted into the cache, performing a random eviction if the
// cache is at capacity.
func (c *rejectCache) insert(chanid uint64, entry rejectCacheEntry) {
	// If entry exists, replace it.
	if _, ok := c.edges[chanid]; ok {
		c.edges[chanid] = entry
		return
	}

	// Otherwise, evict an entry at random and insert.
	if len(c.edges) == c.n {
		for id := range c.edges {
			delete(c.edges, id)
			break
		}
	}
	c.edges[chanid] = entry
}

// remove deletes an entry for chanid from the cache, if it exists.
func (c *rejectCache) remove(chanid uint64) {
	delete(c.edges, chanid)
}
251
vendor/github.com/lightningnetwork/lnd/channeldb/waitingproof.go
generated
vendored
Normal file
@@ -0,0 +1,251 @@

package channeldb

import (
	"bytes"
	"encoding/binary"
	"io"
	"sync"

	"github.com/go-errors/errors"
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lnwire"
)

var (
	// waitingProofsBucketKey byte string name of the waiting proofs store.
	waitingProofsBucketKey = []byte("waitingproofs")

	// ErrWaitingProofNotFound is returned if waiting proofs haven't been
	// found by db.
	ErrWaitingProofNotFound = errors.New("waiting proofs haven't been " +
		"found")

	// ErrWaitingProofAlreadyExist is returned if a waiting proof with the
	// same key already exists in the db.
	ErrWaitingProofAlreadyExist = errors.New("waiting proof with such " +
		"key already exist")
)

// WaitingProofStore is the boltdb map-like storage for half announcement
// signatures. Its sole responsibility is to make waiting proofs retrievable
// after a client restart.
type WaitingProofStore struct {
	// cache is used to reduce the number of redundant get calls when the
	// object isn't stored in the db.
	cache map[WaitingProofKey]struct{}
	db    *DB
	mu    sync.RWMutex
}

// NewWaitingProofStore creates a new instance of the proof storage.
func NewWaitingProofStore(db *DB) (*WaitingProofStore, error) {
	s := &WaitingProofStore{
		db:    db,
		cache: make(map[WaitingProofKey]struct{}),
	}

	if err := s.ForAll(func(proof *WaitingProof) error {
		s.cache[proof.Key()] = struct{}{}
		return nil
	}); err != nil && err != ErrWaitingProofNotFound {
		return nil, err
	}

	return s, nil
}

// Add adds a new waiting proof to the storage.
func (s *WaitingProofStore) Add(proof *WaitingProof) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	err := kvdb.Update(s.db, func(tx kvdb.RwTx) error {
		var err error
		var b bytes.Buffer

		// Get or create the bucket.
		bucket, err := tx.CreateTopLevelBucket(waitingProofsBucketKey)
		if err != nil {
			return err
		}

		// Encode the object and place it in the bucket.
		if err := proof.Encode(&b); err != nil {
			return err
		}

		key := proof.Key()

		return bucket.Put(key[:], b.Bytes())
	})
	if err != nil {
		return err
	}

	// Knowing that the write succeeded, we can now update the in-memory
	// cache with the proof's key.
	s.cache[proof.Key()] = struct{}{}

	return nil
}
// Remove removes the proof from storage by its key.
func (s *WaitingProofStore) Remove(key WaitingProofKey) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if _, ok := s.cache[key]; !ok {
		return ErrWaitingProofNotFound
	}

	err := kvdb.Update(s.db, func(tx kvdb.RwTx) error {
		// Get the top bucket.
		bucket := tx.ReadWriteBucket(waitingProofsBucketKey)
		if bucket == nil {
			return ErrWaitingProofNotFound
		}

		return bucket.Delete(key[:])
	})
	if err != nil {
		return err
	}

	// Since the proof was successfully deleted from the store, we can now
	// remove it from the in-memory cache.
	delete(s.cache, key)

	return nil
}

// ForAll iterates through all waiting proofs, passing each proof to the
// given callback.
func (s *WaitingProofStore) ForAll(cb func(*WaitingProof) error) error {
	return kvdb.View(s.db, func(tx kvdb.ReadTx) error {
		bucket := tx.ReadBucket(waitingProofsBucketKey)
		if bucket == nil {
			return ErrWaitingProofNotFound
		}

		// Iterate over the objects in the bucket.
		return bucket.ForEach(func(k, v []byte) error {
			// Skip nested buckets.
			if v == nil {
				return nil
			}

			r := bytes.NewReader(v)
			proof := &WaitingProof{}
			if err := proof.Decode(r); err != nil {
				return err
			}

			return cb(proof)
		})
	})
}

// Get returns the object which corresponds to the given index.
func (s *WaitingProofStore) Get(key WaitingProofKey) (*WaitingProof, error) {
	proof := &WaitingProof{}

	s.mu.RLock()
	defer s.mu.RUnlock()

	if _, ok := s.cache[key]; !ok {
		return nil, ErrWaitingProofNotFound
	}

	err := kvdb.View(s.db, func(tx kvdb.ReadTx) error {
		bucket := tx.ReadBucket(waitingProofsBucketKey)
		if bucket == nil {
			return ErrWaitingProofNotFound
		}

		// Look the proof up directly by its key.
		v := bucket.Get(key[:])
		if v == nil {
			return ErrWaitingProofNotFound
		}

		r := bytes.NewReader(v)
		return proof.Decode(r)
	})

	return proof, err
}

// WaitingProofKey is the proof key which uniquely identifies the waiting
// proof object. The goal of this key is to distinguish the local and remote
// proof for the same channel id.
type WaitingProofKey [9]byte

// WaitingProof is the storable object which encapsulates the half proof and
// the information about from which side this proof came. This structure is
// needed to make the channel proof exchange persistent, so that after a
// client restart we may still receive the remote/local half proof and
// process it.
type WaitingProof struct {
	*lnwire.AnnounceSignatures
	isRemote bool
}

// NewWaitingProof constructs a new waiting proof instance.
func NewWaitingProof(isRemote bool, proof *lnwire.AnnounceSignatures) *WaitingProof {
	return &WaitingProof{
		AnnounceSignatures: proof,
		isRemote:           isRemote,
	}
}

// OppositeKey returns the key which uniquely identifies the opposite waiting
// proof.
func (p *WaitingProof) OppositeKey() WaitingProofKey {
	var key [9]byte
	binary.BigEndian.PutUint64(key[:8], p.ShortChannelID.ToUint64())

	if !p.isRemote {
		key[8] = 1
	}
	return key
}

// Key returns the key which uniquely identifies the waiting proof.
func (p *WaitingProof) Key() WaitingProofKey {
	var key [9]byte
	binary.BigEndian.PutUint64(key[:8], p.ShortChannelID.ToUint64())

	if p.isRemote {
		key[8] = 1
	}
	return key
}
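
The key layout produced by Key and OppositeKey can be visualized with a small sketch: the first eight bytes carry the big-endian short channel ID and the ninth byte encodes direction, so the two half proofs for one channel share everything but the last byte. `keyLayout` and `scid` are illustrative names and values.

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/lightningnetwork/lnd/channeldb"
)

// keyLayout prints a local/remote key pair for the same channel to show
// that only the final direction byte differs.
func keyLayout() {
	var local, remote channeldb.WaitingProofKey
	scid := uint64(0x0102030405060708) // placeholder short channel ID

	binary.BigEndian.PutUint64(local[:8], scid)
	binary.BigEndian.PutUint64(remote[:8], scid)
	remote[8] = 1 // the remote flag toggles only the ninth byte

	fmt.Printf("local:  %x\nremote: %x\n", local, remote)
}
```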

// Encode writes the internal representation of the waiting proof to the byte
// stream.
func (p *WaitingProof) Encode(w io.Writer) error {
	if err := binary.Write(w, byteOrder, p.isRemote); err != nil {
		return err
	}

	if err := p.AnnounceSignatures.Encode(w, 0); err != nil {
		return err
	}

	return nil
}

// Decode reads the data from the byte stream and initializes the
// waiting proof object with it.
func (p *WaitingProof) Decode(r io.Reader) error {
	if err := binary.Read(r, byteOrder, &p.isRemote); err != nil {
		return err
	}

	msg := &lnwire.AnnounceSignatures{}
	if err := msg.Decode(r, 0); err != nil {
		return err
	}

	(*p).AnnounceSignatures = msg
	return nil
}
229
vendor/github.com/lightningnetwork/lnd/channeldb/witness_cache.go
generated
vendored
Normal file
@@ -0,0 +1,229 @@

package channeldb

import (
	"fmt"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/lntypes"
)

var (
	// ErrNoWitnesses is an error that's returned when no new witnesses have
	// been added to the WitnessCache.
	ErrNoWitnesses = fmt.Errorf("no witnesses")

	// ErrUnknownWitnessType is returned if a caller attempts to use a
	// witness type that the cache doesn't know about.
	ErrUnknownWitnessType = fmt.Errorf("unknown witness type")
)
// WitnessType is an enum that denotes what "type" of witness is being
// stored/retrieved. As the WitnessCache itself is agnostic and doesn't enforce
// any structure on added witnesses, we use this type to partition the
// witnesses on disk, and also to know how to map a witness to its look up key.
type WitnessType uint8

var (
	// Sha256HashWitness is a witness that is simply the pre image to a
	// hash image. In order to map to its key, we'll use sha256.
	Sha256HashWitness WitnessType = 1
)

// toDBKey is a helper method that maps a witness type to the key that we'll
// use to store it within the database.
func (w WitnessType) toDBKey() ([]byte, error) {
	switch w {

	case Sha256HashWitness:
		return []byte{byte(w)}, nil

	default:
		return nil, ErrUnknownWitnessType
	}
}

var (
	// witnessBucketKey is the name of the bucket that we use to store all
	// witnesses encountered. Within this bucket, we'll create a sub-bucket for
	// each witness type.
	witnessBucketKey = []byte("byte")
)
// WitnessCache is a persistent cache of all witnesses we've encountered on the
// network. In the case of multi-hop, multi-step contracts, a cache of all
// witnesses can be useful in the case of partial contract resolution. If
// negotiations break down, we may be forced to locate the witness for a
// portion of the contract on-chain. In this case, we'll then add that witness
// to the cache so the incoming contract can fully resolve the witness.
// Additionally, as one MUST always use a unique witness on the network, we may
// use this cache to detect duplicate witnesses.
//
// TODO(roasbeef): need expiry policy?
//  * encrypt?
type WitnessCache struct {
	db *DB
}

// NewWitnessCache returns a new instance of the witness cache.
func (d *DB) NewWitnessCache() *WitnessCache {
	return &WitnessCache{
		db: d,
	}
}

// witnessEntry is a key-value struct that holds each key -> witness pair, used
// when inserting records into the cache.
type witnessEntry struct {
	key     []byte
	witness []byte
}

// AddSha256Witnesses adds a batch of new sha256 preimages into the witness
// cache. This is an alias for AddWitnesses that uses Sha256HashWitness as the
// preimages' witness type.
func (w *WitnessCache) AddSha256Witnesses(preimages ...lntypes.Preimage) error {
	// Optimistically compute the preimages' hashes before attempting to
	// start the db transaction.
	entries := make([]witnessEntry, 0, len(preimages))
	for i := range preimages {
		hash := preimages[i].Hash()
		entries = append(entries, witnessEntry{
			key:     hash[:],
			witness: preimages[i][:],
		})
	}

	return w.addWitnessEntries(Sha256HashWitness, entries)
}

// addWitnessEntries inserts the witnessEntry key-value pairs into the cache,
// using the appropriate witness type to segment the namespace of possible
// witness types.
func (w *WitnessCache) addWitnessEntries(wType WitnessType,
	entries []witnessEntry) error {

	// Exit early if there are no witnesses to add.
	if len(entries) == 0 {
		return nil
	}

	return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
		witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
		if err != nil {
			return err
		}

		witnessTypeBucketKey, err := wType.toDBKey()
		if err != nil {
			return err
		}
		witnessTypeBucket, err := witnessBucket.CreateBucketIfNotExists(
			witnessTypeBucketKey,
		)
		if err != nil {
			return err
		}

		for _, entry := range entries {
			err = witnessTypeBucket.Put(entry.key, entry.witness)
			if err != nil {
				return err
			}
		}

		return nil
	})
}

// LookupSha256Witness attempts to lookup the preimage for a sha256 hash. If
// the witness isn't found, ErrNoWitnesses will be returned.
func (w *WitnessCache) LookupSha256Witness(hash lntypes.Hash) (lntypes.Preimage, error) {
	witness, err := w.lookupWitness(Sha256HashWitness, hash[:])
	if err != nil {
		return lntypes.Preimage{}, err
	}

	return lntypes.MakePreimage(witness)
}

// lookupWitness attempts to lookup a witness according to its type and also
// its witness key. In the case that the witness isn't found, ErrNoWitnesses
// will be returned.
func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]byte, error) {
	var witness []byte
	err := kvdb.View(w.db, func(tx kvdb.ReadTx) error {
		witnessBucket := tx.ReadBucket(witnessBucketKey)
		if witnessBucket == nil {
			return ErrNoWitnesses
		}

		witnessTypeBucketKey, err := wType.toDBKey()
		if err != nil {
			return err
		}
		witnessTypeBucket := witnessBucket.NestedReadBucket(witnessTypeBucketKey)
		if witnessTypeBucket == nil {
			return ErrNoWitnesses
		}

		dbWitness := witnessTypeBucket.Get(witnessKey)
		if dbWitness == nil {
			return ErrNoWitnesses
		}

		witness = make([]byte, len(dbWitness))
		copy(witness[:], dbWitness)

		return nil
	})
	if err != nil {
		return nil, err
	}

	return witness, nil
}
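
A caller-side sketch that exercises the add/lookup pair above: store a random preimage, then read it back via its sha256 hash. `witnessExample` is an illustrative name; `NewWitnessCache`, `AddSha256Witnesses`, and `LookupSha256Witness` are the real API, and `db` is assumed to be an opened `*channeldb.DB`.

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/lntypes"
)

// witnessExample stores a random preimage and reads it back by hash.
func witnessExample(db *channeldb.DB) error {
	cache := db.NewWitnessCache()

	var preimage lntypes.Preimage
	if _, err := rand.Read(preimage[:]); err != nil {
		return err
	}

	if err := cache.AddSha256Witnesses(preimage); err != nil {
		return err
	}

	got, err := cache.LookupSha256Witness(preimage.Hash())
	if err != nil {
		return err // ErrNoWitnesses if the hash is unknown
	}

	fmt.Println("round trip ok:", got == preimage)
	return nil
}
```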

// DeleteSha256Witness attempts to delete a sha256 preimage identified by hash.
func (w *WitnessCache) DeleteSha256Witness(hash lntypes.Hash) error {
	return w.deleteWitness(Sha256HashWitness, hash[:])
}

// deleteWitness attempts to delete a particular witness from the database.
func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) error {
	return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
		witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
		if err != nil {
			return err
		}

		witnessTypeBucketKey, err := wType.toDBKey()
		if err != nil {
			return err
		}
		witnessTypeBucket, err := witnessBucket.CreateBucketIfNotExists(
			witnessTypeBucketKey,
		)
		if err != nil {
			return err
		}

		return witnessTypeBucket.Delete(witnessKey)
	})
}

// DeleteWitnessClass attempts to delete an *entire* class of witnesses. After
// this function returns without error, all witnesses of the given type will
// have been removed from the database.
func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) error {
	return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
		witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
		if err != nil {
			return err
		}

		witnessTypeBucketKey, err := wType.toDBKey()
		if err != nil {
			return err
		}

		return witnessBucket.DeleteNestedBucket(witnessTypeBucketKey)
	})
}