Release v0.3.0

This commit is contained in:
Manu Herrera
2020-11-09 10:05:29 -03:00
parent 4e9aa7a3c5
commit 8107c4478b
1265 changed files with 440488 additions and 107809 deletions

View File

@@ -0,0 +1,2 @@
vendor/
.idea

View File

@@ -0,0 +1,17 @@
language: go
cache:
directories:
- $GOCACHE
- $GOPATH/pkg/mod
- $GOPATH/src/github.com/btcsuite
- $GOPATH/src/github.com/golang
go:
- "1.13.x"
sudo: required
script:
- export PATH=$PATH:$HOME/gopath/bin
- export GO111MODULE=on
- go test -v

View File

@@ -0,0 +1,19 @@
Copyright (C) 2015-2016 The Lightning Network Developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -0,0 +1,66 @@
# lightning-onion
This repository houses an implementation of the [Lightning
Network's](lightning.network) onion routing protocol. The Lightning Network
uses onion routing to securely, and privately route HTLC's
(Hash-Time-Locked-Contracts, basically a conditional payment) within the
network. (A full specification of the protocol can be found amongst the
lightning-rfc repository, specifically within
[BOLT#04](https://github.com/lightningnetwork/lightning-rfc/blob/master/04-onion-routing.md).)
The Lightning Network is composed of a series of "payment channels" which are
essentially tubes of money whose balances can instantaneously be reallocated
between two participants. By linking these payment channels in a pair-wise
manner, a network of connected payment channels is created.
Within the Lightning Network,
[source-routing](https://en.wikipedia.org/wiki/Source_routing) is utilized in
order to give nodes _full_ control over the route their payment follows within
the network. This level of control is highly desirable as with it, senders are
able to fully specify: the total number of hops in their routes, the total
cumulative fee they'll pay to send the payment, and finally the total
worst-case time-lock period enforced by the conditional payment contract.
In line with Bitcoin's spirit of decentralization and censorship resistance, we
employ an onion routing scheme within the [Lightning
protocol](https://github.com/lightningnetwork/lightning-rfc) to prevent the
ability of participants on the network to easily censor payments, as the
participants are not aware of the final destination of any given payment.
Additionally, by encoding payment routes within a mix-net like packet, we are
able to achieve the following security and privacy features:
* Participants in a route don't know their exact position within the route
* Participants within a route don't know the source of the payment, nor the
ultimate destination of the payment
* Participants within a route aren't aware _exactly_ how many other
participants were involved in the payment route
* Each new payment route is computationally indistinguishable from any other
payment route
Our current onion routing protocol utilizes a message format derived from
[Sphinx](http://www.cypherpunks.ca/~iang/pubs/Sphinx_Oakland09.pdf). In order
to cater Sphinx's mix-format to our specific application, we've made the
following modifications:
* We've added a MAC over the entire mix-header as we have no use for SURB's
(single-use-reply-blocks) in our protocol.
* Additionally, the end-to-end payload to the destination has been removed in
order to cut down on the packet-size, and also as we don't currently have a
use for a large message from payment sender to recipient.
* We've dropped usage of LIONESS (as we don't need SURB's), and instead
utilize chacha20 uniformly throughout as a stream cipher.
* Finally, the mix-header has been extended with a per-hop-payload which
provides each hop with exact instructions as to how and where to forward
the payment. This includes the amount to forward, the destination chain,
and the time-lock value to attach to the outgoing HTLC.
For further information see these resources:
* [Olaoluwa's original post to the lightning-dev mailing
list](http://lists.linuxfoundation.org/pipermail/lightning-dev/2015-December/000384.html).
* [Privacy Preserving Decentralized Micropayments](https://scalingbitcoin.org/milan2016/presentations/D1%20-%206%20-%20Olaoluwa%20Osuntokun.pdf) -- presented at Scaling Bitcoin Hong Kong.
In the near future, this repository will be extended to also include an
application-specific version of
[HORNET](https://www.scion-architecture.net/pdf/2015-HORNET.pdf).

View File

@@ -0,0 +1,100 @@
package sphinx
import "errors"
// ErrAlreadyCommitted signals that an entry could not be added to the
// batch because it has already been persisted. It is returned by
// Batch.Put once IsCommitted has been set.
var ErrAlreadyCommitted = errors.New("cannot add to batch after committing")

// Batch is an object used to incrementally construct a set of entries to add to
// the replay log. After construction is completed, it can be added to the log
// using the PutBatch method.
type Batch struct {
	// IsCommitted denotes whether or not this batch has been successfully
	// written to disk.
	IsCommitted bool

	// ID is a unique, caller chosen identifier for this batch.
	ID []byte

	// ReplaySet contains the sequence numbers of all entries that were
	// detected as replays. The set is finalized upon writing the batch to
	// disk, and merges replays detected by the replay cache and on-disk
	// replay log.
	ReplaySet *ReplaySet

	// entries stores the set of all potential entries that might get
	// written to the replay log. Some entries may be skipped after
	// examining the on-disk content at the time of commit.
	entries map[uint16]batchEntry

	// replayCache is an in-memory lookup table which stores the hash
	// prefix of entries already added to this batch. This allows a quick
	// mechanism for intra-batch duplicate detection.
	replayCache map[HashPrefix]struct{}
}
// NewBatch initializes an object for constructing a set of entries to
// atomically add to a replay log. Batches are identified by byte slice, which
// allows the caller to safely process the same batch twice and get an
// idempotent result.
func NewBatch(id []byte) *Batch {
	b := &Batch{
		ID:          id,
		ReplaySet:   NewReplaySet(),
		entries:     make(map[uint16]batchEntry),
		replayCache: make(map[HashPrefix]struct{}),
	}

	return b
}
// Put inserts a hash-prefix/CLTV pair into the current batch. This method only
// returns an error in the event that the batch was already committed to disk.
// Decisions regarding whether or not a particular sequence number is a replay
// is ultimately reported via the batch's ReplaySet after committing to disk.
func (b *Batch) Put(seqNum uint16, hashPrefix *HashPrefix, cltv uint32) error {
	// Adding to an already-persisted batch is a caller error.
	if b.IsCommitted {
		return ErrAlreadyCommitted
	}

	// An intra-batch duplicate is opportunistically marked as a replay
	// right away; the on-disk check at commit time handles the rest.
	if _, seen := b.replayCache[*hashPrefix]; seen {
		b.ReplaySet.Add(seqNum)
		return nil
	}

	// First sighting of this hash prefix within the batch: queue it for
	// the on-disk write that happens at commit time, where it will be
	// checked once more against previously persisted entries.
	b.entries[seqNum] = batchEntry{
		hashPrefix: *hashPrefix,
		cltv:       cltv,
	}

	// Record the prefix so later Put calls in this batch can detect
	// duplicates cheaply.
	b.replayCache[*hashPrefix] = struct{}{}

	return nil
}
// ForEach iterates through each entry in the batch and calls the provided
// function with the sequence number and entry contents as arguments. Iteration
// stops at the first error returned by fn, which is propagated to the caller.
func (b *Batch) ForEach(fn func(seqNum uint16, hashPrefix *HashPrefix, cltv uint32) error) error {
	for num, ent := range b.entries {
		err := fn(num, &ent.hashPrefix, ent.cltv)
		if err != nil {
			return err
		}
	}

	return nil
}
// batchEntry is a tuple of a secret's hash prefix and the corresponding CLTV at
// which the onion blob from which the secret was derived expires.
type batchEntry struct {
	// hashPrefix is the hash prefix of the shared secret.
	hashPrefix HashPrefix

	// cltv is the absolute expiry height of the corresponding onion.
	cltv uint32
}

View File

@@ -0,0 +1,289 @@
package sphinx
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"errors"
"fmt"
"github.com/aead/chacha20"
"github.com/btcsuite/btcd/btcec"
)
const (
	// HMACSize is the length of the HMACs used to verify the integrity of
	// the onion. Any value lower than 32 will truncate the HMAC both
	// during onion creation as well as during the verification.
	HMACSize = 32
)

// Hash256 is a statically sized, 32-byte array, typically containing
// the output of a SHA256 hash.
type Hash256 [sha256.Size]byte

// DecryptedError contains the decrypted error message and its sender.
type DecryptedError struct {
	// Sender is the node that sent the error. Note that a node may occur in
	// the path multiple times. If that is the case, the sender pubkey does
	// not tell the caller on which visit the error occurred.
	Sender *btcec.PublicKey

	// SenderIdx is the position of the error sending node in the path.
	// Index zero is the self node. SenderIdx allows distinguishing between
	// errors from nodes that occur in the path multiple times.
	SenderIdx int

	// Message is the decrypted error message.
	Message []byte
}

// zeroHMAC is the special HMAC value that allows the final node to determine
// if it is the payment destination or not.
var zeroHMAC [HMACSize]byte
// calcMac calculates HMAC-SHA-256 over the message using the passed secret key
// as input to the HMAC, truncating the digest to HMACSize bytes.
func calcMac(key [keyLen]byte, msg []byte) [HMACSize]byte {
	var mac [HMACSize]byte

	// Avoid shadowing the hmac package name with the local state.
	digest := hmac.New(sha256.New, key[:])
	digest.Write(msg)
	copy(mac[:], digest.Sum(nil))

	return mac
}
// xor writes the byte-wise XOR of a and b into dst and reports how many bytes
// were combined. Only the first min(len(a), len(b)) bytes are xor'd; dst must
// have room for at least that many bytes.
func xor(dst, a, b []byte) int {
	n := len(b)
	if len(a) < n {
		n = len(a)
	}

	for i, av := range a[:n] {
		dst[i] = av ^ b[i]
	}

	return n
}
// generateKey derives a key for usage in Sphinx packet
// construction/processing based off of the denoted keyType. Within Sphinx,
// distinct keys are derived from the same shared secret for padding
// generation, MAC generation, and encryption/decryption by keying
// HMAC-SHA-256 with the key type and hashing the shared secret.
func generateKey(keyType string, sharedKey *Hash256) [keyLen]byte {
	var key [keyLen]byte

	kdf := hmac.New(sha256.New, []byte(keyType))
	kdf.Write(sharedKey[:])
	copy(key[:], kdf.Sum(nil))

	return key
}
// generateCipherStream generates a stream of cryptographic pseudo-random bytes
// intended to be used to encrypt a message using a one-time-pad like
// construction. The stream is the chacha20 keystream for the given key with an
// all-zero nonce, obtained by encrypting a buffer of zeros.
func generateCipherStream(key [keyLen]byte, numBytes uint) []byte {
	// A fixed zero nonce is safe here because every key is single-use.
	var nonce [8]byte

	c, err := chacha20.NewCipher(nonce[:], key[:])
	if err != nil {
		// Only reachable on malformed key/nonce lengths, which are
		// fixed at compile time above.
		panic(err)
	}

	stream := make([]byte, numBytes)
	c.XORKeyStream(stream, stream)

	return stream
}
// computeBlindingFactor derives the blinding factor for the next hop given the
// ephemeral pubKey and sharedSecret for this hop. The blinding factor is
// computed as sha-256(pubkey || sharedSecret).
func computeBlindingFactor(hopPubKey *btcec.PublicKey,
	hopSharedSecret []byte) Hash256 {

	var blinder Hash256

	h := sha256.New()
	h.Write(hopPubKey.SerializeCompressed())
	h.Write(hopSharedSecret)
	copy(blinder[:], h.Sum(nil))

	return blinder
}
// blindGroupElement blinds the group element P by performing scalar
// multiplication of the group element by blindingFactor: blindingFactor * P.
func blindGroupElement(hopPubKey *btcec.PublicKey, blindingFactor []byte) *btcec.PublicKey {
	// blindingFactor is already a slice, so no re-slicing is needed.
	newX, newY := btcec.S256().ScalarMult(hopPubKey.X, hopPubKey.Y, blindingFactor)

	// Use a keyed literal so go vet's composite-literal check passes and
	// the code survives field reordering in btcec.PublicKey.
	return &btcec.PublicKey{Curve: btcec.S256(), X: newX, Y: newY}
}
// blindBaseElement blinds the group's generator G by performing scalar base
// multiplication using the blindingFactor: blindingFactor * G.
func blindBaseElement(blindingFactor []byte) *btcec.PublicKey {
	newX, newY := btcec.S256().ScalarBaseMult(blindingFactor)

	// Use a keyed literal so go vet's composite-literal check passes and
	// the code survives field reordering in btcec.PublicKey.
	return &btcec.PublicKey{Curve: btcec.S256(), X: newX, Y: newY}
}
// sharedSecretGenerator is an interface that abstracts away exactly *how* the
// shared secret for each hop is generated.
//
// TODO(roasbeef): rename?
type sharedSecretGenerator interface {
	// generateSharedSecret given a public key, generates a shared secret
	// using private data of the underlying sharedSecretGenerator.
	generateSharedSecret(dhKey *btcec.PublicKey) (Hash256, error)
}
// generateSharedSecret generates the shared secret from the given ephemeral
// key, using the router's long-term onion key as the private scalar. It
// returns ErrInvalidOnionKey if the ephemeral key is not on the secp256k1
// curve.
func (r *Router) generateSharedSecret(dhKey *btcec.PublicKey) (Hash256, error) {
	var sharedSecret Hash256

	// Ensure that the public key is on our curve.
	if !btcec.S256().IsOnCurve(dhKey.X, dhKey.Y) {
		return sharedSecret, ErrInvalidOnionKey
	}

	// Compute our shared secret.
	sharedSecret = generateSharedSecret(dhKey, r.onionKey)

	return sharedSecret, nil
}
// generateSharedSecret generates the shared secret for a particular hop. The
// shared secret is generated by taking the group element contained in the
// mix-header, and performing an ECDH operation with the node's long term onion
// key. We then take the _entire_ point generated by the ECDH operation,
// serialize that using a compressed format, then feed the raw bytes through a
// single SHA256 invocation. The resulting value is the shared secret.
func generateSharedSecret(pub *btcec.PublicKey, priv *btcec.PrivateKey) Hash256 {
	var point btcec.PublicKey
	point.X, point.Y = btcec.S256().ScalarMult(pub.X, pub.Y, priv.D.Bytes())

	return sha256.Sum256(point.SerializeCompressed())
}
// onionEncrypt obfuscates the data in compliance with BOLT#4. As we use a
// stream cipher, calling onionEncrypt on an already encrypted piece of data
// will decrypt it.
func onionEncrypt(sharedSecret *Hash256, data []byte) []byte {
	out := make([]byte, len(data))

	// XOR the payload against the "ammag"-keyed chacha20 keystream.
	ammagKey := generateKey("ammag", sharedSecret)
	xor(out, data, generateCipherStream(ammagKey, uint(len(data))))

	return out
}
// onionErrorLength is the expected length of the onion error message.
// Including padding, all messages on the wire should be 256 bytes. We then add
// the size of the sha256 HMAC as well. The two leading 2-byte fields are the
// message and padding length prefixes.
const onionErrorLength = 2 + 2 + 256 + sha256.Size
// DecryptError attempts to decrypt the passed encrypted error response. The
// onion failure is encrypted in a backward manner, starting from the node
// where the error occurred. As a result, in order to decrypt the error we
// need to get all the shared secrets and apply decryption in reverse order.
// A structure is returned that contains the decrypted error message and
// information on the sender.
func (o *OnionErrorDecrypter) DecryptError(encryptedData []byte) (
	*DecryptedError, error) {

	// Ensure the error message length is as expected.
	if len(encryptedData) != onionErrorLength {
		return nil, fmt.Errorf("invalid error length: "+
			"expected %v got %v", onionErrorLength,
			len(encryptedData))
	}

	// Re-derive every hop's shared secret from the circuit's session key
	// and payment path.
	sharedSecrets := generateSharedSecrets(
		o.circuit.PaymentPath,
		o.circuit.SessionKey,
	)

	var (
		// sender stays 0 until a MAC matches; afterwards it holds the
		// 1-based index of the erring hop.
		sender      int
		msg         []byte
		dummySecret Hash256
	)
	copy(dummySecret[:], bytes.Repeat([]byte{1}, 32))

	// We'll iterate a constant number of hops to ensure that we don't give
	// away any timing information pertaining to the position in the route
	// that the error emanated from.
	for i := 0; i < NumMaxHops; i++ {
		var sharedSecret Hash256

		// If we've already found the sender, then we'll use our dummy
		// secret to continue decryption attempts to fill out the rest
		// of the loop. Otherwise, we'll use the next shared secret in
		// line.
		if sender != 0 || i > len(sharedSecrets)-1 {
			sharedSecret = dummySecret
		} else {
			sharedSecret = sharedSecrets[i]
		}

		// With the shared secret, we'll now strip off a layer of
		// encryption from the encrypted error payload.
		encryptedData = onionEncrypt(&sharedSecret, encryptedData)

		// Next, we'll need to separate the data from the MAC itself
		// so we can reconstruct and verify it.
		expectedMac := encryptedData[:sha256.Size]
		data := encryptedData[sha256.Size:]

		// With the data split, we'll now re-generate the MAC using its
		// specified key.
		umKey := generateKey("um", &sharedSecret)
		h := hmac.New(sha256.New, umKey[:])
		h.Write(data)

		// If the MAC matches up, then we've found the sender of the
		// error and have also obtained the fully decrypted message.
		realMac := h.Sum(nil)
		if hmac.Equal(realMac, expectedMac) && sender == 0 {
			sender = i + 1
			msg = data
		}
	}

	// If the sender index is still zero, then we haven't found the sender,
	// meaning we've failed to decrypt.
	if sender == 0 {
		return nil, errors.New("unable to retrieve onion failure")
	}

	return &DecryptedError{
		SenderIdx: sender,
		Sender:    o.circuit.PaymentPath[sender-1],
		Message:   msg,
	}, nil
}
// EncryptError obfuscates data using the generated shared secret.
//
// In the context of the Lightning Network it is either used by the origin node
// to perform the initial obfuscation (which also prepends the HMAC), or by
// forwarding nodes for backward failure obfuscation of the onion failure blob.
// By obfuscating the onion failure at every node in the path we add an
// additional barrier preventing malicious nodes from retrieving valuable
// information, and avoid revealing to the nodes in the payment path the exact
// failure and its origin.
func (o *OnionErrorEncrypter) EncryptError(initial bool, data []byte) []byte {
	// The origin node prepends an HMAC over the payload before applying
	// the first layer of obfuscation, allowing the sender to later
	// authenticate the failure source.
	if initial {
		umKey := generateKey("um", &o.sharedSecret)
		mac := hmac.New(sha256.New, umKey[:])
		mac.Write(data)
		data = append(mac.Sum(nil), data...)
	}

	return onionEncrypt(&o.sharedSecret, data)
}

View File

@@ -0,0 +1,27 @@
package sphinx
import "fmt"
var (
	// ErrReplayedPacket is an error returned when a packet is rejected
	// during processing due to being an attempted replay or probing
	// attempt.
	ErrReplayedPacket = fmt.Errorf("sphinx packet replay attempted")

	// ErrInvalidOnionVersion is returned during decoding of the onion
	// packet, when the received packet has an unknown version byte.
	ErrInvalidOnionVersion = fmt.Errorf("invalid onion packet version")

	// ErrInvalidOnionHMAC is returned during the onion parsing process
	// when the received MAC does not correspond to the generated one.
	ErrInvalidOnionHMAC = fmt.Errorf("invalid mismatched mac")

	// ErrInvalidOnionKey is returned during the onion parsing process when
	// the onion ephemeral key is invalid (not a point on the curve).
	ErrInvalidOnionKey = fmt.Errorf("invalid onion key: pubkey isn't on " +
		"secp256k1 curve")

	// ErrLogEntryNotFound is an error returned when a packet lookup in a
	// replay log fails because the entry is missing.
	ErrLogEntryNotFound = fmt.Errorf("sphinx packet is not in log")
)

View File

@@ -0,0 +1,13 @@
module github.com/lightningnetwork/lightning-onion
require (
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da
github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d
github.com/davecgh/go-spew v1.1.1
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67
golang.org/x/sys v0.0.0-20190209173611-3b5209105503 // indirect
)
go 1.13

View File

@@ -0,0 +1,42 @@
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmHS9iAKVt9AyzRSqNU1qabPih5BY=
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA=
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8 h1:mOg8/RgDSHTQ1R0IR+LMDuW4TDShPv+JzYHuR4GLoNA=
github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@@ -0,0 +1,114 @@
package sphinx
import (
"github.com/btcsuite/btcd/btcec"
"golang.org/x/crypto/ripemd160"
)
// fSLength is the serialized size in bytes of one forwarding segment: a
// 32-byte FS payload plus a 16-byte MAC per the design notes below.
// TODO(roasbeef): Might need to change? due to the PRG* requirements?
const fSLength = 48
// Hmm appears that they use k = 128 throughout the paper?
// HMAC -> SHA-256
// * or could use Poly1305: https://godoc.org/golang.org/x/crypto/poly1305
// * but, the paper specs: {0, 1}^k x {0, 1}* -> {0, 1}^k
// * Poly1305 is actually: {0, 1}^k x {0, 1}* -> {0, 1}^(2/k)
// * Also with Poly, I guess the key is treated as a nonce, tagging two messages
// with the same key allows an attacker to forge messages or something like that
// Size of a forwarding segment is 32 bytes, the MAC is 16 bytes, so c = 48 bytes
// * NOTE: this doesn't include adding R to the forwarding segment, and whatever else
// Hmmm since each uses diff key, just use AES-CTR with blank nonce, given key,
// encrypt plaintext of all zeros, this'll give us our len(plaintext) rand bytes.
// PRG0 -> {0, 1}^k -> {0, 1}^r(c+k) or {0, 1}^1280 (assuming 20 hops, like rusty, but, is that too large? maybe, idk)
// PRG1 -> {0, 1}^k -> {0, 1}^r(c+k) or {0, 1}^1280 (assuming 20 hops)
// PRG2 -> {0, 1}^k -> {0, 1}^rc or {0, 1}^960 (assuming 20 hops, c=48)
// * NOTE: in second version of paper (accepted to CCS'15), all the PRG*'s are like PRG2
// * so makes it simpler
// PRP -> AES? or
// * {0, 1}^k x {0, 1}^a -> {0, 1}^a
// Do we need AEAD for the below? Or are is the per-hop MAC okay?
// ENC: AES-CTR or CHACHA20?
// DEC: AES-CTR or CHACHA20?
// h_op: G^* -> {0, 1}^k
// * op (elem of) {MAC, PRGO, PRG!, PRP, ENC, DEC}
// * key gen for the above essentially
// routingSegment holds the per-hop routing data: the identity of the next
// hop and a commitment tied to the payment.
//
// NOTE: Length of routing segment in the paper is 8 bytes (enough for their
// imaginary network, I guess). But, looking like they'll be (20 + 33 bytes)
// 53 bytes. Or 52 if we use curve25519
type routingSegment struct {
	// nextHop identifies the next node in the route.
	// NOTE(review): or is this an LN address? confirm — see original
	// author's question below.
	nextHop *btcec.PublicKey // NOTE: or, is this a LN addr? w/e that is?
	// nextHop [32]byte

	// rCommitment is a RIPEMD-160 digest; presumably hash(R), the payment
	// preimage commitment referenced in forwardingSegment — confirm.
	rCommitment [ripemd160.Size]byte

	// stuff perhaps?
}

// sphinxPayload is a placeholder for the Sphinx payload carried during
// session setup; fields are still TBD in this design sketch.
type sphinxPayload struct {
}

// forwardingSegment is the per-hop state a node needs in order to forward
// the payment and peel its layer of the onion.
type forwardingSegment struct {
	// rs carries hash(R): attempt to make an HTLC with the next hop. If
	// successful, then pass along the onion so we can finish getting the
	// payment circuit set up.
	// TODO(roasbeef): Do we create HTLC's with the minimum amount
	// possible? 1 satoshi or is it 1 mili-satoshi?
	rs routingSegment

	// expiration defends against replay attacks. Intermediate nodes will
	// drop the FS if they deem it expired.
	expiration uint64

	// sharedSymmetricKey is shared by the intermediate node with the
	// source, used to peel a layer off the onion for the next hop.
	sharedSymmetricKey [32]byte // TODO(roasbeef): or, 16?
}
// anonymousHeader pairs the current hop's forwarding segment with the MAC
// that protects it.
type anonymousHeader struct {
	// fs holds forwarding info for the current hop. When serialized, it'll
	// be encrypted with SV, the secret key for this node known to no-one
	// but the node. It also contains a secret key shared with this node
	// and the source, so it can peel off a layer of the onion for the next
	// hop.
	fs forwardingSegment

	// mac authenticates the header contents.
	mac [32]byte // TODO(roasbeef): or, 16?
}

// commonHeader is the unencrypted preamble shared by every packet type in
// this design sketch.
type commonHeader struct {
	// controlType discriminates the packet type.
	// TODO(roasbeef): maybe can use this to extend HORNET with additional
	// control signals for LN nodes?
	controlType uint8

	// hops is presumably the hop count for the route — confirm.
	hops uint8

	// nonce is either interpreted as EXP or nonce, little-endian? idk
	nonce [8]byte
}

// dataPacket is a data-phase packet: common header, anonymous header, and
// the layered onion of forwarding segments.
type dataPacket struct {
	chdr  commonHeader
	ahdr  anonymousHeader // TODO(roasbeef): MAC in ahdr includes the chdr?
	onion [fSLength * NumMaxHops]byte // TODO(roasbeef): or, is it NumMaxHops - 1?
}

// sphinxHeader is a placeholder for the Sphinx mix-header used during
// session setup; fields are still TBD in this design sketch.
type sphinxHeader struct {
}

// sessionSetupPacket is the setup-phase packet that establishes per-hop
// forwarding state.
type sessionSetupPacket struct {
	chdr commonHeader
	shdr sphinxHeader
	sp   sphinxPayload

	// fsPayload collects each hop's forwarding segment (r*c bytes).
	fsPayload [fSLength * NumMaxHops]byte
	// TODO(roabeef): hmm does this implicitly mean messages are a max of
	// 48 bytes?
}

View File

@@ -0,0 +1,42 @@
package sphinx
import "github.com/btcsuite/btclog"
// sphxLog is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var sphxLog btclog.Logger

// init disables all logging by default; callers opt in via UseLogger.
func init() {
	DisableLog()
}

// DisableLog disables all library log output. Logging output is disabled
// by default until UseLogger is called.
func DisableLog() {
	sphxLog = btclog.Disabled
}

// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
	sphxLog = logger
}
// logClosure wraps a function producing a log message so that the
// (potentially expensive) formatting only runs when the logging backend
// actually requests the string.
type logClosure func() string

// String satisfies fmt.Stringer by invoking the wrapped function.
func (lc logClosure) String() string {
	return lc()
}

// newLogClosure adapts a plain func() string into a logClosure so it can be
// handed to the logging system as a lazily-evaluated Stringer.
func newLogClosure(fn func() string) logClosure {
	return logClosure(fn)
}

View File

@@ -0,0 +1,126 @@
package sphinx
import (
"io"
"github.com/btcsuite/btcd/btcec"
)
// OnionErrorEncrypter is a struct that's used to implement onion error
// encryption as defined within BOLT0004.
type OnionErrorEncrypter struct {
	// sharedSecret is the ECDH secret derived from the router's onion key
	// and the payment's ephemeral key.
	sharedSecret Hash256
}

// NewOnionErrorEncrypter creates a new instance of the onion encrypter backed
// by the passed router, with encryption to be done using the passed
// ephemeralKey.
func NewOnionErrorEncrypter(router *Router,
	ephemeralKey *btcec.PublicKey) (*OnionErrorEncrypter, error) {

	sharedSecret, err := router.generateSharedSecret(ephemeralKey)
	if err != nil {
		return nil, err
	}

	return &OnionErrorEncrypter{
		sharedSecret: sharedSecret,
	}, nil
}

// Encode writes the encrypter's shared secret to the provided io.Writer.
func (o *OnionErrorEncrypter) Encode(w io.Writer) error {
	_, err := w.Write(o.sharedSecret[:])
	return err
}

// Decode restores the encrypter's shared secret from the provided io.Reader.
func (o *OnionErrorEncrypter) Decode(r io.Reader) error {
	_, err := io.ReadFull(r, o.sharedSecret[:])
	return err
}
// Circuit is used to encapsulate the data which is needed for data
// deobfuscation.
type Circuit struct {
	// SessionKey is the key which has been used during generation of the
	// shared secrets.
	SessionKey *btcec.PrivateKey

	// PaymentPath is the pub keys of the nodes in the payment path.
	PaymentPath []*btcec.PublicKey
}
// Decode initializes the circuit from the byte stream produced by Encode: a
// length-prefixed session key followed by a length-prefixed list of
// compressed public keys making up the payment path.
func (c *Circuit) Decode(r io.Reader) error {
	var keyLength [1]byte
	if _, err := io.ReadFull(r, keyLength[:]); err != nil {
		return err
	}

	// Use io.ReadFull rather than r.Read for all multi-byte reads: a
	// plain Read may legally return fewer bytes than requested, which
	// would silently truncate the key material instead of erroring.
	sessionKeyData := make([]byte, keyLength[0])
	if _, err := io.ReadFull(r, sessionKeyData); err != nil {
		return err
	}

	// The derived public key is not needed here, only the private key.
	c.SessionKey, _ = btcec.PrivKeyFromBytes(btcec.S256(), sessionKeyData)

	var pathLength [1]byte
	if _, err := io.ReadFull(r, pathLength[:]); err != nil {
		return err
	}

	c.PaymentPath = make([]*btcec.PublicKey, pathLength[0])
	for i := range c.PaymentPath {
		var pubKeyData [btcec.PubKeyBytesLenCompressed]byte
		if _, err := io.ReadFull(r, pubKeyData[:]); err != nil {
			return err
		}

		pubKey, err := btcec.ParsePubKey(pubKeyData[:], btcec.S256())
		if err != nil {
			return err
		}
		c.PaymentPath[i] = pubKey
	}

	return nil
}
// Encode writes the circuit to the byte stream: a length-prefixed session key
// followed by a length-prefixed list of compressed public keys for the
// payment path. The counterpart of Decode.
func (c *Circuit) Encode(w io.Writer) error {
	// Serialize the session key once; it's needed for both the length
	// prefix and the payload itself.
	keyBytes := c.SessionKey.Serialize()

	var keyLength [1]byte
	keyLength[0] = uint8(len(keyBytes))
	if _, err := w.Write(keyLength[:]); err != nil {
		return err
	}

	if _, err := w.Write(keyBytes); err != nil {
		return err
	}

	var pathLength [1]byte
	pathLength[0] = uint8(len(c.PaymentPath))
	if _, err := w.Write(pathLength[:]); err != nil {
		return err
	}

	for _, pubKey := range c.PaymentPath {
		if _, err := w.Write(pubKey.SerializeCompressed()); err != nil {
			return err
		}
	}

	return nil
}
// OnionErrorDecrypter is a struct that's used to decrypt onion errors in
// response to failed HTLC routing attempts according to BOLT#4.
type OnionErrorDecrypter struct {
	// circuit carries the session key and payment path used to peel back
	// the layers of error obfuscation.
	circuit *Circuit
}
// NewOnionErrorDecrypter creates a new instance of onion decrypter backed by
// the given circuit.
func NewOnionErrorDecrypter(circuit *Circuit) *OnionErrorDecrypter {
	return &OnionErrorDecrypter{circuit: circuit}
}

View File

@@ -0,0 +1,61 @@
package sphinx
import (
"crypto/rand"
"github.com/aead/chacha20"
"github.com/btcsuite/btcd/btcec"
)
// PacketFiller is a function type to be specified by the caller to provide a
// stream of random bytes derived from a CSPRNG to fill out the starting packet
// in order to ensure we don't leak information on the true route length to the
// receiver. The packet filler may also use the session key to generate a set
// of filler bytes if it wishes to be deterministic. Implementations in this
// file include RandPacketFiller, BlankPacketFiller and
// DeterministicPacketFiller.
type PacketFiller func(*btcec.PrivateKey, *[routingInfoSize]byte) error
// RandPacketFiller is a packet filler that reads a set of random bytes from a
// CSPRNG.
func RandPacketFiller(_ *btcec.PrivateKey, mixHeader *[routingInfoSize]byte) error {
	// Filling the entire starting packet with random bytes mitigates a
	// privacy leak that may reveal a lower bound on the true path length
	// to the receiver.
	_, err := rand.Read(mixHeader[:])
	return err
}
// BlankPacketFiller is a packet filler that doesn't attempt to fill out the
// packet at all. It should ONLY be used for generating test vectors or other
// instances that require deterministic packet generation.
func BlankPacketFiller(_ *btcec.PrivateKey, _ *[routingInfoSize]byte) error {
	return nil
}
// DeterministicPacketFiller is a packet filler that generates a deterministic
// set of filler bytes by using chacha20 with a key derived from the session
// key. Because both the derived key and the nonce are fixed by the session
// key, the same session key always yields the same filler bytes.
func DeterministicPacketFiller(sessionKey *btcec.PrivateKey,
	mixHeader *[routingInfoSize]byte) error {

	// First, we'll generate a new key that'll be used to generate some
	// random bytes for our padding purposes. To derive this new key, we
	// essentially calculate: HMAC("pad", sessionKey).
	var sessionKeyBytes Hash256
	copy(sessionKeyBytes[:], sessionKey.Serialize())
	paddingKey := generateKey("pad", &sessionKeyBytes)

	// Now that we have our target key, we'll use chacha20 to generate a
	// series of random bytes directly into the passed mixHeader packet.
	// The all-zero nonce is safe here since the padding key is used only
	// once per packet.
	var nonce [8]byte
	padCipher, err := chacha20.NewCipher(nonce[:], paddingKey[:])
	if err != nil {
		return err
	}
	padCipher.XORKeyStream(mixHeader[:], mixHeader[:])

	return nil
}

View File

@@ -0,0 +1,395 @@
package sphinx
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
)
// HopData is the information destined for individual hops. It is a fixed size
// 64 bytes, prefixed with a 1 byte realm that indicates how to interpret it.
// For now we simply assume it's the bitcoin realm (0x00) and hence the format
// is fixed. The last 32 bytes are always the HMAC to be passed to the next
// hop, or zero if this packet is not to be forwarded, since this is the
// last hop.
type HopData struct {
	// Realm denotes the "realm" or target chain of the next hop. For
	// bitcoin, this value will be 0x00.
	Realm [RealmByteSize]byte

	// NextAddress is the address of the next hop that this packet should
	// be forwarded to.
	NextAddress [AddressSize]byte

	// ForwardAmount is the HTLC amount that the next hop should forward.
	// This value should take into account the fee required by this
	// particular hop, and the cumulative fee for the entire route.
	ForwardAmount uint64

	// OutgoingCltv is the value of the outgoing absolute time-lock that
	// should be included in the HTLC forwarded.
	OutgoingCltv uint32

	// ExtraBytes is the set of unused bytes within the onion payload. This
	// extra set of bytes can be utilized by higher level applications to
	// package additional data within the per-hop payload, or signal that a
	// portion of the remaining set of hops are to be consumed as Extra
	// Onion Blobs.
	//
	// TODO(roasbeef): rename to padding bytes?
	ExtraBytes [NumPaddingBytes]byte
}
// Encode writes the serialized version of the target HopData into the passed
// io.Writer: the realm byte, the next address, the forward amount
// (big-endian uint64), the outgoing CLTV (big-endian uint32) and finally the
// padding bytes.
func (hd *HopData) Encode(w io.Writer) error {
	// Write the fixed-size byte fields out verbatim.
	for _, field := range [][]byte{hd.Realm[:], hd.NextAddress[:]} {
		if _, err := w.Write(field); err != nil {
			return err
		}
	}

	// The integer fields are serialized in network (big-endian) order.
	if err := binary.Write(w, binary.BigEndian, hd.ForwardAmount); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, hd.OutgoingCltv); err != nil {
		return err
	}

	_, err := w.Write(hd.ExtraBytes[:])
	return err
}
// Decode populates the target HopData with the contents of a serialized
// HopData packed into the passed io.Reader.
func (hd *HopData) Decode(r io.Reader) error {
	// Fixed-size byte fields are read in full so that short reads surface
	// as errors.
	if _, err := io.ReadFull(r, hd.Realm[:]); err != nil {
		return err
	}
	if _, err := io.ReadFull(r, hd.NextAddress[:]); err != nil {
		return err
	}

	// The integer fields were serialized in network (big-endian) order.
	if err := binary.Read(r, binary.BigEndian, &hd.ForwardAmount); err != nil {
		return err
	}
	if err := binary.Read(r, binary.BigEndian, &hd.OutgoingCltv); err != nil {
		return err
	}

	_, err := io.ReadFull(r, hd.ExtraBytes[:])
	return err
}
// PayloadType denotes the type of the payload included in the onion packet.
// Serialization of a raw HopPayload will depend on the payload type, as some
// include a varint length prefix, while others just encode the raw payload.
type PayloadType uint8

const (
	// PayloadLegacy is the legacy payload type. It includes a fixed 32
	// bytes, 12 of which are padding, and uses a "zero length" (the old
	// realm) prefix.
	PayloadLegacy PayloadType = iota

	// PayloadTLV is the new modern TLV based format. This payload includes
	// a set of opaque bytes with a varint length prefix. The varint used
	// is the same CompactInt as used in the Bitcoin protocol.
	PayloadTLV
)
// HopPayload is a slice of bytes and associated payload-type that are destined
// for a specific hop in the PaymentPath. The payload itself is treated as an
// opaque data field by the onion router. The included Type field informs the
// serialization/deserialization of the raw payload.
type HopPayload struct {
	// Type is the type of the payload.
	Type PayloadType

	// Payload is the raw bytes of the per-hop payload for this hop.
	// Depending on the realm, this may be the regular legacy hop data, or
	// a set of opaque blobs to be parsed by higher layers.
	Payload []byte

	// HMAC is an HMAC computed over the entire per-hop payload that also
	// includes the higher-level (optional) associated data bytes.
	HMAC [HMACSize]byte
}
// NewHopPayload creates a new hop payload given an optional set of forwarding
// instructions for a hop, and a set of optional opaque extra onion bytes to
// drop off at the target hop. If both values are not specified, then an error
// is returned. Exactly one of hopData and eob must be set.
//
// Fix: encode/write failures were previously discarded (`return h, nil`),
// silently yielding an empty payload; they are now propagated to the caller.
func NewHopPayload(hopData *HopData, eob []byte) (HopPayload, error) {
	var (
		h HopPayload
		b bytes.Buffer
	)

	// We can't proceed if neither the hop data nor the EOB has been
	// specified by the caller, or if both were given at once.
	switch {
	case hopData == nil && len(eob) == 0:
		return h, fmt.Errorf("either hop data or eob must " +
			"be specified")

	case hopData != nil && len(eob) > 0:
		return h, fmt.Errorf("cannot provide both hop data AND an eob")
	}

	// If the hop data is specified, then we'll write that now, as it
	// should precede the EOB portion of the payload.
	if hopData != nil {
		if err := hopData.Encode(&b); err != nil {
			return h, err
		}

		// We'll also mark that this particular hop will be using the
		// legacy format as the modern format packs the existing hop
		// data information into the EOB space as a TLV stream.
		h.Type = PayloadLegacy
	} else {
		// Otherwise, we'll write out the raw EOB which contains a set
		// of opaque bytes that the recipient can decode to make a
		// forwarding decision.
		if _, err := b.Write(eob); err != nil {
			return h, err
		}

		h.Type = PayloadTLV
	}

	h.Payload = b.Bytes()

	return h, nil
}
// NumBytes returns the number of bytes it will take to serialize the full
// payload. Depending on the payload type, this may include some additional
// signalling bytes.
func (hp *HopPayload) NumBytes() int {
	// Every payload carries its raw bytes plus a trailing HMAC.
	size := HMACSize + len(hp.Payload)

	// TLV payloads additionally carry a var-int length prefix, whose own
	// size depends on the payload length.
	if hp.Type == PayloadTLV {
		size += wire.VarIntSerializeSize(uint64(len(hp.Payload)))
	}

	return size
}
// Encode encodes the hop payload into the passed writer. TLV payloads are
// prefixed with their length as a Bitcoin-style var-int; legacy payloads need
// no prefix since their realm byte serves as the zero prefix byte.
func (hp *HopPayload) Encode(w io.Writer) error {
	if hp.Type == PayloadTLV {
		var scratch [8]byte
		err := WriteVarInt(w, uint64(len(hp.Payload)), &scratch)
		if err != nil {
			return err
		}
	}

	// Finally, the raw payload and the HMAC are written out in series.
	for _, chunk := range [][]byte{hp.Payload, hp.HMAC[:]} {
		if _, err := w.Write(chunk); err != nil {
			return err
		}
	}

	return nil
}
// Decode unpacks an encoded HopPayload from the passed reader into the target
// HopPayload. The payload type is inferred from the first byte: a zero byte
// (the legacy realm) denotes a fixed-size legacy payload, anything else is
// read as a var-int length prefix for a TLV payload.
func (hp *HopPayload) Decode(r io.Reader) error {
	bufReader := bufio.NewReader(r)

	// In order to properly parse the payload, we'll need to check the
	// first byte. We'll use a bufio reader to peek at it without consuming
	// it from the buffer.
	peekByte, err := bufReader.Peek(1)
	if err != nil {
		return err
	}

	var payloadSize uint32

	switch int(peekByte[0]) {
	// If the first byte is a zero (the realm), then this is the normal
	// payload.
	case 0x00:
		// Our size is just the payload, without the HMAC. This means
		// that this is the legacy payload type.
		payloadSize = LegacyHopDataSize - HMACSize

		hp.Type = PayloadLegacy

	default:
		// Otherwise, this is the new TLV based payload type, so we'll
		// extract the payload length encoded as a var-int. Note that
		// ReadVarInt consumes the length bytes from bufReader.
		var b [8]byte
		varInt, err := ReadVarInt(bufReader, &b)
		if err != nil {
			return err
		}

		payloadSize = uint32(varInt)
		hp.Type = PayloadTLV
	}

	// Now that we know the payload size, we'll create a new buffer to
	// read it out in full.
	//
	// TODO(roasbeef): can avoid all these copies
	hp.Payload = make([]byte, payloadSize)
	if _, err := io.ReadFull(bufReader, hp.Payload[:]); err != nil {
		return err
	}
	if _, err := io.ReadFull(bufReader, hp.HMAC[:]); err != nil {
		return err
	}

	return nil
}
// HopData attempts to extract a set of forwarding instructions from the
// target HopPayload. If the payload type isn't the legacy one, then (nil,
// nil) is returned, as the fixed hop-data structure only exists in that
// format.
func (hp *HopPayload) HopData() (*HopData, error) {
	// Only the legacy payload carries the structured hop data; anything
	// else is opaque to us.
	if hp.Type != PayloadLegacy {
		return nil, nil
	}

	// Now that we know the payload has the structure we expect, decode it
	// directly into a HopData value.
	hd := new(HopData)
	if err := hd.Decode(bytes.NewReader(hp.Payload)); err != nil {
		return nil, err
	}

	return hd, nil
}
// NumMaxHops is the maximum path length. There is a maximum of 1300 bytes
// (routingInfoSize) in the routing info block. Legacy hop payloads are always
// 65 bytes, while tlv payloads are at least 47 bytes (tlvlen 1, amt 2,
// timelock 2, nextchan 10, hmac 32) for the intermediate hops and 37 bytes
// (tlvlen 1, amt 2, timelock 2, hmac 32) for the exit hop. The maximum path
// length can therefore only be reached by using tlv payloads only. With that,
// the maximum number of intermediate hops is: Floor((1300 - 37) / 47) = 26.
// Including the exit hop, the maximum path length is 27 hops.
const NumMaxHops = 27
// PaymentPath represents a series of hops within the Lightning Network
// starting at a sender and terminating at a receiver. Each hop contains a set
// of mandatory data which contains forwarding instructions for that hop.
// Additionally, we can also transmit additional data to each hop by utilizing
// the un-used hops (see TrueRouteLength()) to pack in additional data. In
// order to do this, we encrypt the several hops with the same node public key,
// and unroll the extra data into the space used for route forwarding
// information. Unused trailing entries are left as their zero value.
type PaymentPath [NumMaxHops]OnionHop
// OnionHop represents an abstract hop (a link between two nodes) within the
// Lightning Network. A hop is composed of the incoming node (able to decrypt
// the encrypted routing information), and the routing information itself.
// Optionally, the crafter of a route can indicate that additional data aside
// from the routing information is to be delivered, which will manifest as
// additional hops to pack the data.
type OnionHop struct {
	// NodePub is the target node for this hop. The payload will enter this
	// hop, it'll decrypt the routing information, and hand off the
	// internal packet to the next hop.
	NodePub btcec.PublicKey

	// HopPayload is the opaque payload provided to this node. If the
	// HopData above is specified, then it'll be packed into this payload.
	HopPayload HopPayload
}
// IsEmpty returns true if the hop isn't populated, which is detected by its
// public key having no curve coordinates set.
func (o OnionHop) IsEmpty() bool {
	hasX := o.NodePub.X != nil
	hasY := o.NodePub.Y != nil
	return !(hasX && hasY)
}
// NodeKeys returns a slice pointing to node keys that this route comprises
// of. The size of the returned slice will be TrueRouteLength().
func (p *PaymentPath) NodeKeys() []*btcec.PublicKey {
	routeLen := p.TrueRouteLength()

	// Only the populated prefix of the path carries real keys.
	nodeKeys := make([]*btcec.PublicKey, routeLen)
	for i := range nodeKeys {
		nodeKeys[i] = &p[i].NodePub
	}

	return nodeKeys
}
// TrueRouteLength returns the "true" length of the PaymentPath. The max
// payment path is NumMaxHops size, but in practice routes are much smaller.
// This method will return the number of actual hops (nodes) involved in this
// route. For reference, a direct path has a length of 1, a path through an
// intermediate node has a length of 2 (3 nodes involved).
func (p *PaymentPath) TrueRouteLength() int {
	// The first empty hop marks the start of the zero'd out tail of the
	// array; everything before it belongs to the real route.
	for i := 0; i < NumMaxHops; i++ {
		if p[i].IsEmpty() {
			return i
		}
	}

	return NumMaxHops
}
// TotalPayloadSize returns the sum of the size of each payload in the "true"
// route.
func (p *PaymentPath) TotalPayloadSize() int {
	total := 0
	for i := range p {
		// Zero'd out hops past the true route length contribute
		// nothing.
		if p[i].IsEmpty() {
			continue
		}

		total += p[i].HopPayload.NumBytes()
	}

	return total
}

View File

@@ -0,0 +1,81 @@
package sphinx
import (
"encoding/binary"
"io"
)
// ReplaySet is a data structure used to efficiently record the occurrence of
// replays, identified by sequence number, when processing a Batch. Its primary
// functionality includes set construction, membership queries, and merging of
// replay sets.
type ReplaySet struct {
	// replays is the set of sequence numbers seen, stored as map keys with
	// zero-width values.
	replays map[uint16]struct{}
}
// NewReplaySet initializes an empty replay set.
func NewReplaySet() *ReplaySet {
	return &ReplaySet{replays: map[uint16]struct{}{}}
}
// Size returns the number of elements in the replay set.
func (rs *ReplaySet) Size() int {
	return len(rs.replays)
}
// Add inserts the provided index into the replay set. Adding an index that is
// already present is a no-op.
func (rs *ReplaySet) Add(idx uint16) {
	rs.replays[idx] = struct{}{}
}
// Contains queries the contents of the replay set for membership of a
// particular index.
func (rs *ReplaySet) Contains(idx uint16) bool {
	_, ok := rs.replays[idx]
	return ok
}
// Merge adds the contents of the provided replay set to the receiver's set.
func (rs *ReplaySet) Merge(rs2 *ReplaySet) {
	// Insert each of the other set's sequence numbers directly.
	for seqNum := range rs2.replays {
		rs.replays[seqNum] = struct{}{}
	}
}
// Encode serializes the replay set into an io.Writer suitable for storage. The
// replay set can be recovered using Decode.
//
// NOTE: map iteration order is randomized, so the on-disk ordering of the
// big-endian uint16 sequence numbers is not deterministic. Decode does not
// depend on ordering.
func (rs *ReplaySet) Encode(w io.Writer) error {
	for seqNum := range rs.replays {
		err := binary.Write(w, binary.BigEndian, seqNum)
		if err != nil {
			return err
		}
	}

	return nil
}
// Decode reconstructs a replay set given an io.Reader. The byte stream is
// assumed to be even in length, otherwise resulting in failure.
func (rs *ReplaySet) Decode(r io.Reader) error {
	for {
		// Read the next big-endian uint16 index from the stream. A
		// clean EOF means we've consumed the entire set.
		var seqNum uint16
		err := binary.Read(r, binary.BigEndian, &seqNum)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			// Can return ErrShortBuffer or ErrUnexpectedEOF.
			return err
		}

		// Add this decoded sequence number to the set.
		rs.Add(seqNum)
	}
}

View File

@@ -0,0 +1,188 @@
package sphinx
import (
"crypto/sha256"
"errors"
)
const (
	// HashPrefixSize is the size in bytes of the keys we will be storing
	// in the ReplayLog. It represents the first 20 bytes of a truncated
	// sha-256 hash of a secret generated by ECDH.
	HashPrefixSize = 20
)

// HashPrefix is a statically sized, 20-byte array containing the prefix
// of a Hash256, and is used to detect duplicate sphinx packets.
type HashPrefix [HashPrefixSize]byte
// errReplayLogAlreadyStarted is an error returned when Start() is called on a
// ReplayLog after it is started and before it is stopped.
//
// Fix: error strings now start lowercase and the redundant explicit `error`
// type annotation is dropped, per Go convention (staticcheck ST1005).
var errReplayLogAlreadyStarted = errors.New(
	"replay log has already been started")

// errReplayLogNotStarted is an error returned when methods other than Start()
// are called on a ReplayLog before it is started or after it is stopped.
var errReplayLogNotStarted = errors.New(
	"replay log has not been started")
// hashSharedSecret Sha-256 hashes the shared secret and returns the first
// HashPrefixSize bytes of the hash.
func hashSharedSecret(sharedSecret *Hash256) *HashPrefix {
	// sha256.Sum256 yields the same digest as the streaming New/Write/Sum
	// sequence over the full secret.
	digest := sha256.Sum256(sharedSecret[:])

	// Truncate the digest down to the prefix length.
	var sharedHash HashPrefix
	copy(sharedHash[:], digest[:])

	return &sharedHash
}
// ReplayLog is an interface that defines a log of incoming sphinx packets,
// enabling strong replay protection. The interface is general to allow
// implementations near-complete autonomy. All methods must be safe for
// concurrent access.
type ReplayLog interface {
	// Start starts up the log. It returns an error if one occurs.
	Start() error

	// Stop safely stops the log. It returns an error if one occurs.
	Stop() error

	// Get retrieves an entry from the log given its hash prefix. It returns the
	// value stored and an error if one occurs. It returns ErrLogEntryNotFound
	// if the entry is not in the log.
	Get(*HashPrefix) (uint32, error)

	// Put stores an entry into the log given its hash prefix and an
	// accompanying purposefully general type. It returns ErrReplayedPacket if
	// the provided hash prefix already exists in the log.
	Put(*HashPrefix, uint32) error

	// Delete deletes an entry from the log given its hash prefix.
	Delete(*HashPrefix) error

	// PutBatch stores a batch of sphinx packets into the log given their hash
	// prefixes and accompanying values. Returns the set of entries in the batch
	// that are replays and an error if one occurs.
	PutBatch(*Batch) (*ReplaySet, error)
}
// MemoryReplayLog is a simple ReplayLog implementation that stores all added
// sphinx packets and processed batches in memory with no persistence.
//
// This is designed for use just in testing.
type MemoryReplayLog struct {
	// batches maps a batch ID to the replay set computed the first time
	// that batch was processed, providing idempotence for PutBatch.
	batches map[string]*ReplaySet

	// entries maps a packet's hash prefix to its CLTV value. A nil map
	// indicates the log has not been started (or has been stopped).
	entries map[HashPrefix]uint32
}
// NewMemoryReplayLog constructs a new MemoryReplayLog. The internal maps are
// left nil until Start is called.
func NewMemoryReplayLog() *MemoryReplayLog {
	return &MemoryReplayLog{}
}
// Start initializes the log and must be called before any other methods.
//
// Fix: calling Start on an already-started log now returns
// errReplayLogAlreadyStarted, matching that error's documented contract.
// Previously a second Start silently wiped all recorded entries, defeating
// replay protection.
func (rl *MemoryReplayLog) Start() error {
	if rl.entries != nil || rl.batches != nil {
		return errReplayLogAlreadyStarted
	}

	rl.batches = make(map[string]*ReplaySet)
	rl.entries = make(map[HashPrefix]uint32)
	return nil
}
// Stop wipes the state of the log, returning it to the not-started state.
func (rl *MemoryReplayLog) Stop() error {
	// Both maps are non-nil exactly when the log is running.
	started := rl.entries != nil && rl.batches != nil
	if !started {
		return errReplayLogNotStarted
	}

	rl.entries = nil
	rl.batches = nil
	return nil
}
// Get retrieves an entry from the log given its hash prefix. It returns the
// value stored and an error if one occurs. It returns ErrLogEntryNotFound
// if the entry is not in the log.
func (rl *MemoryReplayLog) Get(hash *HashPrefix) (uint32, error) {
	if rl.entries == nil || rl.batches == nil {
		return 0, errReplayLogNotStarted
	}

	if cltv, ok := rl.entries[*hash]; ok {
		return cltv, nil
	}

	return 0, ErrLogEntryNotFound
}
// Put stores an entry into the log given its hash prefix and an accompanying
// purposefully general type. It returns ErrReplayedPacket if the provided hash
// prefix already exists in the log.
func (rl *MemoryReplayLog) Put(hash *HashPrefix, cltv uint32) error {
	if rl.entries == nil || rl.batches == nil {
		return errReplayLogNotStarted
	}

	// A pre-existing entry for this prefix means the packet is a replay.
	if _, replayed := rl.entries[*hash]; replayed {
		return ErrReplayedPacket
	}

	rl.entries[*hash] = cltv
	return nil
}
// Delete deletes an entry from the log given its hash prefix. Deleting a
// prefix that is not present is a no-op.
func (rl *MemoryReplayLog) Delete(hash *HashPrefix) error {
	if rl.entries == nil || rl.batches == nil {
		return errReplayLogNotStarted
	}

	delete(rl.entries, *hash)
	return nil
}
// PutBatch stores a batch of sphinx packets into the log given their hash
// prefixes and accompanying values. Returns the set of entries in the batch
// that are replays and an error if one occurs.
func (rl *MemoryReplayLog) PutBatch(batch *Batch) (*ReplaySet, error) {
	if rl.entries == nil || rl.batches == nil {
		return nil, errReplayLogNotStarted
	}

	// Return the result when the batch was first processed to provide
	// idempotence: re-processing the same batch ID yields the cached
	// replay set rather than re-inserting its entries.
	replays, exists := rl.batches[string(batch.ID)]
	if !exists {
		replays = NewReplaySet()
		err := batch.ForEach(func(seqNum uint16, hashPrefix *HashPrefix, cltv uint32) error {
			err := rl.Put(hashPrefix, cltv)
			if err == ErrReplayedPacket {
				// A replayed packet is recorded in the result
				// set rather than treated as a failure.
				replays.Add(seqNum)
				return nil
			}

			// An error would be bad because we have already updated the entries
			// map, but no errors other than ErrReplayedPacket should occur.
			return err
		})
		if err != nil {
			return nil, err
		}

		// Fold in any replays the batch itself already recorded, then
		// cache the result under the batch ID.
		replays.Merge(batch.ReplaySet)
		rl.batches[string(batch.ID)] = replays
	}

	batch.ReplaySet = replays
	batch.IsCommitted = true

	return replays, nil
}
// A compile-time assertion to ensure that *MemoryReplayLog implements the
// ReplayLog interface.
var _ ReplayLog = (*MemoryReplayLog)(nil)

View File

@@ -0,0 +1,778 @@
package sphinx
import (
"bytes"
"crypto/ecdsa"
"crypto/hmac"
"crypto/sha256"
"fmt"
"io"
"math/big"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcutil"
)
const (
	// AddressSize is the length of the serialized address used to uniquely
	// identify the next hop to forward the onion to. BOLT 04 defines this
	// as an 8 byte channel_id.
	AddressSize = 8

	// RealmByteSize is the number of bytes that the realm byte occupies.
	RealmByteSize = 1

	// AmtForwardSize is the number of bytes that the amount to forward
	// occupies.
	AmtForwardSize = 8

	// OutgoingCLTVSize is the number of bytes that the outgoing CLTV value
	// occupies.
	OutgoingCLTVSize = 4

	// NumPaddingBytes is the number of padding bytes in the hopData. These
	// bytes are currently unused within the protocol, and are reserved for
	// future use. However, if a hop contains extra data, then we'll
	// utilize this space to pack in the unrolled bytes.
	NumPaddingBytes = 12

	// LegacyHopDataSize is the fixed size of hop_data. BOLT 04 currently
	// specifies this to be 1 byte realm, 8 byte channel_id, 8 byte amount
	// to forward, 4 byte outgoing CLTV value, 12 bytes padding and 32
	// bytes HMAC for a total of 65 bytes per hop.
	LegacyHopDataSize = (RealmByteSize + AddressSize + AmtForwardSize +
		OutgoingCLTVSize + NumPaddingBytes + HMACSize)

	// MaxPayloadSize is the maximum size a payload for a single hop can
	// be. This is the worst case scenario of a single hop, consuming all
	// available space. We need to know this in order to generate a
	// sufficiently long stream of pseudo-random bytes when
	// encrypting/decrypting the payload.
	MaxPayloadSize = routingInfoSize

	// routingInfoSize is the fixed size of the routing info. This
	// consists of an AddressSize byte address and a HMACSize byte HMAC for
	// each hop of the route, the first pair in cleartext and the following
	// pairs increasingly obfuscated. If not all space is used up, the
	// remainder is padded with null-bytes, also obfuscated.
	routingInfoSize = 1300

	// numStreamBytes is the number of bytes produced by our CSPRG for the
	// key stream implementing our stream cipher to encrypt/decrypt the mix
	// header. The MaxPayloadSize bytes at the end are used to
	// encrypt/decrypt the fillers when processing the packet or generating
	// the HMACs when creating the packet.
	numStreamBytes = routingInfoSize * 2

	// keyLen is the length of the keys used to generate cipher streams and
	// encrypt payloads. Since we use SHA256 to generate the keys, the
	// maximum length currently is 32 bytes.
	keyLen = 32

	// baseVersion represents the current supported version of the onion
	// packet.
	baseVersion = 0
)
var (
	// ErrMaxRoutingInfoSizeExceeded is returned when the total payload
	// size of a route exceeds the fixed routing info size of the packet.
	ErrMaxRoutingInfoSizeExceeded = fmt.Errorf(
		"max routing info size of %v bytes exceeded", routingInfoSize)
)
// OnionPacket is the onion wrapped hop-to-hop routing information necessary to
// propagate a message through the mix-net without intermediate nodes having
// knowledge of their position within the route, the source, the destination,
// and finally the identities of the past/future nodes in the route. At each
// hop the ephemeral key is used by the node to perform ECDH between itself and
// the source node. This derived secret key is used to check the MAC of the
// entire mix header, decrypt the next set of routing information, and
// re-randomize the ephemeral key for the next node in the path. This per-hop
// re-randomization allows us to only propagate a single group element through
// the onion route.
type OnionPacket struct {
	// Version denotes the version of this onion packet. The version
	// indicates how a receiver of the packet should interpret the bytes
	// following this version byte. Currently, a version of 0x00 is the
	// only defined version type.
	Version byte

	// EphemeralKey is the public key that each hop will use in
	// combination with the private key in an ECDH to derive the shared
	// secret used to check the HMAC on the packet and also decrypt the
	// routing information.
	EphemeralKey *btcec.PublicKey

	// RoutingInfo is the full routing information for this onion packet.
	// This encodes all the forwarding instructions for this current hop
	// and all the hops in the route.
	RoutingInfo [routingInfoSize]byte

	// HeaderMAC is an HMAC computed with the shared secret of the routing
	// data and the associated data for this route. Including the
	// associated data lets each hop authenticate higher-level data that is
	// critical for the forwarding of this HTLC.
	HeaderMAC [HMACSize]byte
}
// generateSharedSecrets generates, from the given nodes' pubkeys and our
// session key, one shared secret per hop. The returned slice has one Hash256
// entry per element of paymentPath.
func generateSharedSecrets(paymentPath []*btcec.PublicKey,
	sessionKey *btcec.PrivateKey) []Hash256 {

	// Each hop performs ECDH with our ephemeral key pair to arrive at a
	// shared secret. Additionally, each hop randomizes the group element
	// for the next hop by multiplying it by the blinding factor. This way
	// we only need to transmit a single group element, and hops can't link
	// a session back to us if they have several nodes in the path.
	numHops := len(paymentPath)
	hopSharedSecrets := make([]Hash256, numHops)

	// Compute the triplet for the first hop outside of the main loop.
	// Within the loop each new triplet will be computed recursively based
	// off of the blinding factor of the last hop.
	lastEphemeralPubKey := sessionKey.PubKey()
	hopSharedSecrets[0] = generateSharedSecret(paymentPath[0], sessionKey)
	lastBlindingFactor := computeBlindingFactor(lastEphemeralPubKey, hopSharedSecrets[0][:])

	// The cached blinding factor will contain the running product of the
	// session private key x and blinding factors b_i, computed as
	//   c_0 = x
	//   c_i = c_{i-1} * b_{i-1} (mod |F(G)|).
	//       = x * b_0 * b_1 * ... * b_{i-1} (mod |F(G)|).
	//
	// We begin with just the session private key x, so that base case
	// c_0 = x. At the beginning of each iteration, the previous blinding
	// factor is aggregated into the modular product, and used as the scalar
	// value in deriving the hop ephemeral keys and shared secrets.
	var cachedBlindingFactor big.Int
	cachedBlindingFactor.SetBytes(sessionKey.D.Bytes())

	// Now recursively compute the cached blinding factor, ephemeral ECDH
	// pub keys, and shared secret for each hop.
	var nextBlindingFactor big.Int
	for i := 1; i <= numHops-1; i++ {
		// Update the cached blinding factor with b_{i-1}.
		nextBlindingFactor.SetBytes(lastBlindingFactor[:])
		cachedBlindingFactor.Mul(&cachedBlindingFactor, &nextBlindingFactor)
		cachedBlindingFactor.Mod(&cachedBlindingFactor, btcec.S256().Params().N)

		// a_i = g ^ c_i
		//     = g^( x * b_0 * ... * b_{i-1} )
		//     = X^( b_0 * ... * b_{i-1} )
		// X_our_session_pub_key x all prev blinding factors
		lastEphemeralPubKey = blindBaseElement(cachedBlindingFactor.Bytes())

		// e_i = Y_i ^ c_i
		//     = ( Y_i ^ x )^( b_0 * ... * b_{i-1} )
		// (Y_their_pub_key x x_our_priv) x all prev blinding factors
		hopBlindedPubKey := blindGroupElement(
			paymentPath[i], cachedBlindingFactor.Bytes(),
		)

		// s_i = sha256( e_i )
		//     = sha256( Y_i ^ (x * b_0 * ... * b_{i-1} )
		hopSharedSecrets[i] = sha256.Sum256(hopBlindedPubKey.SerializeCompressed())

		// Only need to evaluate up to the penultimate blinding factor.
		if i >= numHops-1 {
			break
		}

		// b_i = sha256( a_i || s_i )
		lastBlindingFactor = computeBlindingFactor(
			lastEphemeralPubKey, hopSharedSecrets[i][:],
		)
	}

	return hopSharedSecrets
}
// NewOnionPacket creates a new onion packet which is capable of obliviously
// routing a message through the mix-net path outlined by 'paymentPath'. The
// optional assocData is folded into each hop's HMAC; pktFiller must be
// non-nil and seeds the initial packet contents (see PacketFiller).
func NewOnionPacket(paymentPath *PaymentPath, sessionKey *btcec.PrivateKey,
	assocData []byte, pktFiller PacketFiller) (*OnionPacket, error) {

	// Check whether total payload size doesn't exceed the hard maximum.
	if paymentPath.TotalPayloadSize() > routingInfoSize {
		return nil, ErrMaxRoutingInfoSizeExceeded
	}

	// If we don't actually have a partially populated route, then we'll
	// exit early.
	numHops := paymentPath.TrueRouteLength()
	if numHops == 0 {
		return nil, fmt.Errorf("route of length zero passed in")
	}

	// We'll force the caller to provide a packet filler, as otherwise we
	// may default to an insecure filling method (which should only really
	// be used to generate test vectors).
	if pktFiller == nil {
		return nil, fmt.Errorf("packet filler must be specified")
	}

	hopSharedSecrets := generateSharedSecrets(
		paymentPath.NodeKeys(), sessionKey,
	)

	// Generate the padding, called "filler strings" in the paper.
	filler := generateHeaderPadding("rho", paymentPath, hopSharedSecrets)

	// Allocate zero'd out byte slices to store the final mix header packet
	// and the hmac for each hop.
	var (
		mixHeader     [routingInfoSize]byte
		nextHmac      [HMACSize]byte
		hopPayloadBuf bytes.Buffer
	)

	// Fill the packet using the caller specified methodology.
	if err := pktFiller(sessionKey, &mixHeader); err != nil {
		return nil, err
	}

	// Now we compute the routing information for each hop, along with a
	// MAC of the routing info using the shared key for that hop. We
	// iterate from the final hop backwards so each layer wraps the next.
	for i := numHops - 1; i >= 0; i-- {
		// We'll derive the two keys we need for each hop in order to:
		// generate our stream cipher bytes for the mixHeader, and
		// calculate the MAC over the entire constructed packet.
		rhoKey := generateKey("rho", &hopSharedSecrets[i])
		muKey := generateKey("mu", &hopSharedSecrets[i])

		// The HMAC for the final hop is simply zeroes. This allows the
		// last hop to recognize that it is the destination for a
		// particular payment.
		paymentPath[i].HopPayload.HMAC = nextHmac

		// Next, using the key dedicated for our stream cipher, we'll
		// generate enough bytes to obfuscate this layer of the onion
		// packet.
		streamBytes := generateCipherStream(rhoKey, routingInfoSize)
		payload := paymentPath[i].HopPayload

		// Before we assemble the packet, we'll shift the current
		// mix-header to the right in order to make room for this next
		// per-hop data.
		shiftSize := payload.NumBytes()
		rightShift(mixHeader[:], shiftSize)

		err := payload.Encode(&hopPayloadBuf)
		if err != nil {
			return nil, err
		}

		copy(mixHeader[:], hopPayloadBuf.Bytes())

		// Once the packet for this hop has been assembled, we'll
		// re-encrypt the packet by XOR'ing with a stream of bytes
		// generated using our shared secret.
		xor(mixHeader[:], mixHeader[:], streamBytes[:])

		// If this is the "last" hop, then we'll override the tail of
		// the hop data.
		if i == numHops-1 {
			copy(mixHeader[len(mixHeader)-len(filler):], filler)
		}

		// The packet for this hop consists of: mixHeader. When
		// calculating the MAC, we'll also include the optional
		// associated data which can allow higher level applications to
		// prevent replay attacks.
		packet := append(mixHeader[:], assocData...)
		nextHmac = calcMac(muKey, packet)

		// Reset the scratch buffer so the next iteration starts from
		// an empty payload encoding.
		hopPayloadBuf.Reset()
	}

	return &OnionPacket{
		Version:      baseVersion,
		EphemeralKey: sessionKey.PubKey(),
		RoutingInfo:  mixHeader,
		HeaderMAC:    nextHmac,
	}, nil
}
// rightShift shifts the byte-slice by the given number of bytes to the right
// and 0-fill the resulting gap.
func rightShift(slice []byte, num int) {
for i := len(slice) - num - 1; i >= 0; i-- {
slice[num+i] = slice[i]
}
for i := 0; i < num; i++ {
slice[i] = 0
}
}
// generateHeaderPadding derives the bytes for padding the mix header to ensure
// it remains fixed sized throughout route transit. At each step, we add
// 'frameSize*frames' padding of zeroes, concatenate it to the previous filler,
// then decrypt it (XOR) with the secret key of the current hop. When
// encrypting the mix header we essentially do the reverse of this operation:
// we "encrypt" the padding, and drop 'frameSize*frames' number of zeroes. As
// nodes process the mix header they add the padding ('frameSize*frames') in
// order to check the MAC and decrypt the next routing information eventually
// leaving only the original "filler" bytes produced by this function at the
// last hop. Using this methodology, the size of the field stays constant at
// each hop.
func generateHeaderPadding(key string, path *PaymentPath, sharedSecrets []Hash256) []byte {
	numHops := path.TrueRouteLength()

	// The filler needs to cover every hop except the final one, since the
	// last hop doesn't generate an HMAC.
	fillerSize := path.TotalPayloadSize() - path[numHops-1].HopPayload.NumBytes()
	filler := make([]byte, fillerSize)

	// consumed is a running total of the payload bytes used by all hops
	// strictly before the one currently being processed.
	consumed := 0
	for i := 0; i < numHops-1; i++ {
		// The filler is the part dangling off of the end of the
		// routingInfo, so the region XOR'd at this hop begins at the
		// routingInfo boundary minus the bytes used by prior hops,
		// and extends past the boundary by the current hop's payload
		// size.
		fillerStart := routingInfoSize - consumed
		fillerEnd := routingInfoSize + path[i].HopPayload.NumBytes()

		streamKey := generateKey(key, &sharedSecrets[i])
		streamBytes := generateCipherStream(streamKey, numStreamBytes)
		xor(filler, filler, streamBytes[fillerStart:fillerEnd])

		consumed += path[i].HopPayload.NumBytes()
	}

	return filler
}
// Encode serializes the raw bytes of the onion packet into the passed
// io.Writer. The form encoded within the passed io.Writer is suitable for
// either storing on disk, or sending over the network.
func (f *OnionPacket) Encode(w io.Writer) error {
	// The wire layout is: version byte || compressed ephemeral key ||
	// routing info || header MAC.
	chunks := [][]byte{
		{f.Version},
		f.EphemeralKey.SerializeCompressed(),
		f.RoutingInfo[:],
		f.HeaderMAC[:],
	}

	for _, chunk := range chunks {
		if _, err := w.Write(chunk); err != nil {
			return err
		}
	}

	return nil
}
// Decode fully populates the target ForwardingMessage from the raw bytes
// encoded within the io.Reader. In the case of any decoding errors, an error
// will be returned. If the method succeeds, then the new OnionPacket is ready
// to be processed by an instance of SphinxNode.
func (f *OnionPacket) Decode(r io.Reader) error {
	var versionByte [1]byte
	if _, err := io.ReadFull(r, versionByte[:]); err != nil {
		return err
	}
	f.Version = versionByte[0]

	// Reject any onion packet version we don't understand, as proceeding
	// would lead to improperly decoded data.
	if f.Version != baseVersion {
		return ErrInvalidOnionVersion
	}

	// The ephemeral key is serialized as a 33-byte compressed public key.
	var ephemeral [33]byte
	if _, err := io.ReadFull(r, ephemeral[:]); err != nil {
		return err
	}
	pubKey, err := btcec.ParsePubKey(ephemeral[:], btcec.S256())
	if err != nil {
		return ErrInvalidOnionKey
	}
	f.EphemeralKey = pubKey

	if _, err := io.ReadFull(r, f.RoutingInfo[:]); err != nil {
		return err
	}

	if _, err := io.ReadFull(r, f.HeaderMAC[:]); err != nil {
		return err
	}

	return nil
}
// ProcessCode is an enum-like type which describes to the high-level package
// user which action should be taken after processing a Sphinx packet.
type ProcessCode int

const (
	// ExitNode indicates that the node which processed the Sphinx packet
	// is the destination hop in the route.
	//
	// NOTE: The constants are explicitly typed as ProcessCode so the
	// compiler can catch accidental mixing with plain integers.
	ExitNode ProcessCode = iota

	// MoreHops indicates that there are additional hops left within the
	// route. Therefore the caller should forward the packet to the node
	// denoted as the "NextHop".
	MoreHops

	// Failure indicates that a failure occurred during packet processing.
	Failure
)

// String returns a human readable string for each of the ProcessCodes.
func (p ProcessCode) String() string {
	switch p {
	case ExitNode:
		return "ExitNode"
	case MoreHops:
		return "MoreHops"
	case Failure:
		return "Failure"
	default:
		return "Unknown"
	}
}
// ProcessedPacket encapsulates the resulting state generated after processing
// an OnionPacket. A processed packet communicates to the caller what action
// should be taken after processing.
type ProcessedPacket struct {
	// Action represents the action the caller should take after processing
	// the packet: either forward it (MoreHops) or accept it as the final
	// destination (ExitNode).
	Action ProcessCode

	// ForwardingInstructions is the per-hop payload recovered from the
	// initial encrypted onion packet. It details how the packet should be
	// forwarded and also includes information that allows the processor of
	// the packet to authenticate the information passed within the HTLC.
	//
	// NOTE: This field will only be populated iff the above Action is
	// MoreHops.
	ForwardingInstructions *HopData

	// Payload is the raw payload as extracted from the packet. If the
	// ForwardingInstructions field above is nil, then this is a modern TLV
	// payload. As a result, the caller should parse the contents to obtain
	// the new set of forwarding instructions.
	Payload HopPayload

	// NextPacket is the onion packet that should be forwarded to the next
	// hop as denoted by the ForwardingInstructions field.
	//
	// NOTE: This field will only be populated iff the above Action is
	// MoreHops.
	NextPacket *OnionPacket
}
// Router is an onion router within the Sphinx network. The router is capable
// of processing incoming Sphinx onion packets thereby "peeling" a layer off
// the onion encryption which the packet is wrapped with.
type Router struct {
	// nodeID is the HASH160 of the router's compressed public key, and
	// acts as the router's identifier (see NewRouter).
	nodeID [AddressSize]byte

	// nodeAddr is the pay-to-pubkey-hash address derived from nodeID.
	nodeAddr *btcutil.AddressPubKeyHash

	// onionKey is the router's onion private key, used to derive the
	// shared secret for each incoming packet.
	onionKey *btcec.PrivateKey

	// log is the replay log used to reject packets whose shared secret
	// has been seen before.
	log ReplayLog
}
// NewRouter creates a new instance of a Sphinx onion Router given the node's
// currently advertised onion private key, and the target Bitcoin network.
func NewRouter(nodeKey *btcec.PrivateKey, net *chaincfg.Params, log ReplayLog) *Router {
	// The node is identified by the HASH160 of its compressed public key.
	var nodeID [AddressSize]byte
	pubKeyHash := btcutil.Hash160(nodeKey.PubKey().SerializeCompressed())
	copy(nodeID[:], pubKeyHash)

	// Safe to ignore the error here, nodeID is 20 bytes.
	nodeAddr, _ := btcutil.NewAddressPubKeyHash(nodeID[:], net)

	// Rebuild the private key struct explicitly on the secp256k1 curve.
	onionKey := &btcec.PrivateKey{
		PublicKey: ecdsa.PublicKey{
			Curve: btcec.S256(),
			X:     nodeKey.X,
			Y:     nodeKey.Y,
		},
		D: nodeKey.D,
	}

	return &Router{
		nodeID:   nodeID,
		nodeAddr: nodeAddr,
		onionKey: onionKey,
		log:      log,
	}
}
// Start starts / opens the ReplayLog's channeldb and its accompanying
// garbage collector goroutine. It simply delegates to the underlying
// ReplayLog implementation.
func (r *Router) Start() error {
	return r.log.Start()
}
// Stop stops / closes the ReplayLog's channeldb and its accompanying
// garbage collector goroutine. It simply delegates to the underlying
// ReplayLog implementation.
func (r *Router) Stop() {
	r.log.Stop()
}
// ProcessOnionPacket processes an incoming onion packet which has been
// forwarded to the target Sphinx router. If the encoded ephemeral key isn't
// on the target Elliptic Curve, then the packet is rejected. Similarly, if
// the derived shared secret has been seen before the packet is rejected.
// Finally if the MAC doesn't check the packet is again rejected.
//
// In the case of a successful packet processing, a ProcessedPacket struct is
// returned which houses the newly parsed packet, along with instructions on
// what to do next.
func (r *Router) ProcessOnionPacket(onionPkt *OnionPacket,
	assocData []byte, incomingCltv uint32) (*ProcessedPacket, error) {

	// Derive the shared secret between this router and the packet sender.
	sharedSecret, err := r.generateSharedSecret(onionPkt.EphemeralKey)
	if err != nil {
		return nil, err
	}

	// The hash prefix of the shared secret acts as the identifier used
	// for detecting replayed packets.
	hashPrefix := hashSharedSecret(&sharedSecret)

	// Optimistically process this packet first, deferring replay
	// protection until the end to reduce the penalty of multiple IO
	// operations.
	processed, err := processOnionPacket(onionPkt, &sharedSecret, assocData, r)
	if err != nil {
		return nil, err
	}

	// Atomically compare this hash prefix with the contents of the
	// on-disk log, persisting it only if this entry was not detected as a
	// replay.
	if err := r.log.Put(hashPrefix, incomingCltv); err != nil {
		return nil, err
	}

	return processed, nil
}
// ReconstructOnionPacket rederives the subsequent onion packet.
//
// NOTE: This method does not do any sort of replay protection, and should only
// be used to reconstruct packets that were successfully processed previously.
func (r *Router) ReconstructOnionPacket(onionPkt *OnionPacket,
	assocData []byte) (*ProcessedPacket, error) {

	// Derive the shared secret for this packet; unlike normal processing,
	// no replay check is performed afterwards.
	sharedSecret, err := r.generateSharedSecret(onionPkt.EphemeralKey)
	if err != nil {
		return nil, err
	}

	return processOnionPacket(onionPkt, &sharedSecret, assocData, r)
}
// unwrapPacket wraps a layer of the passed onion packet using the specified
// shared secret and associated data. The associated data will be used to check
// the HMAC at each hop to ensure the same data is passed along with the onion
// packet. This function returns the next inner onion packet layer, along with
// the hop data extracted from the outer onion packet.
func unwrapPacket(onionPkt *OnionPacket, sharedSecret *Hash256,
	assocData []byte) (*OnionPacket, *HopPayload, error) {

	dhKey := onionPkt.EphemeralKey
	// routeInfo is a local copy of the routing-info array, so appending
	// to routeInfo[:] below allocates a fresh slice rather than mutating
	// the caller's packet.
	routeInfo := onionPkt.RoutingInfo
	headerMac := onionPkt.HeaderMAC

	// Using the derived shared secret, ensure the integrity of the routing
	// information by checking the attached MAC without leaking timing
	// information (hmac.Equal is a constant-time comparison).
	message := append(routeInfo[:], assocData...)
	calculatedMac := calcMac(generateKey("mu", sharedSecret), message)
	if !hmac.Equal(headerMac[:], calculatedMac[:]) {
		return nil, nil, ErrInvalidOnionHMAC
	}

	// Attach the padding zeroes in order to properly strip an encryption
	// layer off the routing info revealing the routing information for the
	// next hop.
	streamBytes := generateCipherStream(
		generateKey("rho", sharedSecret), numStreamBytes,
	)
	zeroBytes := bytes.Repeat([]byte{0}, MaxPayloadSize)
	headerWithPadding := append(routeInfo[:], zeroBytes...)

	var hopInfo [numStreamBytes]byte
	xor(hopInfo[:], headerWithPadding, streamBytes)

	// Randomize the DH group element for the next hop using the
	// deterministic blinding factor.
	blindingFactor := computeBlindingFactor(dhKey, sharedSecret[:])
	nextDHKey := blindGroupElement(dhKey, blindingFactor[:])

	// With the MAC checked, and the payload decrypted, we can now parse
	// out the payload so we can derive the specified forwarding
	// instructions.
	var hopPayload HopPayload
	if err := hopPayload.Decode(bytes.NewReader(hopInfo[:])); err != nil {
		return nil, nil, err
	}

	// With the necessary items extracted, we'll make a copy of the onion
	// packet for the next node, snipping off our per-hop data. The new
	// packet reuses the incoming packet's version and carries the HMAC
	// recovered from our payload, which the next hop will verify.
	var nextMixHeader [routingInfoSize]byte
	copy(nextMixHeader[:], hopInfo[hopPayload.NumBytes():])
	innerPkt := &OnionPacket{
		Version:      onionPkt.Version,
		EphemeralKey: nextDHKey,
		RoutingInfo:  nextMixHeader,
		HeaderMAC:    hopPayload.HMAC,
	}

	return innerPkt, &hopPayload, nil
}
// processOnionPacket performs the primary key derivation and handling of onion
// packets. The processed packets returned from this method should only be used
// if the packet was not flagged as a replayed packet.
func processOnionPacket(onionPkt *OnionPacket, sharedSecret *Hash256,
	assocData []byte,
	sharedSecretGen sharedSecretGenerator) (*ProcessedPacket, error) {

	// First, we'll unwrap an initial layer of the onion packet. Typically,
	// we'll only have a single layer to unwrap, However, if the sender has
	// additional data for us within the Extra Onion Blobs (EOBs), then we
	// may have to unwrap additional layers. By default, the inner most
	// mix header is the one that we'll want to pass onto the next hop so
	// they can properly check the HMAC and unwrap a layer for their
	// handoff hop.
	innerPkt, outerHopPayload, err := unwrapPacket(
		onionPkt, sharedSecret, assocData,
	)
	if err != nil {
		return nil, err
	}

	// By default we'll assume that there are additional hops in the route.
	// However if the uncovered HMAC is all zeroes, then this indicates
	// that we're the final hop in the route. Note: bytes.Equal is used
	// here rather than bytes.Compare(...) == 0, which is the idiomatic
	// (and faster) way to test byte-slice equality.
	var action ProcessCode = MoreHops
	if bytes.Equal(zeroHMAC[:], outerHopPayload.HMAC[:]) {
		action = ExitNode
	}

	hopData, err := outerHopPayload.HopData()
	if err != nil {
		return nil, err
	}

	// Finally, we'll return a fully processed packet with the outer most
	// hop data (where the primary forwarding instructions lie) and the
	// inner most onion packet that we unwrapped.
	return &ProcessedPacket{
		Action:                 action,
		ForwardingInstructions: hopData,
		Payload:                *outerHopPayload,
		NextPacket:             innerPkt,
	}, nil
}
// Tx is a transaction consisting of a number of sphinx packets to be atomically
// written to the replay log. This structure helps to coordinate construction of
// the underlying Batch object, and to ensure that the result of the processing
// is idempotent.
type Tx struct {
	// batch is the set of packets to be incrementally processed and
	// ultimately committed in this transaction.
	batch *Batch

	// router is a reference to the sphinx router that created this
	// transaction. Committing this transaction will utilize this router's
	// replay log.
	router *Router

	// packets contains a potentially sparse list of optimistically processed
	// packets for this batch, indexed by sequence number. The contents of
	// a particular index should only be accessed if the index is *not*
	// included in the replay set, or otherwise failed any other stage of
	// the processing.
	packets []ProcessedPacket
}
// BeginTxn creates a new transaction that can later be committed back to the
// sphinx router's replay log.
//
// NOTE: The nels parameter should represent the maximum number of packets
// that could be added to the batch; using sequence numbers that match or
// exceed this value could result in an out-of-bounds panic.
func (r *Router) BeginTxn(id []byte, nels int) *Tx {
	tx := &Tx{
		batch:   NewBatch(id),
		router:  r,
		packets: make([]ProcessedPacket, nels),
	}

	return tx
}
// ProcessOnionPacket processes an incoming onion packet which has been
// forwarded to the target Sphinx router. If the encoded ephemeral key isn't
// on the target Elliptic Curve, then the packet is rejected. Similarly, if
// the derived shared secret has been seen before the packet is rejected.
// Finally if the MAC doesn't check the packet is again rejected.
//
// In the case of a successful packet processing, the resulting
// ProcessedPacket is cached within the transaction under seqNum, to be
// retrieved after Commit.
func (t *Tx) ProcessOnionPacket(seqNum uint16, onionPkt *OnionPacket,
	assocData []byte, incomingCltv uint32) error {

	// Derive the shared secret between us and the packet's sender.
	sharedSecret, err := t.router.generateSharedSecret(
		onionPkt.EphemeralKey,
	)
	if err != nil {
		return err
	}

	// The hash prefix of the shared secret serves as the identifier used
	// for detecting replayed packets.
	hashPrefix := hashSharedSecret(&sharedSecret)

	// Optimistically process this packet now; replay protection is
	// deferred until Commit to reduce the penalty of multiple IO
	// operations.
	processed, err := processOnionPacket(
		onionPkt, &sharedSecret, assocData, t.router,
	)
	if err != nil {
		return err
	}

	// Queue the hash prefix in the pending batch of shared secrets that
	// will be written later via Commit().
	if err := t.batch.Put(seqNum, hashPrefix, incomingCltv); err != nil {
		return err
	}

	// If we successfully added this packet to the batch, cache the
	// processed packet within the Tx which can be accessed after
	// committing if this sequence number does not appear in the replay
	// set.
	t.packets[seqNum] = *processed

	return nil
}
// Commit writes this transaction's batch of sphinx packets to the replay log,
// performing a final check against the log for replays. If the batch was
// already committed, the cached result is returned without touching the log.
func (t *Tx) Commit() ([]ProcessedPacket, *ReplaySet, error) {
	if !t.batch.IsCommitted {
		rs, err := t.router.log.PutBatch(t.batch)
		return t.packets, rs, err
	}

	return t.packets, t.batch.ReplaySet, nil
}

View File

@@ -0,0 +1,109 @@
package sphinx
import (
"encoding/binary"
"errors"
"io"
)
// ErrVarIntNotCanonical signals that the decoded varint was not minimally encoded.
var ErrVarIntNotCanonical = errors.New("decoded varint is not canonical")
// ReadVarInt reads a variable length integer from r and returns it as a uint64.
func ReadVarInt(r io.Reader, buf *[8]byte) (uint64, error) {
_, err := io.ReadFull(r, buf[:1])
if err != nil {
return 0, err
}
discriminant := buf[0]
var rv uint64
switch {
case discriminant < 0xfd:
rv = uint64(discriminant)
case discriminant == 0xfd:
_, err := io.ReadFull(r, buf[:2])
switch {
case err == io.EOF:
return 0, io.ErrUnexpectedEOF
case err != nil:
return 0, err
}
rv = uint64(binary.BigEndian.Uint16(buf[:2]))
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
if rv < 0xfd {
return 0, ErrVarIntNotCanonical
}
case discriminant == 0xfe:
_, err := io.ReadFull(r, buf[:4])
switch {
case err == io.EOF:
return 0, io.ErrUnexpectedEOF
case err != nil:
return 0, err
}
rv = uint64(binary.BigEndian.Uint32(buf[:4]))
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
if rv <= 0xffff {
return 0, ErrVarIntNotCanonical
}
default:
_, err := io.ReadFull(r, buf[:])
switch {
case err == io.EOF:
return 0, io.ErrUnexpectedEOF
case err != nil:
return 0, err
}
rv = binary.BigEndian.Uint64(buf[:])
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
if rv <= 0xffffffff {
return 0, ErrVarIntNotCanonical
}
}
return rv, nil
}
// WriteVarInt serializes val to w using a variable number of bytes depending
// on its value.
func WriteVarInt(w io.Writer, val uint64, buf *[8]byte) error {
var length int
switch {
case val < 0xfd:
buf[0] = uint8(val)
length = 1
case val <= 0xffff:
buf[0] = uint8(0xfd)
binary.BigEndian.PutUint16(buf[1:3], uint16(val))
length = 3
case val <= 0xffffffff:
buf[0] = uint8(0xfe)
binary.BigEndian.PutUint32(buf[1:5], uint32(val))
length = 5
default:
buf[0] = uint8(0xff)
_, err := w.Write(buf[:1])
if err != nil {
return err
}
binary.BigEndian.PutUint64(buf[:], uint64(val))
length = 8
}
_, err := w.Write(buf[:length])
return err
}