Release v0.1.0

This commit is contained in:
Manu Herrera
2019-10-01 12:22:30 -03:00
parent 41e6aad190
commit d301c63596
915 changed files with 378049 additions and 11 deletions

@@ -0,0 +1,189 @@
package headerfs
import (
"bytes"
"fmt"
"os"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
)
// ErrHeaderNotFound is returned when a target header on disk (flat file) can't
// be found.
type ErrHeaderNotFound struct {
error
}
// appendRaw appends a new raw header to the end of the flat file.
func (h *headerStore) appendRaw(header []byte) error {
if _, err := h.file.Write(header); err != nil {
return err
}
return nil
}
// readRaw reads a raw header from disk at a particular seek distance. The
// number of bytes read past the seek distance is determined by the specified
// header type.
func (h *headerStore) readRaw(seekDist uint64) ([]byte, error) {
var headerSize uint32
// Based on the defined header type, we'll determine the number of
// bytes that we need to read past the sync point.
switch h.indexType {
case Block:
headerSize = 80
case RegularFilter:
headerSize = 32
default:
return nil, fmt.Errorf("unknown index type: %v", h.indexType)
}
// TODO(roasbeef): add buffer pool
// With the number of bytes to read determined, we'll create a slice
// for that number of bytes, and read directly from the file into the
// buffer.
rawHeader := make([]byte, headerSize)
if _, err := h.file.ReadAt(rawHeader[:], int64(seekDist)); err != nil {
return nil, &ErrHeaderNotFound{err}
}
return rawHeader[:], nil
}
// readHeaderRange will attempt to fetch a series of block headers within the
// target height range. This method batches a set of reads into a single system
// call thereby increasing performance when reading a set of contiguous
// headers.
//
// NOTE: The end height is _inclusive_ so we'll fetch all headers from the
// startHeight up to the end height, including the final header.
func (h *blockHeaderStore) readHeaderRange(startHeight uint32,
endHeight uint32) ([]wire.BlockHeader, error) {
// Based on the defined header type, we'll determine the number of
// bytes that we need to read from the file.
headerReader, err := readHeadersFromFile(
h.file, BlockHeaderSize, startHeight, endHeight,
)
if err != nil {
return nil, err
}
// We'll now incrementally parse out the set of individual headers from
// our set of serialized contiguous raw headers.
numHeaders := endHeight - startHeight + 1
headers := make([]wire.BlockHeader, 0, numHeaders)
for headerReader.Len() != 0 {
var nextHeader wire.BlockHeader
if err := nextHeader.Deserialize(headerReader); err != nil {
return nil, err
}
headers = append(headers, nextHeader)
}
return headers, nil
}
// readHeader reads a full block header from the flat-file. The header read is
// determined by the height value.
func (h *blockHeaderStore) readHeader(height uint32) (wire.BlockHeader, error) {
var header wire.BlockHeader
// Each header is 80 bytes, so using this information, we'll seek a
// distance to cover that height based on the size of block headers.
seekDistance := uint64(height) * 80
// With the distance calculated, we'll read a raw header starting from that
// offset.
rawHeader, err := h.readRaw(seekDistance)
if err != nil {
return header, err
}
headerReader := bytes.NewReader(rawHeader)
// Finally, decode the raw bytes into a proper bitcoin header.
if err := header.Deserialize(headerReader); err != nil {
return header, err
}
return header, nil
}
// readHeader reads a single filter header at the specified height from the
// flat files on disk.
func (f *FilterHeaderStore) readHeader(height uint32) (*chainhash.Hash, error) {
seekDistance := uint64(height) * 32
rawHeader, err := f.readRaw(seekDistance)
if err != nil {
return nil, err
}
return chainhash.NewHash(rawHeader)
}
// readHeaderRange will attempt to fetch a series of filter headers within the
// target height range. This method batches a set of reads into a single system
// call thereby increasing performance when reading a set of contiguous
// headers.
//
// NOTE: The end height is _inclusive_ so we'll fetch all headers from the
// startHeight up to the end height, including the final header.
func (f *FilterHeaderStore) readHeaderRange(startHeight uint32,
endHeight uint32) ([]chainhash.Hash, error) {
// Based on the defined header type, we'll determine the number of
// bytes that we need to read from the file.
headerReader, err := readHeadersFromFile(
f.file, RegularFilterHeaderSize, startHeight, endHeight,
)
if err != nil {
return nil, err
}
// We'll now incrementally parse out the set of individual headers from
// our set of serialized contiguous raw headers.
numHeaders := endHeight - startHeight + 1
headers := make([]chainhash.Hash, 0, numHeaders)
for headerReader.Len() != 0 {
var nextHeader chainhash.Hash
if _, err := headerReader.Read(nextHeader[:]); err != nil {
return nil, err
}
headers = append(headers, nextHeader)
}
return headers, nil
}
// readHeadersFromFile reads a chunk of headers, each of size headerSize, from
// the given file, from startHeight to endHeight.
func readHeadersFromFile(f *os.File, headerSize, startHeight,
endHeight uint32) (*bytes.Reader, error) {
// Each header is headerSize bytes, so using this information, we'll
// seek a distance to cover that height based on the size of the headers.
seekDistance := uint64(startHeight) * uint64(headerSize)
// Based on the number of headers in the range, we'll allocate a single
// slice that's able to hold the entire range of headers.
numHeaders := endHeight - startHeight + 1
rawHeaderBytes := make([]byte, headerSize*numHeaders)
// Now that we have our slice allocated, we'll read out the entire
// range of headers with a single system call.
_, err := f.ReadAt(rawHeaderBytes, int64(seekDistance))
if err != nil {
return nil, err
}
return bytes.NewReader(rawHeaderBytes), nil
}
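
Both flat files store fixed-size records (80-byte block headers, 32-byte filter headers), so a height maps directly to a file offset with no index lookup. A minimal sketch of that arithmetic, assuming a hypothetical headers.bin file of 80-byte records:

package main

import (
	"fmt"
	"os"
)

// blockHeaderSize mirrors BlockHeaderSize above; filter headers would
// use 32 instead.
const blockHeaderSize = 80

func main() {
	// For any height, the record starts at height * recordSize.
	for _, height := range []uint64{0, 1, 100000} {
		fmt.Printf("height %d -> offset %d\n", height, height*blockHeaderSize)
	}

	// Reading one header is then a single ReadAt call, just as readRaw
	// does above. "headers.bin" is a stand-in path for this sketch.
	f, err := os.Open("headers.bin")
	if err != nil {
		fmt.Println("open:", err) // expected if the file doesn't exist
		return
	}
	defer f.Close()

	buf := make([]byte, blockHeaderSize)
	if _, err := f.ReadAt(buf, 100000*blockHeaderSize); err != nil {
		fmt.Println("read:", err)
	}
}
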

@@ -0,0 +1,305 @@
package headerfs
import (
"bytes"
"encoding/binary"
"fmt"
"sort"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcwallet/walletdb"
)
var (
// indexBucket is the main top-level bucket for the header index.
// Nothing is stored in this bucket other than the sub-buckets which
// contain the indexes for the various header types.
indexBucket = []byte("header-index")
// bitcoinTip is the key which tracks the "tip" of the block header
// chain. The value of this key will be the current block hash of the
// best known chain that we're synced to.
bitcoinTip = []byte("bitcoin")
// regFilterTip is the key which tracks the "tip" of the regular
// compact filter header chain. The value of this key will be the
// current block hash of the best known chain that the headers for
// regular filter are synced to.
regFilterTip = []byte("regular")
// extFilterTip is the key which tracks the "tip" of the extended
// compact filter header chain. The value of this key will be the
// current block hash of the best known chain that the headers for
// extended filter are synced to.
extFilterTip = []byte("ext")
)
var (
// ErrHeightNotFound is returned when a specified height isn't found in
// a target index.
ErrHeightNotFound = fmt.Errorf("target height not found in index")
// ErrHashNotFound is returned when a specified block hash isn't found
// in a target index.
ErrHashNotFound = fmt.Errorf("target hash not found in index")
)
// HeaderType is an enum-like type which defines the various header types that
// are stored within the index.
type HeaderType uint8
const (
// Block is the header type that represents regular Bitcoin block
// headers.
Block HeaderType = iota
// RegularFilter is a header type that represents the basic filter
// header type for the filter header chain.
RegularFilter
)
const (
// BlockHeaderSize is the size in bytes of the Block header type.
BlockHeaderSize = 80
// RegularFilterHeaderSize is the size in bytes of the RegularFilter
// header type.
RegularFilterHeaderSize = 32
)
// headerIndex is an index stored within the database that allows for random
// access into the on-disk header file. This, in conjunction with a flat file
// of headers, makes up the header database. The keys have been specifically
// crafted to ensure maximum write performance during IBD, and also to
// provide the necessary indexing properties required.
type headerIndex struct {
db walletdb.DB
indexType HeaderType
}
// newHeaderIndex creates a new headerIndex given an already open database, and
// a particular header type.
func newHeaderIndex(db walletdb.DB, indexType HeaderType) (*headerIndex, error) {
// As an initial step, we'll attempt to create all the buckets
// necessary for the functioning of the index. If these buckets have
// already been created, then we can exit early.
err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) error {
_, err := tx.CreateTopLevelBucket(indexBucket)
return err
})
if err != nil && err != walletdb.ErrBucketExists {
return nil, err
}
return &headerIndex{
db: db,
indexType: indexType,
}, nil
}
// headerEntry is an internal type that's used to quickly map a (height, hash)
// pair into the proper key that'll be stored within the database.
type headerEntry struct {
hash chainhash.Hash
height uint32
}
// headerBatch is a batch of header entries to be written to disk.
//
// NOTE: The entries within a batch SHOULD be properly sorted by hash in
// order to ensure the batch is written sequentially.
type headerBatch []headerEntry
// Len returns the number of entries in the batch.
//
// NOTE: This is part of the sort.Interface implementation.
func (h headerBatch) Len() int {
return len(h)
}
// Less reports whether the entry with index i should sort before the entry
// with index j. As we want to ensure the items are written in sequential
// order, we sort by hash, placing items with the lexicographically smaller
// hash first.
//
// NOTE: This is part of the sort.Interface implementation.
func (h headerBatch) Less(i, j int) bool {
return bytes.Compare(h[i].hash[:], h[j].hash[:]) < 0
}
// Swap swaps the elements with indexes i and j.
//
// NOTE: This is part of the sort.Interface implementation.
func (h headerBatch) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
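
To see the effect of this ordering in isolation, the sketch below sorts a slice of (hash, height) pairs by raw hash bytes with sort.Slice, which is exactly what sort.Sort does with the Less above (the entry type here is a stand-in, not the package's own):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

type entry struct {
	hash   [32]byte
	height uint32
}

func main() {
	batch := []entry{
		{hash: [32]byte{0xff}, height: 3},
		{hash: [32]byte{0x01}, height: 1},
		{hash: [32]byte{0x7f}, height: 2},
	}

	// Order by raw hash bytes, mirroring headerBatch.Less.
	sort.Slice(batch, func(i, j int) bool {
		return bytes.Compare(batch[i].hash[:], batch[j].hash[:]) < 0
	})

	for _, e := range batch {
		fmt.Printf("hash=%x... height=%d\n", e.hash[:1], e.height)
	}
}
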
// addHeaders writes a batch of header entries in a single atomic database
// transaction.
func (h *headerIndex) addHeaders(batch headerBatch) error {
// If we're writing a 0-length batch, make no changes and return.
if len(batch) == 0 {
return nil
}
// In order to ensure optimal write performance, we'll ensure that the
// items are sorted by their hash before insertion into the database.
sort.Sort(batch)
return walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error {
rootBucket := tx.ReadWriteBucket(indexBucket)
var tipKey []byte
// Based on the specified index type of this instance of the
// index, we'll grab the key that tracks the tip of the chain
// so we can update the index once all the header entries have
// been updated.
// TODO(roasbeef): only need block tip?
switch h.indexType {
case Block:
tipKey = bitcoinTip
case RegularFilter:
tipKey = regFilterTip
default:
return fmt.Errorf("unknown index type: %v", h.indexType)
}
var (
chainTipHash chainhash.Hash
chainTipHeight uint32
)
for _, header := range batch {
var heightBytes [4]byte
binary.BigEndian.PutUint32(heightBytes[:], header.height)
err := rootBucket.Put(header.hash[:], heightBytes[:])
if err != nil {
return err
}
// TODO(roasbeef): need to remedy if side-chain
// tracking added
if header.height >= chainTipHeight {
chainTipHash = header.hash
chainTipHeight = header.height
}
}
return rootBucket.Put(tipKey, chainTipHash[:])
})
}
// heightFromHash returns the height of the entry that matches the specified
// hash. With this height, the caller is then able to seek to the appropriate
// spot in the flat files in order to extract the true header.
func (h *headerIndex) heightFromHash(hash *chainhash.Hash) (uint32, error) {
var height uint32
err := walletdb.View(h.db, func(tx walletdb.ReadTx) error {
rootBucket := tx.ReadBucket(indexBucket)
heightBytes := rootBucket.Get(hash[:])
if heightBytes == nil {
// If the hash wasn't found, then we don't know of this
// hash within the index.
return ErrHashNotFound
}
height = binary.BigEndian.Uint32(heightBytes)
return nil
})
if err != nil {
return 0, err
}
return height, nil
}
// chainTip returns the best hash and height that the index knows of.
func (h *headerIndex) chainTip() (*chainhash.Hash, uint32, error) {
var (
tipHeight uint32
tipHash *chainhash.Hash
)
err := walletdb.View(h.db, func(tx walletdb.ReadTx) error {
rootBucket := tx.ReadBucket(indexBucket)
var tipKey []byte
// Based on the specified index type of this instance of the
// index, we'll grab the particular key that tracks the chain
// tip.
switch h.indexType {
case Block:
tipKey = bitcoinTip
case RegularFilter:
tipKey = regFilterTip
default:
return fmt.Errorf("unknown chain tip index type: %v", h.indexType)
}
// Now that we have the particular tip key for this header
// type, we'll fetch the hash for this tip, then using that
// we'll fetch the height that corresponds to that hash.
tipHashBytes := rootBucket.Get(tipKey)
tipHeightBytes := rootBucket.Get(tipHashBytes)
if len(tipHeightBytes) != 4 {
return ErrHeightNotFound
}
// With the height fetched, we can now populate our return
// parameters.
h, err := chainhash.NewHash(tipHashBytes)
if err != nil {
return err
}
tipHash = h
tipHeight = binary.BigEndian.Uint32(tipHeightBytes)
return nil
})
if err != nil {
return nil, 0, err
}
return tipHash, tipHeight, nil
}
// truncateIndex truncates the index for a particular header type by a single
// header entry. The passed newTip pointer should point to the hash of the new
// chain tip. Optionally, if the entry is to be deleted as well, then the
// delete flag should be set to true.
func (h *headerIndex) truncateIndex(newTip *chainhash.Hash, delete bool) error {
return walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error {
rootBucket := tx.ReadWriteBucket(indexBucket)
var tipKey []byte
// Based on the specified index type of this instance of the
// index, we'll grab the key that tracks the tip of the chain
// we need to update.
switch h.indexType {
case Block:
tipKey = bitcoinTip
case RegularFilter:
tipKey = regFilterTip
default:
return fmt.Errorf("unknown index type: %v", h.indexType)
}
// If the delete flag is set, then we'll also delete this entry
// from the database as the primary index (block headers) is
// being rolled back.
if delete {
prevTipHash := rootBucket.Get(tipKey)
if err := rootBucket.Delete(prevTipHash); err != nil {
return err
}
}
// With the now stale entry deleted, we'll update the chain tip
// to point to the new hash.
return rootBucket.Put(tipKey, newTip[:])
})
}
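
To summarize the key schema the index uses: each block hash maps to its height as a 4-byte big-endian value, and a short tip key ("bitcoin", "regular", "ext") maps to the current tip hash, so the tip height is found with two lookups. A small sketch of just the height encoding, assuming nothing beyond encoding/binary:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// addHeaders stores heights as 4-byte big-endian values keyed by
	// block hash.
	var heightBytes [4]byte
	binary.BigEndian.PutUint32(heightBytes[:], 598424)
	fmt.Printf("encoded: %x\n", heightBytes) // 00092198

	// heightFromHash reverses this with a single Uint32 call.
	fmt.Println("decoded:", binary.BigEndian.Uint32(heightBytes[:]))
}
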

@@ -0,0 +1,939 @@
package headerfs
import (
"bytes"
"encoding/binary"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil/gcs/builder"
"github.com/btcsuite/btcwallet/walletdb"
)
// BlockStamp represents a block, identified by its height and hash in the
// chain. We also lift the timestamp from the block header itself into this
// struct.
type BlockStamp struct {
// Height is the height of the target block.
Height int32
// Hash is the hash that uniquely identifies this block.
Hash chainhash.Hash
// Timestamp is the timestamp of the block in the chain.
Timestamp time.Time
}
// BlockHeaderStore is an interface that provides an abstraction for a generic
// store for block headers.
type BlockHeaderStore interface {
// ChainTip returns the best known block header and height for the
// BlockHeaderStore.
ChainTip() (*wire.BlockHeader, uint32, error)
// LatestBlockLocator returns the latest block locator object based on
// the tip of the current main chain from the PoV of the
// BlockHeaderStore.
LatestBlockLocator() (blockchain.BlockLocator, error)
// FetchHeaderByHeight attempts to retrieve a target block header based
// on a block height.
FetchHeaderByHeight(height uint32) (*wire.BlockHeader, error)
// FetchHeaderAncestors fetches the numHeaders block headers that are
// the ancestors of the target stop hash. A total of numHeaders+1
// headers will be returned, as we'll walk back numHeaders distance to
// collect each header, then return the final header specified by the
// stop hash. We'll also return the starting height of the header range
// as well so callers can compute the height of each header without
// knowing the height of the stop hash.
FetchHeaderAncestors(uint32, *chainhash.Hash) ([]wire.BlockHeader,
uint32, error)
// HeightFromHash returns the height of a particular block header given
// its hash.
HeightFromHash(*chainhash.Hash) (uint32, error)
// FetchHeader attempts to retrieve a block header determined by the
// passed block hash.
FetchHeader(*chainhash.Hash) (*wire.BlockHeader, uint32, error)
// WriteHeaders adds a set of headers to the BlockHeaderStore in a
// single atomic transaction.
WriteHeaders(...BlockHeader) error
// RollbackLastBlock rolls back the BlockHeaderStore by a _single_
// header. This method is meant to be used in the case of re-org which
// disconnects the latest block header from the end of the main chain.
// The information about the new header tip after truncation is
// returned.
RollbackLastBlock() (*BlockStamp, error)
}
// headerBufPool is a pool of bytes.Buffer that will be re-used by the various
// headerStore implementations to batch their header writes to disk. By
// utilizing this variable we can minimize the total number of allocations when
// writing headers to disk.
var headerBufPool = sync.Pool{
New: func() interface{} { return new(bytes.Buffer) },
}
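
A short sketch of the pool pattern in isolation: Get a *bytes.Buffer, Reset it since a recycled buffer may still hold old bytes, batch the writes, and Put it back so later calls reuse the allocation. This mirrors how WriteHeaders uses headerBufPool below.

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func main() {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset() // a recycled buffer may contain stale data
	defer bufPool.Put(buf)

	// Batch three 80-byte records into one buffer so they can be
	// flushed with a single file write.
	for i := 0; i < 3; i++ {
		buf.Write(make([]byte, 80))
	}
	fmt.Println("batched bytes:", buf.Len()) // 240
}
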
// headerStore combines an on-disk set of headers within a flat file with a
// database which indexes that flat file. Together, these two abstractions
// can be used in order to build an indexed header store for any type of
// "header" as it deals only with raw bytes, and leaves it to a higher layer to
// interpret those raw bytes accordingly.
//
// TODO(roasbeef): quickcheck coverage
type headerStore struct {
mtx sync.RWMutex
fileName string
file *os.File
*headerIndex
}
// newHeaderStore creates a new headerStore given an already open database, a
// target file path for the flat-file and a particular header type. The target
// file will be created as necessary.
func newHeaderStore(db walletdb.DB, filePath string,
hType HeaderType) (*headerStore, error) {
var flatFileName string
switch hType {
case Block:
flatFileName = "block_headers.bin"
case RegularFilter:
flatFileName = "reg_filter_headers.bin"
default:
return nil, fmt.Errorf("unrecognized filter type: %v", hType)
}
flatFileName = filepath.Join(filePath, flatFileName)
// We'll open the file, creating it if necessary and ensuring that all
// writes are actually appends to the end of the file.
fileFlags := os.O_RDWR | os.O_APPEND | os.O_CREATE
headerFile, err := os.OpenFile(flatFileName, fileFlags, 0644)
if err != nil {
return nil, err
}
// With the file open, we'll then create the header index so we can
// have random access into the flat files.
index, err := newHeaderIndex(db, hType)
if err != nil {
return nil, err
}
return &headerStore{
fileName: flatFileName,
file: headerFile,
headerIndex: index,
}, nil
}
// blockHeaderStore is an implementation of the BlockHeaderStore interface, a
// fully fledged database for Bitcoin block headers. The blockHeaderStore
// combines a flat file to store the block headers with a database instance for
// managing the index into the set of flat files.
type blockHeaderStore struct {
*headerStore
}
// A compile-time check to ensure the blockHeaderStore adheres to the
// BlockHeaderStore interface.
var _ BlockHeaderStore = (*blockHeaderStore)(nil)
// NewBlockHeaderStore creates a new instance of the blockHeaderStore based on
// a target file path, an open database instance, and finally a set of
// parameters for the target chain. These parameters are required because, if
// this is the initial start up of the blockHeaderStore, the initial genesis
// header will need to be inserted.
func NewBlockHeaderStore(filePath string, db walletdb.DB,
netParams *chaincfg.Params) (BlockHeaderStore, error) {
hStore, err := newHeaderStore(db, filePath, Block)
if err != nil {
return nil, err
}
// With the header store created, we'll fetch the file size to see if
// we need to initialize it with the first header or not.
fileInfo, err := hStore.file.Stat()
if err != nil {
return nil, err
}
bhs := &blockHeaderStore{
headerStore: hStore,
}
// If the size of the file is zero, then this means that we haven't yet
// written the initial genesis header to disk, so we'll do so now.
if fileInfo.Size() == 0 {
genesisHeader := BlockHeader{
BlockHeader: &netParams.GenesisBlock.Header,
Height: 0,
}
if err := bhs.WriteHeaders(genesisHeader); err != nil {
return nil, err
}
return bhs, nil
}
// As a final initialization step (if this isn't the first time), we'll
// ensure that the header tip within the flat files is in sync with
// our database index.
tipHash, tipHeight, err := bhs.chainTip()
if err != nil {
return nil, err
}
// First, we'll compute the size of the current file so we can
// calculate the latest header written to disk.
fileHeight := uint32(fileInfo.Size()/80) - 1
// Using the file's current height, fetch the latest on-disk header.
latestFileHeader, err := bhs.readHeader(fileHeight)
if err != nil {
return nil, err
}
// If the index's tip hash and the file on-disk match, then we're
// done here.
latestBlockHash := latestFileHeader.BlockHash()
if tipHash.IsEqual(&latestBlockHash) {
return bhs, nil
}
// TODO(roasbeef): below assumes index can never get ahead?
// * we always update files _then_ indexes
// * need to dual pointer walk back for max safety
// Otherwise, we'll need to truncate the file until it matches the
// current index tip.
for fileHeight > tipHeight {
if err := bhs.singleTruncate(); err != nil {
return nil, err
}
fileHeight--
}
return bhs, nil
}
// FetchHeader attempts to retrieve a block header determined by the passed
// block hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) FetchHeader(hash *chainhash.Hash) (*wire.BlockHeader, uint32, error) {
// Lock store for read.
h.mtx.RLock()
defer h.mtx.RUnlock()
// First, we'll query the index to obtain the block height of the
// passed block hash.
height, err := h.heightFromHash(hash)
if err != nil {
return nil, 0, err
}
// With the height known, we can now read the header from disk.
header, err := h.readHeader(height)
if err != nil {
return nil, 0, err
}
return &header, height, nil
}
// FetchHeaderByHeight attempts to retrieve a target block header based on a
// block height.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) FetchHeaderByHeight(height uint32) (*wire.BlockHeader, error) {
// Lock store for read.
h.mtx.RLock()
defer h.mtx.RUnlock()
// For this query, we don't need to consult the index, and can instead
// just seek into the flat file based on the target height and return
// the full header.
header, err := h.readHeader(height)
if err != nil {
return nil, err
}
return &header, nil
}
// FetchHeaderAncestors fetches the numHeaders block headers that are the
// ancestors of the target stop hash. A total of numHeaders+1 headers will be
// returned, as we'll walk back numHeaders distance to collect each header,
// then return the final header specified by the stop hash. We'll also return
// the starting height of the header range as well so callers can compute the
// height of each header without knowing the height of the stop hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) FetchHeaderAncestors(numHeaders uint32,
stopHash *chainhash.Hash) ([]wire.BlockHeader, uint32, error) {
// First, we'll find the final header in the range, this will be the
// ending height of our scan.
endHeight, err := h.heightFromHash(stopHash)
if err != nil {
return nil, 0, err
}
startHeight := endHeight - numHeaders
headers, err := h.readHeaderRange(startHeight, endHeight)
if err != nil {
return nil, 0, err
}
return headers, startHeight, nil
}
// HeightFromHash returns the height of a particular block header given its
// hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) HeightFromHash(hash *chainhash.Hash) (uint32, error) {
return h.heightFromHash(hash)
}
// RollbackLastBlock rolls back both the index and on-disk header file by a
// _single_ header. This method is meant to be used in the case of re-org which
// disconnects the latest block header from the end of the main chain. The
// information about the new header tip after truncation is returned.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) RollbackLastBlock() (*BlockStamp, error) {
// Lock store for write.
h.mtx.Lock()
defer h.mtx.Unlock()
// First, we'll obtain the latest height that the index knows of.
_, chainTipHeight, err := h.chainTip()
if err != nil {
return nil, err
}
// With this height obtained, we'll use it to read the latest header
// from disk, so we can populate our return value which requires the
// prev header hash.
bestHeader, err := h.readHeader(chainTipHeight)
if err != nil {
return nil, err
}
prevHeaderHash := bestHeader.PrevBlock
// Now that we have the information we need to return from this
// function, we can now truncate the header file, and then use the hash
// of the prevHeader to set the proper index chain tip.
if err := h.singleTruncate(); err != nil {
return nil, err
}
if err := h.truncateIndex(&prevHeaderHash, true); err != nil {
return nil, err
}
return &BlockStamp{
Height: int32(chainTipHeight) - 1,
Hash: prevHeaderHash,
}, nil
}
// BlockHeader is a Bitcoin block header that also has its height included.
type BlockHeader struct {
*wire.BlockHeader
// Height is the height of this block header within the current main
// chain.
Height uint32
}
// toIndexEntry converts the BlockHeader into a matching headerEntry. This
// method is used when a header is to be written to disk.
func (b *BlockHeader) toIndexEntry() headerEntry {
return headerEntry{
hash: b.BlockHash(),
height: b.Height,
}
}
// WriteHeaders writes a set of headers to disk and updates the index in a
// single atomic transaction.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) WriteHeaders(hdrs ...BlockHeader) error {
// Lock store for write.
h.mtx.Lock()
defer h.mtx.Unlock()
// First, we'll grab a buffer from the write buffer pool so we can
// reduce our total number of allocations, and also write the headers
// in a single swoop.
headerBuf := headerBufPool.Get().(*bytes.Buffer)
headerBuf.Reset()
defer headerBufPool.Put(headerBuf)
// Next, we'll write out all the passed headers in series into the
// buffer we just extracted from the pool.
for _, header := range hdrs {
if err := header.Serialize(headerBuf); err != nil {
return err
}
}
// With all the headers written to the buffer, we'll now write out the
// entire batch in a single write call.
if err := h.appendRaw(headerBuf.Bytes()); err != nil {
return err
}
// Once those are written, we'll then collate all the headers into
// headerEntry instances so we can write them all into the index in a
// single atomic batch.
headerLocs := make([]headerEntry, len(hdrs))
for i, header := range hdrs {
headerLocs[i] = header.toIndexEntry()
}
return h.addHeaders(headerLocs)
}
// blockLocatorFromHash takes a given block hash and then creates a block
// locator using it as the root of the locator. We'll start by taking single
// steps backwards, then, once we have more than 10 locators, keep doubling
// the step distance until we reach genesis.
//
// TODO(roasbeef): make into single transaction.
func (h *blockHeaderStore) blockLocatorFromHash(hash *chainhash.Hash) (
blockchain.BlockLocator, error) {
var locator blockchain.BlockLocator
// Append the initial hash
locator = append(locator, hash)
// If hash isn't found in DB or this is the genesis block, return the
// locator as is
height, err := h.heightFromHash(hash)
if height == 0 || err != nil {
return locator, nil
}
decrement := uint32(1)
for height > 0 && len(locator) < wire.MaxBlockLocatorsPerMsg {
// Decrement by 1 for the first 10 blocks, then double the jump
// until we get to the genesis hash
if len(locator) > 10 {
decrement *= 2
}
if decrement > height {
height = 0
} else {
height -= decrement
}
blockHeader, err := h.FetchHeaderByHeight(height)
if err != nil {
return locator, err
}
headerHash := blockHeader.BlockHash()
locator = append(locator, &headerHash)
}
return locator, nil
}
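
The height schedule this produces is easiest to see on its own: one step back per entry for the first ten hops, then a doubling step until genesis. A standalone sketch of just that schedule; the cap of 32 here only keeps the output short, while the real code stops at wire.MaxBlockLocatorsPerMsg:

package main

import "fmt"

// locatorHeights reproduces the height schedule used by
// blockLocatorFromHash above: single steps for the first 10 entries,
// then doubling jumps down to genesis.
func locatorHeights(tip uint32) []uint32 {
	heights := []uint32{tip}
	height := tip
	decrement := uint32(1)
	for height > 0 && len(heights) < 32 {
		// Double the jump once we have more than 10 entries.
		if len(heights) > 10 {
			decrement *= 2
		}
		if decrement > height {
			height = 0
		} else {
			height -= decrement
		}
		heights = append(heights, height)
	}
	return heights
}

func main() {
	// For tip=1000: 1000 999 ... 990 988 984 976 960 928 864 736 480 0
	fmt.Println(locatorHeights(1000))
}
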
// LatestBlockLocator returns the latest block locator object based on the tip
// of the current main chain from the PoV of the database and flat files.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) LatestBlockLocator() (blockchain.BlockLocator, error) {
// Lock store for read.
h.mtx.RLock()
defer h.mtx.RUnlock()
var locator blockchain.BlockLocator
chainTipHash, _, err := h.chainTip()
if err != nil {
return locator, err
}
return h.blockLocatorFromHash(chainTipHash)
}
// BlockLocatorFromHash computes a block locator given a particular hash. The
// standard Bitcoin algorithm to compute block locators is employed.
func (h *blockHeaderStore) BlockLocatorFromHash(hash *chainhash.Hash) (
blockchain.BlockLocator, error) {
// Lock store for read.
h.mtx.RLock()
defer h.mtx.RUnlock()
return h.blockLocatorFromHash(hash)
}
// CheckConnectivity cycles through all of the block headers on disk, from last
// to first, and makes sure they all connect to each other. Additionally, at
// each block header, we also ensure that the index entry for that height and
// hash also match up properly.
func (h *blockHeaderStore) CheckConnectivity() error {
// Lock store for read.
h.mtx.RLock()
defer h.mtx.RUnlock()
return walletdb.View(h.db, func(tx walletdb.ReadTx) error {
// First, we'll fetch the root bucket, in order to use that to
// fetch the bucket that houses the header index.
rootBucket := tx.ReadBucket(indexBucket)
// With the header bucket retrieved, we'll now fetch the chain
// tip so we can start our backwards scan.
tipHash := rootBucket.Get(bitcoinTip)
tipHeightBytes := rootBucket.Get(tipHash)
// With the height extracted, we'll now read the _last_ block
// header within the file before we kick off our connectivity
// loop.
tipHeight := binary.BigEndian.Uint32(tipHeightBytes)
header, err := h.readHeader(tipHeight)
if err != nil {
return err
}
// We'll now cycle backwards, seeking backwards along the
// header file to ensure each header connects properly and the
// index entries are also accurate. To do this, we start from a
// height of one before our current tip.
var newHeader wire.BlockHeader
for height := tipHeight - 1; height > 0; height-- {
// First, read the block header for this block height,
// and also compute the block hash for it.
newHeader, err = h.readHeader(height)
if err != nil {
return fmt.Errorf("Couldn't retrieve header %s:"+
" %s", header.PrevBlock, err)
}
newHeaderHash := newHeader.BlockHash()
// With the header retrieved, we'll now fetch the
// height for this current header hash to ensure the
// on-disk state and the index matches up properly.
indexHeightBytes := rootBucket.Get(newHeaderHash[:])
if indexHeightBytes == nil {
return fmt.Errorf("index and on-disk file out of sync "+
"at height: %v", height)
}
indexHeight := binary.BigEndian.Uint32(indexHeightBytes)
// With the index entry retrieved, we'll now assert
// that the height matches up with our current height
// in this backwards walk.
if indexHeight != height {
return fmt.Errorf("index height isn't monotonically " +
"increasing")
}
// Finally, we'll assert that this new header is
// actually the prev header of the target header from
// the last loop. This ensures connectivity.
if newHeader.BlockHash() != header.PrevBlock {
return fmt.Errorf("Block %s doesn't match "+
"block %s's PrevBlock (%s)",
newHeader.BlockHash(),
header.BlockHash(), header.PrevBlock)
}
// As all the checks have passed, we'll now reset our
// header pointer to this current location, and
// continue our backwards walk.
header = newHeader
}
return nil
})
}
// ChainTip returns the best known block header and height for the
// blockHeaderStore.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) ChainTip() (*wire.BlockHeader, uint32, error) {
// Lock store for read.
h.mtx.RLock()
defer h.mtx.RUnlock()
_, tipHeight, err := h.chainTip()
if err != nil {
return nil, 0, err
}
latestHeader, err := h.readHeader(tipHeight)
if err != nil {
return nil, 0, err
}
return &latestHeader, tipHeight, nil
}
// FilterHeaderStore is an implementation of a fully fledged database for any
// variant of filter headers. The FilterHeaderStore combines a flat file to
// store the block headers with a database instance for managing the index into
// the set of flat files.
type FilterHeaderStore struct {
*headerStore
}
// NewFilterHeaderStore returns a new instance of the FilterHeaderStore based
// on a target file path, filter type, and target net parameters. These
// parameters are required because, if this is the initial start up of the
// FilterHeaderStore, the initial genesis filter header will need to be
// inserted.
func NewFilterHeaderStore(filePath string, db walletdb.DB,
filterType HeaderType, netParams *chaincfg.Params,
headerStateAssertion *FilterHeader) (*FilterHeaderStore, error) {
fStore, err := newHeaderStore(db, filePath, filterType)
if err != nil {
return nil, err
}
// With the header store created, we'll fetch the file size to see if
// we need to initialize it with the first header or not.
fileInfo, err := fStore.file.Stat()
if err != nil {
return nil, err
}
fhs := &FilterHeaderStore{
fStore,
}
// TODO(roasbeef): also reconcile with block header state due to way
// roll back works atm
// If the size of the file is zero, then this means that we haven't yet
// written the initial genesis header to disk, so we'll do so now.
if fileInfo.Size() == 0 {
var genesisFilterHash chainhash.Hash
switch filterType {
case RegularFilter:
basicFilter, err := builder.BuildBasicFilter(
netParams.GenesisBlock, nil,
)
if err != nil {
return nil, err
}
genesisFilterHash, err = builder.MakeHeaderForFilter(
basicFilter,
netParams.GenesisBlock.Header.PrevBlock,
)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unknown filter type: %v", filterType)
}
genesisHeader := FilterHeader{
HeaderHash: *netParams.GenesisHash,
FilterHash: genesisFilterHash,
Height: 0,
}
if err := fhs.WriteHeaders(genesisHeader); err != nil {
return nil, err
}
return fhs, nil
}
// If we have a state assertion then we'll check it now to see if we
// need to modify our filter header files before we proceed.
if headerStateAssertion != nil {
reset, err := fhs.maybeResetHeaderState(
headerStateAssertion,
)
if err != nil {
return nil, err
}
// If the filter header store was reset, we'll re-initialize it
// to recreate our on-disk state.
if reset {
return NewFilterHeaderStore(
filePath, db, filterType, netParams, nil,
)
}
}
// As a final initialization step, we'll ensure that the header tip
// within the flat files is in sync with our database index.
tipHash, tipHeight, err := fhs.chainTip()
if err != nil {
return nil, err
}
// First, we'll compute the size of the current file so we can
// calculate the latest header written to disk.
fileHeight := uint32(fileInfo.Size()/32) - 1
// Using the file's current height, fetch the latest on-disk header.
latestFileHeader, err := fhs.readHeader(fileHeight)
if err != nil {
return nil, err
}
// If the index's tip hash and the file on-disk match, then we're
// done here.
if tipHash.IsEqual(latestFileHeader) {
return fhs, nil
}
// Otherwise, we'll need to truncate the file until it matches the
// current index tip.
for fileHeight > tipHeight {
if err := fhs.singleTruncate(); err != nil {
return nil, err
}
fileHeight--
}
// TODO(roasbeef): make above into func
return fhs, nil
}
// maybeResetHeaderState will reset the header state if the header assertion
// fails, but only if the target height is found. The boolean returned
// indicates whether the header state was reset.
func (f *FilterHeaderStore) maybeResetHeaderState(
headerStateAssertion *FilterHeader) (bool, error) {
// First, we'll attempt to locate the header at this height. If no such
// header is found, then we'll exit early.
assertedHeader, err := f.FetchHeaderByHeight(
headerStateAssertion.Height,
)
if _, ok := err.(*ErrHeaderNotFound); ok {
return false, nil
}
if err != nil {
return false, err
}
// If our on disk state and the provided header assertion don't match,
// then we'll purge this state so we can sync it anew once we fully
// start up.
if *assertedHeader != headerStateAssertion.FilterHash {
// Close the file before removing it. This is required on some
// operating systems, e.g. Windows.
if err := f.file.Close(); err != nil {
return true, err
}
if err := os.Remove(f.fileName); err != nil {
return true, err
}
return true, nil
}
return false, nil
}
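
ErrHeaderNotFound (defined in the first file above) embeds the underlying error in a struct, so callers like maybeResetHeaderState can detect a missing header with a plain type assertion while keeping the original cause. A self-contained sketch of that pattern, with stand-in names:

package main

import (
	"errors"
	"fmt"
)

// errNotFound mirrors ErrHeaderNotFound: a struct embedding the
// underlying error so the cause is preserved.
type errNotFound struct {
	error
}

func lookup() error {
	// Wrap the low-level read error, as readRaw does.
	return &errNotFound{errors.New("EOF")}
}

func main() {
	err := lookup()
	// A type assertion distinguishes "not found" from other failures.
	if _, ok := err.(*errNotFound); ok {
		fmt.Println("header not found:", err)
		return
	}
	fmt.Println("unexpected error:", err)
}
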
// FetchHeader returns the filter header that corresponds to the passed block
// hash.
func (f *FilterHeaderStore) FetchHeader(hash *chainhash.Hash) (*chainhash.Hash, error) {
// Lock store for read.
f.mtx.RLock()
defer f.mtx.RUnlock()
height, err := f.heightFromHash(hash)
if err != nil {
return nil, err
}
return f.readHeader(height)
}
// FetchHeaderByHeight returns the filter header for a particular block height.
func (f *FilterHeaderStore) FetchHeaderByHeight(height uint32) (*chainhash.Hash, error) {
// Lock store for read.
f.mtx.RLock()
defer f.mtx.RUnlock()
return f.readHeader(height)
}
// FetchHeaderAncestors fetches the numHeaders filter headers that are the
// ancestors of the target stop block hash. A total of numHeaders+1 headers will be
// returned, as we'll walk back numHeaders distance to collect each header,
// then return the final header specified by the stop hash. We'll also return
// the starting height of the header range as well so callers can compute the
// height of each header without knowing the height of the stop hash.
func (f *FilterHeaderStore) FetchHeaderAncestors(numHeaders uint32,
stopHash *chainhash.Hash) ([]chainhash.Hash, uint32, error) {
// First, we'll find the final header in the range, this will be the
// ending height of our scan.
endHeight, err := f.heightFromHash(stopHash)
if err != nil {
return nil, 0, err
}
startHeight := endHeight - numHeaders
headers, err := f.readHeaderRange(startHeight, endHeight)
if err != nil {
return nil, 0, err
}
return headers, startHeight, nil
}
// FilterHeader represents a filter header (basic or extended). The filter
// header itself is coupled with the block height and hash of the filter's
// block.
type FilterHeader struct {
// HeaderHash is the hash of the block header that this filter header
// corresponds to.
HeaderHash chainhash.Hash
// FilterHash is the filter header itself.
FilterHash chainhash.Hash
// Height is the block height of the filter header in the main chain.
Height uint32
}
// toIndexEntry converts the filter header into an index entry to be stored
// within the database.
func (f *FilterHeader) toIndexEntry() headerEntry {
return headerEntry{
hash: f.HeaderHash,
height: f.Height,
}
}
// WriteHeaders writes a batch of filter headers to persistent storage. The
// headers themselves are appended to the flat file, and the index is then
// updated to reflect the new entries.
func (f *FilterHeaderStore) WriteHeaders(hdrs ...FilterHeader) error {
// Lock store for write.
f.mtx.Lock()
defer f.mtx.Unlock()
// If there are 0 headers to be written, return immediately. This
// prevents the newTip assignment from panicking because of an index
// of -1.
if len(hdrs) == 0 {
return nil
}
// First, we'll grab a buffer from the write buffer pool so we can
// reduce our total number of allocations, and also write the headers
// in a single swoop.
headerBuf := headerBufPool.Get().(*bytes.Buffer)
headerBuf.Reset()
defer headerBufPool.Put(headerBuf)
// Next, we'll write out all the passed headers in series into the
// buffer we just extracted from the pool.
for _, header := range hdrs {
if _, err := headerBuf.Write(header.FilterHash[:]); err != nil {
return err
}
}
// With all the headers written to the buffer, we'll now write out the
// entire batch in a single write call.
if err := f.appendRaw(headerBuf.Bytes()); err != nil {
return err
}
// As the block headers should already be written, we only need to
// update the tip pointer for this particular header type.
newTip := hdrs[len(hdrs)-1].toIndexEntry().hash
return f.truncateIndex(&newTip, false)
}
// ChainTip returns the latest filter header and height known to the
// FilterHeaderStore.
func (f *FilterHeaderStore) ChainTip() (*chainhash.Hash, uint32, error) {
// Lock store for read.
f.mtx.RLock()
defer f.mtx.RUnlock()
_, tipHeight, err := f.chainTip()
if err != nil {
return nil, 0, fmt.Errorf("unable to fetch chain tip: %v", err)
}
latestHeader, err := f.readHeader(tipHeight)
if err != nil {
return nil, 0, fmt.Errorf("unable to read header: %v", err)
}
return latestHeader, tipHeight, nil
}
// RollbackLastBlock rolls back both the index and on-disk header file by a
// _single_ filter header. This method is meant to be used in the case of
// re-org which disconnects the latest filter header from the end of the main
// chain. The information about the latest header tip after truncation is
// returned.
func (f *FilterHeaderStore) RollbackLastBlock(newTip *chainhash.Hash) (*BlockStamp, error) {
// Lock store for write.
f.mtx.Lock()
defer f.mtx.Unlock()
// First, we'll obtain the latest height that the index knows of.
_, chainTipHeight, err := f.chainTip()
if err != nil {
return nil, err
}
// With this height obtained, we'll use it to read what will be the new
// chain tip from disk.
newHeightTip := chainTipHeight - 1
newHeaderTip, err := f.readHeader(newHeightTip)
if err != nil {
return nil, err
}
// Now that we have the information we need to return from this
// function, we can now truncate both the header file and the index.
if err := f.singleTruncate(); err != nil {
return nil, err
}
if err := f.truncateIndex(newTip, false); err != nil {
return nil, err
}
// TODO(roasbeef): return chain hash also?
return &BlockStamp{
Height: int32(newHeightTip),
Hash: *newHeaderTip,
}, nil
}
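
For reference, the genesis filter header written above chains the genesis block's basic filter with the block's all-zero PrevBlock hash. A sketch reproducing that computation for mainnet, using the same builder calls as NewFilterHeaderStore:

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcutil/gcs/builder"
)

func main() {
	params := &chaincfg.MainNetParams

	// Build the basic (regular) filter for the genesis block; the nil
	// argument matches the call in NewFilterHeaderStore.
	basicFilter, err := builder.BuildBasicFilter(params.GenesisBlock, nil)
	if err != nil {
		panic(err)
	}

	// Chain the filter with the previous filter header; for genesis
	// that previous header is the all-zero PrevBlock hash.
	genesisFilterHeader, err := builder.MakeHeaderForFilter(
		basicFilter, params.GenesisBlock.Header.PrevBlock,
	)
	if err != nil {
		panic(err)
	}

	fmt.Println("genesis filter header:", genesisFilterHeader)
}
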

@@ -0,0 +1,38 @@
// +build !windows

package headerfs
import "fmt"
// singleTruncate truncates a single header from the end of the header file.
// This can be used in the case of a re-org to remove the last header from the
// end of the main chain.
//
// TODO(roasbeef): define this and the two methods above on a headerFile
// struct?
func (h *headerStore) singleTruncate() error {
// In order to truncate the file, we'll need to grab the absolute size
// of the file as it stands currently.
fileInfo, err := h.file.Stat()
if err != nil {
return err
}
fileSize := fileInfo.Size()
// Next, we'll determine the number of bytes we need to truncate from
// the end of the file.
var truncateLength int64
switch h.indexType {
case Block:
truncateLength = 80
case RegularFilter:
truncateLength = 32
default:
return fmt.Errorf("unknown index type: %v", h.indexType)
}
// Finally, we'll use both of these values to calculate the new size of
// the file and truncate it accordingly.
newSize := fileSize - truncateLength
return h.file.Truncate(newSize)
}

@@ -0,0 +1,56 @@
// +build windows

package headerfs
import (
"fmt"
"os"
)
// singleTruncate truncates a single header from the end of the header file.
// This can be used in the case of a re-org to remove the last header from the
// end of the main chain.
//
// TODO(roasbeef): define this and the two methods above on a headerFile
// struct?
func (h *headerStore) singleTruncate() error {
// In order to truncate the file, we'll need to grab the absolute size
// of the file as it stands currently.
fileInfo, err := h.file.Stat()
if err != nil {
return err
}
fileSize := fileInfo.Size()
// Next, we'll determine the number of bytes we need to truncate from
// the end of the file.
var truncateLength int64
switch h.indexType {
case Block:
truncateLength = 80
case RegularFilter:
truncateLength = 32
default:
return fmt.Errorf("unknown index type: %v", h.indexType)
}
// Finally, we'll use both of these values to calculate the new size of
// the file.
newSize := fileSize - truncateLength
// On Windows, a file can't be truncated while open, even if using a
// file handle to truncate it. This means we have to close, truncate,
// and reopen it.
fileName := h.file.Name()
if err = h.file.Close(); err != nil {
return err
}
if err = os.Truncate(fileName, newSize); err != nil {
return err
}
fileFlags := os.O_RDWR | os.O_APPEND | os.O_CREATE
h.file, err = os.OpenFile(fileName, fileFlags, 0644)
return err
}
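
A minimal end-to-end demonstration of the truncation step itself, using a throwaway file of three 80-byte records (the path is hypothetical). os.Truncate here plays the role that the open file handle's Truncate plays in the non-Windows version:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	name := filepath.Join(os.TempDir(), "headers_example.bin")

	// Write three 80-byte "headers".
	if err := os.WriteFile(name, make([]byte, 240), 0644); err != nil {
		panic(err)
	}
	defer os.Remove(name)

	// Drop the last record by cutting one header size off the end,
	// as singleTruncate does.
	fi, err := os.Stat(name)
	if err != nil {
		panic(err)
	}
	if err := os.Truncate(name, fi.Size()-80); err != nil {
		panic(err)
	}

	fi, _ = os.Stat(name)
	fmt.Println("new size:", fi.Size()) // 160
}
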