Release v0.1.0

Manu Herrera
2019-10-01 12:22:30 -03:00
parent 41e6aad190
commit d301c63596
915 changed files with 378049 additions and 11 deletions

19
vendor/github.com/lightningnetwork/lnd/queue/LICENSE generated vendored Normal file

@@ -0,0 +1,19 @@
Copyright (C) 2015-2018 Lightning Labs and The Lightning Network Developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

196
vendor/github.com/lightningnetwork/lnd/queue/gc_queue.go generated vendored Normal file

@@ -0,0 +1,196 @@
package queue
import (
"container/list"
"sync"
"time"
"github.com/lightningnetwork/lnd/ticker"
)
// GCQueue is a garbage collecting queue, which dynamically grows and contracts
// based on load. If the queue has items which have been returned, the queue
// will check every gcInterval amount of time to see if any elements are
// eligible to be released back to the runtime. Elements that have been in the
// queue for a duration of at least expiryInterval will be released upon the
// next iteration of the garbage collection, thus the maximum amount of time an
// element remains in the queue is expiryInterval+gcInterval. The gc ticker will
// be disabled after all items in the queue have been taken or released to
// ensure that the GCQueue becomes quiescent, and imposes minimal overhead in
// the steady state.
type GCQueue struct {
// takeBuffer coordinates the delivery of items taken from the queue
// such that they are delivered to requesters.
takeBuffer chan interface{}
// returnBuffer coordinates the return of items back into the queue,
// where they will be kept until retaken or released.
returnBuffer chan interface{}
// newItem is a constructor, used to generate new elements if none are
// otherwise available for reuse.
newItem func() interface{}
// expiryInterval is the minimum amount of time an element will remain
// in the queue before being released.
expiryInterval time.Duration
// recycleTicker is a resumable ticker used to trigger a sweep to
// release elements that have been in the queue longer than
// expiryInterval.
recycleTicker ticker.Ticker
// freeList maintains a list of gcQueueEntries, sorted in order of
// increasing time of arrival.
freeList *list.List
wg sync.WaitGroup
quit chan struct{}
}
// NewGCQueue creates a new garbage collecting queue, which dynamically grows
// and contracts based on load. If the queue has items which have been returned,
// the queue will check every gcInterval amount of time to see if any elements
// are eligible to be released back to the runtime. Elements that have been in
// the queue for a duration of at least expiryInterval will be released upon the
// next iteration of the garbage collection, thus the maximum amount of time an
// element remains in the queue is expiryInterval+gcInterval. The gc ticker will
// be disabled after all items in the queue have been taken or released to
// ensure that the GCQueue becomes quiescent, and imposes minimal overhead in
// the steady state. The returnQueueSize parameter is used to size the maximal
// number of items that can be returned without being dropped during large
// bursts in attempts to return items to the GCQueue.
func NewGCQueue(newItem func() interface{}, returnQueueSize int,
gcInterval, expiryInterval time.Duration) *GCQueue {
q := &GCQueue{
takeBuffer: make(chan interface{}),
returnBuffer: make(chan interface{}, returnQueueSize),
expiryInterval: expiryInterval,
freeList: list.New(),
recycleTicker: ticker.New(gcInterval),
newItem: newItem,
quit: make(chan struct{}),
}
go q.queueManager()
return q
}
// Take returns either a recycled element from the queue, or creates a new item
// if none are available.
func (q *GCQueue) Take() interface{} {
select {
case item := <-q.takeBuffer:
return item
case <-time.After(time.Millisecond):
return q.newItem()
}
}
// Return adds the returned item to the freelist if the queue's returnBuffer has
// available capacity. Under load, items may be dropped to ensure this method
// does not block.
func (q *GCQueue) Return(item interface{}) {
select {
case q.returnBuffer <- item:
default:
}
}
// gcQueueEntry is a tuple containing an interface{} and the time at which the
// item was added to the queue. The recorded time is used to determine when the
// entry becomes stale, and can be released if it has not already been taken.
type gcQueueEntry struct {
item interface{}
time time.Time
}
// queueManager maintains the free list of elements by popping the head of the
// queue when items are needed, and appending them to the end of the queue when
// items are returned. The queueManager will periodically attempt to release any
// items that have been in the queue longer than the expiry interval.
//
// NOTE: This method SHOULD be run as a goroutine.
func (q *GCQueue) queueManager() {
for {
// If the freelist is empty, add a fresh element so that a
// client that takes one immediately can be served. If this happens, this
// is either:
// 1) the first iteration of the loop,
// 2) after all entries were garbage collected, or
// 3) the freelist was emptied after the last entry was taken.
//
// In all of these cases, it is safe to pause the recycle ticker
// since it will be resumed as soon as an entry is returned to the
// freelist.
if q.freeList.Len() == 0 {
q.freeList.PushBack(gcQueueEntry{
item: q.newItem(),
time: time.Now(),
})
q.recycleTicker.Pause()
}
next := q.freeList.Front()
select {
// If a client requests a new write buffer, deliver the buffer
// at the head of the freelist to them.
case q.takeBuffer <- next.Value.(gcQueueEntry).item:
q.freeList.Remove(next)
// If a client is returning a write buffer, add it to the free
// list and resume the recycle ticker so that it can be cleared
// if the entries are not quickly reused.
case item := <-q.returnBuffer:
// Add the returned buffer to the freelist, recording
// the current time so we can determine when the entry
// expires.
q.freeList.PushBack(gcQueueEntry{
item: item,
time: time.Now(),
})
// Adding the buffer implies that we now have a non-zero
// number of elements in the free list. Resume the
// recycle ticker to cleanup any entries that go unused.
q.recycleTicker.Resume()
// If the recycle ticker fires, we will aggressively release any
// write buffers in the freelist for which the expiryInterval
// has elapsed since their insertion. If after doing so, no
// elements remain, we will pause the recycle ticker.
case <-q.recycleTicker.Ticks():
// Since the insert time of all entries will be
// monotonically increasing, iterate over elements and
// remove all entries that have expired.
var next *list.Element
for e := q.freeList.Front(); e != nil; e = next {
// Cache the next element, since it will become
// unreachable from the current element if it is
// removed.
next = e.Next()
entry := e.Value.(gcQueueEntry)
// Use now - insertTime <= expiryInterval to
// determine if this entry has not expired.
if time.Since(entry.time) <= q.expiryInterval {
// If this entry hasn't expired, then
// all entries that follow will still be
// valid.
break
}
// Otherwise, remove the expired entry from the
// linked-list.
q.freeList.Remove(e)
entry.item = nil
e.Value = nil
}
}
}
}
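
For orientation only (this example is not part of the vendored file): a minimal sketch of how calling code might recycle byte buffers through GCQueue. The buffer size, return-queue capacity, and intervals are illustrative assumptions, not values taken from this commit.

package main

import (
	"fmt"
	"time"

	"github.com/lightningnetwork/lnd/queue"
)

func main() {
	// A pool of 4 KiB buffers: the sweep runs every minute and releases
	// buffers that have sat unused for at least five minutes. All of these
	// parameters are placeholders chosen for illustration.
	bufPool := queue.NewGCQueue(
		func() interface{} { return make([]byte, 4096) },
		100,           // returnQueueSize
		time.Minute,   // gcInterval
		5*time.Minute, // expiryInterval
	)

	// Take hands out a recycled buffer, or falls back to the newItem
	// constructor if nothing arrives within a millisecond.
	buf := bufPool.Take().([]byte)
	copy(buf, "hello")
	fmt.Println(string(buf[:5]))

	// Return never blocks; under heavy load the buffer may simply be
	// dropped and left to the Go garbage collector.
	bufPool.Return(buf)
}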

5
vendor/github.com/lightningnetwork/lnd/queue/go.mod generated vendored Normal file

@@ -0,0 +1,5 @@
module github.com/lightningnetwork/lnd/queue
require github.com/lightningnetwork/lnd/ticker v1.0.0
replace github.com/lightningnetwork/lnd/ticker v1.0.0 => ../ticker

2
vendor/github.com/lightningnetwork/lnd/queue/go.sum generated vendored Normal file

@@ -0,0 +1,2 @@
github.com/lightningnetwork/lnd/ticker v1.0.0 h1:S1b60TEGoTtCe2A0yeB+ecoj/kkS4qpwh6l+AkQEZwU=
github.com/lightningnetwork/lnd/ticker v1.0.0/go.mod h1:iaLXJiVgI1sPANIF2qYYUJXjoksPNvGNYowB8aRbpX0=

105
vendor/github.com/lightningnetwork/lnd/queue/queue.go generated vendored Normal file

@@ -0,0 +1,105 @@
package queue
import (
"container/list"
"sync"
"sync/atomic"
)
// ConcurrentQueue is a concurrent-safe FIFO queue with unbounded capacity.
// Clients interact with the queue by pushing items into the in channel and
// popping items from the out channel. A goroutine manages moving items from
// the in channel to the out channel in the correct order; it must be started
// by calling Start().
type ConcurrentQueue struct {
started uint32 // to be used atomically
stopped uint32 // to be used atomically
chanIn chan interface{}
chanOut chan interface{}
overflow *list.List
wg sync.WaitGroup
quit chan struct{}
}
// NewConcurrentQueue constructs a ConcurrentQueue. The bufferSize parameter is
// the capacity of the output channel. When the size of the queue is below this
// threshold, pushes do not incur the overhead of the less efficient overflow
// structure.
func NewConcurrentQueue(bufferSize int) *ConcurrentQueue {
return &ConcurrentQueue{
chanIn: make(chan interface{}),
chanOut: make(chan interface{}, bufferSize),
overflow: list.New(),
quit: make(chan struct{}),
}
}
// ChanIn returns a channel that can be used to push new items into the queue.
func (cq *ConcurrentQueue) ChanIn() chan<- interface{} {
return cq.chanIn
}
// ChanOut returns a channel that can be used to pop items from the queue.
func (cq *ConcurrentQueue) ChanOut() <-chan interface{} {
return cq.chanOut
}
// Start begins a goroutine that manages moving items from the in channel to the
// out channel. The queue tries to move items directly to the out channel to
// minimize overhead, but if the out channel is full it pushes items to an
// overflow queue. This must be called before using the queue.
func (cq *ConcurrentQueue) Start() {
if !atomic.CompareAndSwapUint32(&cq.started, 0, 1) {
return
}
cq.wg.Add(1)
go func() {
defer cq.wg.Done()
for {
nextElement := cq.overflow.Front()
if nextElement == nil {
// Overflow queue is empty so incoming items can be pushed
// directly to the output channel. If output channel is full
// though, push to overflow.
select {
case item := <-cq.chanIn:
select {
case cq.chanOut <- item:
// Optimistically push directly to chanOut
default:
cq.overflow.PushBack(item)
}
case <-cq.quit:
return
}
} else {
// Overflow queue is not empty, so any new items get pushed to
// the back to preserve order.
select {
case item := <-cq.chanIn:
cq.overflow.PushBack(item)
case cq.chanOut <- nextElement.Value:
cq.overflow.Remove(nextElement)
case <-cq.quit:
return
}
}
}
}()
}
// Stop ends the goroutine that moves items from the in channel to the out
// channel. This does not clear the queue state, so the queue can be restarted
// without dropping items.
func (cq *ConcurrentQueue) Stop() {
if !atomic.CompareAndSwapUint32(&cq.stopped, 0, 1) {
return
}
close(cq.quit)
cq.wg.Wait()
}
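
For orientation only (this example is not part of the vendored file): a minimal sketch of producing and consuming through ConcurrentQueue. The buffer size of 16 is an illustrative assumption.

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/queue"
)

func main() {
	// An out-channel capacity of 16 is a placeholder; while the queue stays
	// below it, pushes bypass the overflow list entirely.
	q := queue.NewConcurrentQueue(16)
	q.Start()
	defer q.Stop()

	// Producers send into ChanIn and consumers receive from ChanOut; the
	// managing goroutine preserves FIFO order even when the out channel
	// fills up and items spill into the overflow list.
	for i := 0; i < 3; i++ {
		q.ChanIn() <- i
	}
	for i := 0; i < 3; i++ {
		fmt.Println(<-q.ChanOut()) // prints 0, 1, 2
	}
}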