package stream

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
	"time"

	"github.com/hashicorp/nomad/nomad/structs"
)

// EvictCallbackFn is the callback invoked when events are evicted from the
// buffer, allowing the caller to account for the dropped events.
type EvictCallbackFn func(events *structs.Events)

// eventBuffer is a single-writer, multiple-reader, fixed-length concurrent
// buffer of events that have been published. The buffer is
// the head and tail of an atomically updated single-linked list. Atomic
// accesses are usually to be suspected as premature optimization but this
// specific design has several important features that significantly simplify a
// lot of our PubSub machinery.
//
// eventBuffer is an adaptation of Consul's agent/stream/event eventBuffer but
// has been updated to be a max-length buffer to work for Nomad's use case.
//
// The eventBuffer only tracks the most recent set of published events,
// up to the max configured size. Older events are dropped from the buffer
// but will only be garbage collected once the slowest reader drops the item.
// Consumers are notified of new events by closing a channel on the previous
// head, allowing efficient broadcast to many watchers without having to run
// multiple goroutines or deliver to O(N) separate channels.
//
// Because eventBuffer is a linked list with atomically updated pointers,
// readers don't have to take a lock and can consume at their own pace. Slow
// readers will eventually be forced to reconnect to the latest head by being
// notified via a bufferItem's droppedCh.
//
// A new buffer is constructed with a sentinel "empty" bufferItem that has a nil
// Events array. This enables subscribers to start watching for the next update
// immediately.
//
// The zero value eventBuffer is _not_ usable, as it has not been
// initialized with an empty bufferItem so it can not be used to wait for the
// first published event. Call newEventBuffer to construct a new buffer.
//
// Calls to Append or prune that mutate the head must be externally
// synchronized. This allows systems that already serialize writes to append
// without lock overhead.
type eventBuffer struct {
	size *int64

	head atomic.Value
	tail atomic.Value

	maxSize int64
	onEvict EvictCallbackFn
}

// newEventBuffer creates an eventBuffer ready for use.
func newEventBuffer(size int64, onEvict EvictCallbackFn) *eventBuffer {
	zero := int64(0)
	b := &eventBuffer{
		maxSize: size,
		size:    &zero,
		onEvict: onEvict,
	}

	item := newBufferItem(&structs.Events{Index: 0, Events: nil})

	b.head.Store(item)
	b.tail.Store(item)

	return b
}
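
// Illustrative sketch (not part of the package API): one writer appending an
// event set while a reader follows the buffer. The structs.Event literal and
// its Topic field are assumptions about the events package, used here only for
// illustration.
//
//	buf := newEventBuffer(512, nil)
//
//	// Writer side: Append calls must be externally serialized.
//	buf.Append(&structs.Events{Index: 1, Events: []structs.Event{{Topic: "Deployment"}}})
//
//	// Reader side: start at the sentinel head and wait for the first real item.
//	item := buf.Head()
//	next, err := item.Next(context.Background(), make(chan struct{}))
//	if err == nil && next.Events != nil {
//		fmt.Println(next.Events.Index) // 1
//	}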

// Append a set of events from one raft operation to the buffer and notify
// watchers. After calling Append, the caller must not make any further
// mutations to the events as they may have been exposed to subscribers in other
// goroutines. Append only supports a single concurrent caller and must be
// externally synchronized with other Append calls.
func (b *eventBuffer) Append(events *structs.Events) {
	b.appendItem(newBufferItem(events))
}

// appendItem links the item after the current tail, evicts items from the head
// until the buffer is back within maxSize, and then notifies waiting readers.
func (b *eventBuffer) appendItem(item *bufferItem) {
	// Store the next item to the old tail
	oldTail := b.Tail()
	oldTail.link.next.Store(item)

	// Update the tail to the new item
	b.tail.Store(item)

	// Increment the buffer size
	atomic.AddInt64(b.size, int64(len(item.Events.Events)))

	// Advance Head until we are under allowable size
	for atomic.LoadInt64(b.size) > b.maxSize {
		b.advanceHead()
	}

	// Notify waiters that the next event is available
	close(oldTail.link.ch)
}
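
// Illustrative sketch of the eviction behaviour above: with a small maxSize,
// appending more events than fit drops the oldest items and reports them
// through the EvictCallbackFn. The zero-value structs.Event elements are an
// assumption used only to give each item a size of one.
//
//	var evicted []uint64
//	buf := newEventBuffer(2, func(e *structs.Events) {
//		evicted = append(evicted, e.Index)
//	})
//
//	buf.Append(&structs.Events{Index: 1, Events: make([]structs.Event, 1)})
//	buf.Append(&structs.Events{Index: 2, Events: make([]structs.Event, 1)})
//	buf.Append(&structs.Events{Index: 3, Events: make([]structs.Event, 1)}) // size 3 > maxSize 2
//
//	// evicted == [1], buf.Len() == 2, and Head() now starts at index 2.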

// newSentinelItem returns an empty bufferItem used as a placeholder head when
// the buffer has no real items left.
func newSentinelItem() *bufferItem {
	return newBufferItem(&structs.Events{})
}

// advanceHead drops the current head buffer item and notifies readers
// that the item should be discarded by closing droppedCh.
// Slow readers will prevent the old head from being GC'd until they
// discard it.
func (b *eventBuffer) advanceHead() {
	old := b.Head()

	next := old.link.next.Load()
	// If the next item is nil, replace it with a sentinel value
	if next == nil {
		next = newSentinelItem()
	}

	// Notify readers that old is being dropped
	close(old.link.droppedCh)

	// Store the next value to head
	b.head.Store(next)

	// If the old head is equal to the tail,
	// update the tail value as well
	if old == b.Tail() {
		b.tail.Store(next)
	}

	// Update the amount of events we have in the buffer
	rmCount := len(old.Events.Events)
	atomic.AddInt64(b.size, -int64(rmCount))

	// Call the evict callback if the item isn't a sentinel value
	if b.onEvict != nil && old.Events.Index != 0 {
		b.onEvict(old.Events)
	}
}

// Head returns the current head of the buffer. It will always exist but it may
// be a "sentinel" empty item with a nil Events slice to allow consumers to
// watch for the next update. Consumers should always check for empty Events and
// treat them as no-ops. Will panic if eventBuffer was not initialized correctly
// with newEventBuffer.
func (b *eventBuffer) Head() *bufferItem {
	return b.head.Load().(*bufferItem)
}

// Tail returns the current tail of the buffer. It will always exist but it may
// be a "sentinel" empty item with a nil Events slice to allow consumers to
// watch for the next update. Consumers should always check for empty Events and
// treat them as no-ops. Will panic if eventBuffer was not initialized correctly
// with newEventBuffer.
func (b *eventBuffer) Tail() *bufferItem {
	return b.tail.Load().(*bufferItem)
}

// StartAtClosest returns the closest bufferItem to a requested starting
// index as well as the offset between the requested index and the returned one.
func (b *eventBuffer) StartAtClosest(index uint64) (*bufferItem, int) {
	item := b.Head()
	if index < item.Events.Index {
		return item, int(item.Events.Index) - int(index)
	}
	if item.Events.Index == index {
		return item, 0
	}

	for {
		prev := item
		item = item.NextNoBlock()
		if item == nil {
			return prev, int(index) - int(prev.Events.Index)
		}
		if index < item.Events.Index {
			return item, int(item.Events.Index) - int(index)
		}
		if index == item.Events.Index {
			return item, 0
		}
	}
}
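
// Illustrative sketch of the offset semantics (assumes the buffer from the
// eviction sketch above, holding items at indexes 2 and 3):
//
//	item, offset := buf.StartAtClosest(3) // exact hit: item.Events.Index == 3, offset == 0
//	item, offset = buf.StartAtClosest(1)  // before the head: returns head (index 2), offset == 1
//	item, offset = buf.StartAtClosest(9)  // past the tail: returns tail (index 3), offset == 6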

// Len returns the current number of events held in the buffer.
func (b *eventBuffer) Len() int {
	return int(atomic.LoadInt64(b.size))
}

// bufferItem represents a set of events published by a single raft operation.
// The first item returned by a newly constructed buffer will have nil Events.
// It is a sentinel value which is used to wait on the next events via Next.
//
// To iterate to the next event, a Next method may be called which may block if
// there is no next element yet.
//
// Holding a pointer to the item keeps all the events published since then in
// memory, so it's important that subscribers don't hold pointers to buffer
// items after they have been delivered, except where it's intentional to
// maintain a cache or trailing store of events for performance reasons.
//
// Subscribers must not mutate the bufferItem or the Events or Encoded payloads
// inside as these are shared between all readers.
type bufferItem struct {
	// Events is the set of events published at one raft index. This may be nil as
	// a sentinel value to allow watching for the first event in a buffer. Callers
	// should check and skip nil Events at any point in the buffer. It will also
	// be nil if the producer appends an Error event because they can't complete
	// the request to populate the buffer. Err will be non-nil in this case.
	Events *structs.Events

	// Err is non-nil if the producer can't complete their task and terminates the
	// buffer. Subscribers should return the error to clients and cease attempting
	// to read from the buffer.
	Err error

	// link holds the next pointer and channel. This extra bit of indirection
	// allows us to splice buffers together at arbitrary points without including
	// events in one buffer just for the side-effect of watching for the next set.
	// The link may not be mutated once the event is appended to a buffer.
	link *bufferLink

	// createdAt is the time at which the item was created.
	createdAt time.Time
}

type bufferLink struct {
	// next is an atomically updated pointer to the next event in the buffer. It
	// is written exactly once by the single publisher and will always be set if
	// ch is closed.
	next atomic.Value

	// ch is closed when the next event is published. It should never be mutated
	// (e.g. set to nil) as that is racy, but is closed once when the next event
	// is published. The next pointer will have been set by the time this is
	// closed.
	ch chan struct{}

	// droppedCh is closed when the event is dropped from the buffer due to
	// sizing constraints.
	droppedCh chan struct{}
}

// newBufferItem returns a blank buffer item with a link and chan ready to have
// the fields set and be appended to a buffer.
func newBufferItem(events *structs.Events) *bufferItem {
	return &bufferItem{
		link: &bufferLink{
			ch:        make(chan struct{}),
			droppedCh: make(chan struct{}),
		},
		Events:    events,
		createdAt: time.Now(),
	}
}

// Next returns the next buffer item in the buffer. It may block until ctx is
// cancelled or until the next item is published.
func (i *bufferItem) Next(ctx context.Context, forceClose <-chan struct{}) (*bufferItem, error) {
	// See if there is already a next value, block if so. Note we don't rely on
	// state change (chan nil) as that's not threadsafe but detecting close is.
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-forceClose:
		return nil, fmt.Errorf("subscription closed")
	case <-i.link.ch:
	}

	// Check if the reader is too slow and the event buffer has discarded the
	// event. This must happen after the above select to prevent a random
	// selection between link.ch and droppedCh.
	select {
	case <-i.link.droppedCh:
		return nil, fmt.Errorf("event dropped from buffer")
	default:
	}

	// If the channel is closed, there must be a next item to read
	nextRaw := i.link.next.Load()
	if nextRaw == nil {
		// shouldn't be possible
		return nil, errors.New("invalid next item")
	}
	next := nextRaw.(*bufferItem)
	if next.Err != nil {
		return nil, next.Err
	}
	return next, nil
}
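
// Illustrative sketch of a consumer loop built on Next. The resubscription
// strategy, and the ctx, forceClose, buf, and handle names, are assumptions
// for illustration and not part of this package.
//
//	item := buf.Head()
//	for {
//		next, err := item.Next(ctx, forceClose)
//		if err != nil {
//			// Either the context was cancelled, the subscription was force
//			// closed, or this reader fell behind and the item was dropped.
//			// A slow reader could restart from the current head instead:
//			//   item = buf.Head()
//			//   continue
//			return err
//		}
//		handle(next.Events) // handle is a hypothetical caller-supplied function
//		item = next
//	}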

// NextNoBlock returns the next item in the buffer without blocking. If it
// reaches the most recent item it will return nil.
func (i *bufferItem) NextNoBlock() *bufferItem {
	nextRaw := i.link.next.Load()
	if nextRaw == nil {
		return nil
	}
	return nextRaw.(*bufferItem)
}
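
// Illustrative sketch: draining everything currently buffered without blocking,
// starting from a previously obtained item (the handle function is an
// assumption used only for illustration).
//
//	for next := item.NextNoBlock(); next != nil; next = next.NextNoBlock() {
//		if next.Events != nil {
//			handle(next.Events)
//		}
//		item = next
//	}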