package stream

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// EventPublisher receives change events from Publish, and sends the events to
// all subscribers of the event Topic.
type EventPublisher struct {
	// snapCacheTTL controls how long we keep snapshots in our cache before
	// allowing them to be garbage collected and a new one made for subsequent
	// requests for that topic and key. In general this should be pretty short to
	// keep memory overhead of duplicated event data low - snapshots are typically
	// not that expensive, but having a cache for a few seconds can help
	// de-duplicate building the same snapshot over and over again when a
	// thundering herd of watchers all subscribe to the same topic within a few
	// seconds.
	snapCacheTTL time.Duration

	// lock protects the topicBuffers and snapCache.
	lock sync.RWMutex

	// topicBuffers stores the head of the linked-list buffer to publish events to
	// for a topic.
	topicBuffers map[Topic]*eventBuffer

	// snapCache is a cache of eventSnapshots indexed by topic and key.
	// TODO(streaming): new snapshotCache struct for snapCache and snapCacheTTL
	snapCache map[Topic]map[string]*eventSnapshot

	subscriptions *subscriptions

	// publishCh is used to send messages from an active txn to a goroutine which
	// publishes events, so that publishing can happen asynchronously from
	// the Commit call in the FSM hot path.
	publishCh chan changeEvents

	snapshotHandlers SnapshotHandlers
}

type subscriptions struct {
	// lock protects byToken. If both subscription.lock and EventPublisher.lock
	// need to be held, EventPublisher.lock MUST always be acquired first.
	lock sync.RWMutex

	// byToken is a mapping of active Subscriptions indexed by the token and
	// a pointer to the request.
	// When the token is modified all subscriptions under that token will be
	// reloaded.
	// A subscription may be unsubscribed by using the pointer to the request.
	byToken map[string]map[*SubscribeRequest]*Subscription
}

type changeEvents struct {
	events []Event
}

// SnapshotHandlers is a mapping of Topic to a function which produces a snapshot
// of events for the SubscribeRequest. Events are appended to the snapshot using
// the SnapshotAppender.
// The nil Topic is reserved and should not be used.
type SnapshotHandlers map[Topic]func(*SubscribeRequest, SnapshotAppender) (index uint64, err error)

// SnapshotAppender appends groups of events to create a Snapshot of state.
type SnapshotAppender interface {
	// Append events to the snapshot. Every event in the slice must have the same
	// Index, indicating that it is part of the same raft transaction.
	Append(events []Event)
}
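
// The sketch below is illustrative only: topicExample and exampleStateItems
// are hypothetical names, not part of this package. It shows the shape a
// SnapshotHandlers entry typically takes: read the current state for the
// requested key, append it as events via the SnapshotAppender, and return the
// index the snapshot was taken at.
//
//	handlers := SnapshotHandlers{
//		topicExample: func(req *SubscribeRequest, snap SnapshotAppender) (uint64, error) {
//			idx, items, err := exampleStateItems(req.Key) // hypothetical state lookup
//			if err != nil {
//				return 0, err
//			}
//			for _, item := range items {
//				snap.Append([]Event{{Topic: req.Topic, Key: req.Key, Index: idx, Payload: item}})
//			}
//			return idx, nil
//		},
//	}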

// NewEventPublisher returns an EventPublisher for publishing change events.
// Handlers are used to convert the memDB changes into events.
// A goroutine is run in the background to publish events to all subscribers.
// Cancelling the context will shut down the goroutine, to free resources,
// and stop all publishing.
func NewEventPublisher(ctx context.Context, handlers SnapshotHandlers, snapCacheTTL time.Duration) *EventPublisher {
	e := &EventPublisher{
		snapCacheTTL: snapCacheTTL,
		topicBuffers: make(map[Topic]*eventBuffer),
		snapCache:    make(map[Topic]map[string]*eventSnapshot),
		publishCh:    make(chan changeEvents, 64),
		subscriptions: &subscriptions{
			byToken: make(map[string]map[*SubscribeRequest]*Subscription),
		},
		snapshotHandlers: handlers,
	}

	go e.handleUpdates(ctx)

	return e
}
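
// A minimal usage sketch, assuming handlers like the one shown above
// (topicExample is hypothetical): construct the publisher once, then publish
// change events from the commit path; the background goroutine fans them out
// to the topic buffers read by subscribers.
//
//	publisher := NewEventPublisher(ctx, handlers, 10*time.Second)
//	publisher.Publish([]Event{
//		{Topic: topicExample, Key: "example", Index: 42, Payload: "changed"},
//	})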

// Publish events to all subscribers of the event Topic.
func (e *EventPublisher) Publish(events []Event) {
	if len(events) > 0 {
		e.publishCh <- changeEvents{events: events}
	}
}

func (e *EventPublisher) handleUpdates(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			// TODO: also close all subscriptions so the subscribers are moved
			// to the new publisher?
			return
		case update := <-e.publishCh:
			e.sendEvents(update)
		}
	}
}

// sendEvents sends the given events to any applicable topic listeners, as well
// as any ACL update events to cause affected listeners to reset their stream.
func (e *EventPublisher) sendEvents(update changeEvents) {
	eventsByTopic := make(map[Topic][]Event)
	for _, event := range update.events {
		if unsubEvent, ok := event.Payload.(closeSubscriptionPayload); ok {
			e.subscriptions.closeSubscriptionsForTokens(unsubEvent.tokensSecretIDs)
			continue
		}

		eventsByTopic[event.Topic] = append(eventsByTopic[event.Topic], event)
	}

	e.lock.Lock()
	defer e.lock.Unlock()
	for topic, events := range eventsByTopic {
		e.getTopicBuffer(topic).Append(events)
	}
}

// getTopicBuffer for the topic. Creates a new event buffer if one does not
// already exist.
//
// EventPublisher.lock must be held to call this method.
func (e *EventPublisher) getTopicBuffer(topic Topic) *eventBuffer {
	buf, ok := e.topicBuffers[topic]
	if !ok {
		buf = newEventBuffer()
		e.topicBuffers[topic] = buf
	}
	return buf
}

// Subscribe returns a new Subscription for the given request. A subscription
// will receive an initial snapshot of events matching the request, unless
// req.Index matches the index of the events already at the head of the topic
// buffer, in which case the snapshot is skipped.
// After the snapshot, events will be streamed as they are created.
// Subscriptions may be closed, forcing the client to resubscribe (for example if
// ACL policies changed or the state store is abandoned).
//
// When the caller is finished with the subscription for any reason, it must
// call Subscription.Unsubscribe to free ACL tracking resources.
func (e *EventPublisher) Subscribe(req *SubscribeRequest) (*Subscription, error) {
	// Ensure we know how to make a snapshot for this topic
	_, ok := e.snapshotHandlers[req.Topic]
	if !ok || req.Topic == nil {
		return nil, fmt.Errorf("unknown topic %v", req.Topic)
	}

	e.lock.Lock()
	defer e.lock.Unlock()

	// Ensure there is a topic buffer for that topic so we start capturing any
	// future published events.
	buf := e.getTopicBuffer(req.Topic)

	// See if we need a snapshot
	topicHead := buf.Head()
	var sub *Subscription
	if req.Index > 0 && len(topicHead.Events) > 0 && topicHead.Events[0].Index == req.Index {
		// No need for a snapshot, send the "end of empty snapshot" message to
		// signal to the client that its cache is still good, then follow along
		// from here in the topic.
		buf := newEventBuffer()

		// Store the head of that buffer before we append to it to give as the
		// starting point for the subscription.
		subHead := buf.Head()

		buf.Append([]Event{{
			Index:   req.Index,
			Topic:   req.Topic,
			Key:     req.Key,
			Payload: endOfEmptySnapshot{},
		}})

		// Now splice the rest of the topic buffer on so the subscription will
		// continue to see future updates in the topic buffer.
		buf.AppendItem(topicHead.NextLink())

		sub = newSubscription(req, subHead, e.subscriptions.unsubscribe(req))
	} else {
		snap, err := e.getSnapshotLocked(req, topicHead)
		if err != nil {
			return nil, err
		}
		sub = newSubscription(req, snap.Head, e.subscriptions.unsubscribe(req))
	}

	e.subscriptions.add(req, sub)
	return sub, nil
}
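
// A resubscribe sketch (topicExample and aclTokenSecretID are hypothetical): a
// client that already holds state up to a known index can pass that index so
// that, when it still matches the head of the topic buffer, the snapshot is
// skipped and only the endOfEmptySnapshot marker followed by new events is
// delivered.
//
//	sub, err := publisher.Subscribe(&SubscribeRequest{
//		Topic: topicExample,
//		Key:   "example",
//		Token: aclTokenSecretID,
//		Index: 42,
//	})
//	if err != nil {
//		return err
//	}
//	defer sub.Unsubscribe()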

func (s *subscriptions) add(req *SubscribeRequest, sub *Subscription) {
	s.lock.Lock()
	defer s.lock.Unlock()

	subsByToken, ok := s.byToken[req.Token]
	if !ok {
		subsByToken = make(map[*SubscribeRequest]*Subscription)
		s.byToken[req.Token] = subsByToken
	}
	subsByToken[req] = sub
}

func (s *subscriptions) closeSubscriptionsForTokens(tokenSecretIDs []string) {
	s.lock.RLock()
	defer s.lock.RUnlock()

	for _, secretID := range tokenSecretIDs {
		if subs, ok := s.byToken[secretID]; ok {
			for _, sub := range subs {
				sub.forceClose()
			}
		}
	}
}

// unsubscribe returns a function that the subscription will call to remove
// itself from the byToken map.
// This function is returned as a closure so that the caller doesn't need to keep
// track of the SubscribeRequest, and cannot accidentally call unsubscribe with
// the wrong pointer.
func (s *subscriptions) unsubscribe(req *SubscribeRequest) func() {
	return func() {
		s.lock.Lock()
		defer s.lock.Unlock()

		subsByToken, ok := s.byToken[req.Token]
		if !ok {
			return
		}
		delete(subsByToken, req)
		if len(subsByToken) == 0 {
			delete(s.byToken, req.Token)
		}
	}
}

func (e *EventPublisher) getSnapshotLocked(req *SubscribeRequest, topicHead *bufferItem) (*eventSnapshot, error) {
	topicSnaps, ok := e.snapCache[req.Topic]
	if !ok {
		topicSnaps = make(map[string]*eventSnapshot)
		e.snapCache[req.Topic] = topicSnaps
	}

	snap, ok := topicSnaps[req.Key]
	if ok && snap.err() == nil {
		return snap, nil
	}

	handler, ok := e.snapshotHandlers[req.Topic]
	if !ok {
		return nil, fmt.Errorf("unknown topic %v", req.Topic)
	}

	snap = newEventSnapshot(req, topicHead, handler)
	if e.snapCacheTTL > 0 {
		topicSnaps[req.Key] = snap

		// Trigger a clearout after TTL
		time.AfterFunc(e.snapCacheTTL, func() {
			e.lock.Lock()
			defer e.lock.Unlock()
			delete(topicSnaps, req.Key)
		})
	}

	return snap, nil
}