package cache

import (
	"context"
	"fmt"
	"reflect"
	"time"

	"github.com/hashicorp/consul/lib"
)

// UpdateEvent is a struct summarizing an update to a cache entry
type UpdateEvent struct {
	// CorrelationID is used by the Notify API to allow correlation of updates
	// with specific requests. We could return the full request object and
	// cachetype for consumers to match against the calls they made but in
	// practice it's cleaner for them to choose the minimal necessary unique
	// identifier given the set of things they are watching. They might even
	// choose to assign random IDs for example.
	CorrelationID string
	Result        interface{}
	Meta          ResultMeta
	Err           error
}
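
// A minimal consumer sketch (the correlation IDs "watch-a" and "watch-b" and
// the chan below are illustrative assumptions, not defined in this package):
// when several watches share one chan, events are disambiguated by
// CorrelationID:
//
//	for u := range ch {
//		switch u.CorrelationID {
//		case "watch-a":
//			// handle updates (or u.Err) for the first watched request
//		case "watch-b":
//			// handle updates for the second watched request
//		}
//	}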

// Notify registers a desire to be updated about changes to a cache result.
//
// It is a helper that saves callers from implementing their own "blocking" query
// logic against a cache key to watch for changes and to maintain the key in
// cache actively. It will continue to perform blocking Get requests until the
// context is canceled.
//
// The passed context must be canceled or time out in order to free resources
// and stop maintaining the value in cache. Typically request-scoped contexts
// do this, but if a long-lived context like context.Background is used, then the
// caller must arrange for it to be canceled when the watch is no longer
// needed.
//
// The passed chan may be buffered or unbuffered; if the caller doesn't consume
// fast enough it will block the notification loop. When the chan is later
// drained, watching resumes correctly. If the pause is longer than the
// cachetype's TTL, the result might be removed from the local cache. Even in
// this case, though, when the chan is drained again, the new Get will re-fetch
// the entry from servers and resume notification behavior transparently.
//
// The chan is passed in to allow multiple cached results to be watched by a
// single consumer without juggling extra goroutines per watch. The
// correlationID is opaque and will be returned in all UpdateEvents generated as
// a result of watching the specified request, so the caller can set this to any
// value that allows them to disambiguate between events in the returned chan
// when sharing a chan between multiple cache entries. If the chan is closed,
// the notify loop will terminate.
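//
// A minimal registration sketch (the type name "some-cache-type", the request
// value req, the buffer size, and the correlation IDs are illustrative
// assumptions, not part of this package):
//
//	ch := make(chan UpdateEvent, 8)
//	if err := c.Notify(ctx, "some-cache-type", req, "watch-a", ch); err != nil {
//		return err
//	}
//	// The same chan can be reused for another watch with a different ID:
//	// c.Notify(ctx, "some-cache-type", otherReq, "watch-b", ch)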
func (c *Cache) Notify(
	ctx context.Context,
	t string,
	r Request,
	correlationID string,
	ch chan<- UpdateEvent,
) error {
	c.typesLock.RLock()
	tEntry, ok := c.types[t]
	c.typesLock.RUnlock()
	if !ok {
		return fmt.Errorf("unknown type in cache: %s", t)
	}

	if tEntry.Opts.SupportsBlocking {
		go c.notifyBlockingQuery(ctx, newGetOptions(tEntry, r), correlationID, ch)
		return nil
	}

	info := r.CacheInfo()
	if info.MaxAge == 0 {
		return fmt.Errorf("Cannot use Notify for polling cache types without specifying the MaxAge")
	}
	go c.notifyPollingQuery(ctx, newGetOptions(tEntry, r), correlationID, ch)
	return nil
}

func (c *Cache) notifyBlockingQuery(ctx context.Context, r getOptions, correlationID string, ch chan<- UpdateEvent) {
	// Always start at index 0 to deliver the initial value (possibly the
	// currently cached value).
	index := uint64(0)
	failures := uint(0)

	for {
		// Check the context hasn't been canceled
		if ctx.Err() != nil {
			return
		}

		// Blocking request
		r.Info.MinIndex = index
		res, meta, err := c.getWithIndex(ctx, r)

		// Check the context hasn't been canceled
		if ctx.Err() != nil {
			return
		}

		// Check the index of the value returned in the cache entry to be sure it
		// changed
		if index == 0 || index < meta.Index {
			u := UpdateEvent{correlationID, res, meta, err}
			select {
			case ch <- u:
			case <-ctx.Done():
				return
			}

			// Update index for next request
			index = meta.Index
		}

		var wait time.Duration
		// Handle errors with backoff. Badly behaved blocking calls that return
		// a zero index are treated as failures so that we don't get stuck in a
		// busy loop.
		if err == nil && meta.Index > 0 {
			failures = 0
		} else {
			failures++
			wait = backOffWait(failures)
		}

		if wait > 0 {
			select {
			case <-time.After(wait):
			case <-ctx.Done():
				return
			}
		}
		// Sanity check that we always request blocking on the second pass
		if err == nil && index < 1 {
			index = 1
		}
	}
}

func (c *Cache) notifyPollingQuery(ctx context.Context, r getOptions, correlationID string, ch chan<- UpdateEvent) {
	index := uint64(0)
	failures := uint(0)

	var lastValue interface{} = nil

	for {
		// Check the context hasn't been canceled
		if ctx.Err() != nil {
			return
		}

		// Make the request
		r.Info.MinIndex = index
		res, meta, err := c.getWithIndex(ctx, r)

		// Check the context hasn't been canceled
		if ctx.Err() != nil {
			return
		}

		// Check for a change in the value or an index change
		if index < meta.Index || !reflect.DeepEqual(lastValue, res) {
			u := UpdateEvent{correlationID, res, meta, err}
			select {
			case ch <- u:
			case <-ctx.Done():
				return
			}

			// Update index and lastValue
			lastValue = res
			index = meta.Index
		}

		// Reset or increment failure counter
		if err == nil {
			failures = 0
		} else {
			failures++
		}

		var wait time.Duration
		// Determining how long to wait before the next poll is complicated.
		// First off, the happy path and the error path waits are handled distinctly.
		//
		// Once fetching the data through the cache returns an error (and until a
		// non-error value is returned) the wait time between each round of the loop
		// is controlled by the backOffWait function. Because we would have waited
		// at least until the age of the cached data was too old, the error path should
		// retry the fetch immediately and back off the wait time as needed for
		// persistent failures, potentially waiting much longer than the MaxAge of
		// the request.
		//
		// On the happy path we just need to fetch from the cache often enough to ensure
		// that the data is not older than the MaxAge. Therefore, after fetching the data from
		// the cache we can sleep until the age of that data would exceed the MaxAge. Sometimes
		// this will be for the MaxAge duration (such as when only a single notify was executed,
		// so only one goroutine is keeping the cache updated). Other times this will be some
		// smaller duration than MaxAge (when multiple notify calls were executed and this
		// goroutine just got back data that was a cache hit because another goroutine had
		// fetched it without a hit). We cannot just set MustRevalidate on the request and
		// always sleep for MaxAge, as this would eliminate the single-flighting of these
		// requests in the cache and the efficiencies gained by it.
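		//
		// As a concrete illustration (numbers assumed, not taken from this code):
		// with MaxAge = 10s and a cache hit whose Age is 4s, the happy path below
		// sleeps roughly 10s - 4s = 6s, plus up to ~10s/16 = 625ms of random
		// jitter, before polling again; on the error path it instead sleeps for
		// backOffWait(failures).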
		if failures > 0 {
			wait = backOffWait(failures)
		} else {
			// Calculate when the cached data's Age will get too stale and
			// need to be re-queried. When the data's Age already exceeds the
			// MaxAge, wait is left at 0 to re-poll immediately.
			if meta.Age <= r.Info.MaxAge {
				wait = r.Info.MaxAge - meta.Age
			}

			// Add a small amount of random jitter to the polling time. One
			// purpose of the jitter is to ensure that the next time
			// we fetch from the cache the data will be stale (unless another
			// notify goroutine has updated it while this one is sleeping).
			// Without this it would be possible to wake up, fetch the data
			// again where the age of the data is strictly equal to the MaxAge
			// and then immediately have to re-fetch again. That wouldn't
			// be terrible, but it would expend a bunch more CPU cycles when
			// we can definitely avoid it.
			wait += lib.RandomStagger(r.Info.MaxAge / 16)
		}

		select {
		case <-time.After(wait):
		case <-ctx.Done():
			return
		}
	}
}