2023-04-10 15:36:59 +00:00
|
|
|
// Copyright (c) HashiCorp, Inc.
|
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
2020-04-30 13:13:00 +00:00
|
|
|
package volumewatcher
|
|
|
|
|
|
|
|
import (
	"context"
	"errors"
	"sync"
	"time"

	log "github.com/hashicorp/go-hclog"
	memdb "github.com/hashicorp/go-memdb"

	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
)
|
|
|
|
|
|
|
|
// Watcher is used to watch volumes and their allocations created
// by the scheduler and trigger the scheduler when allocation health
// transitions.
type Watcher struct {
	// enabled is true only while this server is the active leader;
	// toggled by SetEnabled and guarded by wlock.
	enabled bool

	// logger is the named logger for this watcher.
	logger log.Logger

	// rpc contains the set of Server methods that can be used by
	// the volumes watcher for RPC
	rpc CSIVolumeRPC

	// the ACL needed to send RPCs
	leaderAcl string

	// state is the state that is watched for state changes.
	state *state.StateStore

	// watchers is the set of active watchers, one per volume
	// (keyed by volume ID + namespace); guarded by wlock.
	watchers map[string]*volumeWatcher

	// ctx and exitFn are used to cancel the watcher
	ctx    context.Context
	exitFn context.CancelFunc

	// quiescentTimeout is the time we wait until the volume has "settled"
	// before stopping the child watcher goroutines
	quiescentTimeout time.Duration

	// wlock guards enabled, leaderAcl, state, watchers, ctx, and exitFn.
	wlock sync.RWMutex
}
|
|
|
|
|
2022-04-04 14:46:45 +00:00
|
|
|
// defaultQuiescentTimeout is how long a volume may remain unsettled
// before its child watcher goroutine is stopped.
var defaultQuiescentTimeout = 5 * time.Minute
|
|
|
|
2020-04-30 13:13:00 +00:00
|
|
|
// NewVolumesWatcher returns a volumes watcher that is used to watch
|
|
|
|
// volumes and trigger the scheduler as needed.
|
2020-08-07 19:37:27 +00:00
|
|
|
func NewVolumesWatcher(logger log.Logger, rpc CSIVolumeRPC, leaderAcl string) *Watcher {
|
2020-04-30 13:13:00 +00:00
|
|
|
|
|
|
|
// the leader step-down calls SetEnabled(false) which is what
|
|
|
|
// cancels this context, rather than passing in its own shutdown
|
|
|
|
// context
|
|
|
|
ctx, exitFn := context.WithCancel(context.Background())
|
|
|
|
|
|
|
|
return &Watcher{
|
2022-04-04 14:46:45 +00:00
|
|
|
rpc: rpc,
|
|
|
|
logger: logger.Named("volumes_watcher"),
|
|
|
|
ctx: ctx,
|
|
|
|
exitFn: exitFn,
|
|
|
|
leaderAcl: leaderAcl,
|
|
|
|
quiescentTimeout: defaultQuiescentTimeout,
|
2020-04-30 13:13:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetEnabled is used to control if the watcher is enabled. The
|
|
|
|
// watcher should only be enabled on the active leader. When being
|
2022-01-24 16:49:50 +00:00
|
|
|
// enabled the state and leader's ACL is passed in as it is no longer
|
|
|
|
// valid once a leader election has taken place.
|
|
|
|
func (w *Watcher) SetEnabled(enabled bool, state *state.StateStore, leaderAcl string) {
|
2020-04-30 13:13:00 +00:00
|
|
|
w.wlock.Lock()
|
|
|
|
defer w.wlock.Unlock()
|
|
|
|
|
|
|
|
wasEnabled := w.enabled
|
|
|
|
w.enabled = enabled
|
2022-01-24 16:49:50 +00:00
|
|
|
w.leaderAcl = leaderAcl
|
2020-04-30 13:13:00 +00:00
|
|
|
|
|
|
|
if state != nil {
|
|
|
|
w.state = state
|
|
|
|
}
|
|
|
|
|
|
|
|
// Flush the state to create the necessary objects
|
2020-05-26 13:18:17 +00:00
|
|
|
w.flush(enabled)
|
2020-04-30 13:13:00 +00:00
|
|
|
|
|
|
|
// If we are starting now, launch the watch daemon
|
|
|
|
if enabled && !wasEnabled {
|
|
|
|
go w.watchVolumes(w.ctx)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// flush is used to clear the state of the watcher
|
2020-05-26 13:18:17 +00:00
|
|
|
func (w *Watcher) flush(enabled bool) {
|
2020-04-30 13:13:00 +00:00
|
|
|
// Stop all the watchers and clear it
|
|
|
|
for _, watcher := range w.watchers {
|
|
|
|
watcher.Stop()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Kill everything associated with the watcher
|
|
|
|
if w.exitFn != nil {
|
|
|
|
w.exitFn()
|
|
|
|
}
|
|
|
|
|
|
|
|
w.watchers = make(map[string]*volumeWatcher, 32)
|
|
|
|
w.ctx, w.exitFn = context.WithCancel(context.Background())
|
|
|
|
}
|
|
|
|
|
|
|
|
// watchVolumes is the long lived go-routine that watches for volumes to
|
|
|
|
// add and remove watchers on.
|
|
|
|
func (w *Watcher) watchVolumes(ctx context.Context) {
|
|
|
|
vIndex := uint64(1)
|
|
|
|
for {
|
|
|
|
volumes, idx, err := w.getVolumes(ctx, vIndex)
|
|
|
|
if err != nil {
|
|
|
|
if err == context.Canceled {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.logger.Error("failed to retrieve volumes", "error", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
vIndex = idx // last-seen index
|
|
|
|
for _, v := range volumes {
|
|
|
|
if err := w.add(v); err != nil {
|
|
|
|
w.logger.Error("failed to track volume", "volume_id", v.ID, "error", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// getVolumes retrieves all volumes blocking at the given index.
|
|
|
|
func (w *Watcher) getVolumes(ctx context.Context, minIndex uint64) ([]*structs.CSIVolume, uint64, error) {
|
|
|
|
resp, index, err := w.state.BlockingQuery(w.getVolumesImpl, minIndex, ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return resp.([]*structs.CSIVolume), index, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// getVolumesImpl retrieves all volumes from the passed state store.
|
|
|
|
func (w *Watcher) getVolumesImpl(ws memdb.WatchSet, state *state.StateStore) (interface{}, uint64, error) {
|
|
|
|
|
|
|
|
iter, err := state.CSIVolumes(ws)
|
|
|
|
if err != nil {
|
|
|
|
return nil, 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var volumes []*structs.CSIVolume
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
volume := raw.(*structs.CSIVolume)
|
|
|
|
volumes = append(volumes, volume)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Use the last index that affected the volume table
|
|
|
|
index, err := state.Index("csi_volumes")
|
|
|
|
if err != nil {
|
|
|
|
return nil, 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return volumes, index, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// add adds a volume to the watch list
|
2022-04-04 14:46:45 +00:00
|
|
|
func (w *Watcher) add(v *structs.CSIVolume) error {
|
2020-04-30 13:13:00 +00:00
|
|
|
w.wlock.Lock()
|
|
|
|
defer w.wlock.Unlock()
|
2022-04-04 14:46:45 +00:00
|
|
|
_, err := w.addLocked(v)
|
2020-04-30 13:13:00 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// addLocked adds a volume to the watch list and should only be called when
// locked. Creating the volumeWatcher starts a go routine to .watch() it.
// Returns the new watcher, or (nil, nil) when the watcher is disabled or
// the volume is already being watched.
func (w *Watcher) addLocked(v *structs.CSIVolume) (*volumeWatcher, error) {
	// Not enabled so no-op
	if !w.enabled {
		return nil, nil
	}

	// Already watched so trigger an update for the volume.
	// The watchers map is keyed by volume ID concatenated with namespace.
	if watcher, ok := w.watchers[v.ID+v.Namespace]; ok {
		watcher.Notify(v)
		return nil, nil
	}

	watcher := newVolumeWatcher(w, v)
	w.watchers[v.ID+v.Namespace] = watcher

	// Sending the first volume update here before we return ensures we've hit
	// the run loop in the goroutine before freeing the lock. This prevents a
	// race between shutting down the watcher and the blocking query.
	//
	// It also ensures that we don't drop events that happened during leadership
	// transitions and didn't get completed by the prior leader
	watcher.updateCh <- v

	return watcher, nil
}
|
2022-04-04 14:46:45 +00:00
|
|
|
|
|
|
|
// removes a volume from the watch list
|
|
|
|
func (w *Watcher) remove(volID string) {
|
|
|
|
w.wlock.Lock()
|
|
|
|
defer w.wlock.Unlock()
|
|
|
|
delete(w.watchers, volID)
|
|
|
|
}
|