2023-04-10 15:36:59 +00:00
|
|
|
// Copyright (c) HashiCorp, Inc.
|
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
2020-04-30 13:13:00 +00:00
|
|
|
package volumewatcher
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"sync"
|
2022-04-04 14:46:45 +00:00
|
|
|
"time"
|
2020-04-30 13:13:00 +00:00
|
|
|
|
|
|
|
log "github.com/hashicorp/go-hclog"
|
|
|
|
memdb "github.com/hashicorp/go-memdb"
|
|
|
|
multierror "github.com/hashicorp/go-multierror"
|
2022-04-04 14:46:45 +00:00
|
|
|
"github.com/hashicorp/nomad/helper"
|
2020-04-30 13:13:00 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/state"
|
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
|
|
|
)
|
|
|
|
|
|
|
|
// volumeWatcher is used to watch a single volume and trigger the
// scheduler when allocation health transitions.
type volumeWatcher struct {
	// v is the volume being watched
	v *structs.CSIVolume

	// state is the state that is watched for state changes.
	state *state.StateStore

	// rpc is the server interface for CSI client RPCs
	rpc CSIVolumeRPC

	// leaderAcl is the ACL needed to send RPCs
	leaderAcl string

	logger      log.Logger
	shutdownCtx context.Context // parent context
	ctx         context.Context // own context
	exitFn      context.CancelFunc

	// deleteFn removes this watcher from the parent Watcher's index
	deleteFn func()

	// quiescentTimeout is the time we wait until the volume has "settled"
	// before stopping the child watcher goroutines
	quiescentTimeout time.Duration

	// updateCh is triggered when there is an updated volume
	updateCh chan *structs.CSIVolume

	// wLock guards the running flag (and the fields above that the
	// watch goroutine mutates, e.g. v)
	wLock   sync.RWMutex
	running bool
}
|
|
|
|
|
|
|
|
// newVolumeWatcher returns a volume watcher that is used to watch
|
|
|
|
// volumes
|
|
|
|
func newVolumeWatcher(parent *Watcher, vol *structs.CSIVolume) *volumeWatcher {
|
|
|
|
|
|
|
|
w := &volumeWatcher{
|
2022-04-04 14:46:45 +00:00
|
|
|
updateCh: make(chan *structs.CSIVolume, 1),
|
|
|
|
v: vol,
|
|
|
|
state: parent.state,
|
|
|
|
rpc: parent.rpc,
|
|
|
|
leaderAcl: parent.leaderAcl,
|
|
|
|
logger: parent.logger.With("volume_id", vol.ID, "namespace", vol.Namespace),
|
|
|
|
shutdownCtx: parent.ctx,
|
|
|
|
deleteFn: func() { parent.remove(vol.ID + vol.Namespace) },
|
|
|
|
quiescentTimeout: parent.quiescentTimeout,
|
2020-04-30 13:13:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Start the long lived watcher that scans for allocation updates
|
|
|
|
w.Start()
|
|
|
|
return w
|
|
|
|
}
|
|
|
|
|
|
|
|
// Notify signals an update to the tracked volume.
|
|
|
|
func (vw *volumeWatcher) Notify(v *structs.CSIVolume) {
|
|
|
|
if !vw.isRunning() {
|
|
|
|
vw.Start()
|
|
|
|
}
|
|
|
|
select {
|
|
|
|
case vw.updateCh <- v:
|
|
|
|
case <-vw.shutdownCtx.Done(): // prevent deadlock if we stopped
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (vw *volumeWatcher) Start() {
|
2020-05-07 19:57:24 +00:00
|
|
|
vw.logger.Trace("starting watcher")
|
2020-04-30 13:13:00 +00:00
|
|
|
vw.wLock.Lock()
|
|
|
|
defer vw.wLock.Unlock()
|
|
|
|
vw.running = true
|
|
|
|
go vw.watch()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (vw *volumeWatcher) Stop() {
|
2020-05-07 19:57:24 +00:00
|
|
|
vw.logger.Trace("no more claims")
|
2022-11-01 20:53:10 +00:00
|
|
|
vw.wLock.Lock()
|
|
|
|
defer vw.wLock.Unlock()
|
|
|
|
vw.running = false
|
2020-04-30 13:13:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (vw *volumeWatcher) isRunning() bool {
|
|
|
|
vw.wLock.RLock()
|
|
|
|
defer vw.wLock.RUnlock()
|
|
|
|
select {
|
|
|
|
case <-vw.shutdownCtx.Done():
|
|
|
|
return false
|
|
|
|
default:
|
|
|
|
return vw.running
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// watch is the long-running function that watches for changes to a volume.
|
|
|
|
// Each pass steps the volume's claims through the various states of reaping
|
|
|
|
// until the volume has no more claims eligible to be reaped.
|
|
|
|
func (vw *volumeWatcher) watch() {
|
2022-11-01 20:53:10 +00:00
|
|
|
defer vw.deleteFn()
|
|
|
|
defer vw.Stop()
|
2022-01-05 16:40:20 +00:00
|
|
|
|
2022-04-04 14:46:45 +00:00
|
|
|
timer, stop := helper.NewSafeTimer(vw.quiescentTimeout)
|
|
|
|
defer stop()
|
|
|
|
|
2020-04-30 13:13:00 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
// TODO(tgross): currently server->client RPC have no cancellation
|
|
|
|
// context, so we can't stop the long-runner RPCs gracefully
|
|
|
|
case <-vw.shutdownCtx.Done():
|
|
|
|
return
|
|
|
|
case vol := <-vw.updateCh:
|
2022-04-04 14:46:45 +00:00
|
|
|
vol = vw.getVolume(vol)
|
|
|
|
if vol == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
vw.volumeReap(vol)
|
|
|
|
timer.Reset(vw.quiescentTimeout)
|
|
|
|
case <-timer.C:
|
2022-11-01 20:53:10 +00:00
|
|
|
// Wait until the volume has "settled" before stopping this
|
|
|
|
// goroutine so that we can handle the burst of updates around
|
|
|
|
// freeing claims without having to spin it back up
|
2020-05-11 13:32:05 +00:00
|
|
|
return
|
2020-04-30 13:13:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// getVolume returns the tracked volume, fully populated with the current
|
|
|
|
// state
|
|
|
|
func (vw *volumeWatcher) getVolume(vol *structs.CSIVolume) *structs.CSIVolume {
|
|
|
|
vw.wLock.RLock()
|
|
|
|
defer vw.wLock.RUnlock()
|
|
|
|
|
|
|
|
var err error
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
|
2022-04-04 14:46:45 +00:00
|
|
|
vol, err = vw.state.CSIVolumeByID(ws, vol.Namespace, vol.ID)
|
2020-04-30 13:13:00 +00:00
|
|
|
if err != nil {
|
2022-04-04 14:46:45 +00:00
|
|
|
vw.logger.Error("could not query for volume", "error", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if vol == nil {
|
2020-04-30 13:13:00 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
vol, err = vw.state.CSIVolumeDenormalize(ws, vol)
|
|
|
|
if err != nil {
|
|
|
|
vw.logger.Error("could not query allocs for volume", "error", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
vw.v = vol
|
|
|
|
return vol
|
|
|
|
}
|
|
|
|
|
|
|
|
// volumeReap collects errors for logging but doesn't return them
|
|
|
|
// to the main loop.
|
|
|
|
func (vw *volumeWatcher) volumeReap(vol *structs.CSIVolume) {
|
2020-05-07 19:57:24 +00:00
|
|
|
vw.logger.Trace("releasing unused volume claims")
|
2020-04-30 13:13:00 +00:00
|
|
|
err := vw.volumeReapImpl(vol)
|
|
|
|
if err != nil {
|
|
|
|
vw.logger.Error("error releasing volume claims", "error", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (vw *volumeWatcher) isUnclaimed(vol *structs.CSIVolume) bool {
|
|
|
|
return len(vol.ReadClaims) == 0 && len(vol.WriteClaims) == 0 && len(vol.PastClaims) == 0
|
|
|
|
}
|
|
|
|
|
2022-01-27 15:39:08 +00:00
|
|
|
// volumeReapImpl unpublished all the volume's PastClaims. PastClaims
|
|
|
|
// will be populated from nil or terminal allocs when we call
|
|
|
|
// CSIVolumeDenormalize(), so this assumes we've done so in the caller
|
2020-04-30 13:13:00 +00:00
|
|
|
func (vw *volumeWatcher) volumeReapImpl(vol *structs.CSIVolume) error {
|
|
|
|
var result *multierror.Error
|
2020-08-06 18:31:18 +00:00
|
|
|
for _, claim := range vol.PastClaims {
|
|
|
|
err := vw.unpublish(vol, claim)
|
|
|
|
if err != nil {
|
|
|
|
result = multierror.Append(result, err)
|
2020-04-30 13:13:00 +00:00
|
|
|
}
|
|
|
|
}
|
2020-08-06 18:31:18 +00:00
|
|
|
return result.ErrorOrNil()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (vw *volumeWatcher) collectPastClaims(vol *structs.CSIVolume) *structs.CSIVolume {
|
|
|
|
|
2020-04-30 13:13:00 +00:00
|
|
|
collect := func(allocs map[string]*structs.Allocation,
|
|
|
|
claims map[string]*structs.CSIVolumeClaim) {
|
|
|
|
|
|
|
|
for allocID, alloc := range allocs {
|
|
|
|
if alloc == nil {
|
|
|
|
_, exists := vol.PastClaims[allocID]
|
|
|
|
if !exists {
|
|
|
|
vol.PastClaims[allocID] = &structs.CSIVolumeClaim{
|
|
|
|
AllocationID: allocID,
|
|
|
|
State: structs.CSIVolumeClaimStateReadyToFree,
|
|
|
|
}
|
|
|
|
}
|
2020-08-06 18:31:18 +00:00
|
|
|
} else if alloc.Terminated() {
|
2020-04-30 13:13:00 +00:00
|
|
|
// don't overwrite the PastClaim if we've seen it before,
|
|
|
|
// so that we can track state between subsequent calls
|
|
|
|
_, exists := vol.PastClaims[allocID]
|
|
|
|
if !exists {
|
|
|
|
claim, ok := claims[allocID]
|
|
|
|
if !ok {
|
|
|
|
claim = &structs.CSIVolumeClaim{
|
|
|
|
AllocationID: allocID,
|
|
|
|
NodeID: alloc.NodeID,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
claim.State = structs.CSIVolumeClaimStateTaken
|
|
|
|
vol.PastClaims[allocID] = claim
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
collect(vol.ReadAllocs, vol.ReadClaims)
|
|
|
|
collect(vol.WriteAllocs, vol.WriteClaims)
|
2020-08-06 18:31:18 +00:00
|
|
|
return vol
|
2020-04-30 13:13:00 +00:00
|
|
|
}
|
|
|
|
|
2020-08-06 18:31:18 +00:00
|
|
|
func (vw *volumeWatcher) unpublish(vol *structs.CSIVolume, claim *structs.CSIVolumeClaim) error {
|
2022-04-04 14:46:45 +00:00
|
|
|
vw.logger.Trace("unpublishing volume", "alloc", claim.AllocationID)
|
2020-08-06 18:31:18 +00:00
|
|
|
req := &structs.CSIVolumeUnpublishRequest{
|
2020-08-07 19:37:27 +00:00
|
|
|
VolumeID: vol.ID,
|
|
|
|
Claim: claim,
|
|
|
|
WriteRequest: structs.WriteRequest{
|
|
|
|
Namespace: vol.Namespace,
|
|
|
|
Region: vw.state.Config().Region,
|
|
|
|
AuthToken: vw.leaderAcl,
|
|
|
|
},
|
2020-04-30 13:13:00 +00:00
|
|
|
}
|
2020-08-06 18:31:18 +00:00
|
|
|
err := vw.rpc.Unpublish(req, &structs.CSIVolumeUnpublishResponse{})
|
2020-04-30 13:13:00 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
claim.State = structs.CSIVolumeClaimStateReadyToFree
|
|
|
|
return nil
|
|
|
|
}
|