open-nomad/client/pluginmanager/csimanager/usage_tracker_test.go
Tim Gross f6b3d38eb8
CSI: move node unmount to server-driven RPCs (#7596)
If a volume-claiming alloc stops and the CSI Node plugin that serves
that alloc's volumes is missing, there's no way for the allocrunner
hook to send the `NodeUnpublish` and `NodeUnstage` RPCs.

This changeset addresses this issue with a redesign of the client-side
code for CSI. Rather than unmounting the volume itself, the alloc
runner hook will simply exit. When the server gets the
`Node.UpdateAlloc` for the terminal allocation that had a volume claim,
it creates a volume claim GC job. This job will make client RPCs to a
new node plugin RPC endpoint, and only once that succeeds, move on to
making the client RPCs to the controller plugin. If the node plugin is
unavailable, the GC job will fail and be requeued.
2020-04-02 16:04:56 -04:00
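
To make the ordering concrete, here is a minimal Go sketch of the claim-GC
flow the commit message describes. All names here (gcVolumeClaim,
nodeUnpublish, nodeUnstage, controllerUnpublish, errRequeue) are
illustrative stand-ins, not Nomad's actual internals: the point is only
that the node plugin RPCs must succeed before the controller plugin is
contacted, and that a node plugin failure surfaces as an error so the GC
job can fail and be requeued.

package main

import (
	"errors"
	"fmt"
)

// errRequeue signals that the GC job should fail and be retried later.
var errRequeue = errors.New("node plugin unavailable, requeue the GC job")

// gcVolumeClaim frees a single volume claim. The node plugin RPCs must
// succeed before any controller plugin RPC is attempted; a node plugin
// failure propagates up so the GC job fails and is requeued rather than
// leaking the claim.
func gcVolumeClaim(volID, allocID string) error {
	if err := nodeUnpublish(volID, allocID); err != nil {
		return fmt.Errorf("%w: %v", errRequeue, err)
	}
	if err := nodeUnstage(volID); err != nil {
		return fmt.Errorf("%w: %v", errRequeue, err)
	}
	// Only after the node plugin has released the volume do we ask the
	// controller plugin to detach it from the node.
	return controllerUnpublish(volID, allocID)
}

// Stubs standing in for the new client-side node plugin endpoint and the
// controller plugin RPCs.
func nodeUnpublish(volID, allocID string) error       { return nil }
func nodeUnstage(volID string) error                  { return nil }
func controllerUnpublish(volID, allocID string) error { return nil }

func main() {
	if err := gcVolumeClaim("vol-1", "alloc-1"); err != nil {
		fmt.Println("GC failed, will requeue:", err)
	}
}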

63 lines
1.3 KiB
Go

package csimanager

import (
	"testing"

	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"
)

func TestUsageTracker(t *testing.T) {
	mockAllocs := []*structs.Allocation{
		mock.Alloc(),
		mock.Alloc(),
		mock.Alloc(),
		mock.Alloc(),
		mock.Alloc(),
	}

	cases := []struct {
		Name           string
		RegisterAllocs []*structs.Allocation
		FreeAllocs     []*structs.Allocation
		ExpectedResult bool
	}{
		{
			Name:           "Register and deregister all allocs",
			RegisterAllocs: mockAllocs,
			FreeAllocs:     mockAllocs,
			ExpectedResult: true,
		},
		{
			Name:           "Register all and deregister partial allocs",
			RegisterAllocs: mockAllocs,
			FreeAllocs:     mockAllocs[0:3],
			ExpectedResult: false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tracker := newVolumeUsageTracker()

			volume := &structs.CSIVolume{
				ID: "foo",
			}
			for _, alloc := range tc.RegisterAllocs {
				tracker.Claim(alloc.ID, volume.ID, &UsageOptions{})
			}

			result := false
			for _, alloc := range tc.FreeAllocs {
				result = tracker.Free(alloc.ID, volume.ID, &UsageOptions{})
			}

			require.Equal(t, tc.ExpectedResult, result, "Tracker State: %#v", tracker.state)
		})
	}
}
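
For context on what the test asserts, here is a hedged sketch of how a
caller might consume the tracker, written as if in the same csimanager
package. Only newVolumeUsageTracker, Claim, Free, and UsageOptions appear
in the file above; the volumeUsageTracker type name is inferred from the
constructor, and unmountVolume is a made-up stand-in for the real unmount
path.

// freeAndMaybeUnmount releases one allocation's claim and performs the
// actual unmount only when no other allocation still claims the volume.
func freeAndMaybeUnmount(tracker *volumeUsageTracker, allocID, volID string, usage *UsageOptions) error {
	// Free reports true only when this was the last claim on the volume,
	// which is why the partial-free case in the test expects false.
	if !tracker.Free(allocID, volID, usage) {
		// Another allocation still claims this volume; leave it mounted.
		return nil
	}
	return unmountVolume(volID, usage)
}

// unmountVolume is a stub for the real NodeUnpublish/NodeUnstage path.
func unmountVolume(volID string, usage *UsageOptions) error { return nil }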