package docker
import (
	"runtime"
	"sync"
	"testing"
	"time"

	docker "github.com/fsouza/go-dockerclient"

	"github.com/hashicorp/nomad/ci"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/stretchr/testify/require"
)
func TestDriver_DockerStatsCollector(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-10 18:31:43 +00:00
|
|
|
require := require.New(t)
|
2022-03-15 12:42:43 +00:00
|
|
|
|
2019-01-10 18:31:43 +00:00
|
|
|
src := make(chan *docker.Stats)
|
|
|
|
defer close(src)
|
2019-04-02 16:18:38 +00:00
|
|
|
dst, recvCh := newStatsChanPipe()
|
2019-04-02 14:16:57 +00:00
|
|
|
defer dst.close()
|
2019-01-10 18:31:43 +00:00
|
|
|
stats := &docker.Stats{}
|
|
|
|
stats.CPUStats.ThrottlingData.Periods = 10
|
|
|
|
stats.CPUStats.ThrottlingData.ThrottledPeriods = 10
|
|
|
|
stats.CPUStats.ThrottlingData.ThrottledTime = 10
|
|
|
|
|
|
|
|
stats.MemoryStats.Stats.Rss = 6537216
|
|
|
|
stats.MemoryStats.Stats.Cache = 1234
|
|
|
|
stats.MemoryStats.Stats.Swap = 0
|
2022-01-10 20:35:19 +00:00
|
|
|
stats.MemoryStats.Stats.MappedFile = 1024
|
2019-01-14 23:47:52 +00:00
|
|
|
stats.MemoryStats.Usage = 5651904
|
2019-01-10 18:31:43 +00:00
|
|
|
stats.MemoryStats.MaxUsage = 6651904
|
2019-02-22 12:22:02 +00:00
|
|
|
stats.MemoryStats.Commit = 123231
|
|
|
|
stats.MemoryStats.CommitPeak = 321323
|
|
|
|
stats.MemoryStats.PrivateWorkingSet = 62222
|
2019-01-10 18:31:43 +00:00
|
|
|
|
|
|
|
go dockerStatsCollector(dst, src, time.Second)
|
|
|
|
|
|
|
|
select {
|
|
|
|
case src <- stats:
|
|
|
|
case <-time.After(time.Second):
|
|
|
|
require.Fail("sending stats should not block here")
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
2019-04-02 14:16:57 +00:00
|
|
|
case ru := <-recvCh:
|
2019-02-22 12:22:02 +00:00
|
|
|
if runtime.GOOS != "windows" {
|
|
|
|
require.Equal(stats.MemoryStats.Stats.Rss, ru.ResourceUsage.MemoryStats.RSS)
|
|
|
|
require.Equal(stats.MemoryStats.Stats.Cache, ru.ResourceUsage.MemoryStats.Cache)
|
|
|
|
require.Equal(stats.MemoryStats.Stats.Swap, ru.ResourceUsage.MemoryStats.Swap)
|
2022-01-10 20:35:19 +00:00
|
|
|
require.Equal(stats.MemoryStats.Stats.MappedFile, ru.ResourceUsage.MemoryStats.MappedFile)
|
2019-02-22 12:22:02 +00:00
|
|
|
require.Equal(stats.MemoryStats.Usage, ru.ResourceUsage.MemoryStats.Usage)
|
|
|
|
require.Equal(stats.MemoryStats.MaxUsage, ru.ResourceUsage.MemoryStats.MaxUsage)
|
|
|
|
require.Equal(stats.CPUStats.ThrottlingData.ThrottledPeriods, ru.ResourceUsage.CpuStats.ThrottledPeriods)
|
|
|
|
require.Equal(stats.CPUStats.ThrottlingData.ThrottledTime, ru.ResourceUsage.CpuStats.ThrottledTime)
|
|
|
|
} else {
|
|
|
|
require.Equal(stats.MemoryStats.PrivateWorkingSet, ru.ResourceUsage.MemoryStats.RSS)
|
|
|
|
require.Equal(stats.MemoryStats.Commit, ru.ResourceUsage.MemoryStats.Usage)
|
|
|
|
require.Equal(stats.MemoryStats.CommitPeak, ru.ResourceUsage.MemoryStats.MaxUsage)
|
|
|
|
require.Equal(stats.CPUStats.ThrottlingData.ThrottledPeriods, ru.ResourceUsage.CpuStats.ThrottledPeriods)
|
|
|
|
require.Equal(stats.CPUStats.ThrottlingData.ThrottledTime, ru.ResourceUsage.CpuStats.ThrottledTime)
|
|
|
|
|
|
|
|
}
|
2019-01-10 18:31:43 +00:00
|
|
|
case <-time.After(time.Second):
|
|
|
|
require.Fail("receiving stats should not block here")
|
|
|
|
}
|
|
|
|
}
// TestDriver_DockerUsageSender asserts that the TaskResourceUsage chan wrapper
|
|
|
|
// supports closing and sending on a chan from concurrent goroutines.
|
|
|
|
func TestDriver_DockerUsageSender(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-04-02 14:16:57 +00:00
|
|
|
|
|
|
|
// sample payload
|
|
|
|
res := &cstructs.TaskResourceUsage{}
|
|
|
|
|
2019-04-02 16:18:38 +00:00
|
|
|
destCh, recvCh := newStatsChanPipe()
|
2019-04-02 14:16:57 +00:00
|
|
|
|
|
|
|
// Sending should never fail
|
|
|
|
destCh.send(res)
|
|
|
|
destCh.send(res)
|
|
|
|
destCh.send(res)
|
|
|
|
|
|
|
|
// Clear chan
|
|
|
|
<-recvCh
|
|
|
|
|
|
|
|
// Send and close concurrently to let the race detector help us out
|
|
|
|
wg := sync.WaitGroup{}
|
|
|
|
wg.Add(3)
|
|
|
|
|
|
|
|
// Sender
|
|
|
|
go func() {
|
|
|
|
destCh.send(res)
|
|
|
|
wg.Done()
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Closer
|
|
|
|
go func() {
|
|
|
|
destCh.close()
|
|
|
|
wg.Done()
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Clear recv chan
|
|
|
|
go func() {
|
|
|
|
for range recvCh {
|
|
|
|
}
|
|
|
|
wg.Done()
|
|
|
|
}()
|
|
|
|
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
// Assert closed
|
|
|
|
destCh.mu.Lock()
|
|
|
|
closed := destCh.closed
|
|
|
|
destCh.mu.Unlock()
|
|
|
|
require.True(t, closed)
|
|
|
|
|
|
|
|
select {
|
|
|
|
case _, ok := <-recvCh:
|
|
|
|
require.False(t, ok)
|
|
|
|
default:
|
|
|
|
require.Fail(t, "expect recvCh to be closed")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Assert sending and closing never fails
|
|
|
|
destCh.send(res)
|
|
|
|
destCh.close()
|
|
|
|
destCh.close()
|
|
|
|
destCh.send(res)
|
|
|
|
}