package taskrunner

import (
	"context"
	"fmt"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces"
	"github.com/hashicorp/nomad/client/serviceregistration"
	regMock "github.com/hashicorp/nomad/client/serviceregistration/mock"
	"github.com/hashicorp/nomad/client/serviceregistration/wrapper"
	"github.com/hashicorp/nomad/client/taskenv"
	agentconsul "github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/shoenig/test/must"
	"github.com/stretchr/testify/require"
)
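// newScriptMock builds a scriptCheck around the given TTLUpdater, executor,
// logger, interval, and timeout, using fixed alloc, task, and service IDs so
// tests can exercise the check lifecycle without a real Consul agent.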
func newScriptMock(hb TTLUpdater, exec interfaces.ScriptExecutor, logger hclog.Logger, interval, timeout time.Duration) *scriptCheck {
	script := newScriptCheck(&scriptCheckConfig{
		allocID:   "allocid",
		taskName:  "testtask",
		serviceID: "serviceid",
		check: &structs.ServiceCheck{
			Interval: interval,
			Timeout:  timeout,
		},
		ttlUpdater: hb,
		driverExec: exec,
		taskEnv:    &taskenv.TaskEnv{},
		logger:     logger,
		shutdownCh: nil,
	})
	script.callback = newScriptCheckCallback(script)
	script.lastCheckOk = true
	return script
}
// fakeHeartbeater implements the TTLUpdater interface to allow mocking out
// Consul in script executor tests.
type fakeHeartbeater struct {
	heartbeats chan heartbeat
}
func (f *fakeHeartbeater) UpdateTTL(checkID, namespace, output, status string) error {
	f.heartbeats <- heartbeat{checkID: checkID, output: output, status: status}
	return nil
}
func newFakeHeartbeater() *fakeHeartbeater {
	return &fakeHeartbeater{heartbeats: make(chan heartbeat)}
}
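// heartbeat records the arguments of a single UpdateTTL call so tests can
// assert on the check ID, output, and status that would have been sent to Consul.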
type heartbeat struct {
	checkID string
	output  string
	status  string
}
// TestScript_Exec_Cancel asserts that cancelling a script check short-circuits
// any running scripts.
func TestScript_Exec_Cancel(t *testing.T) {
	ci.Parallel(t)

	exec, cancel := newBlockingScriptExec()
	defer cancel()

	logger := testlog.HCLogger(t)
	script := newScriptMock(nil, // TTLUpdater should never be called
		exec, logger, time.Hour, time.Hour)

	handle := script.run()
	<-exec.running  // wait until Exec is called
	handle.cancel() // cancel now that we're blocked in exec

	select {
	case <-handle.wait():
	case <-time.After(3 * time.Second):
		t.Fatalf("timed out waiting for script check to exit")
	}

	// The underlying ScriptExecutor (newBlockingScriptExec) *cannot* be
	// canceled. Only a wrapper around it obeys the context cancelation.
	require.NotEqual(t, atomic.LoadInt32(&exec.exited), 1,
		"expected script executor to still be running after timeout")
}
// TestScript_Exec_TimeoutBasic asserts a script will be killed when the
// timeout is reached.
func TestScript_Exec_TimeoutBasic(t *testing.T) {
	ci.Parallel(t)

	exec, cancel := newBlockingScriptExec()
	defer cancel()

	logger := testlog.HCLogger(t)
	hb := newFakeHeartbeater()
	script := newScriptMock(hb, exec, logger, time.Hour, time.Second)

	handle := script.run()
	defer handle.cancel() // cleanup
	<-exec.running        // wait until Exec is called

	// Check for UpdateTTL call
	select {
	case update := <-hb.heartbeats:
		require.Equal(t, update.output, context.DeadlineExceeded.Error())
		require.Equal(t, update.status, api.HealthCritical)
	case <-time.After(3 * time.Second):
		t.Fatalf("timed out waiting for script check to exit")
	}

	// The underlying ScriptExecutor (newBlockingScriptExec) *cannot* be
	// canceled. Only a wrapper around it obeys the context cancelation.
	require.NotEqual(t, atomic.LoadInt32(&exec.exited), 1,
		"expected script executor to still be running after timeout")

	// Cancel and watch for exit
	handle.cancel()
	select {
	case <-handle.wait(): // ok!
	case update := <-hb.heartbeats:
		t.Errorf("unexpected UpdateTTL call on exit with status=%q", update)
	case <-time.After(3 * time.Second):
		t.Fatalf("timed out waiting for script check to exit")
	}
}
// TestScript_Exec_TimeoutCritical asserts a script will be killed when
// the timeout is reached and that a critical status is always set regardless
// of what Exec returns.
func TestScript_Exec_TimeoutCritical(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	hb := newFakeHeartbeater()
	script := newScriptMock(hb, sleeperExec{}, logger, time.Hour, time.Nanosecond)

	handle := script.run()
	defer handle.cancel() // cleanup

	// Check for UpdateTTL call
	select {
	case update := <-hb.heartbeats:
		require.Equal(t, update.output, context.DeadlineExceeded.Error())
		require.Equal(t, update.status, api.HealthCritical)
	case <-time.After(3 * time.Second):
		t.Fatalf("timed out waiting for script check to timeout")
	}
}
// TestScript_Exec_Shutdown asserts a script will be executed once more
// when told to shut down.
func TestScript_Exec_Shutdown(t *testing.T) {
	ci.Parallel(t)

	shutdown := make(chan struct{})
	exec := newSimpleExec(0, nil)
	logger := testlog.HCLogger(t)
	hb := newFakeHeartbeater()
	script := newScriptMock(hb, exec, logger, time.Hour, 3*time.Second)
	script.shutdownCh = shutdown

	handle := script.run()
	defer handle.cancel() // cleanup
	close(shutdown)       // tell scriptCheck to exit

	select {
	case update := <-hb.heartbeats:
		require.Equal(t, update.output, "code=0 err=<nil>")
		require.Equal(t, update.status, api.HealthPassing)
	case <-time.After(3 * time.Second):
		t.Fatalf("timed out waiting for script check to exit")
	}

	select {
	case <-handle.wait(): // ok!
	case <-time.After(3 * time.Second):
		t.Fatalf("timed out waiting for script check to exit")
	}
}
// TestScript_Exec_Codes asserts script exit codes are translated to their
// corresponding Consul health check status.
func TestScript_Exec_Codes(t *testing.T) {
	ci.Parallel(t)

	exec := newScriptedExec([]execResult{
		{[]byte("output"), 1, nil},
		{[]byte("output"), 0, nil},
		{[]byte("output"), 0, context.DeadlineExceeded},
		{[]byte("output"), 0, nil},
		{[]byte("<ignored output>"), 2, fmt.Errorf("some error")},
		{[]byte("output"), 0, nil},
		{[]byte("error9000"), 9000, nil},
	})
	logger := testlog.HCLogger(t)
	hb := newFakeHeartbeater()
	script := newScriptMock(
		hb, exec, logger, time.Nanosecond, 3*time.Second)

	handle := script.run()
	defer handle.cancel() // cleanup
	deadline := time.After(3 * time.Second)

	// Exit code 0 maps to passing, 1 to warning, and any other code or an
	// error returned from Exec maps to critical.
	expected := []heartbeat{
		{script.id, "output", api.HealthWarning},
		{script.id, "output", api.HealthPassing},
		{script.id, context.DeadlineExceeded.Error(), api.HealthCritical},
		{script.id, "output", api.HealthPassing},
		{script.id, "some error", api.HealthCritical},
		{script.id, "output", api.HealthPassing},
		{script.id, "error9000", api.HealthCritical},
	}

	for i := 0; i <= 6; i++ {
		select {
		case update := <-hb.heartbeats:
			require.Equal(t, update, expected[i],
				"expected update %d to be '%s' but received '%s'",
				i, expected[i], update)
		case <-deadline:
			t.Fatalf("timed out waiting for all script checks to finish")
		}
	}
}
// TestScript_TaskEnvInterpolation asserts that script check hooks are
// interpolated in the same way that services are.
func TestScript_TaskEnvInterpolation(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	consulClient := regMock.NewServiceRegistrationHandler(logger)
	regWrap := wrapper.NewHandlerWrapper(logger, consulClient, nil)
	exec, cancel := newBlockingScriptExec()
	defer cancel()

	alloc := mock.ConnectAlloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]

	task.Services[0].Name = "${NOMAD_JOB_NAME}-${TASK}-${SVC_NAME}"
	task.Services[0].Checks[0].Name = "${NOMAD_JOB_NAME}-${SVC_NAME}-check"
	alloc.Job.Canonicalize() // need to re-canonicalize b/c the mock already did it

	env := taskenv.NewBuilder(mock.Node(), alloc, task, "global").SetHookEnv(
		"script_check",
		map[string]string{"SVC_NAME": "frontend"}).Build()

	svcHook := newServiceHook(serviceHookConfig{
		alloc:             alloc,
		task:              task,
		serviceRegWrapper: regWrap,
		logger:            logger,
	})
	// emulate prestart having been fired
	svcHook.taskEnv = env

	scHook := newScriptCheckHook(scriptCheckHookConfig{
		alloc:        alloc,
		task:         task,
		consul:       consulClient,
		logger:       logger,
		shutdownWait: time.Hour, // TTLUpdater will never be called
	})
	// emulate prestart having been fired
	scHook.taskEnv = env
	scHook.driverExec = exec

	workload := svcHook.getWorkloadServices()
	must.Eq(t, "web", workload.AllocInfo.Group)

	expectedSvc := workload.Services[0]
	expected := agentconsul.MakeCheckID(serviceregistration.MakeAllocServiceID(
		alloc.ID, task.Name, expectedSvc), expectedSvc.Checks[0])

	actual := scHook.newScriptChecks()
	check, ok := actual[expected]
	must.True(t, ok)
	must.Eq(t, "my-job-frontend-check", check.check.Name)

	// emulate an update
	env = taskenv.NewBuilder(mock.Node(), alloc, task, "global").SetHookEnv(
		"script_check",
		map[string]string{"SVC_NAME": "backend"}).Build()
	scHook.taskEnv = env
	svcHook.taskEnv = env

	expectedSvc = svcHook.getWorkloadServices().Services[0]
	expected = agentconsul.MakeCheckID(serviceregistration.MakeAllocServiceID(
		alloc.ID, task.Name, expectedSvc), expectedSvc.Checks[0])

	actual = scHook.newScriptChecks()
	check, ok = actual[expected]
	must.True(t, ok)
	must.Eq(t, "my-job-backend-check", check.check.Name)
}
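// TestScript_associated asserts that a check is associated with a task when
// either the service's task or the check's task names it, and that the
// check-level task takes precedence over the service-level task when both are set.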
func TestScript_associated(t *testing.T) {
	ci.Parallel(t)

	t.Run("neither set", func(t *testing.T) {
		require.False(t, new(scriptCheckHook).associated("task1", "", ""))
	})

	t.Run("service set", func(t *testing.T) {
		require.True(t, new(scriptCheckHook).associated("task1", "task1", ""))
		require.False(t, new(scriptCheckHook).associated("task1", "task2", ""))
	})

	t.Run("check set", func(t *testing.T) {
		require.True(t, new(scriptCheckHook).associated("task1", "", "task1"))
		require.False(t, new(scriptCheckHook).associated("task1", "", "task2"))
	})

	t.Run("both set", func(t *testing.T) {
		// ensure check.task takes precedence over service.task
		require.True(t, new(scriptCheckHook).associated("task1", "task1", "task1"))
		require.False(t, new(scriptCheckHook).associated("task1", "task1", "task2"))
		require.True(t, new(scriptCheckHook).associated("task1", "task2", "task1"))
		require.False(t, new(scriptCheckHook).associated("task1", "task2", "task2"))
	})
}