package consul

import (
	"fmt"
	"sync"
	"time"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/command/agent/consul"
	testing "github.com/mitchellh/go-testing-interface"
)

// MockConsulOp represents a single operation recorded by the mock Consul
// service client.
type MockConsulOp struct {
	// Op is the operation name: add, remove, update, alloc_registrations,
	// add_group, remove_group, update_group, or update_ttl.
	Op         string
	AllocID    string
	Name       string // task or group name
	OccurredAt time.Time
}

// NewMockConsulOp returns a new MockConsulOp, panicking if op is not one of
// the known operation names.
func NewMockConsulOp(op, allocID, name string) MockConsulOp {
	switch op {
	case "add", "remove", "update", "alloc_registrations",
		"add_group", "remove_group", "update_group", "update_ttl":
		// valid op
	default:
		panic(fmt.Errorf("invalid consul op: %s", op))
	}
	return MockConsulOp{
		Op:         op,
		AllocID:    allocID,
		Name:       name,
		OccurredAt: time.Now(),
	}
}

// MockConsulServiceClient implements the ConsulServiceAPI interface to record
// and log workload (task or group) registration and deregistration in tests.
type MockConsulServiceClient struct {
	ops []MockConsulOp
	mu  sync.Mutex

	logger log.Logger

	// AllocRegistrationsFn allows injecting return values for the
	// AllocRegistrations function.
	AllocRegistrationsFn func(allocID string) (*consul.AllocRegistration, error)
}

// NewMockConsulServiceClient returns a MockConsulServiceClient that logs
// operations under a "mock_consul" sub-logger.
func NewMockConsulServiceClient(t testing.T, logger log.Logger) *MockConsulServiceClient {
	logger = logger.Named("mock_consul")
	m := MockConsulServiceClient{
		ops:    make([]MockConsulOp, 0, 20),
		logger: logger,
	}
	return &m
}
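
// A typical test wires the mock up in place of the real Consul client. A
// minimal sketch, assuming the caller has an hclog-producing test helper
// such as Nomad's helper/testlog.HCLogger:
//
//	m := NewMockConsulServiceClient(t, testlog.HCLogger(t))
//	// ... run the code under test against m, then inspect m.GetOps().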

func (m *MockConsulServiceClient) UpdateWorkload(old, newSvcs *consul.WorkloadServices) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.logger.Trace("UpdateWorkload", "alloc_id", newSvcs.AllocID, "name", newSvcs.Name(),
		"old_services", len(old.Services), "new_services", len(newSvcs.Services),
	)
	m.ops = append(m.ops, NewMockConsulOp("update", newSvcs.AllocID, newSvcs.Name()))
	return nil
}

func (m *MockConsulServiceClient) RegisterWorkload(svcs *consul.WorkloadServices) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.logger.Trace("RegisterWorkload", "alloc_id", svcs.AllocID, "name", svcs.Name(),
		"services", len(svcs.Services),
	)
	m.ops = append(m.ops, NewMockConsulOp("add", svcs.AllocID, svcs.Name()))
	return nil
}

func (m *MockConsulServiceClient) RemoveWorkload(svcs *consul.WorkloadServices) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.logger.Trace("RemoveWorkload", "alloc_id", svcs.AllocID, "name", svcs.Name(),
		"services", len(svcs.Services),
	)
	m.ops = append(m.ops, NewMockConsulOp("remove", svcs.AllocID, svcs.Name()))
}

func (m *MockConsulServiceClient) AllocRegistrations(allocID string) (*consul.AllocRegistration, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.logger.Trace("AllocRegistrations", "alloc_id", allocID)
	m.ops = append(m.ops, NewMockConsulOp("alloc_registrations", allocID, ""))

	if m.AllocRegistrationsFn != nil {
		return m.AllocRegistrationsFn(allocID)
	}

	return nil, nil
}
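
// Tests that need AllocRegistrations to return data can inject a stub before
// exercising the code under test. A minimal sketch (the empty
// AllocRegistration literal is illustrative; real tests would populate it):
//
//	m.AllocRegistrationsFn = func(allocID string) (*consul.AllocRegistration, error) {
//		return &consul.AllocRegistration{}, nil
//	}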

func (m *MockConsulServiceClient) UpdateTTL(checkID, namespace, output, status string) error {
	// TODO(tgross): this method is only here to implement the interface, but
	// the locking we'd need for testing creates many opportunities for
	// deadlocks in tests that would never appear in live code.
	m.logger.Trace("UpdateTTL", "check_id", checkID, "namespace", namespace, "status", status)
	return nil
}

// GetOps returns the operations recorded so far.
func (m *MockConsulServiceClient) GetOps() []MockConsulOp {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.ops
}
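
// requireOps is an illustrative helper, not part of the upstream API: a
// minimal sketch of how a test can assert against the sequence of operations
// the mock recorded via GetOps.
func requireOps(t testing.T, m *MockConsulServiceClient, expected ...string) {
	ops := m.GetOps()
	if len(ops) != len(expected) {
		t.Fatalf("expected %d consul ops, got %d", len(expected), len(ops))
	}
	for i, op := range ops {
		if op.Op != expected[i] {
			t.Fatalf("op %d: expected %q, got %q", i, expected[i], op.Op)
		}
	}
}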