In order to support implicit ACL policies that let tasks read their own secrets, each task would need its own ACL token. This would add extra Raft overhead as well as new garbage collection jobs for cleaning up task-specific ACL tokens. Instead, Nomad will create a workload identity claim for each task. An identity claim is a JSON Web Token (JWT) signed by the server's private key and attached to an Allocation at the time a plan is applied. The encoded JWT can be submitted as the X-Nomad-Token header in place of an ACL token secret ID for the RPCs that support identity claims. Whenever a key is added to a server's keyring, the server will use that key as the seed for an Ed25519 public-private keypair. That keypair will be used to sign and verify the JWT. This implementation is a ruthlessly minimal approach to supporting the secure variables feature. When a JWT is verified, the allocation ID will be checked against the Nomad state store, and a non-existent or terminal allocation ID will cause validation to be rejected. This is sufficient to support the secure variables feature at launch without requiring a background process to renew soon-to-expire tokens.
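To make the mechanism concrete, here is a minimal, hypothetical sketch (not Nomad's actual implementation) of how a 32-byte keyring key could seed an Ed25519 keypair that signs and verifies a workload identity claim encoded as a JWT. The claim fields, function names, and package name are assumptions for illustration only; only the Go standard library is used.

// Package identity is an illustrative sketch, not Nomad source code.
package identity

import (
	"crypto/ed25519"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// identityClaims is a hypothetical claim set; field names are assumptions.
type identityClaims struct {
	Namespace    string `json:"nomad_namespace"`
	JobID        string `json:"nomad_job_id"`
	AllocationID string `json:"nomad_allocation_id"`
	TaskName     string `json:"nomad_task"`
}

// signClaims derives an Ed25519 keypair from the keyring key (used as the
// seed) and signs the claims as a compact JWT: header.payload.signature,
// each segment base64url-encoded without padding.
func signClaims(seed []byte, claims identityClaims) (string, error) {
	if len(seed) != ed25519.SeedSize {
		return "", fmt.Errorf("seed must be %d bytes", ed25519.SeedSize)
	}
	priv := ed25519.NewKeyFromSeed(seed)

	enc := base64.RawURLEncoding
	header, _ := json.Marshal(map[string]string{"alg": "EdDSA", "typ": "JWT"})
	payload, err := json.Marshal(claims)
	if err != nil {
		return "", err
	}

	signingInput := enc.EncodeToString(header) + "." + enc.EncodeToString(payload)
	sig := ed25519.Sign(priv, []byte(signingInput))
	return signingInput + "." + enc.EncodeToString(sig), nil
}

// verifyClaims checks the Ed25519 signature and decodes the claims. A real
// verifier would additionally check the allocation ID against the Nomad
// state store and reject non-existent or terminal allocations.
func verifyClaims(pub ed25519.PublicKey, token string) (*identityClaims, error) {
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return nil, fmt.Errorf("malformed token")
	}
	enc := base64.RawURLEncoding
	sig, err := enc.DecodeString(parts[2])
	if err != nil {
		return nil, err
	}
	if !ed25519.Verify(pub, []byte(parts[0]+"."+parts[1]), sig) {
		return nil, fmt.Errorf("invalid signature")
	}
	payload, err := enc.DecodeString(parts[1])
	if err != nil {
		return nil, err
	}
	var claims identityClaims
	if err := json.Unmarshal(payload, &claims); err != nil {
		return nil, err
	}
	return &claims, nil
}

The "EdDSA" value in the header is the standard JOSE algorithm name for Ed25519 signatures, which is why it is used in the sketch above.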
package taskrunner

import (
	"github.com/hashicorp/nomad/client/allocrunner/taskrunner/state"
	"github.com/hashicorp/nomad/nomad/structs"
)

// Alloc returns the task runner's allocation.
func (tr *TaskRunner) Alloc() *structs.Allocation {
	tr.allocLock.Lock()
	defer tr.allocLock.Unlock()
	return tr.alloc
}

// setAlloc sets the allocation and task on the TaskRunner.
func (tr *TaskRunner) setAlloc(updated *structs.Allocation, task *structs.Task) {
	tr.allocLock.Lock()
	defer tr.allocLock.Unlock()

	tr.taskLock.Lock()
	defer tr.taskLock.Unlock()

	tr.alloc = updated
	tr.task = task
}

// IsLeader returns true if this task is the leader of its task group.
func (tr *TaskRunner) IsLeader() bool {
	return tr.taskLeader
}

// IsPoststopTask returns true if this task is a poststop task in its task group.
func (tr *TaskRunner) IsPoststopTask() bool {
	return tr.Task().Lifecycle != nil && tr.Task().Lifecycle.Hook == structs.TaskLifecycleHookPoststop
}

// IsSidecarTask returns true if this task is a sidecar task in its task group.
func (tr *TaskRunner) IsSidecarTask() bool {
	return tr.Task().Lifecycle != nil && tr.Task().Lifecycle.Sidecar
}

// Task returns the task owned by the task runner.
func (tr *TaskRunner) Task() *structs.Task {
	tr.taskLock.RLock()
	defer tr.taskLock.RUnlock()
	return tr.task
}

// TaskState returns a copy of the task's current state.
func (tr *TaskRunner) TaskState() *structs.TaskState {
	tr.stateLock.Lock()
	defer tr.stateLock.Unlock()
	return tr.state.Copy()
}

// getVaultToken returns the task's Vault token.
func (tr *TaskRunner) getVaultToken() string {
	tr.vaultTokenLock.Lock()
	defer tr.vaultTokenLock.Unlock()
	return tr.vaultToken
}

// setVaultToken updates the vault token on the task runner as well as in the
// task's environment. These two places must be set atomically to avoid a task
// seeing a different token on the task runner and in its environment.
func (tr *TaskRunner) setVaultToken(token string) {
	tr.vaultTokenLock.Lock()
	defer tr.vaultTokenLock.Unlock()

	// Update the Vault token on the runner
	tr.vaultToken = token

	// Update the task's environment
	taskNamespace := tr.task.Vault.Namespace

	ns := tr.clientConfig.VaultConfig.Namespace
	if taskNamespace != "" {
		ns = taskNamespace
	}
	tr.envBuilder.SetVaultToken(token, ns, tr.task.Vault.Env)
}

// getNomadToken returns the Nomad token for the task.
func (tr *TaskRunner) getNomadToken() string {
	tr.nomadTokenLock.Lock()
	defer tr.nomadTokenLock.Unlock()
	return tr.nomadToken
}

// setNomadToken sets the Nomad token for the task.
func (tr *TaskRunner) setNomadToken(token string) {
	tr.nomadTokenLock.Lock()
	defer tr.nomadTokenLock.Unlock()
	tr.nomadToken = token
}

// getDriverHandle returns a driver handle.
func (tr *TaskRunner) getDriverHandle() *DriverHandle {
	tr.handleLock.Lock()
	defer tr.handleLock.Unlock()
	return tr.handle
}

// setDriverHandle sets the driver handle and updates the driver network in the
// task's environment.
func (tr *TaskRunner) setDriverHandle(handle *DriverHandle) {
	tr.handleLock.Lock()
	defer tr.handleLock.Unlock()
	tr.handle = handle

	// Update the environment's driver network
	tr.envBuilder.SetDriverNetwork(handle.net)
}

// clearDriverHandle destroys the task via the driver handle, if one is set,
// and clears the handle.
func (tr *TaskRunner) clearDriverHandle() {
	tr.handleLock.Lock()
	defer tr.handleLock.Unlock()
	if tr.handle != nil {
		tr.driver.DestroyTask(tr.handle.ID(), true)
	}
	tr.handle = nil
}

// setKillErr stores any error that arose while killing the task
func (tr *TaskRunner) setKillErr(err error) {
	tr.killErrLock.Lock()
	defer tr.killErrLock.Unlock()
	tr.killErr = err
}

// getKillErr returns any error that arose while killing the task
func (tr *TaskRunner) getKillErr() error {
	tr.killErrLock.Lock()
	defer tr.killErrLock.Unlock()
	return tr.killErr
}

// hookState returns the state for the given hook or nil if no state is
// persisted for the hook.
func (tr *TaskRunner) hookState(name string) *state.HookState {
	tr.stateLock.RLock()
	defer tr.stateLock.RUnlock()

	var s *state.HookState
	if tr.localState.Hooks != nil {
		s = tr.localState.Hooks[name].Copy()
	}
	return s
}