open-nomad/client/allocrunner/taskrunner/template/template.go

package template
import (
"context"
"errors"
"fmt"
"math/rand"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
ctconf "github.com/hashicorp/consul-template/config"
"github.com/hashicorp/consul-template/manager"
"github.com/hashicorp/consul-template/signals"
envparse "github.com/hashicorp/go-envparse"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/taskenv"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
// consulTemplateSourceName is the source name when using the TaskHooks.
consulTemplateSourceName = "Template"
// missingDepEventLimit is the maximum number of missing dependencies
// listed by name in an event before the remainder is summarized as a
// count ("and N more").
missingDepEventLimit = 3
// DefaultMaxTemplateEventRate is the default maximum rate at which a
// template event should be fired.
DefaultMaxTemplateEventRate = 3 * time.Second
)
var (
sourceEscapesErr = errors.New("template source path escapes alloc directory")
destEscapesErr = errors.New("template destination path escapes alloc directory")
)
// TaskTemplateManager is used to run a set of templates for a given task
type TaskTemplateManager struct {
// config holds the template manager's configuration
config *TaskTemplateManagerConfig
// lookup allows looking up the set of Nomad templates by their consul-template ID
lookup map[string][]*structs.Template
// runner is the consul-template runner
runner *manager.Runner
// signals is a lookup map from the string representation of a signal to its
// actual signal
signals map[string]os.Signal
// shutdownCh is used to signal any started goroutines to shutdown
shutdownCh chan struct{}
// shutdown marks whether the manager has been shutdown
shutdown bool
shutdownLock sync.Mutex
}
// TaskTemplateManagerConfig is used to configure an instance of the
// TaskTemplateManager
type TaskTemplateManagerConfig struct {
// UnblockCh is closed when the template has been rendered
UnblockCh chan struct{}
// Lifecycle is used to interact with the task the template manager is being
// run for
Lifecycle interfaces.TaskLifecycle
// Events is used to emit events for the task
Events interfaces.EventEmitter
// Templates is the set of templates we are managing
Templates []*structs.Template
// ClientConfig is the Nomad Client configuration
ClientConfig *config.Config
// VaultToken is the Vault token for the task.
VaultToken string
// VaultNamespace is the Vault namespace for the task
VaultNamespace string
// TaskDir is the task's directory
TaskDir string
// EnvBuilder is the environment variable builder for the task.
EnvBuilder *taskenv.Builder
// MaxTemplateEventRate is the maximum rate at which we should emit events.
MaxTemplateEventRate time.Duration
// retryRate is only used for testing; it forces a faster retry backoff
retryRate time.Duration
}
// Validate validates the configuration.
func (c *TaskTemplateManagerConfig) Validate() error {
if c == nil {
return fmt.Errorf("Nil config passed")
} else if c.UnblockCh == nil {
return fmt.Errorf("Invalid unblock channel given")
} else if c.Lifecycle == nil {
return fmt.Errorf("Invalid lifecycle hooks given")
} else if c.Events == nil {
return fmt.Errorf("Invalid event hook given")
} else if c.ClientConfig == nil {
return fmt.Errorf("Invalid client config given")
} else if c.TaskDir == "" {
return fmt.Errorf("Invalid task directory given: %q", c.TaskDir)
} else if c.EnvBuilder == nil {
return fmt.Errorf("Invalid task environment given")
} else if c.MaxTemplateEventRate == 0 {
return fmt.Errorf("Invalid max template event rate given")
}
return nil
}
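// NewTaskTemplateManager validates the given config, parses the change
// signals used by the templates, builds and starts a consul-template runner,
// and launches the render loop in a goroutine.
//
// A minimal caller sketch (variable names and field values are illustrative,
// not prescriptive):
//
//	unblockCh := make(chan struct{})
//	tm, err := NewTaskTemplateManager(&TaskTemplateManagerConfig{
//		UnblockCh:            unblockCh,
//		Lifecycle:            lifecycle,
//		Events:               events,
//		Templates:            task.Templates,
//		ClientConfig:         clientConfig,
//		TaskDir:              taskDir,
//		EnvBuilder:           envBuilder,
//		MaxTemplateEventRate: DefaultMaxTemplateEventRate,
//	})
//	if err != nil {
//		return err
//	}
//	defer tm.Stop()
//	<-unblockCh // closed once all templates have rendered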
func NewTaskTemplateManager(config *TaskTemplateManagerConfig) (*TaskTemplateManager, error) {
// Check pre-conditions
if err := config.Validate(); err != nil {
return nil, err
}
tm := &TaskTemplateManager{
config: config,
shutdownCh: make(chan struct{}),
}
// Parse the signals that we need
for _, tmpl := range config.Templates {
if tmpl.ChangeSignal == "" {
continue
}
sig, err := signals.Parse(tmpl.ChangeSignal)
if err != nil {
return nil, fmt.Errorf("Failed to parse signal %q", tmpl.ChangeSignal)
}
if tm.signals == nil {
tm.signals = make(map[string]os.Signal)
}
tm.signals[tmpl.ChangeSignal] = sig
}
// Build the consul-template runner
runner, lookup, err := templateRunner(config)
if err != nil {
return nil, err
}
tm.runner = runner
tm.lookup = lookup
go tm.run()
return tm, nil
}
// Stop is used to stop the consul-template runner
func (tm *TaskTemplateManager) Stop() {
tm.shutdownLock.Lock()
defer tm.shutdownLock.Unlock()
if tm.shutdown {
return
}
close(tm.shutdownCh)
tm.shutdown = true
// Stop the consul-template runner
if tm.runner != nil {
tm.runner.Stop()
}
}
// run is the long lived loop that handles errors and templates being rendered
func (tm *TaskTemplateManager) run() {
// Runner is nil if there are no templates
if tm.runner == nil {
// Unblock the start if there is nothing to do
close(tm.config.UnblockCh)
return
}
// Start the runner
go tm.runner.Start()
// Block till all the templates have been rendered
tm.handleFirstRender()
// Detect if there was a shutdown.
select {
case <-tm.shutdownCh:
return
default:
}
// Read environment variables from env templates before we unblock
envMap, err := loadTemplateEnv(tm.config.Templates, tm.config.EnvBuilder.Build())
if err != nil {
tm.config.Lifecycle.Kill(context.Background(),
structs.NewTaskEvent(structs.TaskKilling).
SetFailsTask().
SetDisplayMessage(fmt.Sprintf("Template failed to read environment variables: %v", err)))
return
}
tm.config.EnvBuilder.SetTemplateEnv(envMap)
// Unblock the task
close(tm.config.UnblockCh)
// If all our templates are change mode no-op, then we can exit here
if tm.allTemplatesNoop() {
return
}
// handle all subsequent render events.
tm.handleTemplateRerenders(time.Now())
}
// handleFirstRender blocks till all templates have been rendered
func (tm *TaskTemplateManager) handleFirstRender() {
// missingDependencies is the set of missing dependencies.
var missingDependencies map[string]struct{}
// eventTimer is used to trigger the firing of an event showing the missing
// dependencies.
eventTimer := time.NewTimer(tm.config.MaxTemplateEventRate)
if !eventTimer.Stop() {
<-eventTimer.C
}
// outstandingEvent tracks whether there is an outstanding event that should
// be fired.
outstandingEvent := false
// Wait till all the templates have been rendered
WAIT:
for {
select {
case <-tm.shutdownCh:
return
case err, ok := <-tm.runner.ErrCh:
if !ok {
continue
}
tm.config.Lifecycle.Kill(context.Background(),
structs.NewTaskEvent(structs.TaskKilling).
SetFailsTask().
SetDisplayMessage(fmt.Sprintf("Template failed: %v", err)))
case <-tm.runner.TemplateRenderedCh():
// A template has been rendered, figure out what to do
events := tm.runner.RenderEvents()
// Not all templates have been rendered yet
if len(events) < len(tm.lookup) {
continue
}
dirty := false
for _, event := range events {
// This template hasn't been rendered
if event.LastWouldRender.IsZero() {
continue WAIT
}
if event.WouldRender && event.DidRender {
dirty = true
}
}
// if there's a driver handle then the task is already running and
// that changes how we want to behave on first render
if dirty && tm.config.Lifecycle.IsRunning() {
handledRenders := make(map[string]time.Time, len(tm.config.Templates))
tm.onTemplateRendered(handledRenders, time.Time{})
}
break WAIT
case <-tm.runner.RenderEventCh():
events := tm.runner.RenderEvents()
joinedSet := make(map[string]struct{})
for _, event := range events {
missing := event.MissingDeps
if missing == nil {
continue
}
for _, dep := range missing.List() {
joinedSet[dep.String()] = struct{}{}
}
}
// Check to see if the new joined set is the same as the old
different := len(joinedSet) != len(missingDependencies)
if !different {
for k := range joinedSet {
if _, ok := missingDependencies[k]; !ok {
different = true
break
}
}
}
// Nothing to do
if !different {
continue
}
// Update the missing set
missingDependencies = joinedSet
// Update the event timer channel
if !outstandingEvent {
// We got new data so reset
outstandingEvent = true
eventTimer.Reset(tm.config.MaxTemplateEventRate)
}
case <-eventTimer.C:
if missingDependencies == nil {
continue
}
// Clear the outstanding event
outstandingEvent = false
// Build the missing set
missingSlice := make([]string, 0, len(missingDependencies))
for k := range missingDependencies {
missingSlice = append(missingSlice, k)
}
sort.Strings(missingSlice)
if l := len(missingSlice); l > missingDepEventLimit {
missingSlice[missingDepEventLimit] = fmt.Sprintf("and %d more", l-missingDepEventLimit)
missingSlice = missingSlice[:missingDepEventLimit+1]
}
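// e.g. with five missing dependencies and missingDepEventLimit = 3 the
// emitted event reads "Missing: kv.block(a), kv.block(b), kv.block(c),
// and 2 more" (dependency names illustrative).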
missingStr := strings.Join(missingSlice, ", ")
tm.config.Events.EmitEvent(structs.NewTaskEvent(consulTemplateSourceName).SetDisplayMessage(fmt.Sprintf("Missing: %s", missingStr)))
}
}
}
// handleTemplateRerenders is used to handle template render events after all
// templates have initially rendered. It takes action based on which set of
// templates re-render. The passed allRenderedTime is the time at which all
// templates had rendered; it is used to avoid signaling the task for any
// render event that happened beforehand.
func (tm *TaskTemplateManager) handleTemplateRerenders(allRenderedTime time.Time) {
// A lookup for the last time the template was handled
handledRenders := make(map[string]time.Time, len(tm.config.Templates))
for {
select {
case <-tm.shutdownCh:
return
case err, ok := <-tm.runner.ErrCh:
if !ok {
continue
}
tm.config.Lifecycle.Kill(context.Background(),
structs.NewTaskEvent(structs.TaskKilling).
SetFailsTask().
SetDisplayMessage(fmt.Sprintf("Template failed: %v", err)))
case <-tm.runner.TemplateRenderedCh():
tm.onTemplateRendered(handledRenders, allRenderedTime)
}
}
}
func (tm *TaskTemplateManager) onTemplateRendered(handledRenders map[string]time.Time, allRenderedTime time.Time) {
var handling []string
signals := make(map[string]struct{})
restart := false
var splay time.Duration
events := tm.runner.RenderEvents()
for id, event := range events {
// First time through
if allRenderedTime.After(event.LastDidRender) || allRenderedTime.Equal(event.LastDidRender) {
handledRenders[id] = allRenderedTime
continue
}
// We have already handled this one
if htime := handledRenders[id]; htime.After(event.LastDidRender) || htime.Equal(event.LastDidRender) {
continue
}
// Lookup the template and determine what to do
tmpls, ok := tm.lookup[id]
if !ok {
tm.config.Lifecycle.Kill(context.Background(),
structs.NewTaskEvent(structs.TaskKilling).
SetFailsTask().
SetDisplayMessage(fmt.Sprintf("Template runner returned unknown template id %q", id)))
return
}
// Read environment variables from templates
envMap, err := loadTemplateEnv(tm.config.Templates, tm.config.EnvBuilder.Build())
if err != nil {
tm.config.Lifecycle.Kill(context.Background(),
structs.NewTaskEvent(structs.TaskKilling).
SetFailsTask().
SetDisplayMessage(fmt.Sprintf("Template failed to read environment variables: %v", err)))
return
}
tm.config.EnvBuilder.SetTemplateEnv(envMap)
for _, tmpl := range tmpls {
switch tmpl.ChangeMode {
case structs.TemplateChangeModeSignal:
signals[tmpl.ChangeSignal] = struct{}{}
case structs.TemplateChangeModeRestart:
restart = true
case structs.TemplateChangeModeNoop:
continue
}
if tmpl.Splay > splay {
splay = tmpl.Splay
}
}
handling = append(handling, id)
}
if restart || len(signals) != 0 {
if splay != 0 {
ns := splay.Nanoseconds()
offset := rand.Int63n(ns)
t := time.Duration(offset)
select {
case <-time.After(t):
case <-tm.shutdownCh:
return
}
}
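// e.g. splay = 30s sleeps for a uniformly random duration in [0, 30s)
// before restarting or signaling, de-synchronizing reactions across
// allocations whose templates re-render at the same time.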
// Update handle time
for _, id := range handling {
handledRenders[id] = events[id].LastDidRender
}
if restart {
tm.config.Lifecycle.Restart(context.Background(),
structs.NewTaskEvent(structs.TaskRestartSignal).
SetDisplayMessage("Template with change_mode restart re-rendered"), false)
} else if len(signals) != 0 {
var mErr multierror.Error
for signal := range signals {
s := tm.signals[signal]
event := structs.NewTaskEvent(structs.TaskSignaling).SetTaskSignal(s).SetDisplayMessage("Template re-rendered")
if err := tm.config.Lifecycle.Signal(event, signal); err != nil {
_ = multierror.Append(&mErr, err)
}
}
if err := mErr.ErrorOrNil(); err != nil {
flat := make([]os.Signal, 0, len(signals))
for signal := range signals {
flat = append(flat, tm.signals[signal])
}
tm.config.Lifecycle.Kill(context.Background(),
structs.NewTaskEvent(structs.TaskKilling).
SetFailsTask().
SetDisplayMessage(fmt.Sprintf("Template failed to send signals %v: %v", flat, err)))
}
}
}
}
// allTemplatesNoop returns whether all the managed templates have change mode noop.
func (tm *TaskTemplateManager) allTemplatesNoop() bool {
for _, tmpl := range tm.config.Templates {
if tmpl.ChangeMode != structs.TemplateChangeModeNoop {
return false
}
}
return true
}
// templateRunner returns a consul-template runner for the given templates and
// a lookup from runner template ID to the Nomad templates it renders. If no
// templates are in the config, a nil template runner and lookup are returned.
func templateRunner(config *TaskTemplateManagerConfig) (
*manager.Runner, map[string][]*structs.Template, error) {
if len(config.Templates) == 0 {
return nil, nil, nil
}
// Parse the templates
ctmplMapping, err := parseTemplateConfigs(config)
if err != nil {
return nil, nil, err
}
// Create the runner configuration.
runnerConfig, err := newRunnerConfig(config, ctmplMapping)
if err != nil {
return nil, nil, err
}
runner, err := manager.NewRunner(runnerConfig, false)
if err != nil {
return nil, nil, err
}
// Set Nomad's environment variables.
// consul-template falls back to the host process environment if a
// variable isn't explicitly set in the configuration, so we need
// to mask the environment out to ensure only the task env vars are
// available.
runner.Env = maskProcessEnv(config.EnvBuilder.Build().All())
// Build the lookup from runner template ID to the Nomad templates it
// renders. A single ID may map to multiple Nomad templates since
// consul-template dedupes identical template configs under one ID.
idMap := runner.TemplateConfigMapping()
lookup := make(map[string][]*structs.Template, len(idMap))
for id, ctmpls := range idMap {
for _, ctmpl := range ctmpls {
templates := lookup[id]
templates = append(templates, ctmplMapping[ctmpl])
lookup[id] = templates
}
}
return runner, lookup, nil
}
// maskProcessEnv masks away any environment variable not found in task env.
// It manipulates the parameter directly and returns it without copying.
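//
// For example, if the host process exports PATH and AWS_SECRET_ACCESS_KEY but
// the task env only defines PATH, the returned map keeps the task's PATH and
// adds AWS_SECRET_ACCESS_KEY="" so consul-template cannot fall back to the
// host's value (variable names illustrative).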
func maskProcessEnv(env map[string]string) map[string]string {
procEnvs := os.Environ()
for _, e := range procEnvs {
ekv := strings.SplitN(e, "=", 2)
if _, ok := env[ekv[0]]; !ok {
env[ekv[0]] = ""
}
}
return env
}
// parseTemplateConfigs converts the task's templates in the config into
// consul-template template configs.
func parseTemplateConfigs(config *TaskTemplateManagerConfig) (map[*ctconf.TemplateConfig]*structs.Template, error) {
sandboxEnabled := !config.ClientConfig.TemplateConfig.DisableSandbox
taskEnv := config.EnvBuilder.Build()
ctmpls := make(map[*ctconf.TemplateConfig]*structs.Template, len(config.Templates))
for _, tmpl := range config.Templates {
var src, dest string
if tmpl.SourcePath != "" {
var escapes bool
src, escapes = taskEnv.ClientPath(tmpl.SourcePath, false)
if escapes && sandboxEnabled {
return nil, sourceEscapesErr
}
}
if tmpl.DestPath != "" {
var escapes bool
dest, escapes = taskEnv.ClientPath(tmpl.DestPath, true)
if escapes && sandboxEnabled {
return nil, destEscapesErr
}
}
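// e.g. with the sandbox enabled, a source of "../../../etc/passwd" escapes
// the alloc directory and fails with sourceEscapesErr, while a source of
// "${NOMAD_TASK_DIR}/config.ctmpl" resolves to a path inside the task
// directory and passes (paths illustrative).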
ct := ctconf.DefaultTemplateConfig()
ct.Source = &src
ct.Destination = &dest
ct.Contents = &tmpl.EmbeddedTmpl
ct.LeftDelim = &tmpl.LeftDelim
ct.RightDelim = &tmpl.RightDelim
ct.FunctionDenylist = config.ClientConfig.TemplateConfig.FunctionDenylist
if sandboxEnabled {
ct.SandboxPath = &config.TaskDir
}
// Set the permissions
if tmpl.Perms != "" {
v, err := strconv.ParseUint(tmpl.Perms, 8, 12)
if err != nil {
return nil, fmt.Errorf("Failed to parse %q as octal: %v", tmpl.Perms, err)
}
m := os.FileMode(v)
ct.Perms = &m
}
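// e.g. perms = "0600" yields os.FileMode(0600) (rw-------); the 12-bit
// parse width also admits setuid/setgid/sticky bits such as "4755".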
ct.Finalize()
ctmpls[ct] = tmpl
}
return ctmpls, nil
}
// newRunnerConfig returns a consul-template runner configuration, setting the
// Vault and Consul configurations based on the client's configs.
func newRunnerConfig(config *TaskTemplateManagerConfig,
templateMapping map[*ctconf.TemplateConfig]*structs.Template) (*ctconf.Config, error) {
cc := config.ClientConfig
conf := ctconf.DefaultConfig()
// Gather the consul-template templates
flat := ctconf.TemplateConfigs(make([]*ctconf.TemplateConfig, 0, len(templateMapping)))
for ctmpl := range templateMapping {
local := ctmpl
flat = append(flat, local)
}
conf.Templates = &flat
// Force faster retries
if config.retryRate != 0 {
rate := config.retryRate
conf.Consul.Retry.Backoff = &rate
}
// Setup the Consul config
if cc.ConsulConfig != nil {
conf.Consul.Address = &cc.ConsulConfig.Addr
conf.Consul.Token = &cc.ConsulConfig.Token
conf.Consul.Namespace = &cc.ConsulConfig.Namespace
if cc.ConsulConfig.EnableSSL != nil && *cc.ConsulConfig.EnableSSL {
verify := cc.ConsulConfig.VerifySSL != nil && *cc.ConsulConfig.VerifySSL
conf.Consul.SSL = &ctconf.SSLConfig{
Enabled: helper.BoolToPtr(true),
Verify: &verify,
Cert: &cc.ConsulConfig.CertFile,
Key: &cc.ConsulConfig.KeyFile,
CaCert: &cc.ConsulConfig.CAFile,
}
}
if cc.ConsulConfig.Auth != "" {
parts := strings.SplitN(cc.ConsulConfig.Auth, ":", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("Failed to parse Consul Auth config")
}
conf.Consul.Auth = &ctconf.AuthConfig{
Enabled: helper.BoolToPtr(true),
Username: &parts[0],
Password: &parts[1],
}
}
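// e.g. an auth string of "nomad:hunter2" is split into Username "nomad"
// and Password "hunter2" (credentials illustrative).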
}
// Setup the Vault config
// Always set these to ensure nothing is picked up from the environment
emptyStr := ""
conf.Vault.RenewToken = helper.BoolToPtr(false)
conf.Vault.Token = &emptyStr
if cc.VaultConfig != nil && cc.VaultConfig.IsEnabled() {
conf.Vault.Address = &cc.VaultConfig.Addr
conf.Vault.Token = &config.VaultToken
// Set the Vault namespace. The namespace passed in the task
// config takes the highest precedence.
if config.ClientConfig.VaultConfig.Namespace != "" {
conf.Vault.Namespace = &config.ClientConfig.VaultConfig.Namespace
}
if config.VaultNamespace != "" {
conf.Vault.Namespace = &config.VaultNamespace
}
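// e.g. a client-wide namespace of "ops" applies unless the task's Vault
// block sets its own namespace (say "eng"), which wins (names illustrative).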
if strings.HasPrefix(cc.VaultConfig.Addr, "https") || cc.VaultConfig.TLSCertFile != "" {
skipVerify := cc.VaultConfig.TLSSkipVerify != nil && *cc.VaultConfig.TLSSkipVerify
verify := !skipVerify
conf.Vault.SSL = &ctconf.SSLConfig{
Enabled: helper.BoolToPtr(true),
Verify: &verify,
Cert: &cc.VaultConfig.TLSCertFile,
Key: &cc.VaultConfig.TLSKeyFile,
CaCert: &cc.VaultConfig.TLSCaFile,
CaPath: &cc.VaultConfig.TLSCaPath,
ServerName: &cc.VaultConfig.TLSServerName,
}
} else {
conf.Vault.SSL = &ctconf.SSLConfig{
Enabled: helper.BoolToPtr(false),
Verify: helper.BoolToPtr(false),
Cert: &emptyStr,
Key: &emptyStr,
CaCert: &emptyStr,
CaPath: &emptyStr,
ServerName: &emptyStr,
}
}
}
conf.Finalize()
return conf, nil
}
// loadTemplateEnv loads task environment variables from all templates.
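//
// A rendered env template is parsed as newline-separated KEY=value pairs; for
// example (contents illustrative), a rendered file containing:
//
//	DB_ADDR=10.0.0.5
//	LOG_LEVEL=debug
//
// injects DB_ADDR and LOG_LEVEL into the task's environment.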
func loadTemplateEnv(tmpls []*structs.Template, taskEnv *taskenv.TaskEnv) (map[string]string, error) {
all := make(map[string]string, 50)
for _, t := range tmpls {
if !t.Envvars {
continue
}
// we checked for escapes before we rendered the file, so ignore the flag here
dest, _ := taskEnv.ClientPath(t.DestPath, true)
f, err := os.Open(dest)
if err != nil {
return nil, fmt.Errorf("error opening env template: %v", err)
}

// Parse the environment file and close it promptly; deferring inside the
// loop would hold every file open until the function returns.
vars, err := envparse.Parse(f)
f.Close()
if err != nil {
return nil, fmt.Errorf("error parsing env template %q: %v", dest, err)
}
for k, v := range vars {
all[k] = v
}
}
return all, nil
}