Merge network namespaces work into master (#6046)
commit 34deb47a8b
@@ -458,7 +458,8 @@ type AllocatedTaskResources struct {
 }

 type AllocatedSharedResources struct {
    DiskMB int64
+   Networks []*NetworkResource
 }

 type AllocatedCpuResources struct {
@@ -20,7 +20,7 @@ func TestCompose(t *testing.T) {
            {
                CIDR:          "0.0.0.0/0",
                MBits:         intToPtr(100),
-               ReservedPorts: []Port{{"", 80}, {"", 443}},
+               ReservedPorts: []Port{{"", 80, 0}, {"", 443, 0}},
            },
        },
    })
@@ -111,8 +111,8 @@ func TestCompose(t *testing.T) {
                CIDR:  "0.0.0.0/0",
                MBits: intToPtr(100),
                ReservedPorts: []Port{
-                   {"", 80},
-                   {"", 443},
+                   {"", 80, 0},
+                   {"", 443, 0},
                },
            },
        },
@@ -86,17 +86,20 @@ func (r *Resources) Merge(other *Resources) {
 type Port struct {
    Label string
    Value int `mapstructure:"static"`
+   To    int `mapstructure:"to"`
 }

 // NetworkResource is used to describe required network
 // resources of a given task.
 type NetworkResource struct {
+   Mode          string
    Device        string
    CIDR          string
    IP            string
    MBits         *int
    ReservedPorts []Port
    DynamicPorts  []Port
+   Services      []*Service
 }

 func (n *NetworkResource) Canonicalize() {
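To make the new fields concrete, here is a minimal, hypothetical sketch (not part of the diff) of a group-level network using the new Mode field and the Port.To mapping; the struct and field names come from the types above, while the helper, label, and port values are invented for illustration.

package main

import "github.com/hashicorp/nomad/api"

func intToPtr(i int) *int { return &i }

func main() {
    // Illustrative only: a bridge-mode group network whose reserved port maps
    // host port 8080 to port 80 inside the shared network namespace.
    _ = &api.NetworkResource{
        Mode:  "bridge",
        MBits: intToPtr(100),
        ReservedPorts: []api.Port{
            {Label: "http", Value: 8080, To: 80},
        },
    }
}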
api/tasks.go (25 changed lines)
@@ -372,6 +372,7 @@ type Service struct {
    AddressMode  string `mapstructure:"address_mode"`
    Checks       []ServiceCheck
    CheckRestart *CheckRestart `mapstructure:"check_restart"`
+   Connect      *ConsulConnect
 }

 func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
@@ -392,6 +393,25 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
    }
 }

+type ConsulConnect struct {
+   SidecarService *ConsulSidecarService `mapstructure:"sidecar_service"`
+}
+
+type ConsulSidecarService struct {
+   Port  string
+   Proxy *ConsulProxy
+}
+
+type ConsulProxy struct {
+   Upstreams []*ConsulUpstream
+}
+
+type ConsulUpstream struct {
+   //FIXME Pointers?
+   DestinationName string `mapstructure:"destination_name"`
+   LocalBindPort   int    `mapstructure:"local_bind_port"`
+}
+
 // EphemeralDisk is an ephemeral disk object
 type EphemeralDisk struct {
    Sticky *bool
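A minimal, hypothetical sketch (not part of the diff) of how these structs nest when a service requests a Connect sidecar with one upstream; the types and field names come from the additions above, the destination name and port are invented.

package main

import "github.com/hashicorp/nomad/api"

func main() {
    // Illustrative only: a sidecar proxy with a single upstream bound locally
    // on port 5432 and pointing at the "db" destination service.
    _ = &api.ConsulConnect{
        SidecarService: &api.ConsulSidecarService{
            Proxy: &api.ConsulProxy{
                Upstreams: []*api.ConsulUpstream{
                    {DestinationName: "db", LocalBindPort: 5432},
                },
            },
        },
    }
}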
@@ -493,7 +513,9 @@ type TaskGroup struct {
    EphemeralDisk *EphemeralDisk
    Update        *UpdateStrategy
    Migrate       *MigrateStrategy
+   Networks      []*NetworkResource
    Meta          map[string]string
+   Services      []*Service
 }

 // NewTaskGroup creates a new TaskGroup.
@@ -604,6 +626,9 @@ func (g *TaskGroup) Canonicalize(job *Job) {
    for _, a := range g.Affinities {
        a.Canonicalize()
    }
+   for _, n := range g.Networks {
+       n.Canonicalize()
+   }
 }

 // Constrain is used to add a constraint to a task group.
@@ -269,7 +269,7 @@ func TestTask_Require(t *testing.T) {
        {
            CIDR:          "0.0.0.0/0",
            MBits:         intToPtr(100),
-           ReservedPorts: []Port{{"", 80}, {"", 443}},
+           ReservedPorts: []Port{{"", 80, 0}, {"", 443, 0}},
        },
    },
 }
@@ -238,7 +238,12 @@ func (t *Tracker) watchTaskEvents() {
        // Store the task states
        t.l.Lock()
        for task, state := range alloc.TaskStates {
-           t.taskHealth[task].state = state
+           //TODO(schmichael) for now skip unknown tasks as
+           //they're task group services which don't currently
+           //support checks anyway
+           if v, ok := t.taskHealth[task]; ok {
+               v.state = state
+           }
        }
        t.l.Unlock()
@@ -355,7 +360,12 @@ OUTER:
        // Store the task registrations
        t.l.Lock()
        for task, reg := range allocReg.Tasks {
-           t.taskHealth[task].taskRegistrations = reg
+           //TODO(schmichael) for now skip unknown tasks as
+           //they're task group services which don't currently
+           //support checks anyway
+           if v, ok := t.taskHealth[task]; ok {
+               v.taskRegistrations = reg
+           }
        }
        t.l.Unlock()
@@ -185,7 +185,9 @@ func NewAllocRunner(config *Config) (*allocRunner, error) {
    ar.allocDir = allocdir.NewAllocDir(ar.logger, filepath.Join(config.ClientConfig.AllocDir, alloc.ID))

    // Initialize the runner's hooks.
-   ar.initRunnerHooks()
+   if err := ar.initRunnerHooks(config.ClientConfig); err != nil {
+       return nil, err
+   }

    // Create the TaskRunners
    if err := ar.initTaskRunners(tg.Tasks); err != nil {
@@ -6,9 +6,28 @@ import (

    multierror "github.com/hashicorp/go-multierror"
    "github.com/hashicorp/nomad/client/allocrunner/interfaces"
+   clientconfig "github.com/hashicorp/nomad/client/config"
    "github.com/hashicorp/nomad/nomad/structs"
+   "github.com/hashicorp/nomad/plugins/drivers"
 )

+type networkIsolationSetter interface {
+   SetNetworkIsolation(*drivers.NetworkIsolationSpec)
+}
+
+// allocNetworkIsolationSetter is a shim to allow the alloc network hook to
+// set the alloc network isolation configuration without full access
+// to the alloc runner
+type allocNetworkIsolationSetter struct {
+   ar *allocRunner
+}
+
+func (a *allocNetworkIsolationSetter) SetNetworkIsolation(n *drivers.NetworkIsolationSpec) {
+   for _, tr := range a.ar.tasks {
+       tr.SetNetworkIsolation(n)
+   }
+}
+
 // allocHealthSetter is a shim to allow the alloc health watcher hook to set
 // and clear the alloc health without full access to the alloc runner state
 type allocHealthSetter struct {
@@ -76,12 +95,24 @@ func (a *allocHealthSetter) SetHealth(healthy, isDeploy bool, trackerTaskEvents
 }

 // initRunnerHooks initializes the runner's hooks.
-func (ar *allocRunner) initRunnerHooks() {
+func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error {
    hookLogger := ar.logger.Named("runner_hook")

    // create health setting shim
    hs := &allocHealthSetter{ar}

+   // create network isolation setting shim
+   ns := &allocNetworkIsolationSetter{ar: ar}
+
+   // build the network manager
+   nm, err := newNetworkManager(ar.Alloc(), ar.driverManager)
+   if err != nil {
+       return fmt.Errorf("failed to configure network manager: %v", err)
+   }
+
+   // create network configurator
+   nc := newNetworkConfigurator(ar.Alloc(), config)
+
    // Create the alloc directory hook. This is run first to ensure the
    // directory path exists for other hooks.
    ar.runnerHooks = []interfaces.RunnerHook{

@@ -89,7 +120,10 @@ func (ar *allocRunner) initRunnerHooks() {
        newUpstreamAllocsHook(hookLogger, ar.prevAllocWatcher),
        newDiskMigrationHook(hookLogger, ar.prevAllocMigrator, ar.allocDir),
        newAllocHealthWatcherHook(hookLogger, ar.Alloc(), hs, ar.Listener(), ar.consulClient),
+       newNetworkHook(hookLogger, ns, ar.Alloc(), nm, nc),
    }
+
+   return nil
 }

 // prerun is used to run the runner's prerun hooks.
client/allocrunner/network_hook.go (new file, 88 lines)
@@ -0,0 +1,88 @@
package allocrunner

import (
    "fmt"

    hclog "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/plugins/drivers"
)

// networkHook is an alloc lifecycle hook that manages the network namespace
// for an alloc
type networkHook struct {
    // setter is a callback to set the network isolation spec after the
    // network is created
    setter networkIsolationSetter

    // manager is used when creating the network namespace. This defaults to
    // bind mounting a network namespace descriptor under /var/run/netns but
    // can be created by a driver if necessary
    manager drivers.DriverNetworkManager

    // alloc should only be read from
    alloc *structs.Allocation

    // spec describes the network namespace and is synchronized by specLock
    spec *drivers.NetworkIsolationSpec

    // networkConfigurator configures the network interfaces, routes, etc once
    // the alloc network has been created
    networkConfigurator NetworkConfigurator

    logger hclog.Logger
}

func newNetworkHook(logger hclog.Logger, ns networkIsolationSetter,
    alloc *structs.Allocation, netManager drivers.DriverNetworkManager,
    netConfigurator NetworkConfigurator) *networkHook {
    return &networkHook{
        setter:              ns,
        alloc:               alloc,
        manager:             netManager,
        networkConfigurator: netConfigurator,
        logger:              logger,
    }
}

func (h *networkHook) Name() string {
    return "network"
}

func (h *networkHook) Prerun() error {
    tg := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup)
    if len(tg.Networks) == 0 || tg.Networks[0].Mode == "host" || tg.Networks[0].Mode == "" {
        return nil
    }

    if h.manager == nil || h.networkConfigurator == nil {
        h.logger.Trace("shared network namespaces are not supported on this platform, skipping network hook")
        return nil
    }

    spec, err := h.manager.CreateNetwork(h.alloc.ID)
    if err != nil {
        return fmt.Errorf("failed to create network for alloc: %v", err)
    }

    if spec != nil {
        h.spec = spec
        h.setter.SetNetworkIsolation(spec)
    }

    if err := h.networkConfigurator.Setup(h.alloc, spec); err != nil {
        return fmt.Errorf("failed to configure networking for alloc: %v", err)
    }
    return nil
}

func (h *networkHook) Postrun() error {
    if h.spec == nil {
        return nil
    }

    if err := h.networkConfigurator.Teardown(h.alloc, h.spec); err != nil {
        h.logger.Error("failed to cleanup network for allocation, resources may have leaked", "alloc", h.alloc.ID, "error", err)
    }
    return h.manager.DestroyNetwork(h.alloc.ID, h.spec)
}
client/allocrunner/network_hook_test.go (new file, 86 lines)
@@ -0,0 +1,86 @@
package allocrunner

import (
    "testing"

    "github.com/hashicorp/nomad/client/allocrunner/interfaces"
    "github.com/hashicorp/nomad/helper/testlog"
    "github.com/hashicorp/nomad/nomad/mock"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/plugins/drivers"
    "github.com/hashicorp/nomad/plugins/drivers/testutils"
    "github.com/stretchr/testify/require"
)

// statically assert network hook implements the expected interfaces
var _ interfaces.RunnerPrerunHook = (*networkHook)(nil)
var _ interfaces.RunnerPostrunHook = (*networkHook)(nil)

type mockNetworkIsolationSetter struct {
    t            *testing.T
    expectedSpec *drivers.NetworkIsolationSpec
    called       bool
}

func (m *mockNetworkIsolationSetter) SetNetworkIsolation(spec *drivers.NetworkIsolationSpec) {
    m.called = true
    require.Exactly(m.t, m.expectedSpec, spec)
}

// Test that the prerun and postrun hooks call the setter with the expected
// spec when the network mode is not host
func TestNetworkHook_Prerun_Postrun(t *testing.T) {
    alloc := mock.Alloc()
    alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{
        {
            Mode: "bridge",
        },
    }
    spec := &drivers.NetworkIsolationSpec{
        Mode:   drivers.NetIsolationModeGroup,
        Path:   "test",
        Labels: map[string]string{"abc": "123"},
    }

    destroyCalled := false
    nm := &testutils.MockDriver{
        MockNetworkManager: testutils.MockNetworkManager{
            CreateNetworkF: func(allocID string) (*drivers.NetworkIsolationSpec, error) {
                require.Equal(t, alloc.ID, allocID)
                return spec, nil
            },

            DestroyNetworkF: func(allocID string, netSpec *drivers.NetworkIsolationSpec) error {
                destroyCalled = true
                require.Equal(t, alloc.ID, allocID)
                require.Exactly(t, spec, netSpec)
                return nil
            },
        },
    }
    setter := &mockNetworkIsolationSetter{
        t:            t,
        expectedSpec: spec,
    }
    require := require.New(t)

    logger := testlog.HCLogger(t)
    hook := newNetworkHook(logger, setter, alloc, nm, &hostNetworkConfigurator{})
    require.NoError(hook.Prerun())
    require.True(setter.called)
    require.False(destroyCalled)
    require.NoError(hook.Postrun())
    require.True(destroyCalled)

    // reset and use host network mode
    setter.called = false
    destroyCalled = false
    alloc.Job.TaskGroups[0].Networks[0].Mode = "host"
    hook = newNetworkHook(logger, setter, alloc, nm, &hostNetworkConfigurator{})
    require.NoError(hook.Prerun())
    require.False(setter.called)
    require.False(destroyCalled)
    require.NoError(hook.Postrun())
    require.False(destroyCalled)
}
client/allocrunner/network_manager_linux.go (new file, 138 lines)
@@ -0,0 +1,138 @@
package allocrunner

import (
    "context"
    "fmt"
    "strings"

    clientconfig "github.com/hashicorp/nomad/client/config"
    "github.com/hashicorp/nomad/client/lib/nsutil"
    "github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/plugins/drivers"
)

func newNetworkManager(alloc *structs.Allocation, driverManager drivermanager.Manager) (nm drivers.DriverNetworkManager, err error) {
    // The defaultNetworkManager is used if a driver doesn't need to create the network
    nm = &defaultNetworkManager{}
    tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)

    // default netmode to host, this can be overridden by the task or task group
    tgNetMode := "host"
    if len(tg.Networks) > 0 && tg.Networks[0].Mode != "" {
        tgNetMode = tg.Networks[0].Mode
    }

    // networkInitiator tracks the task driver which needs to create the network
    // to check for multiple drivers needing to create the network
    var networkInitiator string

    // driverCaps tracks which drivers we've checked capabilities for so as not
    // to do extra work
    driverCaps := make(map[string]struct{})
    for _, task := range tg.Tasks {
        // the task's netmode defaults to the task group but can be overridden
        taskNetMode := tgNetMode
        if len(task.Resources.Networks) > 0 && task.Resources.Networks[0].Mode != "" {
            taskNetMode = task.Resources.Networks[0].Mode
        }

        // netmode host should always work to support backwards compat
        if taskNetMode == "host" {
            continue
        }

        // check to see if capabilities of this task's driver have already been checked
        if _, ok := driverCaps[task.Driver]; ok {
            continue
        }

        driver, err := driverManager.Dispense(task.Driver)
        if err != nil {
            return nil, fmt.Errorf("failed to dispense driver %s: %v", task.Driver, err)
        }

        caps, err := driver.Capabilities()
        if err != nil {
            return nil, fmt.Errorf("failed to retrieve capabilities for driver %s: %v",
                task.Driver, err)
        }

        // check that the driver supports the requested network isolation mode
        netIsolationMode := netModeToIsolationMode(taskNetMode)
        if !caps.HasNetIsolationMode(netIsolationMode) {
            return nil, fmt.Errorf("task %s does not support %q networking mode", task.Name, taskNetMode)
        }

        // check if the driver needs to create the network and if a different
        // driver has already claimed it needs to initiate the network
        if caps.MustInitiateNetwork {
            if networkInitiator != "" {
                return nil, fmt.Errorf("tasks %s and %s want to initiate networking but only one driver can do so", networkInitiator, task.Name)
            }
            netManager, ok := driver.(drivers.DriverNetworkManager)
            if !ok {
                return nil, fmt.Errorf("driver %s does not implement network management RPCs", task.Driver)
            }

            nm = netManager
            networkInitiator = task.Name
        }

        // mark this driver's capabilities as checked
        driverCaps[task.Driver] = struct{}{}
    }

    return nm, nil
}

// defaultNetworkManager creates a network namespace for the alloc
type defaultNetworkManager struct{}

func (*defaultNetworkManager) CreateNetwork(allocID string) (*drivers.NetworkIsolationSpec, error) {
    netns, err := nsutil.NewNS(allocID)
    if err != nil {
        return nil, err
    }

    spec := &drivers.NetworkIsolationSpec{
        Mode:   drivers.NetIsolationModeGroup,
        Path:   netns.Path(),
        Labels: make(map[string]string),
    }

    return spec, nil
}

func (*defaultNetworkManager) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSpec) error {
    return nsutil.UnmountNS(spec.Path)
}

func netModeToIsolationMode(netMode string) drivers.NetIsolationMode {
    switch strings.ToLower(netMode) {
    case "host":
        return drivers.NetIsolationModeHost
    case "bridge", "none":
        return drivers.NetIsolationModeGroup
    case "driver":
        return drivers.NetIsolationModeTask
    default:
        return drivers.NetIsolationModeHost
    }
}

func newNetworkConfigurator(alloc *structs.Allocation, config *clientconfig.Config) NetworkConfigurator {
    tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)

    // Check if network stanza is given
    if len(tg.Networks) == 0 {
        return &hostNetworkConfigurator{}
    }

    switch strings.ToLower(tg.Networks[0].Mode) {
    case "bridge":
        return newBridgeNetworkConfigurator(context.Background(), config.BridgeNetworkName, config.BridgeNetworkAllocSubnet, config.CNIPath)
    default:
        return &hostNetworkConfigurator{}
    }
}
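As a quick reference for the switch above, a sketch (the variable name is invented; assume it lives in the same allocrunner package) of the mode-to-isolation mapping that newNetworkManager relies on:

package allocrunner

import "github.com/hashicorp/nomad/plugins/drivers"

// Illustrative only: the mapping implemented by netModeToIsolationMode.
// Any unrecognized mode falls back to host isolation.
var exampleModeMapping = map[string]drivers.NetIsolationMode{
    "host":   drivers.NetIsolationModeHost,
    "bridge": drivers.NetIsolationModeGroup,
    "none":   drivers.NetIsolationModeGroup,
    "driver": drivers.NetIsolationModeTask,
}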
client/allocrunner/network_manager_linux_test.go (new file, 190 lines)
@@ -0,0 +1,190 @@
package allocrunner

import (
    "testing"

    "github.com/hashicorp/nomad/client/pluginmanager"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/plugins/drivers"
    "github.com/hashicorp/nomad/plugins/drivers/testutils"
    "github.com/stretchr/testify/require"
)

var mockDrivers = map[string]drivers.DriverPlugin{
    "hostonly": &testutils.MockDriver{
        CapabilitiesF: func() (*drivers.Capabilities, error) {
            return &drivers.Capabilities{
                NetIsolationModes: []drivers.NetIsolationMode{drivers.NetIsolationModeHost},
            }, nil
        },
    },
    "group1": &testutils.MockDriver{
        CapabilitiesF: func() (*drivers.Capabilities, error) {
            return &drivers.Capabilities{
                NetIsolationModes: []drivers.NetIsolationMode{
                    drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup},
            }, nil
        },
    },
    "group2": &testutils.MockDriver{
        CapabilitiesF: func() (*drivers.Capabilities, error) {
            return &drivers.Capabilities{
                NetIsolationModes: []drivers.NetIsolationMode{
                    drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup},
            }, nil
        },
    },
    "mustinit1": &testutils.MockDriver{
        CapabilitiesF: func() (*drivers.Capabilities, error) {
            return &drivers.Capabilities{
                NetIsolationModes: []drivers.NetIsolationMode{
                    drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup},
                MustInitiateNetwork: true,
            }, nil
        },
    },
    "mustinit2": &testutils.MockDriver{
        CapabilitiesF: func() (*drivers.Capabilities, error) {
            return &drivers.Capabilities{
                NetIsolationModes: []drivers.NetIsolationMode{
                    drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup},
                MustInitiateNetwork: true,
            }, nil
        },
    },
}

type mockDriverManager struct {
    pluginmanager.MockPluginManager
}

func (m *mockDriverManager) Dispense(driver string) (drivers.DriverPlugin, error) {
    return mockDrivers[driver], nil
}

func TestNewNetworkManager(t *testing.T) {
    for _, tc := range []struct {
        name        string
        alloc       *structs.Allocation
        err         bool
        mustInit    bool
        errContains string
    }{
        {
            name: "defaults/backwards compat",
            alloc: &structs.Allocation{
                TaskGroup: "group",
                Job: &structs.Job{
                    TaskGroups: []*structs.TaskGroup{
                        {
                            Name:     "group",
                            Networks: []*structs.NetworkResource{},
                            Tasks: []*structs.Task{
                                {
                                    Name:      "task1",
                                    Driver:    "group1",
                                    Resources: &structs.Resources{},
                                },
                                {
                                    Name:      "task2",
                                    Driver:    "group2",
                                    Resources: &structs.Resources{},
                                },
                                {
                                    Name:      "task3",
                                    Driver:    "mustinit1",
                                    Resources: &structs.Resources{},
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "driver /w must init network",
            alloc: &structs.Allocation{
                TaskGroup: "group",
                Job: &structs.Job{
                    TaskGroups: []*structs.TaskGroup{
                        {
                            Name: "group",
                            Networks: []*structs.NetworkResource{
                                {
                                    Mode: "bridge",
                                },
                            },
                            Tasks: []*structs.Task{
                                {
                                    Name:      "task1",
                                    Driver:    "group1",
                                    Resources: &structs.Resources{},
                                },
                                {
                                    Name:      "task2",
                                    Driver:    "mustinit2",
                                    Resources: &structs.Resources{},
                                },
                            },
                        },
                    },
                },
            },
            mustInit: true,
        },
        {
            name: "multiple mustinit",
            alloc: &structs.Allocation{
                TaskGroup: "group",
                Job: &structs.Job{
                    TaskGroups: []*structs.TaskGroup{
                        {
                            Name: "group",
                            Networks: []*structs.NetworkResource{
                                {
                                    Mode: "bridge",
                                },
                            },
                            Tasks: []*structs.Task{
                                {
                                    Name:      "task1",
                                    Driver:    "mustinit1",
                                    Resources: &structs.Resources{},
                                },
                                {
                                    Name:      "task2",
                                    Driver:    "mustinit2",
                                    Resources: &structs.Resources{},
                                },
                            },
                        },
                    },
                },
            },
            err:         true,
            errContains: "want to initiate networking but only one",
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
            require := require.New(t)
            nm, err := newNetworkManager(tc.alloc, &mockDriverManager{})
            if tc.err {
                require.Error(err)
                require.Contains(err.Error(), tc.errContains)
            } else {
                require.NoError(err)
            }

            if tc.mustInit {
                _, ok := nm.(*testutils.MockDriver)
                require.True(ok)
            } else if tc.err {
                require.Nil(nm)
            } else {
                _, ok := nm.(*defaultNetworkManager)
                require.True(ok)
            }
        })
    }
}
client/allocrunner/network_manager_nonlinux.go (new file, 19 lines)
@@ -0,0 +1,19 @@
//+build !linux

package allocrunner

import (
    clientconfig "github.com/hashicorp/nomad/client/config"
    "github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/plugins/drivers"
)

// TODO: Support windows shared networking
func newNetworkManager(alloc *structs.Allocation, driverManager drivermanager.Manager) (nm drivers.DriverNetworkManager, err error) {
    return nil, nil
}

func newNetworkConfigurator(alloc *structs.Allocation, config *clientconfig.Config) NetworkConfigurator {
    return &hostNetworkConfigurator{}
}
client/allocrunner/networking.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package allocrunner

import (
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/plugins/drivers"
)

// NetworkConfigurator sets up and tears down the interfaces, routes, firewall
// rules, etc for the configured networking mode of the allocation.
type NetworkConfigurator interface {
    Setup(*structs.Allocation, *drivers.NetworkIsolationSpec) error
    Teardown(*structs.Allocation, *drivers.NetworkIsolationSpec) error
}

// hostNetworkConfigurator is a noop implementation of a NetworkConfigurator for
// when the alloc joins a client host's network namespace and thus does not
// require further configuration
type hostNetworkConfigurator struct{}

func (h *hostNetworkConfigurator) Setup(*structs.Allocation, *drivers.NetworkIsolationSpec) error {
    return nil
}
func (h *hostNetworkConfigurator) Teardown(*structs.Allocation, *drivers.NetworkIsolationSpec) error {
    return nil
}
client/allocrunner/networking_bridge_linux.go (new file, 172 lines)
@@ -0,0 +1,172 @@
package allocrunner

import (
    "context"
    "fmt"
    "os"
    "path/filepath"

    "github.com/containernetworking/cni/libcni"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/plugins/drivers"
)

const (
    // envCNIPath is the environment variable name to use to derive the CNI path
    // when it is not explicitly set by the client
    envCNIPath = "CNI_PATH"

    // defaultCNIPath is the CNI path to use when it is not set by the client
    // and is not set by environment variable
    defaultCNIPath = "/opt/cni/bin"

    // defaultNomadBridgeName is the name of the bridge to use when not set by
    // the client
    defaultNomadBridgeName = "nomad"

    // bridgeNetworkContainerIfName is the name that is set for the interface
    // created inside of the alloc network which is connected to the bridge
    bridgeNetworkContainerIfName = "eth0"

    // defaultNomadAllocSubnet is the subnet to use for host local ip address
    // allocation when not specified by the client
    defaultNomadAllocSubnet = "172.26.66.0/23"
)

// bridgeNetworkConfigurator is a NetworkConfigurator which adds the alloc to a
// shared bridge, configures masquerading for egress traffic and port mapping
// for ingress
type bridgeNetworkConfigurator struct {
    ctx         context.Context
    cniConfig   *libcni.CNIConfig
    allocSubnet string
    bridgeName  string
}

func newBridgeNetworkConfigurator(ctx context.Context, bridgeName, ipRange, cniPath string) *bridgeNetworkConfigurator {
    b := &bridgeNetworkConfigurator{
        ctx:         ctx,
        bridgeName:  bridgeName,
        allocSubnet: ipRange,
    }
    if cniPath == "" {
        if cniPath = os.Getenv(envCNIPath); cniPath == "" {
            cniPath = defaultCNIPath
        }
    }
    b.cniConfig = libcni.NewCNIConfig(filepath.SplitList(cniPath), nil)

    if b.bridgeName == "" {
        b.bridgeName = defaultNomadBridgeName
    }

    if b.allocSubnet == "" {
        b.allocSubnet = defaultNomadAllocSubnet
    }

    return b
}

// Setup calls the CNI plugins with the add action
func (b *bridgeNetworkConfigurator) Setup(alloc *structs.Allocation, spec *drivers.NetworkIsolationSpec) error {
    netconf, err := b.buildNomadNetConfig()
    if err != nil {
        return err
    }

    result, err := b.cniConfig.AddNetworkList(b.ctx, netconf, b.runtimeConf(alloc, spec))
    if result != nil {
        result.Print()
    }

    return err
}

// Teardown calls the CNI plugins with the delete action
func (b *bridgeNetworkConfigurator) Teardown(alloc *structs.Allocation, spec *drivers.NetworkIsolationSpec) error {
    netconf, err := b.buildNomadNetConfig()
    if err != nil {
        return err
    }

    err = b.cniConfig.DelNetworkList(b.ctx, netconf, b.runtimeConf(alloc, spec))
    return err
}

// getPortMapping builds a list of portMapping structs that are used as the
// portmapping capability arguments for the portmap CNI plugin
func getPortMapping(alloc *structs.Allocation) []*portMapping {
    ports := []*portMapping{}
    for _, network := range alloc.AllocatedResources.Shared.Networks {
        for _, port := range append(network.DynamicPorts, network.ReservedPorts...) {
            for _, proto := range []string{"tcp", "udp"} {
                ports = append(ports, &portMapping{
                    Host:      port.Value,
                    Container: port.To,
                    Proto:     proto,
                })
            }
        }
    }
    return ports
}

// portMapping is the json representation of the portmapping capability arguments
// for the portmap CNI plugin
type portMapping struct {
    Host      int    `json:"hostPort"`
    Container int    `json:"containerPort"`
    Proto     string `json:"protocol"`
}

// runtimeConf builds the configuration needed by CNI to locate the target netns
func (b *bridgeNetworkConfigurator) runtimeConf(alloc *structs.Allocation, spec *drivers.NetworkIsolationSpec) *libcni.RuntimeConf {
    return &libcni.RuntimeConf{
        ContainerID: fmt.Sprintf("nomad-%s", alloc.ID[:8]),
        NetNS:       spec.Path,
        IfName:      bridgeNetworkContainerIfName,
        CapabilityArgs: map[string]interface{}{
            "portMappings": getPortMapping(alloc),
        },
    }
}

// buildNomadNetConfig generates the CNI network configuration for the bridge
// networking mode
func (b *bridgeNetworkConfigurator) buildNomadNetConfig() (*libcni.NetworkConfigList, error) {
    rendered := fmt.Sprintf(nomadCNIConfigTemplate, b.bridgeName, b.allocSubnet)
    return libcni.ConfListFromBytes([]byte(rendered))
}

const nomadCNIConfigTemplate = `{
    "cniVersion": "0.4.0",
    "name": "nomad",
    "plugins": [
        {
            "type": "bridge",
            "bridge": "%s",
            "isDefaultGateway": true,
            "ipMasq": true,
            "ipam": {
                "type": "host-local",
                "ranges": [
                    [
                        {
                            "subnet": "%s"
                        }
                    ]
                ]
            }
        },
        {
            "type": "firewall"
        },
        {
            "type": "portmap",
            "capabilities": {"portMappings": true}
        }
    ]
}
`
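For reference, buildNomadNetConfig simply substitutes the bridge name and alloc subnet into the template above; the sketch below (function name invented, same package assumed) shows what an unconfigured client ends up rendering, i.e. a "nomad" bridge with host-local IPAM over 172.26.66.0/23 followed by the firewall and portmap plugins.

package allocrunner

import (
    "fmt"

    "github.com/containernetworking/cni/libcni"
)

// exampleDefaultConfList is illustrative only: it renders the template with
// the default bridge name and alloc subnet, which is exactly what
// buildNomadNetConfig does when the client configures nothing.
func exampleDefaultConfList() (*libcni.NetworkConfigList, error) {
    rendered := fmt.Sprintf(nomadCNIConfigTemplate, defaultNomadBridgeName, defaultNomadAllocSubnet)
    return libcni.ConfListFromBytes([]byte(rendered))
}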
@@ -202,6 +202,9 @@ type TaskRunner struct {
    // fails and the Run method should wait until serversContactedCh is
    // closed.
    waitOnServers bool
+
+   networkIsolationLock sync.Mutex
+   networkIsolationSpec *drivers.NetworkIsolationSpec
 }

 type Config struct {
@@ -895,6 +898,8 @@ func (tr *TaskRunner) buildTaskConfig() *drivers.TaskConfig {
    invocationid := uuid.Generate()[:8]
    taskResources := tr.taskResources
    env := tr.envBuilder.Build()
+   tr.networkIsolationLock.Lock()
+   defer tr.networkIsolationLock.Unlock()

    return &drivers.TaskConfig{
        ID: fmt.Sprintf("%s/%s/%s", alloc.ID, task.Name, invocationid),
@@ -909,15 +914,16 @@ func (tr *TaskRunner) buildTaskConfig() *drivers.TaskConfig {
                PercentTicks: float64(taskResources.Cpu.CpuShares) / float64(tr.clientConfig.Node.NodeResources.Cpu.CpuShares),
            },
        },
        Devices:    tr.hookResources.getDevices(),
        Mounts:     tr.hookResources.getMounts(),
        Env:        env.Map(),
        DeviceEnv:  env.DeviceEnv(),
        User:       task.User,
        AllocDir:   tr.taskDir.AllocDir,
        StdoutPath: tr.logmonHookConfig.stdoutFifo,
        StderrPath: tr.logmonHookConfig.stderrFifo,
        AllocID:    tr.allocID,
+       NetworkIsolation: tr.networkIsolationSpec,
    }
 }
@@ -1181,6 +1187,14 @@ func (tr *TaskRunner) Update(update *structs.Allocation) {
    }
 }

+// SetNetworkIsolation is called by the PreRun allocation hook after configuring
+// the network isolation for the allocation
+func (tr *TaskRunner) SetNetworkIsolation(n *drivers.NetworkIsolationSpec) {
+   tr.networkIsolationLock.Lock()
+   tr.networkIsolationSpec = n
+   tr.networkIsolationLock.Unlock()
+}
+
 // triggerUpdate if there isn't already an update pending. Should be called
 // instead of calling updateHooks directly to serialize runs of update hooks.
 // TaskRunner state should be updated prior to triggering update hooks.
@@ -354,6 +354,18 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic
    c.configCopy = c.config.Copy()
    c.configLock.Unlock()

+   // Auto download CNI binaries and configure CNI_PATH if requested.
+   if c.config.AutoFetchCNI {
+       if cniPath := FetchCNIPlugins(c.logger, c.config.AutoFetchCNIURL, c.config.AutoFetchCNIDir); cniPath != "" {
+           if c.config.CNIPath == "" {
+               c.config.CNIPath = cniPath
+           } else {
+               c.config.CNIPath = c.config.CNIPath + ":" + cniPath
+           }
+           c.logger.Debug("using new CNI Path", "cni_path", c.config.CNIPath)
+       }
+   }
+
    fingerprintManager := NewFingerprintManager(
        c.configCopy.PluginSingletonLoader, c.GetConfig, c.configCopy.Node,
        c.shutdownCh, c.updateNodeFromFingerprint, c.logger)
@@ -556,6 +568,35 @@ func (c *Client) init() error {
    }

    c.logger.Info("using alloc directory", "alloc_dir", c.config.AllocDir)
+
+   // Ensure the cnibin dir exists if we have one
+   if c.config.AutoFetchCNIDir != "" {
+       if err := os.MkdirAll(c.config.AutoFetchCNIDir, 0755); err != nil {
+           return fmt.Errorf("failed to create directory for AutoFetchCNIDir: %s", err)
+       }
+   } else {
+       // Otherwise make a temp directory to use.
+       p, err := ioutil.TempDir("", "NomadClient")
+       if err != nil {
+           return fmt.Errorf("failed creating temporary directory for the AutoFetchCNIDir: %v", err)
+       }
+
+       p, err = filepath.EvalSymlinks(p)
+       if err != nil {
+           return fmt.Errorf("failed to find temporary directory for the AutoFetchCNIDir: %v", err)
+       }
+
+       // Change the permissions to have the execute bit
+       if err := os.Chmod(p, 0755); err != nil {
+           return fmt.Errorf("failed to change directory permissions for the AutoFetchCNIdir: %v", err)
+       }
+
+       c.config.AutoFetchCNIDir = p
+   }
+
+   if c.config.AutoFetchCNI {
+       c.logger.Info("using cni directory for plugin downloads", "cni_dir", c.config.AutoFetchCNIDir)
+   }
    return nil
 }
client/cni_autofetch.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package client

import (
    "fmt"
    "path/filepath"
    "runtime"

    getter "github.com/hashicorp/go-getter"
    hclog "github.com/hashicorp/go-hclog"
)

const (
    nomadCNIBinDir = "cnibin"
)

var (
    // checksums are copied from https://github.com/containernetworking/plugins/releases
    defaultCNIGetterChecksums = map[string]string{
        "linux-amd64":   "sha256:e9bfc78acd3ae71be77eb8f3e890cc9078a33cc3797703b8ff2fc3077a232252",
        "linux-arm":     "sha256:ae6ddbd87c05a79aceb92e1c8c32d11e302f6fc55045f87f6a3ea7e0268b2fda",
        "linux-arm64":   "sha256:acde854e3def3c776c532ae521c19d8784534918cc56449ff16945a2909bff6d",
        "windows-amd64": "sha256:a8a24e9cf93f4db92321afca3fe53bd3ccdf2b7117c403c55a5bac162d8d79cc",
    }
    defaultCNIPluginVersion = "0.8.1"
    defaultCNIGetterSrc     = fmt.Sprintf("https://github.com/containernetworking/plugins/releases/download/v%s/cni-plugins-%s-%s-v%s.tgz?checksum=%s",
        defaultCNIPluginVersion, runtime.GOOS, runtime.GOARCH, defaultCNIPluginVersion,
        defaultCNIGetterChecksums[runtime.GOOS+"-"+runtime.GOARCH])
)

// FetchCNIPlugins downloads the standard set of CNI plugins to the client's
// data directory and returns the path to be used when setting up the CNI_PATH
// environment variable. If an error occurs during download, it is logged and
// an empty path is returned
func FetchCNIPlugins(logger hclog.Logger, src string, dataDir string) string {
    if src == "" {
        src = defaultCNIGetterSrc
    }

    logger.Info("downloading CNI plugins", "url", src)
    dst := filepath.Join(dataDir, nomadCNIBinDir)
    if err := getter.Get(dst, src); err != nil {
        logger.Warn("failed to fetch CNI plugins", "url", src, "error", err)
        return ""
    }

    return dst
}
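A hedged sketch of what the defaults above resolve to (the wrapper function is invented; the expanded URL follows from the Sprintf and checksum table, not from observed output): on a linux/amd64 client with no URL override, FetchCNIPlugins fetches the v0.8.1 community release tarball, verified against its published sha256, and extracts it into <dataDir>/cnibin.

package client

import hclog "github.com/hashicorp/go-hclog"

// exampleFetch is illustrative only. With an empty src the default source is
// used; on linux/amd64 that expands to roughly
//   https://github.com/containernetworking/plugins/releases/download/v0.8.1/cni-plugins-linux-amd64-v0.8.1.tgz?checksum=sha256:e9bfc78...
// and the returned path points at <dataDir>/cnibin.
func exampleFetch(logger hclog.Logger, dataDir string) string {
    return FetchCNIPlugins(logger, "", dataDir)
}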
@@ -221,6 +221,31 @@ type Config struct {

    // StateDBFactory is used to override stateDB implementations,
    StateDBFactory state.NewStateDBFunc
+
+   // CNIPath is the path used to search for CNI plugins. Multiple paths can
+   // be specified, colon delimited
+   CNIPath string
+
+   // BridgeNetworkName is the name to use for the bridge created in bridge
+   // networking mode. This defaults to 'nomad' if not set
+   BridgeNetworkName string
+
+   // BridgeNetworkAllocSubnet is the IP subnet to use for address allocation
+   // for allocations in bridge networking mode. Subnet must be in CIDR
+   // notation
+   BridgeNetworkAllocSubnet string
+
+   // AutoFetchCNI is a toggle to enable auto downloading of the CNI standard
+   // plugins managed by the CNI team. This defaults to false
+   AutoFetchCNI bool
+
+   // AutoFetchCNIURL is the go-getter URL to use when auto downloading CNI
+   // plugins
+   AutoFetchCNIURL string
+
+   // AutoFetchCNIDir is the destination dir to use when auto downloading CNI plugins.
+   // This directory will be appended to the CNIPath so it is searched last
+   AutoFetchCNIDir string
 }

 func (c *Config) Copy() *Config {
@@ -255,6 +280,7 @@ func DefaultConfig() *Config {
        DisableRemoteExec:          false,
        BackwardsCompatibleMetrics: false,
        RPCHoldTimeout:             5 * time.Second,
+       AutoFetchCNI:               false,
    }
 }
client/lib/nsutil/netns_linux.go (new file, 154 lines)
@@ -0,0 +1,154 @@
// Copyright 2018 CNI authors
// Copyright 2019 HashiCorp
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The functions in this file are derived from:
// https://github.com/containernetworking/plugins/blob/0950a3607bf5e8a57c6a655c7e573e6aab0dc650/pkg/testutils/netns_linux.go

package nsutil

import (
    "fmt"
    "os"
    "path"
    "runtime"
    "strings"
    "sync"

    "github.com/containernetworking/plugins/pkg/ns"
    "golang.org/x/sys/unix"
)

// NetNSRunDir is the directory in which new network namespaces will be bind mounted
const NetNSRunDir = "/var/run/netns"

// NewNS creates a new persistent (bind-mounted) network namespace and returns
// an object representing that namespace, without switching to it.
func NewNS(nsName string) (ns.NetNS, error) {

    // Create the directory for mounting network namespaces
    // This needs to be a shared mountpoint in case it is mounted in to
    // other namespaces (containers)
    err := os.MkdirAll(NetNSRunDir, 0755)
    if err != nil {
        return nil, err
    }

    // Remount the namespace directory shared. This will fail if it is not
    // already a mountpoint, so bind-mount it on to itself to "upgrade" it
    // to a mountpoint.
    err = unix.Mount("", NetNSRunDir, "none", unix.MS_SHARED|unix.MS_REC, "")
    if err != nil {
        if err != unix.EINVAL {
            return nil, fmt.Errorf("mount --make-rshared %s failed: %q", NetNSRunDir, err)
        }

        // Recursively remount /var/run/netns on itself. The recursive flag is
        // so that any existing netns bindmounts are carried over.
        err = unix.Mount(NetNSRunDir, NetNSRunDir, "none", unix.MS_BIND|unix.MS_REC, "")
        if err != nil {
            return nil, fmt.Errorf("mount --rbind %s %s failed: %q", NetNSRunDir, NetNSRunDir, err)
        }

        // Now we can make it shared
        err = unix.Mount("", NetNSRunDir, "none", unix.MS_SHARED|unix.MS_REC, "")
        if err != nil {
            return nil, fmt.Errorf("mount --make-rshared %s failed: %q", NetNSRunDir, err)
        }

    }

    // create an empty file at the mount point
    nsPath := path.Join(NetNSRunDir, nsName)
    mountPointFd, err := os.Create(nsPath)
    if err != nil {
        return nil, err
    }
    mountPointFd.Close()

    // Ensure the mount point is cleaned up on errors; if the namespace
    // was successfully mounted this will have no effect because the file
    // is in-use
    defer os.RemoveAll(nsPath)

    var wg sync.WaitGroup
    wg.Add(1)

    // do namespace work in a dedicated goroutine, so that we can safely
    // Lock/Unlock OSThread without upsetting the lock/unlock state of
    // the caller of this function
    go (func() {
        defer wg.Done()
        runtime.LockOSThread()
        // Don't unlock. By not unlocking, golang will kill the OS thread when the
        // goroutine is done (for go1.10+)

        var origNS ns.NetNS
        origNS, err = ns.GetNS(getCurrentThreadNetNSPath())
        if err != nil {
            err = fmt.Errorf("failed to get the current netns: %v", err)
            return
        }
        defer origNS.Close()

        // create a new netns on the current thread
        err = unix.Unshare(unix.CLONE_NEWNET)
        if err != nil {
            err = fmt.Errorf("error from unshare: %v", err)
            return
        }

        // Put this thread back to the orig ns, since it might get reused (pre go1.10)
        defer origNS.Set()

        // bind mount the netns from the current thread (from /proc) onto the
        // mount point. This causes the namespace to persist, even when there
        // are no threads in the ns.
        err = unix.Mount(getCurrentThreadNetNSPath(), nsPath, "none", unix.MS_BIND, "")
        if err != nil {
            err = fmt.Errorf("failed to bind mount ns at %s: %v", nsPath, err)
        }
    })()
    wg.Wait()

    if err != nil {
        return nil, fmt.Errorf("failed to create namespace: %v", err)
    }

    return ns.GetNS(nsPath)
}

// UnmountNS unmounts the NS held by the netns object
func UnmountNS(nsPath string) error {
    // Only unmount if it's been bind-mounted (don't touch namespaces in /proc...)
    if strings.HasPrefix(nsPath, NetNSRunDir) {
        if err := unix.Unmount(nsPath, 0); err != nil {
            return fmt.Errorf("failed to unmount NS: at %s: %v", nsPath, err)
        }

        if err := os.Remove(nsPath); err != nil {
            return fmt.Errorf("failed to remove ns path %s: %v", nsPath, err)
        }
    }

    return nil
}

// getCurrentThreadNetNSPath copied from pkg/ns
func getCurrentThreadNetNSPath() string {
    // /proc/self/ns/net returns the namespace of the main thread, not
    // of whatever thread this goroutine is running on. Make sure we
    // use the thread's net namespace since the thread is switching around
    return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
}
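A minimal sketch of the lifecycle these helpers provide, mirroring what defaultNetworkManager does (assumptions: Linux, sufficient privileges to mount, and an invented namespace name):

package main

import (
    "fmt"

    "github.com/hashicorp/nomad/client/lib/nsutil"
)

func main() {
    // Illustrative only: create a persistent, bind-mounted netns named after
    // an alloc ID, then remove the bind mount once it is no longer needed.
    netns, err := nsutil.NewNS("example-alloc-id")
    if err != nil {
        fmt.Println("create failed:", err)
        return
    }
    fmt.Println("namespace mounted at", netns.Path())

    if err := nsutil.UnmountNS(netns.Path()); err != nil {
        fmt.Println("cleanup failed:", err)
    }
}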
@@ -437,6 +437,7 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) {
    if agentConfig.DataDir != "" {
        conf.StateDir = filepath.Join(agentConfig.DataDir, "client")
        conf.AllocDir = filepath.Join(agentConfig.DataDir, "alloc")
+       conf.AutoFetchCNIDir = filepath.Join(agentConfig.DataDir, "cnibin")
    }
    if agentConfig.Client.StateDir != "" {
        conf.StateDir = agentConfig.Client.StateDir
@@ -538,6 +539,13 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) {
    conf.ACLTokenTTL = agentConfig.ACL.TokenTTL
    conf.ACLPolicyTTL = agentConfig.ACL.PolicyTTL

+   // Setup networking configuration
+   conf.CNIPath = agentConfig.Client.CNIPath
+   conf.BridgeNetworkName = agentConfig.Client.BridgeNetworkName
+   conf.BridgeNetworkAllocSubnet = agentConfig.Client.BridgeNetworkSubnet
+   conf.AutoFetchCNI = agentConfig.Client.AutoFetchCNIPlugins
+   conf.AutoFetchCNIURL = agentConfig.Client.AutoFetchCNIPluginsURL
+
    return conf, nil
 }
@@ -247,6 +247,29 @@ type ClientConfig struct {

    // ExtraKeysHCL is used by hcl to surface unexpected keys
    ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"`
+
+   // CNIPath is the path to search for CNI plugins, multiple paths can be
+   // specified colon delimited
+   CNIPath string `hcl:"cni_path"`
+
+   // BridgeNetworkName is the name of the bridge to create when using the
+   // bridge network mode
+   BridgeNetworkName string `hcl:"bridge_network_name"`
+
+   // BridgeNetworkSubnet is the subnet to allocate IP addresses from when
+   // creating allocations with bridge networking mode. This range is local to
+   // the host
+   BridgeNetworkSubnet string `hcl:"bridge_network_subnet"`
+
+   // AutoFetchCNIPlugins toggles if the Nomad client should attempt to
+   // automatically download a standard set of CNI plugins, typically from
+   // the community repo https://github.com/containernetworking/plugins/releases
+   AutoFetchCNIPlugins bool `hcl:"auto_fetch_cni_plugins"`
+
+   // AutoFetchCNIPluginsURL sets the source URL to be used if automatically
+   // downloading CNI plugins. If not set will use a known working version from
+   // the community repo https://github.com/containernetworking/plugins/releases
+   AutoFetchCNIPluginsURL string `hcl:"auto_fetch_cni_plugins_url"`
 }

 // ACLConfig is configuration specific to the ACL system
@ -661,6 +684,7 @@ func DevConfig() *Config {
|
||||||
conf.Telemetry.PrometheusMetrics = true
|
conf.Telemetry.PrometheusMetrics = true
|
||||||
conf.Telemetry.PublishAllocationMetrics = true
|
conf.Telemetry.PublishAllocationMetrics = true
|
||||||
conf.Telemetry.PublishNodeMetrics = true
|
conf.Telemetry.PublishNodeMetrics = true
|
||||||
|
conf.Client.AutoFetchCNIPlugins = true
|
||||||
|
|
||||||
return conf
|
return conf
|
||||||
}
|
}
|
||||||
|
|
|
@@ -685,6 +685,8 @@ func ApiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
 	tg.Meta = taskGroup.Meta
 	tg.Constraints = ApiConstraintsToStructs(taskGroup.Constraints)
 	tg.Affinities = ApiAffinitiesToStructs(taskGroup.Affinities)
+	tg.Networks = ApiNetworkResourceToStructs(taskGroup.Networks)
+	tg.Services = ApiServicesToStructs(taskGroup.Services)

 	tg.RestartPolicy = &structs.RestartPolicy{
 		Attempts: *taskGroup.RestartPolicy.Attempts,
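Illustrative sketch of the new group-level conversion (values are made up; this would live alongside the converters in command/agent, and the helpers it calls are defined later in this diff):

	func exampleGroupNetworkConversion() {
		mbits := 100
		tg := &api.TaskGroup{
			Networks: []*api.NetworkResource{{
				Mode:         "bridge",
				MBits:        &mbits, // must be non-nil: the converter dereferences it
				DynamicPorts: []api.Port{{Label: "http", To: 8080}},
			}},
			Services: []*api.Service{{Name: "web", PortLabel: "http"}},
		}

		networks := ApiNetworkResourceToStructs(tg.Networks)
		services := ApiServicesToStructs(tg.Services)

		fmt.Println(networks[0].Mode, networks[0].DynamicPorts[0].To) // bridge 8080
		fmt.Println(services[0].Name)                                 // web
	}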
@@ -886,35 +888,8 @@ func ApiResourcesToStructs(in *api.Resources) *structs.Resources {
 		out.IOPS = *in.IOPS
 	}

-	if l := len(in.Networks); l != 0 {
-		out.Networks = make([]*structs.NetworkResource, l)
-		for i, nw := range in.Networks {
-			out.Networks[i] = &structs.NetworkResource{
-				CIDR:  nw.CIDR,
-				IP:    nw.IP,
-				MBits: *nw.MBits,
-			}
-
-			if l := len(nw.DynamicPorts); l != 0 {
-				out.Networks[i].DynamicPorts = make([]structs.Port, l)
-				for j, dp := range nw.DynamicPorts {
-					out.Networks[i].DynamicPorts[j] = structs.Port{
-						Label: dp.Label,
-						Value: dp.Value,
-					}
-				}
-			}
-
-			if l := len(nw.ReservedPorts); l != 0 {
-				out.Networks[i].ReservedPorts = make([]structs.Port, l)
-				for j, rp := range nw.ReservedPorts {
-					out.Networks[i].ReservedPorts[j] = structs.Port{
-						Label: rp.Label,
-						Value: rp.Value,
-					}
-				}
-			}
-		}
-	}
+	if len(in.Networks) != 0 {
+		out.Networks = ApiNetworkResourceToStructs(in.Networks)
+	}

 	if l := len(in.Devices); l != 0 {
@@ -932,6 +907,127 @@ func ApiResourcesToStructs(in *api.Resources) *structs.Resources {
 	return out
 }

+func ApiNetworkResourceToStructs(in []*api.NetworkResource) []*structs.NetworkResource {
+	var out []*structs.NetworkResource
+	if len(in) == 0 {
+		return out
+	}
+	out = make([]*structs.NetworkResource, len(in))
+	for i, nw := range in {
+		out[i] = &structs.NetworkResource{
+			Mode:  nw.Mode,
+			CIDR:  nw.CIDR,
+			IP:    nw.IP,
+			MBits: *nw.MBits,
+		}
+
+		if l := len(nw.DynamicPorts); l != 0 {
+			out[i].DynamicPorts = make([]structs.Port, l)
+			for j, dp := range nw.DynamicPorts {
+				out[i].DynamicPorts[j] = structs.Port{
+					Label: dp.Label,
+					Value: dp.Value,
+					To:    dp.To,
+				}
+			}
+		}
+
+		if l := len(nw.ReservedPorts); l != 0 {
+			out[i].ReservedPorts = make([]structs.Port, l)
+			for j, rp := range nw.ReservedPorts {
+				out[i].ReservedPorts[j] = structs.Port{
+					Label: rp.Label,
+					Value: rp.Value,
+					To:    rp.To,
+				}
+			}
+		}
+	}
+
+	return out
+}
+
+//TODO(schmichael) refactor and reuse in service parsing above
+func ApiServicesToStructs(in []*api.Service) []*structs.Service {
+	if len(in) == 0 {
+		return nil
+	}
+
+	out := make([]*structs.Service, len(in))
+	for i, s := range in {
+		out[i] = &structs.Service{
+			Name:        s.Name,
+			PortLabel:   s.PortLabel,
+			Tags:        s.Tags,
+			CanaryTags:  s.CanaryTags,
+			AddressMode: s.AddressMode,
+		}
+
+		if l := len(s.Checks); l != 0 {
+			out[i].Checks = make([]*structs.ServiceCheck, l)
+			for j, check := range s.Checks {
+				out[i].Checks[j] = &structs.ServiceCheck{
+					Name:          check.Name,
+					Type:          check.Type,
+					Command:       check.Command,
+					Args:          check.Args,
+					Path:          check.Path,
+					Protocol:      check.Protocol,
+					PortLabel:     check.PortLabel,
+					AddressMode:   check.AddressMode,
+					Interval:      check.Interval,
+					Timeout:       check.Timeout,
+					InitialStatus: check.InitialStatus,
+					TLSSkipVerify: check.TLSSkipVerify,
+					Header:        check.Header,
+					Method:        check.Method,
+					GRPCService:   check.GRPCService,
+					GRPCUseTLS:    check.GRPCUseTLS,
+				}
+				if check.CheckRestart != nil {
+					out[i].Checks[j].CheckRestart = &structs.CheckRestart{
+						Limit:          check.CheckRestart.Limit,
+						Grace:          *check.CheckRestart.Grace,
+						IgnoreWarnings: check.CheckRestart.IgnoreWarnings,
+					}
+				}
+			}
+		}
+
+		if s.Connect == nil {
+			continue
+		}
+
+		out[i].Connect = &structs.ConsulConnect{}
+
+		if s.Connect.SidecarService == nil {
+			continue
+		}
+
+		out[i].Connect.SidecarService = &structs.ConsulSidecarService{
+			Port: s.Connect.SidecarService.Port,
+		}
+
+		if s.Connect.SidecarService.Proxy == nil {
+			continue
+		}
+
+		out[i].Connect.SidecarService.Proxy = &structs.ConsulProxy{}
+
+		upstreams := make([]*structs.ConsulUpstream, len(s.Connect.SidecarService.Proxy.Upstreams))
+		for i, p := range s.Connect.SidecarService.Proxy.Upstreams {
+			upstreams[i] = &structs.ConsulUpstream{
+				DestinationName: p.DestinationName,
+				LocalBindPort:   p.LocalBindPort,
+			}
+		}
+
+		out[i].Connect.SidecarService.Proxy.Upstreams = upstreams
+	}
+
+	return out
+}
+
 func ApiConstraintsToStructs(in []*api.Constraint) []*structs.Constraint {
 	if in == nil {
 		return nil
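A short sketch of the new Consul Connect path through ApiServicesToStructs (service and upstream names are made up; the snippet assumes the same command/agent package context as the converter):

	apiSvcs := []*api.Service{{
		Name:      "count-dashboard",
		PortLabel: "9002",
		Connect: &api.ConsulConnect{
			SidecarService: &api.ConsulSidecarService{
				Proxy: &api.ConsulProxy{
					Upstreams: []*api.ConsulUpstream{{
						DestinationName: "count-api",
						LocalBindPort:   8080,
					}},
				},
			},
		},
	}}

	svcs := ApiServicesToStructs(apiSvcs)
	up := svcs[0].Connect.SidecarService.Proxy.Upstreams[0]
	fmt.Println(up.DestinationName, up.LocalBindPort) // count-api 8080

Note the early `continue` statements above: a service with Connect but no sidecar_service, or a sidecar without a proxy block, is converted as far as it goes and the rest is left nil.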
@@ -216,6 +216,12 @@ var (
 			hclspec.NewAttr("nvidia_runtime", "string", false),
 			hclspec.NewLiteral(`"nvidia"`),
 		),
+
+		// image to use when creating a network namespace parent container
+		"infra_image": hclspec.NewDefault(
+			hclspec.NewAttr("infra_image", "string", false),
+			hclspec.NewLiteral(`"gcr.io/google_containers/pause-amd64:3.0"`),
+		),
 	})

 	// taskConfigSpec is the hcl specification for the driver config section of
@@ -310,6 +316,12 @@ var (
 		SendSignals: true,
 		Exec:        true,
 		FSIsolation: drivers.FSIsolationImage,
+		NetIsolationModes: []drivers.NetIsolationMode{
+			drivers.NetIsolationModeHost,
+			drivers.NetIsolationModeGroup,
+			drivers.NetIsolationModeTask,
+		},
+		MustInitiateNetwork: true,
 	}
 )

@@ -485,6 +497,7 @@ type DriverConfig struct {
 	AllowPrivileged bool     `codec:"allow_privileged"`
 	AllowCaps       []string `codec:"allow_caps"`
 	GPURuntimeName  string   `codec:"nvidia_runtime"`
+	InfraImage      string   `codec:"infra_image"`
 }

 type AuthConfig struct {
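A hypothetical helper (not part of this change) showing how a caller is meant to read the capability fields added above: group networking is only delegated to the driver when it both supports the group isolation mode and asks to initiate the network itself.

	func driverCanCreateGroupNetwork(caps *drivers.Capabilities) bool {
		if !caps.MustInitiateNetwork {
			// the client's own network manager (CNI) handles the namespace
			return false
		}
		for _, m := range caps.NetIsolationModes {
			if m == drivers.NetIsolationModeGroup {
				return true
			}
		}
		return false
	}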
|
@ -65,6 +65,9 @@ type DockerImageClient interface {
|
||||||
// LogEventFn is a callback which allows Drivers to emit task events.
|
// LogEventFn is a callback which allows Drivers to emit task events.
|
||||||
type LogEventFn func(message string, annotations map[string]string)
|
type LogEventFn func(message string, annotations map[string]string)
|
||||||
|
|
||||||
|
// noopLogEventFn satisfies the LogEventFn type but noops when called
|
||||||
|
func noopLogEventFn(string, map[string]string) {}
|
||||||
|
|
||||||
// dockerCoordinatorConfig is used to configure the Docker coordinator.
|
// dockerCoordinatorConfig is used to configure the Docker coordinator.
|
||||||
type dockerCoordinatorConfig struct {
|
type dockerCoordinatorConfig struct {
|
||||||
// logger is the logger the coordinator should use
|
// logger is the logger the coordinator should use
|
||||||
|
|
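For illustration, any function with the matching signature satisfies LogEventFn; the no-op variant is used where a pull is not tied to a task (such as the infra image pull in CreateNetwork later in this diff):

	var emit LogEventFn = noopLogEventFn
	emit("docker image pulled", map[string]string{"image": "redis:5"}) // does nothing

	// a real emitter would forward to the task event stream instead
	emit = func(msg string, annotations map[string]string) {
		fmt.Println(msg, annotations)
	}
	emit("docker image pulled", map[string]string{"image": "redis:5"})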
@@ -266,7 +266,7 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive

 	startAttempts := 0
 CREATE:
-	container, err := d.createContainer(client, containerCfg, &driverConfig)
+	container, err := d.createContainer(client, containerCfg, driverConfig.Image)
 	if err != nil {
 		d.logger.Error("failed to create container", "error", err)
 		return nil, nil, nstructs.WrapRecoverable(fmt.Sprintf("failed to create container: %v", err), err)
@@ -368,7 +368,7 @@ type createContainerClient interface {
 // createContainer creates the container given the passed configuration. It
 // attempts to handle any transient Docker errors.
 func (d *Driver) createContainer(client createContainerClient, config docker.CreateContainerOptions,
-	driverConfig *TaskConfig) (*docker.Container, error) {
+	image string) (*docker.Container, error) {
 	// Create a container
 	attempted := 0
 CREATE:
@@ -378,7 +378,7 @@ CREATE:
 	}

 	d.logger.Debug("failed to create container", "container_name",
-		config.Name, "image_name", driverConfig.Image, "image_id", config.Config.Image,
+		config.Name, "image_name", image, "image_id", config.Config.Image,
 		"attempt", attempted+1, "error", createErr)

 	// Volume management tools like Portworx may not have detached a volume
@@ -869,11 +869,22 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T

 	hostConfig.ReadonlyRootfs = driverConfig.ReadonlyRootfs

+	// set the docker network mode
 	hostConfig.NetworkMode = driverConfig.NetworkMode
+
+	// if the driver config does not specify a network mode then try to use the
+	// shared alloc network
 	if hostConfig.NetworkMode == "" {
-		// docker default
-		logger.Debug("networking mode not specified; using default", "network_mode", defaultNetworkMode)
-		hostConfig.NetworkMode = defaultNetworkMode
+		if task.NetworkIsolation != nil && task.NetworkIsolation.Path != "" {
+			// find the previously created parent container to join networks with
+			netMode := fmt.Sprintf("container:%s", task.NetworkIsolation.Labels[dockerNetSpecLabelKey])
+			logger.Debug("configuring network mode for task group", "network_mode", netMode)
+			hostConfig.NetworkMode = netMode
+		} else {
+			// docker default
+			logger.Debug("networking mode not specified; using default", "network_mode", defaultNetworkMode)
+			hostConfig.NetworkMode = defaultNetworkMode
+		}
 	}

 	// Setup port mapping and exposed ports
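The mode selection above, pulled out as a standalone helper for clarity (hypothetical function, not part of the driver; it assumes the package's defaultNetworkMode constant and dockerNetSpecLabelKey label):

	func containerNetworkMode(userMode string, iso *drivers.NetworkIsolationSpec) string {
		if userMode != "" {
			// an explicit network_mode in the task config always wins
			return userMode
		}
		if iso != nil && iso.Path != "" {
			// join the pause container created for the task group
			return fmt.Sprintf("container:%s", iso.Labels[dockerNetSpecLabelKey])
		}
		// fall back to docker's default
		return defaultNetworkMode
	}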
@@ -2230,7 +2230,7 @@ func TestDockerDriver_VolumeError(t *testing.T) {
 	driver := dockerDriverHarness(t, nil)

 	// assert volume error is recoverable
-	_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg)
+	_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg.Image)
 	require.True(t, structs.IsRecoverable(err))
 }
|
90
drivers/docker/network.go
Normal file
90
drivers/docker/network.go
Normal file
|
@ -0,0 +1,90 @@
|
||||||
|
package docker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
docker "github.com/fsouza/go-dockerclient"
|
||||||
|
"github.com/hashicorp/nomad/plugins/drivers"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dockerNetSpecLabelKey is used when creating a parent container for
|
||||||
|
// shared networking. It is a label whos value identifies the container ID of
|
||||||
|
// the parent container so tasks can configure their network mode accordingly
|
||||||
|
const dockerNetSpecLabelKey = "docker_sandbox_container_id"
|
||||||
|
|
||||||
|
func (d *Driver) CreateNetwork(allocID string) (*drivers.NetworkIsolationSpec, error) {
|
||||||
|
// Initialize docker API clients
|
||||||
|
client, _, err := d.dockerClients()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to connect to docker daemon: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
repo, _ := parseDockerImage(d.config.InfraImage)
|
||||||
|
authOptions, err := firstValidAuth(repo, []authBackend{
|
||||||
|
authFromDockerConfig(d.config.Auth.Config),
|
||||||
|
authFromHelper(d.config.Auth.Helper),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
d.logger.Debug("auth failed for infra container image pull", "image", d.config.InfraImage, "error", err)
|
||||||
|
}
|
||||||
|
_, err = d.coordinator.PullImage(d.config.InfraImage, authOptions, allocID, noopLogEventFn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := d.createSandboxContainerConfig(allocID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
container, err := d.createContainer(client, *config, d.config.InfraImage)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := d.startContainer(container); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := client.InspectContainer(container.ID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &drivers.NetworkIsolationSpec{
|
||||||
|
Mode: drivers.NetIsolationModeGroup,
|
||||||
|
Path: c.NetworkSettings.SandboxKey,
|
||||||
|
Labels: map[string]string{
|
||||||
|
dockerNetSpecLabelKey: c.ID,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Driver) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSpec) error {
|
||||||
|
client, _, err := d.dockerClients()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to connect to docker daemon: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return client.RemoveContainer(docker.RemoveContainerOptions{
|
||||||
|
Force: true,
|
||||||
|
ID: spec.Labels[dockerNetSpecLabelKey],
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// createSandboxContainerConfig creates a docker container configuration which
|
||||||
|
// starts a container with an empty network namespace
|
||||||
|
func (d *Driver) createSandboxContainerConfig(allocID string) (*docker.CreateContainerOptions, error) {
|
||||||
|
|
||||||
|
return &docker.CreateContainerOptions{
|
||||||
|
Name: fmt.Sprintf("nomad_init_%s", allocID),
|
||||||
|
Config: &docker.Config{
|
||||||
|
Image: d.config.InfraImage,
|
||||||
|
},
|
||||||
|
HostConfig: &docker.HostConfig{
|
||||||
|
// set the network mode to none which creates a network namespace with
|
||||||
|
// only a loopback interface
|
||||||
|
NetworkMode: "none",
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
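A hypothetical caller-side helper (not in this PR) sketching the lifecycle of the sandbox above: create the pause container once per allocation, hand its namespace to the group's tasks, and always tear it down afterwards.

	func withGroupNetwork(d *Driver, allocID string, run func(spec *drivers.NetworkIsolationSpec) error) error {
		spec, err := d.CreateNetwork(allocID)
		if err != nil {
			return err
		}
		// Remove the pause container even if the tasks fail.
		defer d.DestroyNetwork(allocID, spec)

		// spec.Path is the netns sandbox key; spec.Labels[dockerNetSpecLabelKey]
		// holds the parent container ID that task containers join via
		// NetworkMode "container:<id>" (see createContainerConfig above).
		return run(spec)
	}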
@@ -342,17 +342,18 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
 	}

 	execCmd := &executor.ExecCommand{
 		Cmd:            driverConfig.Command,
 		Args:           driverConfig.Args,
 		Env:            cfg.EnvList(),
 		User:           user,
 		ResourceLimits: true,
 		Resources:      cfg.Resources,
 		TaskDir:        cfg.TaskDir().Dir,
 		StdoutPath:     cfg.StdoutPath,
 		StderrPath:     cfg.StderrPath,
 		Mounts:         cfg.Mounts,
 		Devices:        cfg.Devices,
+		NetworkIsolation: cfg.NetworkIsolation,
 	}

 	ps, err := exec.Launch(execCmd)

@@ -678,3 +678,11 @@ func (d *Driver) GetHandle(taskID string) *taskHandle {
 func (d *Driver) Shutdown() {
 	d.signalShutdown()
 }
+
+func (d *Driver) CreateNetwork(allocID string) (*drivers.NetworkIsolationSpec, error) {
+	return nil, nil
+}
+
+func (d *Driver) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSpec) error {
+	return nil
+}
@@ -95,6 +95,10 @@ var (
 		SendSignals: true,
 		Exec:        true,
 		FSIsolation: drivers.FSIsolationNone,
+		NetIsolationModes: []drivers.NetIsolationMode{
+			drivers.NetIsolationModeHost,
+			drivers.NetIsolationModeGroup,
+		},
 	}
 )

@@ -342,6 +346,7 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
 		TaskDir:    cfg.TaskDir().Dir,
 		StdoutPath: cfg.StdoutPath,
 		StderrPath: cfg.StderrPath,
+		NetworkIsolation: cfg.NetworkIsolation,
 	}

 	ps, err := exec.Launch(execCmd)
@@ -43,6 +43,7 @@ func (c *grpcExecutorClient) Launch(cmd *ExecCommand) (*ProcessState, error) {
 		BasicProcessCgroup: cmd.BasicProcessCgroup,
 		Mounts:             drivers.MountsToProto(cmd.Mounts),
 		Devices:            drivers.DevicesToProto(cmd.Devices),
+		NetworkIsolation:   drivers.NetworkIsolationSpecToProto(cmd.NetworkIsolation),
 	}
 	resp, err := c.client.Launch(ctx, req)
 	if err != nil {
@@ -126,6 +126,8 @@ type ExecCommand struct {

 	// Devices are the device nodes to be created in isolation environment
 	Devices []*drivers.DeviceConfig

+	NetworkIsolation *drivers.NetworkIsolationSpec
 }

 // SetWriters sets the writer for the process stdout and stderr. This should
@@ -307,8 +309,7 @@ func (e *UniversalExecutor) Launch(command *ExecCommand) (*ProcessState, error)
 	e.childCmd.Env = e.commandCfg.Env

 	// Start the process
-	e.logger.Debug("launching", "command", command.Cmd, "args", strings.Join(command.Args, " "))
-	if err := e.childCmd.Start(); err != nil {
+	if err = e.start(command); err != nil {
 		return nil, fmt.Errorf("failed to start command path=%q --- args=%q: %v", path, e.childCmd.Args, err)
 	}

@@ -19,3 +19,7 @@ func (e *UniversalExecutor) runAs(_ string) error { return nil }
 func (e *UniversalExecutor) getAllPids() (map[int]*nomadPid, error) {
 	return getAllPidsByScanning()
 }
+
+func (e *UniversalExecutor) start(command *ExecCommand) error {
+	return e.childCmd.Start()
+}
@@ -15,6 +15,7 @@ import (
 	"time"

 	"github.com/armon/circbuf"
+	"github.com/containernetworking/plugins/pkg/ns"
 	"github.com/hashicorp/consul-template/signals"
 	hclog "github.com/hashicorp/go-hclog"
 	multierror "github.com/hashicorp/go-multierror"
@@ -183,9 +184,23 @@ func (l *LibcontainerExecutor) Launch(command *ExecCommand) (*ProcessState, erro
 	l.systemCpuStats = stats.NewCpuStats()

 	// Starts the task
-	if err := container.Run(process); err != nil {
-		container.Destroy()
-		return nil, err
+	if command.NetworkIsolation != nil && command.NetworkIsolation.Path != "" {
+		netns, err := ns.GetNS(command.NetworkIsolation.Path)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get ns %s: %v", command.NetworkIsolation.Path, err)
+		}
+
+		// Start the container in the network namespace
+		err = netns.Do(func(ns.NetNS) error { return container.Run(process) })
+		if err != nil {
+			container.Destroy()
+			return nil, err
+		}
+	} else {
+		if err := container.Run(process); err != nil {
+			container.Destroy()
+			return nil, err
+		}
 	}

 	pid, err := process.Pid()
@@ -7,6 +7,7 @@ import (
 	"strconv"
 	"syscall"

+	"github.com/containernetworking/plugins/pkg/ns"
 	multierror "github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
@@ -171,3 +172,20 @@ func DestroyCgroup(groups *lconfigs.Cgroup, executorPid int) error {
 	}
 	return mErrs.ErrorOrNil()
 }
+
+func (e *UniversalExecutor) start(command *ExecCommand) error {
+	if command.NetworkIsolation != nil && command.NetworkIsolation.Path != "" {
+		// Get a handle to the target network namespace
+		netns, err := ns.GetNS(command.NetworkIsolation.Path)
+		if err != nil {
+			return err
+		}
+
+		// Start the container in the network namespace
+		return netns.Do(func(ns.NetNS) error {
+			return e.childCmd.Start()
+		})
+	}
+
+	return e.childCmd.Start()
+}
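Both executors above use the same pattern from the CNI plugins library. A standalone sketch (illustrative names, not code from the change): ns.Do locks the calling OS thread, switches it into the namespace for the duration of the closure, and restores the original namespace afterwards; the child process inherits the thread's network namespace when it is started inside the closure.

package main

import (
	"fmt"
	"os/exec"

	"github.com/containernetworking/plugins/pkg/ns"
)

// startInNetNS starts cmd inside the network namespace mounted at nsPath.
func startInNetNS(nsPath string, cmd *exec.Cmd) error {
	netns, err := ns.GetNS(nsPath)
	if err != nil {
		return fmt.Errorf("failed to get ns %s: %v", nsPath, err)
	}
	defer netns.Close()

	return netns.Do(func(ns.NetNS) error {
		// The forked process inherits this thread's network namespace.
		return cmd.Start()
	})
}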
@ -26,28 +26,29 @@ var _ = math.Inf
|
||||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
type LaunchRequest struct {
|
type LaunchRequest struct {
|
||||||
Cmd string `protobuf:"bytes,1,opt,name=cmd,proto3" json:"cmd,omitempty"`
|
Cmd string `protobuf:"bytes,1,opt,name=cmd,proto3" json:"cmd,omitempty"`
|
||||||
Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
|
Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
|
||||||
Resources *proto1.Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"`
|
Resources *proto1.Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"`
|
||||||
StdoutPath string `protobuf:"bytes,4,opt,name=stdout_path,json=stdoutPath,proto3" json:"stdout_path,omitempty"`
|
StdoutPath string `protobuf:"bytes,4,opt,name=stdout_path,json=stdoutPath,proto3" json:"stdout_path,omitempty"`
|
||||||
StderrPath string `protobuf:"bytes,5,opt,name=stderr_path,json=stderrPath,proto3" json:"stderr_path,omitempty"`
|
StderrPath string `protobuf:"bytes,5,opt,name=stderr_path,json=stderrPath,proto3" json:"stderr_path,omitempty"`
|
||||||
Env []string `protobuf:"bytes,6,rep,name=env,proto3" json:"env,omitempty"`
|
Env []string `protobuf:"bytes,6,rep,name=env,proto3" json:"env,omitempty"`
|
||||||
User string `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"`
|
User string `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"`
|
||||||
TaskDir string `protobuf:"bytes,8,opt,name=task_dir,json=taskDir,proto3" json:"task_dir,omitempty"`
|
TaskDir string `protobuf:"bytes,8,opt,name=task_dir,json=taskDir,proto3" json:"task_dir,omitempty"`
|
||||||
ResourceLimits bool `protobuf:"varint,9,opt,name=resource_limits,json=resourceLimits,proto3" json:"resource_limits,omitempty"`
|
ResourceLimits bool `protobuf:"varint,9,opt,name=resource_limits,json=resourceLimits,proto3" json:"resource_limits,omitempty"`
|
||||||
BasicProcessCgroup bool `protobuf:"varint,10,opt,name=basic_process_cgroup,json=basicProcessCgroup,proto3" json:"basic_process_cgroup,omitempty"`
|
BasicProcessCgroup bool `protobuf:"varint,10,opt,name=basic_process_cgroup,json=basicProcessCgroup,proto3" json:"basic_process_cgroup,omitempty"`
|
||||||
Mounts []*proto1.Mount `protobuf:"bytes,11,rep,name=mounts,proto3" json:"mounts,omitempty"`
|
Mounts []*proto1.Mount `protobuf:"bytes,11,rep,name=mounts,proto3" json:"mounts,omitempty"`
|
||||||
Devices []*proto1.Device `protobuf:"bytes,12,rep,name=devices,proto3" json:"devices,omitempty"`
|
Devices []*proto1.Device `protobuf:"bytes,12,rep,name=devices,proto3" json:"devices,omitempty"`
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
NetworkIsolation *proto1.NetworkIsolationSpec `protobuf:"bytes,13,opt,name=network_isolation,json=networkIsolation,proto3" json:"network_isolation,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
XXX_sizecache int32 `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LaunchRequest) Reset() { *m = LaunchRequest{} }
|
func (m *LaunchRequest) Reset() { *m = LaunchRequest{} }
|
||||||
func (m *LaunchRequest) String() string { return proto.CompactTextString(m) }
|
func (m *LaunchRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*LaunchRequest) ProtoMessage() {}
|
func (*LaunchRequest) ProtoMessage() {}
|
||||||
func (*LaunchRequest) Descriptor() ([]byte, []int) {
|
func (*LaunchRequest) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{0}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{0}
|
||||||
}
|
}
|
||||||
func (m *LaunchRequest) XXX_Unmarshal(b []byte) error {
|
func (m *LaunchRequest) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_LaunchRequest.Unmarshal(m, b)
|
return xxx_messageInfo_LaunchRequest.Unmarshal(m, b)
|
||||||
|
@ -151,6 +152,13 @@ func (m *LaunchRequest) GetDevices() []*proto1.Device {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *LaunchRequest) GetNetworkIsolation() *proto1.NetworkIsolationSpec {
|
||||||
|
if m != nil {
|
||||||
|
return m.NetworkIsolation
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
type LaunchResponse struct {
|
type LaunchResponse struct {
|
||||||
Process *ProcessState `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"`
|
Process *ProcessState `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"`
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
@ -162,7 +170,7 @@ func (m *LaunchResponse) Reset() { *m = LaunchResponse{} }
|
||||||
func (m *LaunchResponse) String() string { return proto.CompactTextString(m) }
|
func (m *LaunchResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*LaunchResponse) ProtoMessage() {}
|
func (*LaunchResponse) ProtoMessage() {}
|
||||||
func (*LaunchResponse) Descriptor() ([]byte, []int) {
|
func (*LaunchResponse) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{1}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{1}
|
||||||
}
|
}
|
||||||
func (m *LaunchResponse) XXX_Unmarshal(b []byte) error {
|
func (m *LaunchResponse) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_LaunchResponse.Unmarshal(m, b)
|
return xxx_messageInfo_LaunchResponse.Unmarshal(m, b)
|
||||||
|
@ -199,7 +207,7 @@ func (m *WaitRequest) Reset() { *m = WaitRequest{} }
|
||||||
func (m *WaitRequest) String() string { return proto.CompactTextString(m) }
|
func (m *WaitRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*WaitRequest) ProtoMessage() {}
|
func (*WaitRequest) ProtoMessage() {}
|
||||||
func (*WaitRequest) Descriptor() ([]byte, []int) {
|
func (*WaitRequest) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{2}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{2}
|
||||||
}
|
}
|
||||||
func (m *WaitRequest) XXX_Unmarshal(b []byte) error {
|
func (m *WaitRequest) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_WaitRequest.Unmarshal(m, b)
|
return xxx_messageInfo_WaitRequest.Unmarshal(m, b)
|
||||||
|
@ -230,7 +238,7 @@ func (m *WaitResponse) Reset() { *m = WaitResponse{} }
|
||||||
func (m *WaitResponse) String() string { return proto.CompactTextString(m) }
|
func (m *WaitResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*WaitResponse) ProtoMessage() {}
|
func (*WaitResponse) ProtoMessage() {}
|
||||||
func (*WaitResponse) Descriptor() ([]byte, []int) {
|
func (*WaitResponse) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{3}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{3}
|
||||||
}
|
}
|
||||||
func (m *WaitResponse) XXX_Unmarshal(b []byte) error {
|
func (m *WaitResponse) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_WaitResponse.Unmarshal(m, b)
|
return xxx_messageInfo_WaitResponse.Unmarshal(m, b)
|
||||||
|
@ -269,7 +277,7 @@ func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} }
|
||||||
func (m *ShutdownRequest) String() string { return proto.CompactTextString(m) }
|
func (m *ShutdownRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*ShutdownRequest) ProtoMessage() {}
|
func (*ShutdownRequest) ProtoMessage() {}
|
||||||
func (*ShutdownRequest) Descriptor() ([]byte, []int) {
|
func (*ShutdownRequest) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{4}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{4}
|
||||||
}
|
}
|
||||||
func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error {
|
func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_ShutdownRequest.Unmarshal(m, b)
|
return xxx_messageInfo_ShutdownRequest.Unmarshal(m, b)
|
||||||
|
@ -313,7 +321,7 @@ func (m *ShutdownResponse) Reset() { *m = ShutdownResponse{} }
|
||||||
func (m *ShutdownResponse) String() string { return proto.CompactTextString(m) }
|
func (m *ShutdownResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*ShutdownResponse) ProtoMessage() {}
|
func (*ShutdownResponse) ProtoMessage() {}
|
||||||
func (*ShutdownResponse) Descriptor() ([]byte, []int) {
|
func (*ShutdownResponse) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{5}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{5}
|
||||||
}
|
}
|
||||||
func (m *ShutdownResponse) XXX_Unmarshal(b []byte) error {
|
func (m *ShutdownResponse) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_ShutdownResponse.Unmarshal(m, b)
|
return xxx_messageInfo_ShutdownResponse.Unmarshal(m, b)
|
||||||
|
@ -344,7 +352,7 @@ func (m *UpdateResourcesRequest) Reset() { *m = UpdateResourcesRequest{}
|
||||||
func (m *UpdateResourcesRequest) String() string { return proto.CompactTextString(m) }
|
func (m *UpdateResourcesRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*UpdateResourcesRequest) ProtoMessage() {}
|
func (*UpdateResourcesRequest) ProtoMessage() {}
|
||||||
func (*UpdateResourcesRequest) Descriptor() ([]byte, []int) {
|
func (*UpdateResourcesRequest) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{6}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{6}
|
||||||
}
|
}
|
||||||
func (m *UpdateResourcesRequest) XXX_Unmarshal(b []byte) error {
|
func (m *UpdateResourcesRequest) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_UpdateResourcesRequest.Unmarshal(m, b)
|
return xxx_messageInfo_UpdateResourcesRequest.Unmarshal(m, b)
|
||||||
|
@ -381,7 +389,7 @@ func (m *UpdateResourcesResponse) Reset() { *m = UpdateResourcesResponse
|
||||||
func (m *UpdateResourcesResponse) String() string { return proto.CompactTextString(m) }
|
func (m *UpdateResourcesResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*UpdateResourcesResponse) ProtoMessage() {}
|
func (*UpdateResourcesResponse) ProtoMessage() {}
|
||||||
func (*UpdateResourcesResponse) Descriptor() ([]byte, []int) {
|
func (*UpdateResourcesResponse) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{7}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{7}
|
||||||
}
|
}
|
||||||
func (m *UpdateResourcesResponse) XXX_Unmarshal(b []byte) error {
|
func (m *UpdateResourcesResponse) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_UpdateResourcesResponse.Unmarshal(m, b)
|
return xxx_messageInfo_UpdateResourcesResponse.Unmarshal(m, b)
|
||||||
|
@ -411,7 +419,7 @@ func (m *VersionRequest) Reset() { *m = VersionRequest{} }
|
||||||
func (m *VersionRequest) String() string { return proto.CompactTextString(m) }
|
func (m *VersionRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*VersionRequest) ProtoMessage() {}
|
func (*VersionRequest) ProtoMessage() {}
|
||||||
func (*VersionRequest) Descriptor() ([]byte, []int) {
|
func (*VersionRequest) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{8}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{8}
|
||||||
}
|
}
|
||||||
func (m *VersionRequest) XXX_Unmarshal(b []byte) error {
|
func (m *VersionRequest) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_VersionRequest.Unmarshal(m, b)
|
return xxx_messageInfo_VersionRequest.Unmarshal(m, b)
|
||||||
|
@ -442,7 +450,7 @@ func (m *VersionResponse) Reset() { *m = VersionResponse{} }
|
||||||
func (m *VersionResponse) String() string { return proto.CompactTextString(m) }
|
func (m *VersionResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*VersionResponse) ProtoMessage() {}
|
func (*VersionResponse) ProtoMessage() {}
|
||||||
func (*VersionResponse) Descriptor() ([]byte, []int) {
|
func (*VersionResponse) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{9}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{9}
|
||||||
}
|
}
|
||||||
func (m *VersionResponse) XXX_Unmarshal(b []byte) error {
|
func (m *VersionResponse) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_VersionResponse.Unmarshal(m, b)
|
return xxx_messageInfo_VersionResponse.Unmarshal(m, b)
|
||||||
|
@ -480,7 +488,7 @@ func (m *StatsRequest) Reset() { *m = StatsRequest{} }
|
||||||
func (m *StatsRequest) String() string { return proto.CompactTextString(m) }
|
func (m *StatsRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*StatsRequest) ProtoMessage() {}
|
func (*StatsRequest) ProtoMessage() {}
|
||||||
func (*StatsRequest) Descriptor() ([]byte, []int) {
|
func (*StatsRequest) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{10}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{10}
|
||||||
}
|
}
|
||||||
func (m *StatsRequest) XXX_Unmarshal(b []byte) error {
|
func (m *StatsRequest) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_StatsRequest.Unmarshal(m, b)
|
return xxx_messageInfo_StatsRequest.Unmarshal(m, b)
|
||||||
|
@ -518,7 +526,7 @@ func (m *StatsResponse) Reset() { *m = StatsResponse{} }
|
||||||
func (m *StatsResponse) String() string { return proto.CompactTextString(m) }
|
func (m *StatsResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*StatsResponse) ProtoMessage() {}
|
func (*StatsResponse) ProtoMessage() {}
|
||||||
func (*StatsResponse) Descriptor() ([]byte, []int) {
|
func (*StatsResponse) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{11}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{11}
|
||||||
}
|
}
|
||||||
func (m *StatsResponse) XXX_Unmarshal(b []byte) error {
|
func (m *StatsResponse) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_StatsResponse.Unmarshal(m, b)
|
return xxx_messageInfo_StatsResponse.Unmarshal(m, b)
|
||||||
|
@ -556,7 +564,7 @@ func (m *SignalRequest) Reset() { *m = SignalRequest{} }
|
||||||
func (m *SignalRequest) String() string { return proto.CompactTextString(m) }
|
func (m *SignalRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*SignalRequest) ProtoMessage() {}
|
func (*SignalRequest) ProtoMessage() {}
|
||||||
func (*SignalRequest) Descriptor() ([]byte, []int) {
|
func (*SignalRequest) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{12}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{12}
|
||||||
}
|
}
|
||||||
func (m *SignalRequest) XXX_Unmarshal(b []byte) error {
|
func (m *SignalRequest) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_SignalRequest.Unmarshal(m, b)
|
return xxx_messageInfo_SignalRequest.Unmarshal(m, b)
|
||||||
|
@ -593,7 +601,7 @@ func (m *SignalResponse) Reset() { *m = SignalResponse{} }
|
||||||
func (m *SignalResponse) String() string { return proto.CompactTextString(m) }
|
func (m *SignalResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*SignalResponse) ProtoMessage() {}
|
func (*SignalResponse) ProtoMessage() {}
|
||||||
func (*SignalResponse) Descriptor() ([]byte, []int) {
|
func (*SignalResponse) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{13}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{13}
|
||||||
}
|
}
|
||||||
func (m *SignalResponse) XXX_Unmarshal(b []byte) error {
|
func (m *SignalResponse) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_SignalResponse.Unmarshal(m, b)
|
return xxx_messageInfo_SignalResponse.Unmarshal(m, b)
|
||||||
|
@ -626,7 +634,7 @@ func (m *ExecRequest) Reset() { *m = ExecRequest{} }
|
||||||
func (m *ExecRequest) String() string { return proto.CompactTextString(m) }
|
func (m *ExecRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*ExecRequest) ProtoMessage() {}
|
func (*ExecRequest) ProtoMessage() {}
|
||||||
func (*ExecRequest) Descriptor() ([]byte, []int) {
|
func (*ExecRequest) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{14}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{14}
|
||||||
}
|
}
|
||||||
func (m *ExecRequest) XXX_Unmarshal(b []byte) error {
|
func (m *ExecRequest) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_ExecRequest.Unmarshal(m, b)
|
return xxx_messageInfo_ExecRequest.Unmarshal(m, b)
|
||||||
|
@ -679,7 +687,7 @@ func (m *ExecResponse) Reset() { *m = ExecResponse{} }
|
||||||
func (m *ExecResponse) String() string { return proto.CompactTextString(m) }
|
func (m *ExecResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*ExecResponse) ProtoMessage() {}
|
func (*ExecResponse) ProtoMessage() {}
|
||||||
func (*ExecResponse) Descriptor() ([]byte, []int) {
|
func (*ExecResponse) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{15}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{15}
|
||||||
}
|
}
|
||||||
func (m *ExecResponse) XXX_Unmarshal(b []byte) error {
|
func (m *ExecResponse) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_ExecResponse.Unmarshal(m, b)
|
return xxx_messageInfo_ExecResponse.Unmarshal(m, b)
|
||||||
|
@ -727,7 +735,7 @@ func (m *ProcessState) Reset() { *m = ProcessState{} }
|
||||||
func (m *ProcessState) String() string { return proto.CompactTextString(m) }
|
func (m *ProcessState) String() string { return proto.CompactTextString(m) }
|
||||||
func (*ProcessState) ProtoMessage() {}
|
func (*ProcessState) ProtoMessage() {}
|
||||||
func (*ProcessState) Descriptor() ([]byte, []int) {
|
func (*ProcessState) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_executor_5ea6ca9df3b0f07e, []int{16}
|
return fileDescriptor_executor_43dc81e71868eb7b, []int{16}
|
||||||
}
|
}
|
||||||
func (m *ProcessState) XXX_Unmarshal(b []byte) error {
|
func (m *ProcessState) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_ProcessState.Unmarshal(m, b)
|
return xxx_messageInfo_ProcessState.Unmarshal(m, b)
|
||||||
|
@ -1192,67 +1200,69 @@ var _Executor_serviceDesc = grpc.ServiceDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterFile("drivers/shared/executor/proto/executor.proto", fileDescriptor_executor_5ea6ca9df3b0f07e)
|
proto.RegisterFile("drivers/shared/executor/proto/executor.proto", fileDescriptor_executor_43dc81e71868eb7b)
|
||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptor_executor_5ea6ca9df3b0f07e = []byte{
|
var fileDescriptor_executor_43dc81e71868eb7b = []byte{
|
||||||
// 919 bytes of a gzipped FileDescriptorProto
|
// 955 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x5f, 0x6f, 0xdc, 0x44,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x5b, 0x6f, 0x1b, 0x45,
|
||||||
0x10, 0xaf, 0xeb, 0xdc, 0xbf, 0xb9, 0xbb, 0x24, 0x5a, 0xa1, 0xe0, 0x9a, 0x87, 0x1e, 0x7e, 0xa0,
|
0x14, 0xee, 0xc6, 0xf1, 0xed, 0xd8, 0x4e, 0xcc, 0x08, 0x85, 0xad, 0x79, 0xa8, 0xd9, 0x07, 0x6a,
|
||||||
0x27, 0x28, 0xbe, 0x28, 0xfd, 0xc7, 0x0b, 0x14, 0x91, 0x14, 0x5e, 0x42, 0x15, 0x39, 0x85, 0x4a,
|
0x41, 0x59, 0x47, 0xe9, 0x0d, 0x09, 0x41, 0x11, 0x49, 0x41, 0x48, 0x21, 0x8a, 0xd6, 0x85, 0x4a,
|
||||||
0x3c, 0x70, 0x6c, 0xec, 0xc5, 0x5e, 0xe5, 0xce, 0x6b, 0x76, 0xd7, 0x47, 0x90, 0x90, 0x78, 0xe2,
|
0x3c, 0x60, 0x26, 0xbb, 0xc3, 0xee, 0x28, 0xf6, 0xce, 0x32, 0x33, 0xeb, 0x06, 0x09, 0x89, 0x27,
|
||||||
0x1b, 0x80, 0xc4, 0xe7, 0xe4, 0x13, 0xa0, 0xfd, 0xe7, 0xdc, 0xa5, 0xa5, 0xf2, 0x15, 0xf1, 0x74,
|
0xfe, 0x01, 0x48, 0xfc, 0x38, 0x7e, 0x0c, 0x9a, 0xdb, 0xc6, 0x4e, 0x4b, 0xb5, 0x2e, 0xe2, 0xc9,
|
||||||
0x3b, 0xe3, 0xf9, 0xfd, 0x66, 0x66, 0x77, 0xe6, 0x77, 0x70, 0x3f, 0xe3, 0x74, 0x45, 0xb8, 0x98,
|
0x33, 0x67, 0xcf, 0xf7, 0x9d, 0xcb, 0x9c, 0xf3, 0x19, 0xee, 0x25, 0x9c, 0xae, 0x08, 0x17, 0x53,
|
||||||
0x89, 0x02, 0x73, 0x92, 0xcd, 0xc8, 0x15, 0x49, 0x6b, 0xc9, 0xf8, 0xac, 0xe2, 0x4c, 0xb2, 0xc6,
|
0x91, 0x61, 0x4e, 0x92, 0x29, 0xb9, 0x22, 0x71, 0x29, 0x19, 0x9f, 0x16, 0x9c, 0x49, 0x56, 0x5d,
|
||||||
0x8c, 0xb5, 0x89, 0x3e, 0x28, 0xb0, 0x28, 0x68, 0xca, 0x78, 0x15, 0x97, 0x6c, 0x89, 0xb3, 0xb8,
|
0x43, 0x7d, 0x45, 0xef, 0x67, 0x58, 0x64, 0x34, 0x66, 0xbc, 0x08, 0x73, 0xb6, 0xc4, 0x49, 0x58,
|
||||||
0x5a, 0xd4, 0x39, 0x2d, 0x45, 0xbc, 0x19, 0x17, 0xde, 0xcd, 0x19, 0xcb, 0x17, 0xc4, 0x90, 0x5c,
|
0x2c, 0xca, 0x94, 0xe6, 0x22, 0xdc, 0xf4, 0x1b, 0xdd, 0x49, 0x19, 0x4b, 0x17, 0xc4, 0x90, 0x5c,
|
||||||
0xd4, 0x3f, 0xce, 0x24, 0x5d, 0x12, 0x21, 0xf1, 0xb2, 0xb2, 0x01, 0x9f, 0xe6, 0x54, 0x16, 0xf5,
|
0x94, 0x3f, 0x4d, 0x25, 0x5d, 0x12, 0x21, 0xf1, 0xb2, 0xb0, 0x0e, 0x9f, 0xa6, 0x54, 0x66, 0xe5,
|
||||||
0x45, 0x9c, 0xb2, 0xe5, 0xac, 0xe1, 0x9c, 0x69, 0xce, 0x99, 0xe5, 0x9c, 0xb9, 0xca, 0x4c, 0x25,
|
0x45, 0x18, 0xb3, 0xe5, 0xb4, 0xe2, 0x9c, 0x6a, 0xce, 0xa9, 0xe5, 0x9c, 0xba, 0xcc, 0x4c, 0x26,
|
||||||
0xc6, 0x32, 0xf0, 0xe8, 0x6f, 0x1f, 0xc6, 0xa7, 0xb8, 0x2e, 0xd3, 0x22, 0x21, 0x3f, 0xd5, 0x44,
|
0xe6, 0x66, 0xe0, 0xc1, 0xdf, 0xbb, 0x30, 0x38, 0xc5, 0x65, 0x1e, 0x67, 0x11, 0xf9, 0xb9, 0x24,
|
||||||
0x48, 0xb4, 0x0f, 0x7e, 0xba, 0xcc, 0x02, 0x6f, 0xe2, 0x4d, 0x07, 0x89, 0x3a, 0x22, 0x04, 0x3b,
|
0x42, 0xa2, 0x21, 0x34, 0xe2, 0x65, 0xe2, 0x7b, 0x63, 0x6f, 0xd2, 0x8d, 0xd4, 0x11, 0x21, 0xd8,
|
||||||
0x98, 0xe7, 0x22, 0xb8, 0x3d, 0xf1, 0xa7, 0x83, 0x44, 0x9f, 0xd1, 0x73, 0x18, 0x70, 0x22, 0x58,
|
0xc5, 0x3c, 0x15, 0xfe, 0xce, 0xb8, 0x31, 0xe9, 0x46, 0xfa, 0x8c, 0xce, 0xa0, 0xcb, 0x89, 0x60,
|
||||||
0xcd, 0x53, 0x22, 0x02, 0x7f, 0xe2, 0x4d, 0x87, 0x47, 0x87, 0xf1, 0xbf, 0xf5, 0x64, 0xf3, 0x9b,
|
0x25, 0x8f, 0x89, 0xf0, 0x1b, 0x63, 0x6f, 0xd2, 0x3b, 0x3a, 0x0c, 0xff, 0xad, 0x26, 0x1b, 0xdf,
|
||||||
0x94, 0x71, 0xe2, 0x70, 0xc9, 0x35, 0x05, 0xba, 0x0b, 0x43, 0x21, 0x33, 0x56, 0xcb, 0x79, 0x85,
|
0x84, 0x0c, 0x23, 0x87, 0x8b, 0xae, 0x29, 0xd0, 0x1d, 0xe8, 0x09, 0x99, 0xb0, 0x52, 0xce, 0x0b,
|
||||||
0x65, 0x11, 0xec, 0xe8, 0xec, 0x60, 0x5c, 0x67, 0x58, 0x16, 0x36, 0x80, 0x70, 0x6e, 0x02, 0x3a,
|
0x2c, 0x33, 0x7f, 0x57, 0x47, 0x07, 0x63, 0x3a, 0xc7, 0x32, 0xb3, 0x0e, 0x84, 0x73, 0xe3, 0xd0,
|
||||||
0x4d, 0x00, 0xe1, 0x5c, 0x07, 0xec, 0x83, 0x4f, 0xca, 0x55, 0xd0, 0xd5, 0x45, 0xaa, 0xa3, 0xaa,
|
0xac, 0x1c, 0x08, 0xe7, 0xda, 0x61, 0x08, 0x0d, 0x92, 0xaf, 0xfc, 0x96, 0x4e, 0x52, 0x1d, 0x55,
|
||||||
0xbb, 0x16, 0x84, 0x07, 0x3d, 0x1d, 0xab, 0xcf, 0xe8, 0x0e, 0xf4, 0x25, 0x16, 0x97, 0xf3, 0x8c,
|
0xde, 0xa5, 0x20, 0xdc, 0x6f, 0x6b, 0x5f, 0x7d, 0x46, 0xb7, 0xa1, 0x23, 0xb1, 0xb8, 0x9c, 0x27,
|
||||||
0xf2, 0xa0, 0xaf, 0xfd, 0x3d, 0x65, 0x9f, 0x50, 0x8e, 0xee, 0xc1, 0x9e, 0xab, 0x67, 0xbe, 0xa0,
|
0x94, 0xfb, 0x1d, 0x6d, 0x6f, 0xab, 0xfb, 0x09, 0xe5, 0xe8, 0x2e, 0xec, 0xbb, 0x7c, 0xe6, 0x0b,
|
||||||
0x4b, 0x2a, 0x45, 0x30, 0x98, 0x78, 0xd3, 0x7e, 0xb2, 0xeb, 0xdc, 0xa7, 0xda, 0x8b, 0x0e, 0xe1,
|
0xba, 0xa4, 0x52, 0xf8, 0xdd, 0xb1, 0x37, 0xe9, 0x44, 0x7b, 0xce, 0x7c, 0xaa, 0xad, 0xe8, 0x10,
|
||||||
0x9d, 0x0b, 0x2c, 0x68, 0x3a, 0xaf, 0x38, 0x4b, 0x89, 0x10, 0xf3, 0x34, 0xe7, 0xac, 0xae, 0x02,
|
0xde, 0xbe, 0xc0, 0x82, 0xc6, 0xf3, 0x82, 0xb3, 0x98, 0x08, 0x31, 0x8f, 0x53, 0xce, 0xca, 0xc2,
|
||||||
0xd0, 0xd1, 0x48, 0x7f, 0x3b, 0x33, 0x9f, 0x8e, 0xf5, 0x17, 0x74, 0x02, 0xdd, 0x25, 0xab, 0x4b,
|
0x07, 0xed, 0x8d, 0xf4, 0xb7, 0x73, 0xf3, 0xe9, 0x58, 0x7f, 0x41, 0x27, 0xd0, 0x5a, 0xb2, 0x32,
|
||||||
0x29, 0x82, 0xe1, 0xc4, 0x9f, 0x0e, 0x8f, 0xee, 0xb7, 0xbc, 0xaa, 0xaf, 0x15, 0x28, 0xb1, 0x58,
|
0x97, 0xc2, 0xef, 0x8d, 0x1b, 0x93, 0xde, 0xd1, 0xbd, 0x9a, 0xad, 0xfa, 0x46, 0x81, 0x22, 0x8b,
|
||||||
0xf4, 0x15, 0xf4, 0x32, 0xb2, 0xa2, 0xea, 0xc6, 0x47, 0x9a, 0xe6, 0xe3, 0x96, 0x34, 0x27, 0x1a,
|
0x45, 0x5f, 0x41, 0x3b, 0x21, 0x2b, 0xaa, 0x3a, 0xde, 0xd7, 0x34, 0x1f, 0xd5, 0xa4, 0x39, 0xd1,
|
||||||
0x95, 0x38, 0x74, 0xf4, 0x03, 0xec, 0xba, 0x37, 0x17, 0x15, 0x2b, 0x05, 0x41, 0xcf, 0xa1, 0x67,
|
0xa8, 0xc8, 0xa1, 0x51, 0x06, 0x6f, 0xe5, 0x44, 0xbe, 0x60, 0xfc, 0x72, 0x4e, 0x05, 0x5b, 0x60,
|
||||||
0x9b, 0xd1, 0x0f, 0x3f, 0x3c, 0x7a, 0x18, 0xb7, 0x1b, 0xd0, 0xd8, 0x36, 0x7a, 0x2e, 0xb1, 0x24,
|
0x49, 0x59, 0xee, 0x0f, 0xf4, 0x23, 0x7e, 0x52, 0x93, 0xf2, 0xcc, 0xe0, 0xbf, 0x76, 0xf0, 0x59,
|
||||||
0x89, 0x23, 0x89, 0xc6, 0x30, 0x7c, 0x89, 0xa9, 0xb4, 0x33, 0x15, 0x7d, 0x0f, 0x23, 0x63, 0xfe,
|
0x41, 0xe2, 0x68, 0x98, 0xdf, 0xb0, 0x06, 0x3f, 0xc2, 0x9e, 0x9b, 0x2e, 0x51, 0xb0, 0x5c, 0x10,
|
||||||
0x4f, 0xe9, 0x4e, 0x61, 0xef, 0xbc, 0xa8, 0x65, 0xc6, 0x7e, 0x2e, 0xdd, 0x18, 0x1f, 0x40, 0x57,
|
0x74, 0x06, 0x6d, 0xdb, 0x36, 0x3d, 0x62, 0xbd, 0xa3, 0x07, 0x61, 0xbd, 0x55, 0x08, 0x6d, 0x4b,
|
||||||
0xd0, 0xbc, 0xc4, 0x0b, 0x3b, 0xc9, 0xd6, 0x42, 0xef, 0xc3, 0x28, 0xe7, 0x38, 0x25, 0xf3, 0x8a,
|
0x67, 0x12, 0x4b, 0x12, 0x39, 0x92, 0x60, 0x00, 0xbd, 0xe7, 0x98, 0x4a, 0x3b, 0xbd, 0xc1, 0x0f,
|
||||||
0x70, 0xca, 0xb2, 0xe0, 0xf6, 0xc4, 0x9b, 0xfa, 0xc9, 0x50, 0xfb, 0xce, 0xb4, 0x2b, 0x42, 0xb0,
|
0xd0, 0x37, 0xd7, 0xff, 0x29, 0xdc, 0x29, 0xec, 0xcf, 0xb2, 0x52, 0x26, 0xec, 0x45, 0xee, 0x16,
|
||||||
0x7f, 0xcd, 0x66, 0x2a, 0x8e, 0x0a, 0x38, 0xf8, 0xa6, 0xca, 0x54, 0xd2, 0x66, 0x7a, 0x6d, 0xa2,
|
0xe6, 0x00, 0x5a, 0x82, 0xa6, 0x39, 0x5e, 0xd8, 0x9d, 0xb1, 0x37, 0xf4, 0x1e, 0xf4, 0x53, 0x8e,
|
||||||
0x8d, 0x4d, 0xf0, 0xfe, 0xf3, 0x26, 0x44, 0x77, 0xe0, 0xdd, 0x57, 0x32, 0xd9, 0x22, 0xf6, 0x61,
|
0x63, 0x32, 0x2f, 0x08, 0xa7, 0x2c, 0xf1, 0x77, 0xc6, 0xde, 0xa4, 0x11, 0xf5, 0xb4, 0xed, 0x5c,
|
||||||
0xf7, 0x5b, 0xc2, 0x05, 0x65, 0xae, 0xcb, 0xe8, 0x23, 0xd8, 0x6b, 0x3c, 0xf6, 0x6e, 0x03, 0xe8,
|
0x9b, 0x02, 0x04, 0xc3, 0x6b, 0x36, 0x93, 0x71, 0x90, 0xc1, 0xc1, 0xb7, 0x45, 0xa2, 0x82, 0x56,
|
||||||
0xad, 0x8c, 0xcb, 0x76, 0xee, 0xcc, 0xe8, 0x43, 0x18, 0xa9, 0x7b, 0x6b, 0x2a, 0x0f, 0xa1, 0x4f,
|
0x7b, 0x62, 0x03, 0x6d, 0xec, 0x9c, 0xf7, 0x9f, 0x77, 0x2e, 0xb8, 0x0d, 0xef, 0xbc, 0x14, 0xc9,
|
||||||
0x4b, 0x49, 0xf8, 0xca, 0x5e, 0x92, 0x9f, 0x34, 0x76, 0xf4, 0x12, 0xc6, 0x36, 0xd6, 0xd2, 0x7e,
|
0x26, 0x31, 0x84, 0xbd, 0xef, 0x08, 0x17, 0x94, 0xb9, 0x2a, 0x83, 0x0f, 0x61, 0xbf, 0xb2, 0xd8,
|
||||||
0x09, 0x1d, 0xa1, 0x1c, 0x5b, 0xb6, 0xf8, 0x02, 0x8b, 0x4b, 0x43, 0x64, 0xe0, 0xd1, 0x3d, 0x18,
|
0xde, 0xfa, 0xd0, 0x5e, 0x19, 0x93, 0xad, 0xdc, 0x5d, 0x83, 0x0f, 0xa0, 0xaf, 0xfa, 0x56, 0x65,
|
||||||
0x9f, 0xeb, 0x97, 0x78, 0xfd, 0x43, 0x75, 0xdc, 0x43, 0xa9, 0x66, 0x5d, 0xa0, 0x6d, 0xff, 0x12,
|
0x3e, 0x82, 0x0e, 0xcd, 0x25, 0xe1, 0x2b, 0xdb, 0xa4, 0x46, 0x54, 0xdd, 0x83, 0xe7, 0x30, 0xb0,
|
||||||
0x86, 0xcf, 0xae, 0x48, 0xea, 0x80, 0x8f, 0xa1, 0x9f, 0x11, 0x9c, 0x2d, 0x68, 0x49, 0x6c, 0x51,
|
0xbe, 0x96, 0xf6, 0x4b, 0x68, 0x0a, 0x65, 0xd8, 0xb2, 0xc4, 0x67, 0x58, 0x5c, 0x1a, 0x22, 0x03,
|
||||||
[regenerated protobuf fileDescriptor bytes omitted: the extracted side-by-side view interleaved the old and new byte columns of this generated file, so only the closing brace of the byte slice is kept]
}
@@ -30,6 +30,7 @@ message LaunchRequest {
   bool basic_process_cgroup = 10;
   repeated hashicorp.nomad.plugins.drivers.proto.Mount mounts = 11;
   repeated hashicorp.nomad.plugins.drivers.proto.Device devices = 12;
+  hashicorp.nomad.plugins.drivers.proto.NetworkIsolationSpec network_isolation = 13;
 }

 message LaunchResponse {
@@ -33,6 +33,7 @@ func (s *grpcExecutorServer) Launch(ctx context.Context, req *proto.LaunchReques
 		BasicProcessCgroup: req.BasicProcessCgroup,
 		Mounts:             drivers.MountsFromProto(req.Mounts),
 		Devices:            drivers.DevicesFromProto(req.Devices),
+		NetworkIsolation:   drivers.NetworkIsolationSpecFromProto(req.NetworkIsolation),
 	})

 	if err != nil {
@@ -190,6 +190,31 @@ func SliceSetDisjoint(first, second []string) (bool, []string) {
 	return false, flattened
 }

+// CompareSliceSetString returns true if the slices contain the same strings.
+// Order is ignored. The slice may be copied but is never altered. The slice is
+// assumed to be a set. Multiple instances of an entry are treated the same as
+// a single instance.
+func CompareSliceSetString(a, b []string) bool {
+	n := len(a)
+	if n != len(b) {
+		return false
+	}
+
+	// Copy a into a map and compare b against it
+	amap := make(map[string]struct{}, n)
+	for i := range a {
+		amap[a[i]] = struct{}{}
+	}
+
+	for i := range b {
+		if _, ok := amap[b[i]]; !ok {
+			return false
+		}
+	}
+
+	return true
+}
+
 // CompareMapStringString returns true if the maps are equivalent. A nil and
 // empty map are considered not equal.
 func CompareMapStringString(a, b map[string]string) bool {
@@ -1,6 +1,7 @@
 package helper

 import (
+	"fmt"
 	"reflect"
 	"sort"
 	"testing"
@@ -21,6 +22,75 @@ func TestSliceStringIsSubset(t *testing.T) {
 	}
 }

+func TestCompareSliceSetString(t *testing.T) {
+	cases := []struct {
+		A      []string
+		B      []string
+		Result bool
+	}{
+		{
+			A:      []string{},
+			B:      []string{},
+			Result: true,
+		},
+		{
+			A:      []string{},
+			B:      []string{"a"},
+			Result: false,
+		},
+		{
+			A:      []string{"a"},
+			B:      []string{"a"},
+			Result: true,
+		},
+		{
+			A:      []string{"a"},
+			B:      []string{"b"},
+			Result: false,
+		},
+		{
+			A:      []string{"a", "b"},
+			B:      []string{"b"},
+			Result: false,
+		},
+		{
+			A:      []string{"a", "b"},
+			B:      []string{"a"},
+			Result: false,
+		},
+		{
+			A:      []string{"a", "b"},
+			B:      []string{"a", "b"},
+			Result: true,
+		},
+		{
+			A:      []string{"a", "b"},
+			B:      []string{"b", "a"},
+			Result: true,
+		},
+	}
+
+	for i, tc := range cases {
+		tc := tc
+		t.Run(fmt.Sprintf("case-%da", i), func(t *testing.T) {
+			if res := CompareSliceSetString(tc.A, tc.B); res != tc.Result {
+				t.Fatalf("expected %t but CompareSliceSetString(%v, %v) -> %t",
+					tc.Result, tc.A, tc.B, res,
+				)
+			}
+		})
+
+		// Function is commutative so compare B and A
+		t.Run(fmt.Sprintf("case-%db", i), func(t *testing.T) {
+			if res := CompareSliceSetString(tc.B, tc.A); res != tc.Result {
+				t.Fatalf("expected %t but CompareSliceSetString(%v, %v) -> %t",
+					tc.Result, tc.B, tc.A, res,
+				)
+			}
+		})
+	}
+}
+
 func TestMapStringStringSliceValueSet(t *testing.T) {
 	m := map[string][]string{
 		"foo": {"1", "2"},
326  jobspec/parse.go
@@ -314,6 +314,8 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error {
 			"vault",
 			"migrate",
 			"spread",
+			"network",
+			"service",
 		}
 		if err := helper.CheckHCLKeys(listVal, valid); err != nil {
 			return multierror.Prefix(err, fmt.Sprintf("'%s' ->", n))
@@ -333,6 +335,8 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error {
 		delete(m, "vault")
 		delete(m, "migrate")
 		delete(m, "spread")
+		delete(m, "network")
+		delete(m, "service")

 		// Build the group with the basic decode
 		var g api.TaskGroup
@@ -369,6 +373,15 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error {
 			}
 		}

+		// Parse network
+		if o := listVal.Filter("network"); len(o.Items) > 0 {
+			networks, err := parseNetwork(o)
+			if err != nil {
+				return err
+			}
+			g.Networks = []*api.NetworkResource{networks}
+		}
+
 		// Parse reschedule policy
 		if o := listVal.Filter("reschedule"); len(o.Items) > 0 {
 			if err := parseReschedulePolicy(&g.ReschedulePolicy, o); err != nil {
@@ -437,6 +450,12 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error {
 			}
 		}

+		if o := listVal.Filter("service"); len(o.Items) > 0 {
+			if err := parseGroupServices(*result.Name, *g.Name, &g, o); err != nil {
+				return multierror.Prefix(err, fmt.Sprintf("'%s',", n))
+			}
+		}
+
 		collection = append(collection, &g)
 	}

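For context, a minimal sketch of the jobspec these new group-level filters are meant to accept, using only the keys that the "network" and "service" parsing added in this change recognizes; the job, group, service, and task names and the port numbers are invented for illustration:

  job "example" {
    datacenters = ["dc1"]

    group "web" {
      # Parsed via the new "network" filter and parseNetwork.
      network {
        mode = "bridge"

        port "http" {
          static = 80
          to     = 8080
        }
      }

      # Parsed via the new "service" filter and parseGroupServices.
      service {
        name = "web"
        port = "http"
      }

      task "server" {
        driver = "raw_exec"

        config {
          command = "bash"
          args    = ["-c", "echo hi"]
        }
      }
    }
  }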
@@ -1191,6 +1210,83 @@ func parseTemplates(result *[]*api.Template, list *ast.ObjectList) error {
 	return nil
 }

+//TODO(schmichael) combine with non-group services
+func parseGroupServices(jobName string, taskGroupName string, g *api.TaskGroup, serviceObjs *ast.ObjectList) error {
+	g.Services = make([]*api.Service, len(serviceObjs.Items))
+	for idx, o := range serviceObjs.Items {
+		// Check for invalid keys
+		valid := []string{
+			"name",
+			"tags",
+			"canary_tags",
+			"port",
+			"check",
+			"address_mode",
+			"check_restart",
+			"connect",
+		}
+		if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
+			return multierror.Prefix(err, fmt.Sprintf("service (%d) ->", idx))
+		}
+
+		var service api.Service
+		var m map[string]interface{}
+		if err := hcl.DecodeObject(&m, o.Val); err != nil {
+			return err
+		}
+
+		delete(m, "check")
+		delete(m, "check_restart")
+		delete(m, "connect")
+
+		if err := mapstructure.WeakDecode(m, &service); err != nil {
+			return err
+		}
+
+		// Filter checks
+		var checkList *ast.ObjectList
+		if ot, ok := o.Val.(*ast.ObjectType); ok {
+			checkList = ot.List
+		} else {
+			return fmt.Errorf("service '%s': should be an object", service.Name)
+		}
+
+		if co := checkList.Filter("check"); len(co.Items) > 0 {
+			if err := parseChecks(&service, co); err != nil {
+				return multierror.Prefix(err, fmt.Sprintf("service: '%s',", service.Name))
+			}
+		}
+
+		// Filter check_restart
+		if cro := checkList.Filter("check_restart"); len(cro.Items) > 0 {
+			if len(cro.Items) > 1 {
+				return fmt.Errorf("check_restart '%s': cannot have more than 1 check_restart", service.Name)
+			}
+			if cr, err := parseCheckRestart(cro.Items[0]); err != nil {
+				return multierror.Prefix(err, fmt.Sprintf("service: '%s',", service.Name))
+			} else {
+				service.CheckRestart = cr
+			}
+		}
+
+		// Filter connect
+		if co := checkList.Filter("connect"); len(co.Items) > 0 {
+			if len(co.Items) > 1 {
+				return fmt.Errorf("connect '%s': cannot have more than 1 connect", service.Name)
+			}
+			if c, err := parseConnect(co.Items[0]); err != nil {
+				return multierror.Prefix(err, fmt.Sprintf("service: '%s',", service.Name))
+			} else {
+				service.Connect = c
+			}
+		}
+
+		g.Services[idx] = &service
+	}
+
+	return nil
+}
+
 func parseServices(jobName string, taskGroupName string, task *api.Task, serviceObjs *ast.ObjectList) error {
 	task.Services = make([]*api.Service, len(serviceObjs.Items))
 	for idx, o := range serviceObjs.Items {
@@ -1387,6 +1483,162 @@ func parseCheckRestart(cro *ast.ObjectItem) (*api.CheckRestart, error) {
 	return &checkRestart, nil
 }

+func parseConnect(co *ast.ObjectItem) (*api.ConsulConnect, error) {
+	valid := []string{
+		"sidecar_service",
+	}
+
+	if err := helper.CheckHCLKeys(co.Val, valid); err != nil {
+		return nil, multierror.Prefix(err, "connect ->")
+	}
+
+	var connect api.ConsulConnect
+
+	var connectList *ast.ObjectList
+	if ot, ok := co.Val.(*ast.ObjectType); ok {
+		connectList = ot.List
+	} else {
+		return nil, fmt.Errorf("connect should be an object")
+	}
+
+	// Parse the sidecar_service
+	o := connectList.Filter("sidecar_service")
+	if len(o.Items) == 0 {
+		return nil, nil
+	}
+	if len(o.Items) > 1 {
+		return nil, fmt.Errorf("only one 'sidecar_service' block allowed per task")
+	}
+
+	r, err := parseSidecarService(o.Items[0])
+	if err != nil {
+		return nil, fmt.Errorf("sidecar_service, %v", err)
+	}
+	connect.SidecarService = r
+
+	return &connect, nil
+}
+
+func parseSidecarService(o *ast.ObjectItem) (*api.ConsulSidecarService, error) {
+	valid := []string{
+		"port",
+		"proxy",
+	}
+
+	if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
+		return nil, multierror.Prefix(err, "sidecar_service ->")
+	}
+
+	var sidecar api.ConsulSidecarService
+	var m map[string]interface{}
+	if err := hcl.DecodeObject(&m, o.Val); err != nil {
+		return nil, err
+	}
+
+	delete(m, "proxy")
+
+	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
+		WeaklyTypedInput: true,
+		Result:           &sidecar,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if err := dec.Decode(m); err != nil {
+		return nil, fmt.Errorf("foo: %v", err)
+	}
+
+	var proxyList *ast.ObjectList
+	if ot, ok := o.Val.(*ast.ObjectType); ok {
+		proxyList = ot.List
+	} else {
+		return nil, fmt.Errorf("sidecar_service: should be an object")
+	}
+
+	// Parse the proxy
+	po := proxyList.Filter("proxy")
+	if len(po.Items) == 0 {
+		return &sidecar, nil
+	}
+	if len(po.Items) > 1 {
+		return nil, fmt.Errorf("only one 'proxy' block allowed per task")
+	}
+
+	r, err := parseProxy(po.Items[0])
+	if err != nil {
+		return nil, fmt.Errorf("proxy, %v", err)
+	}
+	sidecar.Proxy = r
+
+	return &sidecar, nil
+}
+
+func parseProxy(o *ast.ObjectItem) (*api.ConsulProxy, error) {
+	valid := []string{
+		"upstreams",
+	}
+
+	if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
+		return nil, multierror.Prefix(err, "proxy ->")
+	}
+
+	var proxy api.ConsulProxy
+
+	var listVal *ast.ObjectList
+	if ot, ok := o.Val.(*ast.ObjectType); ok {
+		listVal = ot.List
+	} else {
+		return nil, fmt.Errorf("proxy: should be an object")
+	}
+
+	// Parse the proxy
+	uo := listVal.Filter("upstreams")
+	proxy.Upstreams = make([]*api.ConsulUpstream, len(uo.Items))
+	for i := range uo.Items {
+		u, err := parseUpstream(uo.Items[i])
+		if err != nil {
+			return nil, err
+		}
+
+		proxy.Upstreams[i] = u
+	}
+
+	return &proxy, nil
+}
+
+func parseUpstream(uo *ast.ObjectItem) (*api.ConsulUpstream, error) {
+	valid := []string{
+		"destination_name",
+		"local_bind_port",
+	}
+
+	if err := helper.CheckHCLKeys(uo.Val, valid); err != nil {
+		return nil, multierror.Prefix(err, "upstream ->")
+	}
+
+	var upstream api.ConsulUpstream
+	var m map[string]interface{}
+	if err := hcl.DecodeObject(&m, uo.Val); err != nil {
+		return nil, err
+	}
+
+	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
+		WeaklyTypedInput: true,
+		Result:           &upstream,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if err := dec.Decode(m); err != nil {
+		return nil, err
+	}
+
+	return &upstream, nil
+}
+
 func parseResources(result *api.Resources, list *ast.ObjectList) error {
 	list = list.Elem()
 	if len(list.Items) == 0 {
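As a reference for the HCL these new helpers decode, a hedged sketch of a service "connect" block using only the keys accepted by parseConnect, parseSidecarService, parseProxy, and parseUpstream above; the service name, upstream name, and port values are invented for the example:

  service {
    name = "web"
    port = "http"

    connect {
      # parseConnect accepts a single sidecar_service block.
      sidecar_service {
        # parseSidecarService accepts "port" and "proxy".
        proxy {
          # parseProxy accepts "upstreams"; each upstream accepts
          # "destination_name" and "local_bind_port".
          upstreams {
            destination_name = "database"
            local_bind_port  = 8080
          }
        }
      }
    }
  }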
@@ -1433,39 +1685,11 @@ func parseResources(result *api.Resources, list *ast.ObjectList) error {

 	// Parse the network resources
 	if o := listVal.Filter("network"); len(o.Items) > 0 {
-		if len(o.Items) > 1 {
-			return fmt.Errorf("only one 'network' resource allowed")
-		}
-
-		// Check for invalid keys
-		valid := []string{
-			"mbits",
-			"port",
-		}
-		if err := helper.CheckHCLKeys(o.Items[0].Val, valid); err != nil {
-			return multierror.Prefix(err, "resources, network ->")
-		}
-
-		var r api.NetworkResource
-		var m map[string]interface{}
-		if err := hcl.DecodeObject(&m, o.Items[0].Val); err != nil {
-			return err
-		}
-		if err := mapstructure.WeakDecode(m, &r); err != nil {
-			return err
-		}
-
-		var networkObj *ast.ObjectList
-		if ot, ok := o.Items[0].Val.(*ast.ObjectType); ok {
-			networkObj = ot.List
-		} else {
-			return fmt.Errorf("resource: should be an object")
-		}
-		if err := parsePorts(networkObj, &r); err != nil {
-			return multierror.Prefix(err, "resources, network, ports ->")
-		}
-
-		result.Networks = []*api.NetworkResource{&r}
+		r, err := parseNetwork(o)
+		if err != nil {
+			return fmt.Errorf("resource, %v", err)
+		}
+		result.Networks = []*api.NetworkResource{r}
 	}

 	// Parse the device resources
||||||
// Parse the device resources
|
// Parse the device resources
|
||||||
|
@@ -1535,11 +1759,49 @@ func parseResources(result *api.Resources, list *ast.ObjectList) error {
 	return nil
 }

+func parseNetwork(o *ast.ObjectList) (*api.NetworkResource, error) {
+	if len(o.Items) > 1 {
+		return nil, fmt.Errorf("only one 'network' resource allowed")
+	}
+
+	// Check for invalid keys
+	valid := []string{
+		"mode",
+		"mbits",
+		"port",
+	}
+	if err := helper.CheckHCLKeys(o.Items[0].Val, valid); err != nil {
+		return nil, multierror.Prefix(err, "network ->")
+	}
+
+	var r api.NetworkResource
+	var m map[string]interface{}
+	if err := hcl.DecodeObject(&m, o.Items[0].Val); err != nil {
+		return nil, err
+	}
+	if err := mapstructure.WeakDecode(m, &r); err != nil {
+		return nil, err
+	}
+
+	var networkObj *ast.ObjectList
+	if ot, ok := o.Items[0].Val.(*ast.ObjectType); ok {
+		networkObj = ot.List
+	} else {
+		return nil, fmt.Errorf("should be an object")
+	}
+	if err := parsePorts(networkObj, &r); err != nil {
+		return nil, multierror.Prefix(err, "network, ports ->")
+	}
+
+	return &r, nil
+}
+
 func parsePorts(networkObj *ast.ObjectList, nw *api.NetworkResource) error {
 	// Check for invalid keys
 	valid := []string{
 		"mbits",
 		"port",
+		"mode",
 	}
 	if err := helper.CheckHCLKeys(networkObj, valid); err != nil {
 		return err
@@ -871,6 +871,50 @@ func TestParse(t *testing.T) {
 			},
 			false,
 		},
+		{
+			"tg-network.hcl",
+			&api.Job{
+				ID:          helper.StringToPtr("foo"),
+				Name:        helper.StringToPtr("foo"),
+				Datacenters: []string{"dc1"},
+				TaskGroups: []*api.TaskGroup{
+					{
+						Name:  helper.StringToPtr("bar"),
+						Count: helper.IntToPtr(3),
+						Networks: []*api.NetworkResource{
+							{
+								Mode: "bridge",
+								ReservedPorts: []api.Port{
+									{
+										Label: "http",
+										Value: 80,
+										To:    8080,
+									},
+								},
+							},
+						},
+						Tasks: []*api.Task{
+							{
+								Name:   "bar",
+								Driver: "raw_exec",
+								Config: map[string]interface{}{
+									"command": "bash",
+									"args":    []interface{}{"-c", "echo hi"},
+								},
+								Resources: &api.Resources{
+									Networks: []*api.NetworkResource{
+										{
+											MBits: helper.IntToPtr(10),
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			false,
+		},
 	}

 	for _, tc := range cases {
25  jobspec/test-fixtures/tg-network.hcl  (new file)
@@ -0,0 +1,25 @@
+job "foo" {
+  datacenters = ["dc1"]
+  group "bar" {
+    count = 3
+    network {
+      mode = "bridge"
+      port "http" {
+        static = 80
+        to     = 8080
+      }
+    }
+    task "bar" {
+      driver = "raw_exec"
+      config {
+        command = "bash"
+        args    = ["-c", "echo hi"]
+      }
+      resources {
+        network {
+          mbits = 10
+        }
+      }
+    }
+  }
+}
109  nomad/structs/consul.go  (new file)
@@ -0,0 +1,109 @@
+package structs
+
+type ConsulConnect struct {
+	SidecarService *ConsulSidecarService
+}
+
+func (c *ConsulConnect) Copy() *ConsulConnect {
+	return &ConsulConnect{
+		SidecarService: c.SidecarService.Copy(),
+	}
+}
+
+func (c *ConsulConnect) Equals(o *ConsulConnect) bool {
+	if c == nil || o == nil {
+		return c == o
+	}
+
+	return c.SidecarService.Equals(o.SidecarService)
+}
+
+func (c *ConsulConnect) HasSidecar() bool {
+	return c != nil && c.SidecarService != nil
+}
+
+type ConsulSidecarService struct {
+	Port  string
+	Proxy *ConsulProxy
+}
+
+func (s *ConsulSidecarService) Copy() *ConsulSidecarService {
+	return &ConsulSidecarService{
+		Port:  s.Port,
+		Proxy: s.Proxy.Copy(),
+	}
+}
+
+func (s *ConsulSidecarService) Equals(o *ConsulSidecarService) bool {
+	if s == nil || o == nil {
+		return s == o
+	}
+
+	if s.Port != o.Port {
+		return false
+	}
+
+	return s.Proxy.Equals(o.Proxy)
+}
+
+type ConsulProxy struct {
+	Upstreams []*ConsulUpstream
+}
+
+func (p *ConsulProxy) Copy() *ConsulProxy {
+	upstreams := make([]*ConsulUpstream, len(p.Upstreams))
+
+	for i := range p.Upstreams {
+		upstreams[i] = p.Upstreams[i].Copy()
+	}
+
+	return &ConsulProxy{
+		Upstreams: upstreams,
+	}
+}
+
+func (p *ConsulProxy) Equals(o *ConsulProxy) bool {
+	if p == nil || o == nil {
+		return p == o
+	}
+
+	if len(p.Upstreams) != len(o.Upstreams) {
+		return false
+	}
+
+	// Order doesn't matter
+OUTER:
+	for _, up := range p.Upstreams {
+		for _, innerUp := range o.Upstreams {
+			if up.Equals(innerUp) {
+				// Match; find next upstream
+				continue OUTER
+			}
+		}
+
+		// No match
+		return false
+	}
+
+	return true
+}
+
+type ConsulUpstream struct {
+	DestinationName string
+	LocalBindPort   int
+}
+
+func (u *ConsulUpstream) Copy() *ConsulUpstream {
+	return &ConsulUpstream{
+		DestinationName: u.DestinationName,
+		LocalBindPort:   u.LocalBindPort,
+	}
+}
+
+func (u *ConsulUpstream) Equals(o *ConsulUpstream) bool {
+	if u == nil || o == nil {
+		return u == o
+	}
+
+	return (*u) == (*o)
+}
@@ -3317,6 +3317,7 @@ func TestTaskDiff(t *testing.T) {
 					DynamicPorts: []Port{
 						{
 							Label: "bar",
+							To:    8080,
 						},
 					},
 				},
@@ -3340,6 +3341,7 @@ func TestTaskDiff(t *testing.T) {
 					DynamicPorts: []Port{
 						{
 							Label: "baz",
+							To:    8081,
 						},
 					},
 				},
@@ -3375,6 +3377,12 @@ func TestTaskDiff(t *testing.T) {
 						Old:  "",
 						New:  "foo",
 					},
+					{
+						Type: DiffTypeAdded,
+						Name: "To",
+						Old:  "",
+						New:  "0",
+					},
 					{
 						Type: DiffTypeAdded,
 						Name: "Value",
@@ -3393,6 +3401,12 @@ func TestTaskDiff(t *testing.T) {
 						Old:  "",
 						New:  "baz",
 					},
+					{
+						Type: DiffTypeAdded,
+						Name: "To",
+						Old:  "",
+						New:  "8081",
+					},
 				},
 			},
 		},
@@ -3419,6 +3433,12 @@ func TestTaskDiff(t *testing.T) {
 						Old:  "foo",
 						New:  "",
 					},
+					{
+						Type: DiffTypeDeleted,
+						Name: "To",
+						Old:  "0",
+						New:  "",
+					},
 					{
 						Type: DiffTypeDeleted,
 						Name: "Value",
@@ -3437,6 +3457,12 @@ func TestTaskDiff(t *testing.T) {
 						Old:  "bar",
 						New:  "",
 					},
+					{
+						Type: DiffTypeDeleted,
+						Name: "To",
+						Old:  "8080",
+						New:  "",
+					},
 				},
 			},
 		},
@@ -3879,6 +3905,12 @@ func TestTaskDiff(t *testing.T) {
 					Old:  "boom_port",
 					New:  "boom_port",
 				},
+				{
+					Type: DiffTypeNone,
+					Name: "boom.To",
+					Old:  "0",
+					New:  "0",
+				},
 				{
 					Type: DiffTypeNone,
 					Name: "boom.Value",
@@ -108,7 +108,7 @@ func TestAllocsFit_PortsOvercommitted_Old(t *testing.T) {
 				Device:        "eth0",
 				IP:            "10.0.0.1",
 				MBits:         50,
-				ReservedPorts: []Port{{"main", 8000}},
+				ReservedPorts: []Port{{"main", 8000, 80}},
 			},
 		},
 	},
@@ -160,7 +160,7 @@ func TestAllocsFit_Old(t *testing.T) {
 				Device:        "eth0",
 				IP:            "10.0.0.1",
 				MBits:         50,
-				ReservedPorts: []Port{{"main", 80}},
+				ReservedPorts: []Port{{"main", 80, 0}},
 			},
 		},
 	},
@@ -176,7 +176,7 @@ func TestAllocsFit_Old(t *testing.T) {
 				Device:        "eth0",
 				IP:            "10.0.0.1",
 				MBits:         50,
-				ReservedPorts: []Port{{"main", 8000}},
+				ReservedPorts: []Port{{"main", 8000, 80}},
 			},
 		},
 	},
@@ -227,7 +227,7 @@ func TestAllocsFit_TerminalAlloc_Old(t *testing.T) {
 				Device:        "eth0",
 				IP:            "10.0.0.1",
 				MBits:         50,
-				ReservedPorts: []Port{{"main", 80}},
+				ReservedPorts: []Port{{"main", 80, 0}},
 			},
 		},
 	},
@@ -243,7 +243,7 @@ func TestAllocsFit_TerminalAlloc_Old(t *testing.T) {
 				Device:        "eth0",
 				IP:            "10.0.0.1",
 				MBits:         50,
-				ReservedPorts: []Port{{"main", 8000}},
+				ReservedPorts: []Port{{"main", 8000, 0}},
 			},
 		},
 	},
@@ -323,7 +323,7 @@ func TestAllocsFit(t *testing.T) {
 				Device:        "eth0",
 				IP:            "10.0.0.1",
 				MBits:         50,
-				ReservedPorts: []Port{{"main", 8000}},
+				ReservedPorts: []Port{{"main", 8000, 0}},
 			},
 		},
 	},
@@ -407,7 +407,7 @@ func TestAllocsFit_TerminalAlloc(t *testing.T) {
 				Device:        "eth0",
 				IP:            "10.0.0.1",
 				MBits:         50,
-				ReservedPorts: []Port{{"main", 8000}},
+				ReservedPorts: []Port{{"main", 8000, 80}},
 			},
 		},
 	},
@@ -113,6 +113,15 @@ func (idx *NetworkIndex) AddAllocs(allocs []*Allocation) (collide bool) {
 		}

 		if alloc.AllocatedResources != nil {
+			// Add network resources that are at the task group level
+			if len(alloc.AllocatedResources.Shared.Networks) > 0 {
+				for _, network := range alloc.AllocatedResources.Shared.Networks {
+					if idx.AddReserved(network) {
+						collide = true
+					}
+				}
+			}
+
 			for _, task := range alloc.AllocatedResources.Tasks {
 				if len(task.Networks) == 0 {
 					continue
@@ -15,7 +15,7 @@ func TestNetworkIndex_Overcommitted(t *testing.T) {
 		Device:        "eth0",
 		IP:            "192.168.0.100",
 		MBits:         505,
-		ReservedPorts: []Port{{"one", 8000}, {"two", 9000}},
+		ReservedPorts: []Port{{"one", 8000, 0}, {"two", 9000, 0}},
 	}
 	collide := idx.AddReserved(reserved)
 	if collide {
@@ -96,7 +96,7 @@ func TestNetworkIndex_AddAllocs(t *testing.T) {
 					Device:        "eth0",
 					IP:            "192.168.0.100",
 					MBits:         20,
-					ReservedPorts: []Port{{"one", 8000}, {"two", 9000}},
+					ReservedPorts: []Port{{"one", 8000, 0}, {"two", 9000, 0}},
 				},
 			},
 		},
@@ -112,7 +112,7 @@ func TestNetworkIndex_AddAllocs(t *testing.T) {
 					Device:        "eth0",
 					IP:            "192.168.0.100",
 					MBits:         50,
-					ReservedPorts: []Port{{"one", 10000}},
+					ReservedPorts: []Port{{"one", 10000, 0}},
 				},
 			},
 		},
@@ -146,7 +146,7 @@ func TestNetworkIndex_AddReserved(t *testing.T) {
 		Device:        "eth0",
 		IP:            "192.168.0.100",
 		MBits:         20,
-		ReservedPorts: []Port{{"one", 8000}, {"two", 9000}},
+		ReservedPorts: []Port{{"one", 8000, 0}, {"two", 9000, 0}},
 	}
 	collide := idx.AddReserved(reserved)
 	if collide {
@@ -224,7 +224,7 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) {
 					Device:        "eth0",
 					IP:            "192.168.0.100",
 					MBits:         20,
-					ReservedPorts: []Port{{"one", 8000}, {"two", 9000}},
+					ReservedPorts: []Port{{"one", 8000, 0}, {"two", 9000, 0}},
 				},
 			},
 		},
@@ -238,7 +238,7 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) {
 					Device:        "eth0",
 					IP:            "192.168.0.100",
 					MBits:         50,
-					ReservedPorts: []Port{{"main", 10000}},
+					ReservedPorts: []Port{{"main", 10000, 0}},
 				},
 			},
 		},
@@ -249,7 +249,7 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) {

 	// Ask for a reserved port
 	ask := &NetworkResource{
-		ReservedPorts: []Port{{"main", 8000}},
+		ReservedPorts: []Port{{"main", 8000, 0}},
 	}
 	offer, err := idx.AssignNetwork(ask)
 	if err != nil {
@@ -261,14 +261,14 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) {
 	if offer.IP != "192.168.0.101" {
 		t.Fatalf("bad: %#v", offer)
 	}
-	rp := Port{"main", 8000}
+	rp := Port{"main", 8000, 0}
 	if len(offer.ReservedPorts) != 1 || offer.ReservedPorts[0] != rp {
 		t.Fatalf("bad: %#v", offer)
 	}

 	// Ask for dynamic ports
 	ask = &NetworkResource{
-		DynamicPorts: []Port{{"http", 0}, {"https", 0}, {"admin", 0}},
+		DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}, {"admin", 0, 8080}},
 	}
 	offer, err = idx.AssignNetwork(ask)
 	if err != nil {
@@ -291,8 +291,8 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) {

 	// Ask for reserved + dynamic ports
 	ask = &NetworkResource{
-		ReservedPorts: []Port{{"main", 2345}},
-		DynamicPorts:  []Port{{"http", 0}, {"https", 0}, {"admin", 0}},
+		ReservedPorts: []Port{{"main", 2345, 0}},
+		DynamicPorts:  []Port{{"http", 0, 80}, {"https", 0, 443}, {"admin", 0, 8080}},
 	}
 	offer, err = idx.AssignNetwork(ask)
 	if err != nil {
@@ -305,7 +305,7 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) {
 		t.Fatalf("bad: %#v", offer)
 	}

-	rp = Port{"main", 2345}
+	rp = Port{"main", 2345, 0}
 	if len(offer.ReservedPorts) != 1 || offer.ReservedPorts[0] != rp {
 		t.Fatalf("bad: %#v", offer)
 	}
@@ -350,7 +350,7 @@ func TestNetworkIndex_AssignNetwork_Dynamic_Contention(t *testing.T) {

 	// Ask for dynamic ports
 	ask := &NetworkResource{
-		DynamicPorts: []Port{{"http", 0}},
+		DynamicPorts: []Port{{"http", 0, 80}},
 	}
 	offer, err := idx.AssignNetwork(ask)
 	if err != nil {
@@ -379,7 +379,7 @@ func TestNetworkIndex_Overcommitted_Old(t *testing.T) {
 		Device:        "eth0",
 		IP:            "192.168.0.100",
 		MBits:         505,
-		ReservedPorts: []Port{{"one", 8000}, {"two", 9000}},
+		ReservedPorts: []Port{{"one", 8000, 0}, {"two", 9000, 0}},
 	}
 	collide := idx.AddReserved(reserved)
 	if collide {
@@ -431,7 +431,7 @@ func TestNetworkIndex_SetNode_Old(t *testing.T) {
 			{
 				Device:        "eth0",
 				IP:            "192.168.0.100",
-				ReservedPorts: []Port{{"ssh", 22}},
+				ReservedPorts: []Port{{"ssh", 22, 0}},
 				MBits:         1,
 			},
 		},
@@ -468,7 +468,7 @@ func TestNetworkIndex_AddAllocs_Old(t *testing.T) {
 					Device:        "eth0",
 					IP:            "192.168.0.100",
 					MBits:         20,
-					ReservedPorts: []Port{{"one", 8000}, {"two", 9000}},
+					ReservedPorts: []Port{{"one", 8000, 0}, {"two", 9000, 0}},
 				},
 			},
 		},
@@ -482,7 +482,7 @@ func TestNetworkIndex_AddAllocs_Old(t *testing.T) {
 					Device:        "eth0",
 					IP:            "192.168.0.100",
 					MBits:         50,
-					ReservedPorts: []Port{{"one", 10000}},
+					ReservedPorts: []Port{{"one", 10000, 0}},
 				},
 			},
 		},
@@ -526,7 +526,7 @@ func TestNetworkIndex_yieldIP_Old(t *testing.T) {
 			{
 				Device:        "eth0",
 				IP:            "192.168.0.100",
-				ReservedPorts: []Port{{"ssh", 22}},
+				ReservedPorts: []Port{{"ssh", 22, 0}},
 				MBits:         1,
 			},
 		},
@@ -565,7 +565,7 @@ func TestNetworkIndex_AssignNetwork_Old(t *testing.T) {
 			{
 				Device:        "eth0",
 				IP:            "192.168.0.100",
-				ReservedPorts: []Port{{"ssh", 22}},
+				ReservedPorts: []Port{{"ssh", 22, 0}},
 				MBits:         1,
 			},
 		},
@@ -582,7 +582,7 @@ func TestNetworkIndex_AssignNetwork_Old(t *testing.T) {
 					Device:        "eth0",
 					IP:            "192.168.0.100",
 					MBits:         20,
-					ReservedPorts: []Port{{"one", 8000}, {"two", 9000}},
+					ReservedPorts: []Port{{"one", 8000, 0}, {"two", 9000, 0}},
 				},
 			},
 		},
@@ -596,7 +596,7 @@ func TestNetworkIndex_AssignNetwork_Old(t *testing.T) {
 					Device:        "eth0",
 					IP:            "192.168.0.100",
 					MBits:         50,
-					ReservedPorts: []Port{{"main", 10000}},
+					ReservedPorts: []Port{{"main", 10000, 0}},
 				},
 			},
 		},
@@ -607,7 +607,7 @@ func TestNetworkIndex_AssignNetwork_Old(t *testing.T) {

 	// Ask for a reserved port
 	ask := &NetworkResource{
-		ReservedPorts: []Port{{"main", 8000}},
+		ReservedPorts: []Port{{"main", 8000, 0}},
 	}
 	offer, err := idx.AssignNetwork(ask)
 	if err != nil {
@@ -619,14 +619,14 @@ func TestNetworkIndex_AssignNetwork_Old(t *testing.T) {
 	if offer.IP != "192.168.0.101" {
 		t.Fatalf("bad: %#v", offer)
 	}
-	rp := Port{"main", 8000}
+	rp := Port{"main", 8000, 0}
 	if len(offer.ReservedPorts) != 1 || offer.ReservedPorts[0] != rp {
 		t.Fatalf("bad: %#v", offer)
 	}

 	// Ask for dynamic ports
 	ask = &NetworkResource{
-		DynamicPorts: []Port{{"http", 0}, {"https", 0}, {"admin", 0}},
+		DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}, {"admin", 0, 8080}},
 	}
 	offer, err = idx.AssignNetwork(ask)
 	if err != nil {
@@ -649,8 +649,8 @@ func TestNetworkIndex_AssignNetwork_Old(t *testing.T) {

 	// Ask for reserved + dynamic ports
 	ask = &NetworkResource{
-		ReservedPorts: []Port{{"main", 2345}},
-		DynamicPorts:  []Port{{"http", 0}, {"https", 0}, {"admin", 0}},
+		ReservedPorts: []Port{{"main", 2345, 0}},
+		DynamicPorts:  []Port{{"http", 0, 80}, {"https", 0, 443}, {"admin", 0, 8080}},
 	}
 	offer, err = idx.AssignNetwork(ask)
 	if err != nil {
@@ -663,7 +663,7 @@ func TestNetworkIndex_AssignNetwork_Old(t *testing.T) {
 		t.Fatalf("bad: %#v", offer)
 	}

-	rp = Port{"main", 2345}
+	rp = Port{"main", 2345, 0}
 	if len(offer.ReservedPorts) != 1 || offer.ReservedPorts[0] != rp {
 		t.Fatalf("bad: %#v", offer)
 	}
@@ -716,7 +716,7 @@ func TestNetworkIndex_AssignNetwork_Dynamic_Contention_Old(t *testing.T) {

 	// Ask for dynamic ports
 	ask := &NetworkResource{
-		DynamicPorts: []Port{{"http", 0}},
+		DynamicPorts: []Port{{"http", 0, 80}},
 	}
 	offer, err := idx.AssignNetwork(ask)
 	if err != nil {
@@ -1937,13 +1937,7 @@ func (r *Resources) Copy() *Resources {
 	*newR = *r

 	// Copy the network objects
-	if r.Networks != nil {
-		n := len(r.Networks)
-		newR.Networks = make([]*NetworkResource, n)
-		for i := 0; i < n; i++ {
-			newR.Networks[i] = r.Networks[i].Copy()
-		}
-	}
+	newR.Networks = r.Networks.Copy()

 	// Copy the devices
 	if r.Devices != nil {
|
||||||
type Port struct {
|
type Port struct {
|
||||||
Label string
|
Label string
|
||||||
Value int
|
Value int
|
||||||
|
To int
|
||||||
}
|
}
|
||||||
|
|
||||||
// NetworkResource is used to represent available network
|
// NetworkResource is used to represent available network
|
||||||
// resources
|
// resources
|
||||||
type NetworkResource struct {
|
type NetworkResource struct {
|
||||||
|
Mode string // Mode of the network
|
||||||
Device string // Name of the device
|
Device string // Name of the device
|
||||||
CIDR string // CIDR block of addresses
|
CIDR string // CIDR block of addresses
|
||||||
IP string // Host IP address
|
IP string // Host IP address
|
||||||
|
@@ -2025,6 +2021,10 @@ type NetworkResource struct {
 }

 func (nr *NetworkResource) Equals(other *NetworkResource) bool {
+	if nr.Mode != other.Mode {
+		return false
+	}
+
 	if nr.Device != other.Device {
 		return false
 	}
@@ -2065,6 +2065,7 @@ func (nr *NetworkResource) Equals(other *NetworkResource) bool {
 			return false
 		}
 	}

 	return true
 }

@@ -2137,6 +2138,18 @@ func (n *NetworkResource) PortLabels() map[string]int {
 // Networks defined for a task on the Resources struct.
 type Networks []*NetworkResource

+func (ns Networks) Copy() Networks {
+	if len(ns) == 0 {
+		return nil
+	}
+
+	out := make([]*NetworkResource, len(ns))
+	for i := range ns {
+		out[i] = ns[i].Copy()
+	}
+	return out
+}
+
 // Port assignment and IP for the given label or empty values.
 func (ns Networks) Port(label string) (string, int) {
 	for _, n := range ns {
@@ -2289,13 +2302,7 @@ func (n *NodeResources) Copy() *NodeResources {
 	*newN = *n

 	// Copy the networks
-	if n.Networks != nil {
-		networks := len(n.Networks)
-		newN.Networks = make([]*NetworkResource, networks)
-		for i := 0; i < networks; i++ {
-			newN.Networks[i] = n.Networks[i].Copy()
-		}
-	}
+	newN.Networks = n.Networks.Copy()

 	// Copy the devices
 	if n.Devices != nil {
|
||||||
if a == nil {
|
if a == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
newA := new(AllocatedResources)
|
|
||||||
*newA = *a
|
|
||||||
|
|
||||||
if a.Tasks != nil {
|
out := AllocatedResources{
|
||||||
tr := make(map[string]*AllocatedTaskResources, len(newA.Tasks))
|
Shared: a.Shared.Copy(),
|
||||||
for task, resource := range newA.Tasks {
|
|
||||||
tr[task] = resource.Copy()
|
|
||||||
}
|
|
||||||
newA.Tasks = tr
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return newA
|
if a.Tasks != nil {
|
||||||
|
out.Tasks = make(map[string]*AllocatedTaskResources, len(out.Tasks))
|
||||||
|
for task, resource := range a.Tasks {
|
||||||
|
out.Tasks[task] = resource.Copy()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &out
|
||||||
}
|
}
|
||||||
|
|
||||||
// Comparable returns a comparable version of the allocations allocated
|
// Comparable returns a comparable version of the allocations allocated
|
||||||
|
@@ -2849,6 +2857,13 @@ func (a *AllocatedResources) Comparable() *ComparableResources {
 	for _, r := range a.Tasks {
 		c.Flattened.Add(r)
 	}
+	// Add network resources that are at the task group level
+	for _, network := range a.Shared.Networks {
+		c.Flattened.Add(&AllocatedTaskResources{
+			Networks: []*NetworkResource{network},
+		})
+	}
+
 	return c
 }

@@ -2882,13 +2897,7 @@ func (a *AllocatedTaskResources) Copy() *AllocatedTaskResources {
 	*newA = *a

 	// Copy the networks
-	if a.Networks != nil {
-		n := len(a.Networks)
-		newA.Networks = make([]*NetworkResource, n)
-		for i := 0; i < n; i++ {
-			newA.Networks[i] = a.Networks[i].Copy()
-		}
-	}
+	newA.Networks = a.Networks.Copy()

 	// Copy the devices
 	if newA.Devices != nil {
@@ -2970,15 +2979,24 @@ func (a *AllocatedTaskResources) Subtract(delta *AllocatedTaskResources) {

 // AllocatedSharedResources are the set of resources allocated to a task group.
 type AllocatedSharedResources struct {
-	DiskMB int64
+	Networks Networks
+	DiskMB   int64
+}
+
+func (a AllocatedSharedResources) Copy() AllocatedSharedResources {
+	return AllocatedSharedResources{
+		Networks: a.Networks.Copy(),
+		DiskMB:   a.DiskMB,
+	}
 }

 func (a *AllocatedSharedResources) Add(delta *AllocatedSharedResources) {
 	if delta == nil {
 		return
 	}
+	a.Networks = append(a.Networks, delta.Networks...)
 	a.DiskMB += delta.DiskMB

 }

 func (a *AllocatedSharedResources) Subtract(delta *AllocatedSharedResources) {
@@ -2986,6 +3004,17 @@ func (a *AllocatedSharedResources) Subtract(delta *AllocatedSharedResources) {
 		return
 	}

+	diff := map[*NetworkResource]bool{}
+	for _, n := range delta.Networks {
+		diff[n] = true
+	}
+	var nets Networks
+	for _, n := range a.Networks {
+		if _, ok := diff[n]; !ok {
+			nets = append(nets, n)
+		}
+	}
+	a.Networks = nets
 	a.DiskMB -= delta.DiskMB
 }

@@ -4623,6 +4652,13 @@ type TaskGroup struct {
 	// Spread can be specified at the task group level to express spreading
 	// allocations across a desired attribute, such as datacenter
 	Spreads []*Spread

+	// Networks are the network configuration for the task group. This can be
+	// overridden in the task.
+	Networks Networks
+
+	// Services this group provides
+	Services []*Service
 }

 func (tg *TaskGroup) Copy() *TaskGroup {
@@ -4638,6 +4674,15 @@ func (tg *TaskGroup) Copy() *TaskGroup {
 	ntg.Affinities = CopySliceAffinities(ntg.Affinities)
 	ntg.Spreads = CopySliceSpreads(ntg.Spreads)

+	// Copy the network objects
+	if tg.Networks != nil {
+		n := len(tg.Networks)
+		ntg.Networks = make([]*NetworkResource, n)
+		for i := 0; i < n; i++ {
+			ntg.Networks[i] = tg.Networks[i].Copy()
+		}
+	}
+
 	if tg.Tasks != nil {
 		tasks := make([]*Task, len(ntg.Tasks))
 		for i, t := range ntg.Tasks {
@@ -4651,6 +4696,14 @@ func (tg *TaskGroup) Copy() *TaskGroup {
 	if tg.EphemeralDisk != nil {
 		ntg.EphemeralDisk = tg.EphemeralDisk.Copy()
 	}

+	if tg.Services != nil {
+		ntg.Services = make([]*Service, len(tg.Services))
+		for i, s := range tg.Services {
+			ntg.Services[i] = s.Copy()
+		}
+	}
+
 	return ntg
 }

@@ -4786,10 +4839,8 @@ func (tg *TaskGroup) Validate(j *Job) error {
 		}
 	}

-	// Check for duplicate tasks, that there is only leader task if any,
-	// and no duplicated static ports
+	// Check that there is only one leader task if any
 	tasks := make(map[string]int)
-	staticPorts := make(map[int]string)
 	leaderTasks := 0
 	for idx, task := range tg.Tasks {
 		if task.Name == "" {
@@ -4803,27 +4854,18 @@ func (tg *TaskGroup) Validate(j *Job) error {
 		if task.Leader {
 			leaderTasks++
 		}
-
-		if task.Resources == nil {
-			continue
-		}
-
-		for _, net := range task.Resources.Networks {
-			for _, port := range net.ReservedPorts {
-				if other, ok := staticPorts[port.Value]; ok {
-					err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
-					mErr.Errors = append(mErr.Errors, err)
-				} else {
-					staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label)
-				}
-			}
-		}
 	}

 	if leaderTasks > 1 {
 		mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader"))
 	}

+	// Validate task group and task network resources
+	if err := tg.validateNetworks(); err != nil {
+		outer := fmt.Errorf("Task group network validation failed: %v", err)
+		mErr.Errors = append(mErr.Errors, outer)
+	}
+
 	// Validate the tasks
 	for _, task := range tg.Tasks {
 		if err := task.Validate(tg.EphemeralDisk, j.Type); err != nil {
@@ -4834,6 +4876,75 @@ func (tg *TaskGroup) Validate(j *Job) error {
 	return mErr.ErrorOrNil()
 }

+func (tg *TaskGroup) validateNetworks() error {
+	var mErr multierror.Error
+	portLabels := make(map[string]string)
+	staticPorts := make(map[int]string)
+	mappedPorts := make(map[int]string)
+
+	for _, net := range tg.Networks {
+		for _, port := range append(net.ReservedPorts, net.DynamicPorts...) {
+			if other, ok := portLabels[port.Label]; ok {
+				mErr.Errors = append(mErr.Errors, fmt.Errorf("Port label %s already in use by %s", port.Label, other))
+			} else {
+				portLabels[port.Label] = "taskgroup network"
+			}
+
+			if port.Value != 0 {
+				// static port
+				if other, ok := staticPorts[port.Value]; ok {
+					err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
+					mErr.Errors = append(mErr.Errors, err)
+				} else {
+					staticPorts[port.Value] = fmt.Sprintf("taskgroup network:%s", port.Label)
+				}
+			}
+
+			if port.To != 0 {
+				if other, ok := mappedPorts[port.To]; ok {
+					err := fmt.Errorf("Port mapped to %d already in use by %s", port.To, other)
+					mErr.Errors = append(mErr.Errors, err)
+				} else {
+					mappedPorts[port.To] = fmt.Sprintf("taskgroup network:%s", port.Label)
+				}
+			}
+		}
+	}
+	// Check for duplicate tasks or port labels, and no duplicated static or mapped ports
+	for _, task := range tg.Tasks {
+		if task.Resources == nil {
+			continue
+		}
+
+		for _, net := range task.Resources.Networks {
+			for _, port := range append(net.ReservedPorts, net.DynamicPorts...) {
+				if other, ok := portLabels[port.Label]; ok {
+					mErr.Errors = append(mErr.Errors, fmt.Errorf("Port label %s already in use by %s", port.Label, other))
+				}
+
+				if port.Value != 0 {
+					if other, ok := staticPorts[port.Value]; ok {
+						err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
+						mErr.Errors = append(mErr.Errors, err)
+					} else {
+						staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label)
+					}
+				}
+
+				if port.To != 0 {
+					if other, ok := mappedPorts[port.To]; ok {
+						err := fmt.Errorf("Port mapped to %d already in use by %s", port.To, other)
+						mErr.Errors = append(mErr.Errors, err)
+					} else {
+						mappedPorts[port.To] = fmt.Sprintf("taskgroup network:%s", port.Label)
+					}
+				}
+			}
+		}
+	}
+	return mErr.ErrorOrNil()
+}
+
 // Warnings returns a list of warnings that may be from dubious settings or
 // deprecation warnings.
 func (tg *TaskGroup) Warnings(j *Job) error {
@ -4891,6 +5002,26 @@ func (c *CheckRestart) Copy() *CheckRestart {
|
||||||
return nc
|
return nc
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *CheckRestart) Equals(o *CheckRestart) bool {
	if c == nil || o == nil {
		return c == o
	}

	if c.Limit != o.Limit {
		return false
	}

	if c.Grace != o.Grace {
		return false
	}

	if c.IgnoreWarnings != o.IgnoreWarnings {
		return false
	}

	return true
}

func (c *CheckRestart) Validate() error {
	if c == nil {
		return nil

@ -4958,6 +5089,83 @@ func (sc *ServiceCheck) Copy() *ServiceCheck {
	return nsc
}

func (sc *ServiceCheck) Equals(o *ServiceCheck) bool {
	if sc == nil || o == nil {
		return sc == o
	}

	if sc.Name != o.Name {
		return false
	}

	if sc.AddressMode != o.AddressMode {
		return false
	}

	if !helper.CompareSliceSetString(sc.Args, o.Args) {
		return false
	}

	if !sc.CheckRestart.Equals(o.CheckRestart) {
		return false
	}

	if sc.Command != o.Command {
		return false
	}

	if sc.GRPCService != o.GRPCService {
		return false
	}

	if sc.GRPCUseTLS != o.GRPCUseTLS {
		return false
	}

	// Use DeepEqual here as order of slice values could matter
	if !reflect.DeepEqual(sc.Header, o.Header) {
		return false
	}

	if sc.InitialStatus != o.InitialStatus {
		return false
	}

	if sc.Interval != o.Interval {
		return false
	}

	if sc.Method != o.Method {
		return false
	}

	if sc.Path != o.Path {
		return false
	}

	if sc.PortLabel != o.PortLabel {
		return false
	}

	if sc.Protocol != o.Protocol {
		return false
	}

	if sc.TLSSkipVerify != o.TLSSkipVerify {
		return false
	}

	if sc.Timeout != o.Timeout {
		return false
	}

	if sc.Type != o.Type {
		return false
	}

	return true
}

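Because CheckRestart.Equals above treats a nil receiver and a nil argument as equal, the sc.CheckRestart.Equals(o.CheckRestart) call is safe even when neither check defines check_restart. A rough illustration of that nil handling:

	var a, b *CheckRestart
	a.Equals(b)                       // true: both sides are nil
	a = &CheckRestart{Limit: 3}
	a.Equals(b)                       // false: only one side is set
	a.Equals(&CheckRestart{Limit: 3}) // true: same Limit, Grace, IgnoreWarnings
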
func (sc *ServiceCheck) Canonicalize(serviceName string) {
	// Ensure empty maps/slices are treated as null to avoid scheduling
	// issues when using DeepEquals.

@ -5134,6 +5342,7 @@ type Service struct {
	Tags       []string        // List of tags for the service
	CanaryTags []string        // List of tags for the service when it is a canary
	Checks     []*ServiceCheck // List of checks associated with the service
	Connect    *ConsulConnect  // Consul Connect configuration
}

func (s *Service) Copy() *Service {

@ -5260,6 +5469,55 @@ func (s *Service) Hash(allocID, taskName string, canary bool) string {
	return b32.EncodeToString(h.Sum(nil))
}

func (s *Service) Equals(o *Service) bool {
	if s == nil || o == nil {
		return s == o
	}

	if s.AddressMode != o.AddressMode {
		return false
	}

	if !helper.CompareSliceSetString(s.CanaryTags, o.CanaryTags) {
		return false
	}

	if len(s.Checks) != len(o.Checks) {
		return false
	}

OUTER:
	for i := range s.Checks {
		for ii := range o.Checks {
			if s.Checks[i].Equals(o.Checks[ii]) {
				// Found match; continue with next check
				continue OUTER
			}
		}

		// No match
		return false
	}

	if !s.Connect.Equals(o.Connect) {
		return false
	}

	if s.Name != o.Name {
		return false
	}

	if s.PortLabel != o.PortLabel {
		return false
	}

	if !helper.CompareSliceSetString(s.Tags, o.Tags) {
		return false
	}

	return true
}

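A small sketch of the matching behavior above: because each check in s.Checks only needs some match in o.Checks, two services whose checks differ only in order compare as equal (which also means duplicate checks on one side are not detected by this loop). The c1/c2/a/b values are illustrative, and the example assumes ConsulConnect.Equals treats two nil values as equal, in line with the other Equals helpers here:

	c1 := &ServiceCheck{Name: "ready", Type: "http", Path: "/health", Interval: 10 * time.Second, Timeout: 2 * time.Second}
	c2 := &ServiceCheck{Name: "alive", Type: "tcp", Interval: 10 * time.Second, Timeout: 2 * time.Second}

	a := &Service{Name: "web", PortLabel: "http", Checks: []*ServiceCheck{c1, c2}}
	b := &Service{Name: "web", PortLabel: "http", Checks: []*ServiceCheck{c2, c1}}

	a.Equals(b) // true: same checks, different order
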
const (
	// DefaultKillTimeout is the default timeout between signaling a task it
	// will be killed and killing it.

@ -7828,7 +8086,7 @@ func (a *Allocation) SetEventDisplayMessages() {
}

// COMPAT(0.11): Remove in 0.11
// ComparableResources returns the resources on the allocation
// handling upgrade paths. After 0.11 calls to this should be replaced with:
// alloc.AllocatedResources.Comparable()
func (a *Allocation) ComparableResources() *ComparableResources {

@ -846,6 +846,28 @@ func TestTaskGroup_Validate(t *testing.T) {
	if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Networks: []*NetworkResource{
			{
				DynamicPorts: []Port{{"http", 0, 80}},
			},
		},
		Tasks: []*Task{
			{
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							DynamicPorts: []Port{{"http", 0, 80}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(j)
	require.Contains(t, err.Error(), "Port label http already in use")
	require.Contains(t, err.Error(), "Port mapped to 80 already in use")
}

func TestTask_Validate(t *testing.T) {
|
func TestTask_Validate(t *testing.T) {
|
||||||
|
@ -1856,7 +1878,7 @@ func TestResource_Add(t *testing.T) {
|
||||||
{
|
{
|
||||||
CIDR: "10.0.0.0/8",
|
CIDR: "10.0.0.0/8",
|
||||||
MBits: 100,
|
MBits: 100,
|
||||||
ReservedPorts: []Port{{"ssh", 22}},
|
ReservedPorts: []Port{{"ssh", 22, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -1868,7 +1890,7 @@ func TestResource_Add(t *testing.T) {
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -1886,7 +1908,7 @@ func TestResource_Add(t *testing.T) {
|
||||||
{
|
{
|
||||||
CIDR: "10.0.0.0/8",
|
CIDR: "10.0.0.0/8",
|
||||||
MBits: 150,
|
MBits: 150,
|
||||||
ReservedPorts: []Port{{"ssh", 22}, {"web", 80}},
|
ReservedPorts: []Port{{"ssh", 22, 0}, {"web", 80, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -1902,7 +1924,7 @@ func TestResource_Add_Network(t *testing.T) {
|
||||||
Networks: []*NetworkResource{
|
Networks: []*NetworkResource{
|
||||||
{
|
{
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
DynamicPorts: []Port{{"http", 0}, {"https", 0}},
|
DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -1910,7 +1932,7 @@ func TestResource_Add_Network(t *testing.T) {
|
||||||
Networks: []*NetworkResource{
|
Networks: []*NetworkResource{
|
||||||
{
|
{
|
||||||
MBits: 25,
|
MBits: 25,
|
||||||
DynamicPorts: []Port{{"admin", 0}},
|
DynamicPorts: []Port{{"admin", 0, 8080}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -1928,7 +1950,7 @@ func TestResource_Add_Network(t *testing.T) {
|
||||||
Networks: []*NetworkResource{
|
Networks: []*NetworkResource{
|
||||||
{
|
{
|
||||||
MBits: 75,
|
MBits: 75,
|
||||||
DynamicPorts: []Port{{"http", 0}, {"https", 0}, {"admin", 0}},
|
DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}, {"admin", 0, 8080}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -1951,7 +1973,7 @@ func TestComparableResources_Subtract(t *testing.T) {
|
||||||
{
|
{
|
||||||
CIDR: "10.0.0.0/8",
|
CIDR: "10.0.0.0/8",
|
||||||
MBits: 100,
|
MBits: 100,
|
||||||
ReservedPorts: []Port{{"ssh", 22}},
|
ReservedPorts: []Port{{"ssh", 22, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -1972,7 +1994,7 @@ func TestComparableResources_Subtract(t *testing.T) {
|
||||||
{
|
{
|
||||||
CIDR: "10.0.0.0/8",
|
CIDR: "10.0.0.0/8",
|
||||||
MBits: 20,
|
MBits: 20,
|
||||||
ReservedPorts: []Port{{"ssh", 22}},
|
ReservedPorts: []Port{{"ssh", 22, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -1994,7 +2016,7 @@ func TestComparableResources_Subtract(t *testing.T) {
|
||||||
{
|
{
|
||||||
CIDR: "10.0.0.0/8",
|
CIDR: "10.0.0.0/8",
|
||||||
MBits: 100,
|
MBits: 100,
|
||||||
ReservedPorts: []Port{{"ssh", 22}},
|
ReservedPorts: []Port{{"ssh", 22, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -4040,12 +4062,12 @@ func TestNetworkResourcesEquals(t *testing.T) {
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
true,
|
true,
|
||||||
|
@ -4056,12 +4078,12 @@ func TestNetworkResourcesEquals(t *testing.T) {
|
||||||
{
|
{
|
||||||
IP: "10.0.0.0",
|
IP: "10.0.0.0",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
|
@ -4072,12 +4094,12 @@ func TestNetworkResourcesEquals(t *testing.T) {
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 40,
|
MBits: 40,
|
||||||
ReservedPorts: []Port{{"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
|
@ -4088,12 +4110,12 @@ func TestNetworkResourcesEquals(t *testing.T) {
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"web", 80}, {"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}, {"web", 80, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
|
@ -4104,7 +4126,7 @@ func TestNetworkResourcesEquals(t *testing.T) {
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
|
@ -4120,12 +4142,12 @@ func TestNetworkResourcesEquals(t *testing.T) {
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"web", 80}},
|
ReservedPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
ReservedPorts: []Port{{"notweb", 80}},
|
ReservedPorts: []Port{{"notweb", 80, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
|
@ -4136,12 +4158,12 @@ func TestNetworkResourcesEquals(t *testing.T) {
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
DynamicPorts: []Port{{"web", 80}},
|
DynamicPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
DynamicPorts: []Port{{"web", 80}, {"web", 80}},
|
DynamicPorts: []Port{{"web", 80, 0}, {"web", 80, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
|
@ -4152,7 +4174,7 @@ func TestNetworkResourcesEquals(t *testing.T) {
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
DynamicPorts: []Port{{"web", 80}},
|
DynamicPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
|
@ -4168,12 +4190,12 @@ func TestNetworkResourcesEquals(t *testing.T) {
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
DynamicPorts: []Port{{"web", 80}},
|
DynamicPorts: []Port{{"web", 80, 0}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "10.0.0.1",
|
IP: "10.0.0.1",
|
||||||
MBits: 50,
|
MBits: 50,
|
||||||
DynamicPorts: []Port{{"notweb", 80}},
|
DynamicPorts: []Port{{"notweb", 80, 0}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
|
|
|
@ -459,3 +459,30 @@ func (d *driverPluginClient) ExecTaskStreamingRaw(ctx context.Context,
		}
	}
}

func (d *driverPluginClient) CreateNetwork(allocID string) (*NetworkIsolationSpec, error) {
	req := &proto.CreateNetworkRequest{
		AllocId: allocID,
	}

	resp, err := d.client.CreateNetwork(d.doneCtx, req)
	if err != nil {
		return nil, grpcutils.HandleGrpcErr(err, d.doneCtx)
	}

	return NetworkIsolationSpecFromProto(resp.IsolationSpec), nil
}

func (d *driverPluginClient) DestroyNetwork(allocID string, spec *NetworkIsolationSpec) error {
	req := &proto.DestroyNetworkRequest{
		AllocId:       allocID,
		IsolationSpec: NetworkIsolationSpecToProto(spec),
	}

	_, err := d.client.DestroyNetwork(d.doneCtx, req)
	if err != nil {
		return grpcutils.HandleGrpcErr(err, d.doneCtx)
	}

	return nil
}

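A hedged sketch of the intended call pattern around these two RPCs: create the namespace before tasks start and tear it down with the same alloc ID and spec. The setupAllocNetwork helper is illustrative only and not part of the plugin API:

	func setupAllocNetwork(nm DriverNetworkManager, allocID string) (*NetworkIsolationSpec, func() error, error) {
		spec, err := nm.CreateNetwork(allocID)
		if err != nil {
			return nil, nil, err
		}

		// The cleanup closure destroys exactly what was created for this alloc.
		cleanup := func() error { return nm.DestroyNetwork(allocID, spec) }
		return spec, cleanup, nil
	}
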
@ -79,6 +79,14 @@ type ExecOptions struct {
	ResizeCh <-chan TerminalSize
}

// DriverNetworkManager is the interface that exposes functions for creating a
// network namespace that tasks can join. It only needs to be implemented
// if the driver MUST create the network namespace.
type DriverNetworkManager interface {
	CreateNetwork(allocID string) (*NetworkIsolationSpec, error)
	DestroyNetwork(allocID string, spec *NetworkIsolationSpec) error
}

// InternalDriverPlugin is an interface that exposes functions that are only
// implemented by internal driver plugins.
type InternalDriverPlugin interface {

@ -148,6 +156,45 @@ type Capabilities struct {

	// FSIsolation indicates what kind of filesystem isolation the driver supports.
	FSIsolation FSIsolation

	// NetIsolationModes lists the set of isolation modes supported by the driver
	NetIsolationModes []NetIsolationMode

	// MustInitiateNetwork tells Nomad that the driver must create the network
	// namespace and that the CreateNetwork and DestroyNetwork RPCs are implemented.
	MustInitiateNetwork bool
}

func (c *Capabilities) HasNetIsolationMode(m NetIsolationMode) bool {
	for _, mode := range c.NetIsolationModes {
		if mode == m {
			return true
		}
	}
	return false
}

type NetIsolationMode string

var (
	// NetIsolationModeHost disables network isolation and uses the host network
	NetIsolationModeHost = NetIsolationMode("host")

	// NetIsolationModeGroup uses the group network namespace for isolation
	NetIsolationModeGroup = NetIsolationMode("group")

	// NetIsolationModeTask isolates the network to just the task
	NetIsolationModeTask = NetIsolationMode("task")

	// NetIsolationModeNone indicates that there is no network to isolate and is
	// intended to be used for tasks that the client manages remotely
	NetIsolationModeNone = NetIsolationMode("none")
)

type NetworkIsolationSpec struct {
	Mode   NetIsolationMode
	Path   string
	Labels map[string]string
}

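A brief, hypothetical sketch of a driver advertising these fields and a client querying them; exampleCaps is not taken from any real driver:

	exampleCaps := &Capabilities{
		SendSignals:         true,
		Exec:                false,
		NetIsolationModes:   []NetIsolationMode{NetIsolationModeHost, NetIsolationModeGroup},
		MustInitiateNetwork: false,
	}

	// The client can then decide whether a shared (group) namespace is usable.
	if exampleCaps.HasNetIsolationMode(NetIsolationModeGroup) {
		// place the allocation with a group network namespace
	}
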
type TerminalSize struct {

@ -156,21 +203,22 @@ type TerminalSize struct {
}

type TaskConfig struct {
	ID               string
	JobName          string
	TaskGroupName    string
	Name             string
	Env              map[string]string
	DeviceEnv        map[string]string
	Resources        *Resources
	Devices          []*DeviceConfig
	Mounts           []*MountConfig
	User             string
	AllocDir         string
	rawDriverConfig  []byte
	StdoutPath       string
	StderrPath       string
	AllocID          string
	NetworkIsolation *NetworkIsolationSpec
}

func (tc *TaskConfig) Copy() *TaskConfig {

File diff suppressed because it is too large
@ -74,6 +74,14 @@ service Driver {
    // ExecTaskStreaming executes a command inside the tasks execution context
    // and streams back results
    rpc ExecTaskStreaming(stream ExecTaskStreamingRequest) returns (stream ExecTaskStreamingResponse) {}

    // CreateNetwork is implemented when the driver needs to create the network
    // namespace instead of allowing the Nomad client to do so.
    rpc CreateNetwork(CreateNetworkRequest) returns (CreateNetworkResponse) {}

    // DestroyNetwork destroys a previously created network. This rpc is only
    // implemented if the driver needs to manage network namespace creation.
    rpc DestroyNetwork(DestroyNetworkRequest) returns (DestroyNetworkResponse) {}
}

message TaskConfigSchemaRequest {}

@ -314,6 +322,27 @@ message ExecTaskStreamingResponse {
    ExitResult result = 4;
}

message CreateNetworkRequest {

    // AllocID of the allocation the network is associated with
    string alloc_id = 1;
}

message CreateNetworkResponse {

    NetworkIsolationSpec isolation_spec = 1;
}

message DestroyNetworkRequest {

    // AllocID of the allocation the network is associated with
    string alloc_id = 1;

    NetworkIsolationSpec isolation_spec = 2;
}

message DestroyNetworkResponse {}

message DriverCapabilities {

    // SendSignals indicates that the driver can send process signals (ex. SIGUSR1)

@ -331,6 +360,24 @@ message DriverCapabilities {
    }
    // FsIsolation indicates what kind of filesystem isolation a driver supports.
    FSIsolation fs_isolation = 3;

    repeated NetworkIsolationSpec.NetworkIsolationMode network_isolation_modes = 4;

    bool must_create_network = 5;
}

message NetworkIsolationSpec {
    enum NetworkIsolationMode {
        HOST = 0;
        GROUP = 1;
        TASK = 2;
        NONE = 3;
    }
    NetworkIsolationMode mode = 1;

    string path = 2;

    map<string,string> labels = 3;
}

message TaskConfig {

@ -384,6 +431,10 @@ message TaskConfig {

    // AllocId is the ID of the associated allocation
    string alloc_id = 15;

    // NetworkIsolationSpec specifies the configuration for the network namespace
    // to use for the task. *Only supported on Linux
    NetworkIsolationSpec network_isolation_spec = 16;
}

message Resources {

@ -39,8 +39,10 @@ func (b *driverPluginServer) Capabilities(ctx context.Context, req *proto.Capabi
	}
	resp := &proto.CapabilitiesResponse{
		Capabilities: &proto.DriverCapabilities{
			SendSignals:           caps.SendSignals,
			Exec:                  caps.Exec,
			MustCreateNetwork:     caps.MustInitiateNetwork,
			NetworkIsolationModes: []proto.NetworkIsolationSpec_NetworkIsolationMode{},
		},
	}

@ -54,6 +56,10 @@ func (b *driverPluginServer) Capabilities(ctx context.Context, req *proto.Capabi
	default:
		resp.Capabilities.FsIsolation = proto.DriverCapabilities_NONE
	}

	for _, mode := range caps.NetIsolationModes {
		resp.Capabilities.NetworkIsolationModes = append(resp.Capabilities.NetworkIsolationModes, netIsolationModeToProto(mode))
	}
	return resp, nil
}

@ -374,3 +380,34 @@ func (b *driverPluginServer) TaskEvents(req *proto.TaskEventsRequest, srv proto.
	}
	return nil
}

func (b *driverPluginServer) CreateNetwork(ctx context.Context, req *proto.CreateNetworkRequest) (*proto.CreateNetworkResponse, error) {
	nm, ok := b.impl.(DriverNetworkManager)
	if !ok {
		return nil, fmt.Errorf("CreateNetwork RPC not supported by driver")
	}

	spec, err := nm.CreateNetwork(req.AllocId)
	if err != nil {
		return nil, err
	}

	return &proto.CreateNetworkResponse{
		IsolationSpec: NetworkIsolationSpecToProto(spec),
	}, nil
}

func (b *driverPluginServer) DestroyNetwork(ctx context.Context, req *proto.DestroyNetworkRequest) (*proto.DestroyNetworkResponse, error) {
	nm, ok := b.impl.(DriverNetworkManager)
	if !ok {
		return nil, fmt.Errorf("DestroyNetwork RPC not supported by driver")
	}

	err := nm.DestroyNetwork(req.AllocId, NetworkIsolationSpecFromProto(req.IsolationSpec))
	if err != nil {
		return nil, err
	}

	return &proto.DestroyNetworkResponse{}, nil
}

@ -195,6 +195,19 @@ type MockDriver struct {
	SignalTaskF        func(string, string) error
	ExecTaskF          func(string, []string, time.Duration) (*drivers.ExecTaskResult, error)
	ExecTaskStreamingF func(context.Context, string, *drivers.ExecOptions) (*drivers.ExitResult, error)

	MockNetworkManager
}

type MockNetworkManager struct {
	CreateNetworkF  func(string) (*drivers.NetworkIsolationSpec, error)
	DestroyNetworkF func(string, *drivers.NetworkIsolationSpec) error
}

func (m *MockNetworkManager) CreateNetwork(id string) (*drivers.NetworkIsolationSpec, error) {
	return m.CreateNetworkF(id)
}

func (m *MockNetworkManager) DestroyNetwork(id string, spec *drivers.NetworkIsolationSpec) error {
	return m.DestroyNetworkF(id, spec)
}

func (d *MockDriver) TaskConfigSchema() (*hclspec.Spec, error) { return d.TaskConfigSchemaF() }

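A brief sketch of how a test might stub these hooks; the mock's other function fields are omitted, and the path returned is purely illustrative:

	d := &MockDriver{
		MockNetworkManager: MockNetworkManager{
			CreateNetworkF: func(allocID string) (*drivers.NetworkIsolationSpec, error) {
				return &drivers.NetworkIsolationSpec{
					Mode: drivers.NetIsolationModeGroup,
					Path: "/var/run/netns/" + allocID,
				}, nil
			},
			DestroyNetworkF: func(allocID string, spec *drivers.NetworkIsolationSpec) error {
				return nil
			},
		},
	}

	// CreateNetwork is promoted from the embedded MockNetworkManager.
	if spec, err := d.CreateNetwork("alloc-1234"); err == nil {
		_ = spec.Path // "/var/run/netns/alloc-1234"
	}
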
@ -571,3 +571,55 @@ func memoryUsageMeasuredFieldsFromProto(fields []proto.MemoryUsage_Fields) []str

	return r
}

func netIsolationModeToProto(mode NetIsolationMode) proto.NetworkIsolationSpec_NetworkIsolationMode {
	switch mode {
	case NetIsolationModeHost:
		return proto.NetworkIsolationSpec_HOST
	case NetIsolationModeGroup:
		return proto.NetworkIsolationSpec_GROUP
	case NetIsolationModeTask:
		return proto.NetworkIsolationSpec_TASK
	case NetIsolationModeNone:
		return proto.NetworkIsolationSpec_NONE
	default:
		return proto.NetworkIsolationSpec_HOST
	}
}

func netIsolationModeFromProto(pb proto.NetworkIsolationSpec_NetworkIsolationMode) NetIsolationMode {
	switch pb {
	case proto.NetworkIsolationSpec_HOST:
		return NetIsolationModeHost
	case proto.NetworkIsolationSpec_GROUP:
		return NetIsolationModeGroup
	case proto.NetworkIsolationSpec_TASK:
		return NetIsolationModeTask
	case proto.NetworkIsolationSpec_NONE:
		return NetIsolationModeNone
	default:
		return NetIsolationModeHost
	}
}

func NetworkIsolationSpecToProto(spec *NetworkIsolationSpec) *proto.NetworkIsolationSpec {
	if spec == nil {
		return nil
	}
	return &proto.NetworkIsolationSpec{
		Path:   spec.Path,
		Labels: spec.Labels,
		Mode:   netIsolationModeToProto(spec.Mode),
	}
}

func NetworkIsolationSpecFromProto(pb *proto.NetworkIsolationSpec) *NetworkIsolationSpec {
	if pb == nil {
		return nil
	}
	return &NetworkIsolationSpec{
		Path:   pb.Path,
		Labels: pb.Labels,
		Mode:   netIsolationModeFromProto(pb.Mode),
	}
}

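A quick round-trip sketch using the helpers above; labels and path survive the conversion, unknown enum values fall back to host mode on both sides, and nil is preserved as nil:

	orig := &NetworkIsolationSpec{
		Mode:   NetIsolationModeGroup,
		Path:   "/var/run/netns/alloc-1234",
		Labels: map[string]string{"alloc_id": "alloc-1234"},
	}

	pb := NetworkIsolationSpecToProto(orig)   // pb.Mode == proto.NetworkIsolationSpec_GROUP
	back := NetworkIsolationSpecFromProto(pb) // equals orig field for field
	_ = back

	// Nil round-trips as nil in both directions.
	_ = NetworkIsolationSpecToProto(nil)
	_ = NetworkIsolationSpecFromProto(nil)
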
@ -489,6 +489,9 @@ func (s *GenericScheduler) computePlacements(destructive, place []placementResul
			DiskMB: int64(tg.EphemeralDisk.SizeMB),
		},
	}
	if option.AllocResources != nil {
		resources.Shared.Networks = option.AllocResources.Networks
	}

	// Create an allocation for this
	alloc := &structs.Allocation{

@ -507,7 +510,8 @@ func (s *GenericScheduler) computePlacements(destructive, place []placementResul
		DesiredStatus: structs.AllocDesiredStatusRun,
		ClientStatus:  structs.AllocClientStatusPending,
		SharedResources: &structs.Resources{
			DiskMB:   tg.EphemeralDisk.SizeMB,
			Networks: tg.Networks,
		},
	}

@ -512,7 +512,7 @@ func TestPreemption(t *testing.T) {
		},
	},
}),
createAllocWithTaskgroupNetwork(allocIDs[1], lowPrioJob, &structs.Resources{
	CPU:      200,
	MemoryMB: 256,
	DiskMB:   4 * 1024,

@ -520,9 +520,13 @@ func TestPreemption(t *testing.T) {
		{
			Device: "eth0",
			IP:     "192.168.0.200",
			MBits:  200,
		},
	},
}, &structs.NetworkResource{
	Device: "eth0",
	IP:     "192.168.0.201",
	MBits:  300,
}),
createAlloc(allocIDs[2], lowPrioJob, &structs.Resources{
	CPU: 200,

@ -1379,10 +1383,19 @@ func TestPreemption(t *testing.T) {

// helper method to create allocations with given jobs and resources
func createAlloc(id string, job *structs.Job, resource *structs.Resources) *structs.Allocation {
	return createAllocInner(id, job, resource, nil, nil)
}

// helper method to create an allocation with a network at the task group level
func createAllocWithTaskgroupNetwork(id string, job *structs.Job, resource *structs.Resources, tgNet *structs.NetworkResource) *structs.Allocation {
	return createAllocInner(id, job, resource, nil, tgNet)
}

func createAllocWithDevice(id string, job *structs.Job, resource *structs.Resources, allocatedDevices *structs.AllocatedDeviceResource) *structs.Allocation {
	return createAllocInner(id, job, resource, allocatedDevices, nil)
}

func createAllocInner(id string, job *structs.Job, resource *structs.Resources, allocatedDevices *structs.AllocatedDeviceResource, tgNetwork *structs.NetworkResource) *structs.Allocation {
	alloc := &structs.Allocation{
		ID:  id,
		Job: job,

@ -1413,5 +1426,11 @@ func createAllocWithDevice(id string, job *structs.Job, resource *structs.Resour
	if allocatedDevices != nil {
		alloc.AllocatedResources.Tasks["web"].Devices = []*structs.AllocatedDeviceResource{allocatedDevices}
	}

	if tgNetwork != nil {
		alloc.AllocatedResources.Shared = structs.AllocatedSharedResources{
			Networks: []*structs.NetworkResource{tgNetwork},
		}
	}
	return alloc
}

@ -17,10 +17,11 @@ const (
// along with a node when iterating. This state can be modified as
// various rank methods are applied.
type RankedNode struct {
	Node           *structs.Node
	FinalScore     float64
	Scores         []float64
	TaskResources  map[string]*structs.AllocatedTaskResources
	AllocResources *structs.AllocatedSharedResources

	// Allocs is used to cache the proposed allocations on the
	// node. This can be shared between iterators that require it.

@ -224,6 +225,59 @@ OUTER:
		}
		preemptor.SetPreemptions(currentPreemptions)

		// Check if we need task group network resource
		if len(iter.taskGroup.Networks) > 0 {
			ask := iter.taskGroup.Networks[0].Copy()
			offer, err := netIdx.AssignNetwork(ask)
			if offer == nil {
				// If eviction is not enabled, mark this node as exhausted and continue
				if !iter.evict {
					iter.ctx.Metrics().ExhaustedNode(option.Node,
						fmt.Sprintf("network: %s", err))
					netIdx.Release()
					continue OUTER
				}

				// Look for preemptible allocations to satisfy the network resource for this task
				preemptor.SetCandidates(proposed)

				netPreemptions := preemptor.PreemptForNetwork(ask, netIdx)
				if netPreemptions == nil {
					iter.ctx.Logger().Named("binpack").Error("preemption not possible ", "network_resource", ask)
					netIdx.Release()
					continue OUTER
				}
				allocsToPreempt = append(allocsToPreempt, netPreemptions...)

				// First subtract out preempted allocations
				proposed = structs.RemoveAllocs(proposed, netPreemptions)

				// Reset the network index and try the offer again
				netIdx.Release()
				netIdx = structs.NewNetworkIndex()
				netIdx.SetNode(option.Node)
				netIdx.AddAllocs(proposed)

				offer, err = netIdx.AssignNetwork(ask)
				if offer == nil {
					iter.ctx.Logger().Named("binpack").Error("unexpected error, unable to create network offer after considering preemption", "error", err)
					netIdx.Release()
					continue OUTER
				}
			}

			// Reserve this to prevent another task from colliding
			netIdx.AddReserved(offer)

			// Update the network ask to the offer
			total.Shared.Networks = []*structs.NetworkResource{offer}
			option.AllocResources = &structs.AllocatedSharedResources{
				Networks: []*structs.NetworkResource{offer},
				DiskMB:   int64(iter.taskGroup.EphemeralDisk.SizeMB),
			}
		}

		for _, task := range iter.taskGroup.Tasks {
			// Allocate the resources
			taskResources := &structs.AllocatedTaskResources{

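A compressed sketch of the index bookkeeping used above; node, proposed, and taskGroup are assumed local variables, the ask comes from the task group's network block, and the resulting offer is what ends up in option.AllocResources:

	netIdx := structs.NewNetworkIndex()
	netIdx.SetNode(node)       // node is a *structs.Node with its host networks
	netIdx.AddAllocs(proposed) // allocations already proposed for the node

	ask := taskGroup.Networks[0].Copy()
	offer, err := netIdx.AssignNetwork(ask)
	if offer == nil {
		// Exhausted: record the dimension (e.g. "network: bandwidth exceeded")
		// via the context metrics and skip this node.
		_ = err
		netIdx.Release()
	} else {
		// Reserve the offer so later asks on this node cannot collide with it.
		netIdx.AddReserved(offer)
	}
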
@ -6,7 +6,7 @@ import (
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"
)

func TestFeasibleRankIterator(t *testing.T) {

@ -127,6 +127,246 @@ func TestBinPackIterator_NoExistingAlloc(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Tests bin packing iterator with network resources at task and task group level
|
||||||
|
func TestBinPackIterator_Network_Success(t *testing.T) {
|
||||||
|
_, ctx := testContext(t)
|
||||||
|
nodes := []*RankedNode{
|
||||||
|
{
|
||||||
|
Node: &structs.Node{
|
||||||
|
// Perfect fit
|
||||||
|
NodeResources: &structs.NodeResources{
|
||||||
|
Cpu: structs.NodeCpuResources{
|
||||||
|
CpuShares: 2048,
|
||||||
|
},
|
||||||
|
Memory: structs.NodeMemoryResources{
|
||||||
|
MemoryMB: 2048,
|
||||||
|
},
|
||||||
|
Networks: []*structs.NetworkResource{
|
||||||
|
{
|
||||||
|
Mode: "host",
|
||||||
|
Device: "eth0",
|
||||||
|
CIDR: "192.168.0.100/32",
|
||||||
|
MBits: 1000,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ReservedResources: &structs.NodeReservedResources{
|
||||||
|
Cpu: structs.NodeReservedCpuResources{
|
||||||
|
CpuShares: 1024,
|
||||||
|
},
|
||||||
|
Memory: structs.NodeReservedMemoryResources{
|
||||||
|
MemoryMB: 1024,
|
||||||
|
},
|
||||||
|
Networks: structs.NodeReservedNetworkResources{
|
||||||
|
ReservedHostPorts: "1000-2000",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Node: &structs.Node{
|
||||||
|
// 50% fit
|
||||||
|
NodeResources: &structs.NodeResources{
|
||||||
|
Cpu: structs.NodeCpuResources{
|
||||||
|
CpuShares: 4096,
|
||||||
|
},
|
||||||
|
Memory: structs.NodeMemoryResources{
|
||||||
|
MemoryMB: 4096,
|
||||||
|
},
|
||||||
|
Networks: []*structs.NetworkResource{
|
||||||
|
{
|
||||||
|
Mode: "host",
|
||||||
|
Device: "eth0",
|
||||||
|
CIDR: "192.168.0.100/32",
|
||||||
|
MBits: 1000,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ReservedResources: &structs.NodeReservedResources{
|
||||||
|
Cpu: structs.NodeReservedCpuResources{
|
||||||
|
CpuShares: 1024,
|
||||||
|
},
|
||||||
|
Memory: structs.NodeReservedMemoryResources{
|
||||||
|
MemoryMB: 1024,
|
||||||
|
},
|
||||||
|
Networks: structs.NodeReservedNetworkResources{
|
||||||
|
ReservedHostPorts: "1000-2000",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
static := NewStaticRankIterator(ctx, nodes)
|
||||||
|
|
||||||
|
// Create a task group with networks specified at task and task group level
|
||||||
|
taskGroup := &structs.TaskGroup{
|
||||||
|
EphemeralDisk: &structs.EphemeralDisk{},
|
||||||
|
Tasks: []*structs.Task{
|
||||||
|
{
|
||||||
|
Name: "web",
|
||||||
|
Resources: &structs.Resources{
|
||||||
|
CPU: 1024,
|
||||||
|
MemoryMB: 1024,
|
||||||
|
Networks: []*structs.NetworkResource{
|
||||||
|
{
|
||||||
|
Device: "eth0",
|
||||||
|
MBits: 300,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Networks: []*structs.NetworkResource{
|
||||||
|
{
|
||||||
|
Device: "eth0",
|
||||||
|
MBits: 500,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
binp := NewBinPackIterator(ctx, static, false, 0)
|
||||||
|
binp.SetTaskGroup(taskGroup)
|
||||||
|
|
||||||
|
scoreNorm := NewScoreNormalizationIterator(ctx, binp)
|
||||||
|
|
||||||
|
out := collectRanked(scoreNorm)
|
||||||
|
require := require.New(t)
|
||||||
|
|
||||||
|
// We expect both nodes to be eligible to place
|
||||||
|
require.Len(out, 2)
|
||||||
|
require.Equal(out[0], nodes[0])
|
||||||
|
require.Equal(out[1], nodes[1])
|
||||||
|
|
||||||
|
// First node should have a perfect score
|
||||||
|
require.Equal(1.0, out[0].FinalScore)
|
||||||
|
|
||||||
|
if out[1].FinalScore < 0.75 || out[1].FinalScore > 0.95 {
|
||||||
|
t.Fatalf("Bad Score: %v", out[1].FinalScore)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify network information at taskgroup level
|
||||||
|
require.Equal(500, out[0].AllocResources.Networks[0].MBits)
|
||||||
|
require.Equal(500, out[1].AllocResources.Networks[0].MBits)
|
||||||
|
|
||||||
|
// Verify network information at task level
|
||||||
|
require.Equal(300, out[0].TaskResources["web"].Networks[0].MBits)
|
||||||
|
require.Equal(300, out[1].TaskResources["web"].Networks[0].MBits)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that bin packing iterator fails due to overprovisioning of network
|
||||||
|
// This test has network resources at task group and task level
|
||||||
|
func TestBinPackIterator_Network_Failure(t *testing.T) {
|
||||||
|
_, ctx := testContext(t)
|
||||||
|
nodes := []*RankedNode{
|
||||||
|
{
|
||||||
|
Node: &structs.Node{
|
||||||
|
// 50% fit
|
||||||
|
NodeResources: &structs.NodeResources{
|
||||||
|
Cpu: structs.NodeCpuResources{
|
||||||
|
CpuShares: 4096,
|
||||||
|
},
|
||||||
|
Memory: structs.NodeMemoryResources{
|
||||||
|
MemoryMB: 4096,
|
||||||
|
},
|
||||||
|
Networks: []*structs.NetworkResource{
|
||||||
|
{
|
||||||
|
Mode: "host",
|
||||||
|
Device: "eth0",
|
||||||
|
CIDR: "192.168.0.100/32",
|
||||||
|
MBits: 1000,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ReservedResources: &structs.NodeReservedResources{
|
||||||
|
Cpu: structs.NodeReservedCpuResources{
|
||||||
|
CpuShares: 1024,
|
||||||
|
},
|
||||||
|
Memory: structs.NodeReservedMemoryResources{
|
||||||
|
MemoryMB: 1024,
|
||||||
|
},
|
||||||
|
Networks: structs.NodeReservedNetworkResources{
|
||||||
|
ReservedHostPorts: "1000-2000",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a planned alloc that takes up some network mbits at task and task group level
|
||||||
|
plan := ctx.Plan()
|
||||||
|
plan.NodeAllocation[nodes[0].Node.ID] = []*structs.Allocation{
|
||||||
|
{
|
||||||
|
AllocatedResources: &structs.AllocatedResources{
|
||||||
|
Tasks: map[string]*structs.AllocatedTaskResources{
|
||||||
|
"web": {
|
||||||
|
Cpu: structs.AllocatedCpuResources{
|
||||||
|
CpuShares: 2048,
|
||||||
|
},
|
||||||
|
Memory: structs.AllocatedMemoryResources{
|
||||||
|
MemoryMB: 2048,
|
||||||
|
},
|
||||||
|
Networks: []*structs.NetworkResource{
|
||||||
|
{
|
||||||
|
Device: "eth0",
|
||||||
|
IP: "192.168.0.1",
|
||||||
|
MBits: 300,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Shared: structs.AllocatedSharedResources{
|
||||||
|
Networks: []*structs.NetworkResource{
|
||||||
|
{
|
||||||
|
Device: "eth0",
|
||||||
|
IP: "192.168.0.1",
|
||||||
|
MBits: 400,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
static := NewStaticRankIterator(ctx, nodes)
|
||||||
|
|
||||||
|
// Create a task group with networks specified at task and task group level
|
||||||
|
taskGroup := &structs.TaskGroup{
|
||||||
|
EphemeralDisk: &structs.EphemeralDisk{},
|
||||||
|
Tasks: []*structs.Task{
|
||||||
|
{
|
||||||
|
Name: "web",
|
||||||
|
Resources: &structs.Resources{
|
||||||
|
CPU: 1024,
|
||||||
|
MemoryMB: 1024,
|
||||||
|
Networks: []*structs.NetworkResource{
|
||||||
|
{
|
||||||
|
Device: "eth0",
|
||||||
|
MBits: 300,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Networks: []*structs.NetworkResource{
|
||||||
|
{
|
||||||
|
Device: "eth0",
|
||||||
|
MBits: 250,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
binp := NewBinPackIterator(ctx, static, false, 0)
|
||||||
|
binp.SetTaskGroup(taskGroup)
|
||||||
|
|
||||||
|
scoreNorm := NewScoreNormalizationIterator(ctx, binp)
|
||||||
|
|
||||||
|
out := collectRanked(scoreNorm)
|
||||||
|
require := require.New(t)
|
||||||
|
|
||||||
|
// We expect a placement failure because we need 800 mbits of network
|
||||||
|
// and only 300 is free
|
||||||
|
require.Len(out, 0)
|
||||||
|
require.Equal(1, ctx.metrics.DimensionExhausted["network: bandwidth exceeded"])
|
||||||
|
}
|
||||||
|
|
||||||
func TestBinPackIterator_PlannedAlloc(t *testing.T) {
|
func TestBinPackIterator_PlannedAlloc(t *testing.T) {
|
||||||
_, ctx := testContext(t)
|
_, ctx := testContext(t)
|
||||||
nodes := []*RankedNode{
|
nodes := []*RankedNode{
|
||||||
|
|
|
@ -344,6 +344,10 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
		},
	}

	if option.AllocResources != nil {
		resources.Shared.Networks = option.AllocResources.Networks
	}

	// Create an allocation for this
	alloc := &structs.Allocation{
		ID: uuid.Generate(),

@ -360,7 +364,8 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
		DesiredStatus: structs.AllocDesiredStatusRun,
		ClientStatus:  structs.AllocClientStatusPending,
		SharedResources: &structs.Resources{
			DiskMB:   missing.TaskGroup.EphemeralDisk.SizeMB,
			Networks: missing.TaskGroup.Networks,
		},
	}

|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check that the network resources haven't changed
|
||||||
|
if networkUpdated(a.Networks, b.Networks) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// Check each task
|
// Check each task
|
||||||
for _, at := range a.Tasks {
|
for _, at := range a.Tasks {
|
||||||
bt := b.LookupTask(at.Name)
|
bt := b.LookupTask(at.Name)
|
||||||
|
@ -387,22 +392,9 @@ func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Inspect the network to see if the dynamic ports are different
|
// Inspect the network to see if the dynamic ports are different
|
||||||
if len(at.Resources.Networks) != len(bt.Resources.Networks) {
|
if networkUpdated(at.Resources.Networks, bt.Resources.Networks) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
for idx := range at.Resources.Networks {
|
|
||||||
an := at.Resources.Networks[idx]
|
|
||||||
bn := bt.Resources.Networks[idx]
|
|
||||||
|
|
||||||
if an.MBits != bn.MBits {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
aPorts, bPorts := networkPortMap(an), networkPortMap(bn)
|
|
||||||
if !reflect.DeepEqual(aPorts, bPorts) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inspect the non-network resources
|
// Inspect the non-network resources
|
||||||
if ar, br := at.Resources, bt.Resources; ar.CPU != br.CPU {
|
if ar, br := at.Resources, bt.Resources; ar.CPU != br.CPU {
|
||||||
|
@ -414,6 +406,26 @@ func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func networkUpdated(netA, netB []*structs.NetworkResource) bool {
|
||||||
|
if len(netA) != len(netB) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
for idx := range netA {
|
||||||
|
an := netA[idx]
|
||||||
|
bn := netB[idx]
|
||||||
|
|
||||||
|
if an.MBits != bn.MBits {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
aPorts, bPorts := networkPortMap(an), networkPortMap(bn)
|
||||||
|
if !reflect.DeepEqual(aPorts, bPorts) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// networkPortMap takes a network resource and returns a map of port labels to
|
// networkPortMap takes a network resource and returns a map of port labels to
|
||||||
// values. The value for dynamic ports is disregarded even if it is set. This
|
// values. The value for dynamic ports is disregarded even if it is set. This
|
||||||
// makes this function suitable for comparing two network resources for changes.
|
// makes this function suitable for comparing two network resources for changes.
|
||||||
|
@ -825,9 +837,11 @@ func genericAllocUpdateFn(ctx Context, stack Stack, evalID string) allocUpdateTy
|
||||||
newAlloc.AllocatedResources = &structs.AllocatedResources{
|
newAlloc.AllocatedResources = &structs.AllocatedResources{
|
||||||
Tasks: option.TaskResources,
|
Tasks: option.TaskResources,
|
||||||
Shared: structs.AllocatedSharedResources{
|
Shared: structs.AllocatedSharedResources{
|
||||||
DiskMB: int64(newTG.EphemeralDisk.SizeMB),
|
DiskMB: int64(newTG.EphemeralDisk.SizeMB),
|
||||||
|
Networks: newTG.Networks,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use metrics from existing alloc for in place upgrade
|
// Use metrics from existing alloc for in place upgrade
|
||||||
// This is because if the inplace upgrade succeeded, any scoring metadata from
|
// This is because if the inplace upgrade succeeded, any scoring metadata from
|
||||||
// when it first went through the scheduler should still be preserved. Using scoring
|
// when it first went through the scheduler should still be preserved. Using scoring
|
||||||
|
|
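A small illustration of the networkUpdated helper above; only MBits and the port map are considered, so a change to the device name alone does not force an update here:

	a := []*structs.NetworkResource{{Device: "eth0", MBits: 100,
		ReservedPorts: []structs.Port{{Label: "http", Value: 80}}}}
	b := []*structs.NetworkResource{{Device: "eth1", MBits: 100,
		ReservedPorts: []structs.Port{{Label: "http", Value: 80}}}}
	c := []*structs.NetworkResource{{Device: "eth0", MBits: 200,
		ReservedPorts: []structs.Port{{Label: "http", Value: 80}}}}

	networkUpdated(a, b) // false: same bandwidth and same port map
	networkUpdated(a, c) // true: MBits changed
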
497 vendor/github.com/containernetworking/cni/libcni/api.go (generated, vendored, new file)

@ -0,0 +1,497 @@
|
// Copyright 2015 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package libcni
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/containernetworking/cni/pkg/invoke"
|
||||||
|
"github.com/containernetworking/cni/pkg/types"
|
||||||
|
"github.com/containernetworking/cni/pkg/version"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
CacheDir = "/var/lib/cni"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A RuntimeConf holds the arguments to one invocation of a CNI plugin
|
||||||
|
// excepting the network configuration, with the nested exception that
|
||||||
|
// the `runtimeConfig` from the network configuration is included
|
||||||
|
// here.
|
||||||
|
type RuntimeConf struct {
|
||||||
|
ContainerID string
|
||||||
|
NetNS string
|
||||||
|
IfName string
|
||||||
|
Args [][2]string
|
||||||
|
// A dictionary of capability-specific data passed by the runtime
|
||||||
|
// to plugins as top-level keys in the 'runtimeConfig' dictionary
|
||||||
|
// of the plugin's stdin data. libcni will ensure that only keys
|
||||||
|
// in this map which match the capabilities of the plugin are passed
|
||||||
|
// to the plugin
|
||||||
|
CapabilityArgs map[string]interface{}
|
||||||
|
|
||||||
|
// A cache directory in which to library data. Defaults to CacheDir
|
||||||
|
CacheDir string
|
||||||
|
}
|
||||||
|
|
||||||
|
type NetworkConfig struct {
|
||||||
|
Network *types.NetConf
|
||||||
|
Bytes []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type NetworkConfigList struct {
|
||||||
|
Name string
|
||||||
|
CNIVersion string
|
||||||
|
DisableCheck bool
|
||||||
|
Plugins []*NetworkConfig
|
||||||
|
Bytes []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type CNI interface {
|
||||||
|
AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
|
||||||
|
CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
|
||||||
|
DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
|
||||||
|
GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
|
||||||
|
|
||||||
|
AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
|
||||||
|
CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
|
||||||
|
DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
|
||||||
|
GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
|
||||||
|
|
||||||
|
ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
|
||||||
|
ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type CNIConfig struct {
|
||||||
|
Path []string
|
||||||
|
exec invoke.Exec
|
||||||
|
}
|
||||||
|
|
||||||
|
// CNIConfig implements the CNI interface
|
||||||
|
var _ CNI = &CNIConfig{}
|
||||||
|
|
||||||
|
// NewCNIConfig returns a new CNIConfig object that will search for plugins
|
||||||
|
// in the given paths and use the given exec interface to run those plugins,
|
||||||
|
// or if the exec interface is not given, will use a default exec handler.
|
||||||
|
func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig {
|
||||||
|
return &CNIConfig{
|
||||||
|
Path: path,
|
||||||
|
exec: exec,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
inject := map[string]interface{}{
|
||||||
|
"name": name,
|
||||||
|
"cniVersion": cniVersion,
|
||||||
|
}
|
||||||
|
// Add previous plugin result
|
||||||
|
if prevResult != nil {
|
||||||
|
inject["prevResult"] = prevResult
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure every config uses the same name and version
|
||||||
|
orig, err = InjectConf(orig, inject)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return injectRuntimeConfig(orig, rt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function takes a libcni RuntimeConf structure and injects values into
|
||||||
|
// a "runtimeConfig" dictionary in the CNI network configuration JSON that
|
||||||
|
// will be passed to the plugin on stdin.
|
||||||
|
//
|
||||||
|
// Only "capabilities arguments" passed by the runtime are currently injected.
|
||||||
|
// These capabilities arguments are filtered through the plugin's advertised
|
||||||
|
// capabilities from its config JSON, and any keys in the CapabilityArgs
|
||||||
|
// matching plugin capabilities are added to the "runtimeConfig" dictionary
|
||||||
|
// sent to the plugin via JSON on stdin. For example, if the plugin's
|
||||||
|
// capabilities include "portMappings", and the CapabilityArgs map includes a
|
||||||
|
// "portMappings" key, that key and its value are added to the "runtimeConfig"
|
||||||
|
// dictionary to be passed to the plugin's stdin.
|
||||||
|
func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
rc := make(map[string]interface{})
|
||||||
|
for capability, supported := range orig.Network.Capabilities {
|
||||||
|
if !supported {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if data, ok := rt.CapabilityArgs[capability]; ok {
|
||||||
|
rc[capability] = data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rc) > 0 {
|
||||||
|
orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return orig, nil
|
||||||
|
}
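For concreteness, here is a small sketch of the caller-facing side of this behavior (not part of the vendored file). The libcni import path is real, but the container ID, netns path, and the portMappings payload are made-up examples in the shape conventionally used by the portmap plugin.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	// Only keys that the plugin's config advertises under "capabilities"
	// (e.g. {"portMappings": true}) are copied into "runtimeConfig".
	rt := &libcni.RuntimeConf{
		ContainerID: "example-container",
		NetNS:       "/var/run/netns/example",
		IfName:      "eth0",
		CapabilityArgs: map[string]interface{}{
			"portMappings": []map[string]interface{}{
				{"hostPort": 8080, "containerPort": 80, "protocol": "tcp"},
			},
			"someOtherKey": "dropped unless the plugin advertises it",
		},
	}

	// Shape of what the plugin would see merged into its stdin JSON:
	injected, _ := json.Marshal(map[string]interface{}{
		"runtimeConfig": map[string]interface{}{
			"portMappings": rt.CapabilityArgs["portMappings"],
		},
	})
	fmt.Println(string(injected))
}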
|
||||||
|
|
||||||
|
// ensure we have a usable exec if the CNIConfig was not given one
|
||||||
|
func (c *CNIConfig) ensureExec() invoke.Exec {
|
||||||
|
if c.exec == nil {
|
||||||
|
c.exec = &invoke.DefaultExec{
|
||||||
|
RawExec: &invoke.RawExec{Stderr: os.Stderr},
|
||||||
|
PluginDecoder: version.PluginDecoder{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return c.exec
|
||||||
|
}
|
||||||
|
|
||||||
|
func getResultCacheFilePath(netName string, rt *RuntimeConf) string {
|
||||||
|
cacheDir := rt.CacheDir
|
||||||
|
if cacheDir == "" {
|
||||||
|
cacheDir = CacheDir
|
||||||
|
}
|
||||||
|
return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName))
|
||||||
|
}
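In other words, the cached result for a network ends up at <cacheDir>/results/<netName>-<containerID>-<ifName>, e.g. results/mynet-example-container-eth0 under the cache directory (names illustrative).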
|
||||||
|
|
||||||
|
func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error {
|
||||||
|
data, err := json.Marshal(result)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fname := getResultCacheFilePath(netName, rt)
|
||||||
|
if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return ioutil.WriteFile(fname, data, 0600)
|
||||||
|
}
|
||||||
|
|
||||||
|
func delCachedResult(netName string, rt *RuntimeConf) error {
|
||||||
|
fname := getResultCacheFilePath(netName, rt)
|
||||||
|
return os.Remove(fname)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
|
||||||
|
fname := getResultCacheFilePath(netName, rt)
|
||||||
|
data, err := ioutil.ReadFile(fname)
|
||||||
|
if err != nil {
|
||||||
|
// Ignore read errors; the cached result may not exist on-disk
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read the version of the cached result
|
||||||
|
decoder := version.ConfigDecoder{}
|
||||||
|
resultCniVersion, err := decoder.Decode(data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure we can understand the result
|
||||||
|
result, err := version.NewResult(resultCniVersion, data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert to the config version to ensure plugins get prevResult
|
||||||
|
// in the same version as the config. The cached result version
|
||||||
|
// should match the config version unless the config was changed
|
||||||
|
// while the container was running.
|
||||||
|
result, err = result.GetAsVersion(cniVersion)
|
||||||
|
if err != nil && resultCniVersion != cniVersion {
|
||||||
|
return nil, fmt.Errorf("failed to convert cached result version %q to config version %q: %v", resultCniVersion, cniVersion, err)
|
||||||
|
}
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNetworkListCachedResult returns the cached Result of the previous
|
||||||
|
// AddNetworkList() operation for a network list, or an error.
|
||||||
|
func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
|
||||||
|
return getCachedResult(list.Name, list.CNIVersion, rt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNetworkCachedResult returns the cached Result of the previous
|
||||||
|
// AddNetwork() operation for a network, or an error.
|
||||||
|
func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
|
||||||
|
return getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
|
||||||
|
c.ensureExec()
|
||||||
|
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddNetworkList executes a sequence of plugins with the ADD command
|
||||||
|
func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
|
||||||
|
var err error
|
||||||
|
var result types.Result
|
||||||
|
for _, net := range list.Plugins {
|
||||||
|
result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = setCachedResult(result, list.Name, rt); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to set network %q cached result: %v", list.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
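As a usage illustration (not part of the vendored file), a runtime would typically pair AddNetworkList with DelNetworkList roughly as in the sketch below; the plugin directory, config directory, network name, and netns path are assumptions.

package main

import (
	"context"
	"log"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	// nil exec means CNIConfig falls back to the default exec handler.
	cninet := libcni.NewCNIConfig([]string{"/opt/cni/bin"}, nil)

	list, err := libcni.LoadConfList("/etc/cni/net.d", "mynet")
	if err != nil {
		log.Fatal(err)
	}

	rt := &libcni.RuntimeConf{
		ContainerID: "example-container",
		NetNS:       "/var/run/netns/example",
		IfName:      "eth0",
	}

	// ADD runs every plugin in the list in order and caches the final result.
	result, err := cninet.AddNetworkList(context.Background(), list, rt)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("ADD result: %v", result)

	// DEL runs the plugins in reverse order and removes the cached result.
	if err := cninet.DelNetworkList(context.Background(), list, rt); err != nil {
		log.Fatal(err)
	}
}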
|
||||||
|
|
||||||
|
func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
|
||||||
|
c.ensureExec()
|
||||||
|
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckNetworkList executes a sequence of plugins with the CHECK command
|
||||||
|
func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
|
||||||
|
// CHECK was added in CNI spec version 0.4.0 and higher
|
||||||
|
if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
|
||||||
|
return err
|
||||||
|
} else if !gtet {
|
||||||
|
return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
if list.DisableCheck {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, net := range list.Plugins {
|
||||||
|
if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
|
||||||
|
c.ensureExec()
|
||||||
|
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelNetworkList executes a sequence of plugins with the DEL command
|
||||||
|
func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
|
||||||
|
var cachedResult types.Result
|
||||||
|
|
||||||
|
// Cached result on DEL was added in CNI spec version 0.4.0 and higher
|
||||||
|
if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
|
||||||
|
return err
|
||||||
|
} else if gtet {
|
||||||
|
cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := len(list.Plugins) - 1; i >= 0; i-- {
|
||||||
|
net := list.Plugins[i]
|
||||||
|
if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ = delCachedResult(list.Name, rt)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddNetwork executes the plugin with the ADD command
|
||||||
|
func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
|
||||||
|
result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = setCachedResult(result, net.Network.Name, rt); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckNetwork executes the plugin with the CHECK command
|
||||||
|
func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
|
||||||
|
// CHECK was added in CNI spec version 0.4.0 and higher
|
||||||
|
if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
|
||||||
|
return err
|
||||||
|
} else if !gtet {
|
||||||
|
return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err)
|
||||||
|
}
|
||||||
|
return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelNetwork executes the plugin with the DEL command
|
||||||
|
func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
|
||||||
|
var cachedResult types.Result
|
||||||
|
|
||||||
|
// Cached result on DEL was added in CNI spec version 0.4.0 and higher
|
||||||
|
if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
|
||||||
|
return err
|
||||||
|
} else if gtet {
|
||||||
|
cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_ = delCachedResult(net.Network.Name, rt)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateNetworkList checks that a configuration is reasonably valid.
|
||||||
|
// - all the specified plugins exist on disk
|
||||||
|
// - every plugin supports the desired version.
|
||||||
|
//
|
||||||
|
// Returns a list of all capabilities supported by the configuration, or error
|
||||||
|
func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) {
|
||||||
|
version := list.CNIVersion
|
||||||
|
|
||||||
|
// holding map for seen caps (in case of duplicates)
|
||||||
|
caps := map[string]interface{}{}
|
||||||
|
|
||||||
|
errs := []error{}
|
||||||
|
for _, net := range list.Plugins {
|
||||||
|
if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil {
|
||||||
|
errs = append(errs, err)
|
||||||
|
}
|
||||||
|
for c, enabled := range net.Network.Capabilities {
|
||||||
|
if !enabled {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
caps[c] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(errs) > 0 {
|
||||||
|
return nil, fmt.Errorf("%v", errs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// make caps list
|
||||||
|
cc := make([]string, 0, len(caps))
|
||||||
|
for c := range caps {
|
||||||
|
cc = append(cc, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
return cc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateNetwork checks that a configuration is reasonably valid.
|
||||||
|
// It uses the same logic as ValidateNetworkList.
|
||||||
|
// Returns a list of capabilities
|
||||||
|
func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) {
|
||||||
|
caps := []string{}
|
||||||
|
for c, ok := range net.Network.Capabilities {
|
||||||
|
if ok {
|
||||||
|
caps = append(caps, c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return caps, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// validatePlugin checks that an individual plugin's configuration is sane
|
||||||
|
func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error {
|
||||||
|
pluginPath, err := invoke.FindInPath(pluginName, c.Path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, vers := range vi.SupportedVersions() {
|
||||||
|
if vers == expectedVersion {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetVersionInfo reports which versions of the CNI spec are supported by
|
||||||
|
// the given plugin.
|
||||||
|
func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) {
|
||||||
|
c.ensureExec()
|
||||||
|
pluginPath, err := c.exec.FindInPath(pluginType, c.Path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return invoke.GetVersionInfo(ctx, pluginPath, c.exec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// =====
|
||||||
|
func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args {
|
||||||
|
return &invoke.Args{
|
||||||
|
Command: action,
|
||||||
|
ContainerID: rt.ContainerID,
|
||||||
|
NetNS: rt.NetNS,
|
||||||
|
PluginArgs: rt.Args,
|
||||||
|
IfName: rt.IfName,
|
||||||
|
Path: strings.Join(c.Path, string(os.PathListSeparator)),
|
||||||
|
}
|
||||||
|
}
|
268
vendor/github.com/containernetworking/cni/libcni/conf.go
generated
vendored
Normal file
|
@@ -0,0 +1,268 @@
|
||||||
|
// Copyright 2015 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package libcni
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
type NotFoundError struct {
|
||||||
|
Dir string
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e NotFoundError) Error() string {
|
||||||
|
return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
type NoConfigsFoundError struct {
|
||||||
|
Dir string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e NoConfigsFoundError) Error() string {
|
||||||
|
return fmt.Sprintf(`no net configurations found in %s`, e.Dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ConfFromBytes(bytes []byte) (*NetworkConfig, error) {
|
||||||
|
conf := &NetworkConfig{Bytes: bytes}
|
||||||
|
if err := json.Unmarshal(bytes, &conf.Network); err != nil {
|
||||||
|
return nil, fmt.Errorf("error parsing configuration: %s", err)
|
||||||
|
}
|
||||||
|
if conf.Network.Type == "" {
|
||||||
|
return nil, fmt.Errorf("error parsing configuration: missing 'type'")
|
||||||
|
}
|
||||||
|
return conf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ConfFromFile(filename string) (*NetworkConfig, error) {
|
||||||
|
bytes, err := ioutil.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error reading %s: %s", filename, err)
|
||||||
|
}
|
||||||
|
return ConfFromBytes(bytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
|
||||||
|
rawList := make(map[string]interface{})
|
||||||
|
if err := json.Unmarshal(bytes, &rawList); err != nil {
|
||||||
|
return nil, fmt.Errorf("error parsing configuration list: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rawName, ok := rawList["name"]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("error parsing configuration list: no name")
|
||||||
|
}
|
||||||
|
name, ok := rawName.(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName)
|
||||||
|
}
|
||||||
|
|
||||||
|
var cniVersion string
|
||||||
|
rawVersion, ok := rawList["cniVersion"]
|
||||||
|
if ok {
|
||||||
|
cniVersion, ok = rawVersion.(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
disableCheck := false
|
||||||
|
if rawDisableCheck, ok := rawList["disableCheck"]; ok {
|
||||||
|
disableCheck, ok = rawDisableCheck.(bool)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
list := &NetworkConfigList{
|
||||||
|
Name: name,
|
||||||
|
DisableCheck: disableCheck,
|
||||||
|
CNIVersion: cniVersion,
|
||||||
|
Bytes: bytes,
|
||||||
|
}
|
||||||
|
|
||||||
|
var plugins []interface{}
|
||||||
|
plug, ok := rawList["plugins"]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key")
|
||||||
|
}
|
||||||
|
plugins, ok = plug.([]interface{})
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug)
|
||||||
|
}
|
||||||
|
if len(plugins) == 0 {
|
||||||
|
return nil, fmt.Errorf("error parsing configuration list: no plugins in list")
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, conf := range plugins {
|
||||||
|
newBytes, err := json.Marshal(conf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to marshal plugin config %d: %v", i, err)
|
||||||
|
}
|
||||||
|
netConf, err := ConfFromBytes(newBytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to parse plugin config %d: %v", i, err)
|
||||||
|
}
|
||||||
|
list.Plugins = append(list.Plugins, netConf)
|
||||||
|
}
|
||||||
|
|
||||||
|
return list, nil
|
||||||
|
}
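A quick illustration (not part of the vendored file) of what ConfListFromBytes accepts. The bridge/portmap plugin settings in the JSON are illustrative and are not validated by libcni itself.

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	raw := []byte(`{
	  "cniVersion": "0.4.0",
	  "name": "mynet",
	  "plugins": [
	    {"type": "bridge", "bridge": "cni0", "ipam": {"type": "host-local", "subnet": "10.10.0.0/16"}},
	    {"type": "portmap", "capabilities": {"portMappings": true}}
	  ]
	}`)

	list, err := libcni.ConfListFromBytes(raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(list.Name, list.CNIVersion, len(list.Plugins)) // mynet 0.4.0 2
}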
|
||||||
|
|
||||||
|
func ConfListFromFile(filename string) (*NetworkConfigList, error) {
|
||||||
|
bytes, err := ioutil.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error reading %s: %s", filename, err)
|
||||||
|
}
|
||||||
|
return ConfListFromBytes(bytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ConfFiles(dir string, extensions []string) ([]string, error) {
|
||||||
|
// In part, adapted from rkt/networking/podenv.go#listFiles
|
||||||
|
files, err := ioutil.ReadDir(dir)
|
||||||
|
switch {
|
||||||
|
case err == nil: // break
|
||||||
|
case os.IsNotExist(err):
|
||||||
|
return nil, nil
|
||||||
|
default:
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
confFiles := []string{}
|
||||||
|
for _, f := range files {
|
||||||
|
if f.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fileExt := filepath.Ext(f.Name())
|
||||||
|
for _, ext := range extensions {
|
||||||
|
if fileExt == ext {
|
||||||
|
confFiles = append(confFiles, filepath.Join(dir, f.Name()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return confFiles, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func LoadConf(dir, name string) (*NetworkConfig, error) {
|
||||||
|
files, err := ConfFiles(dir, []string{".conf", ".json"})
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return nil, err
|
||||||
|
case len(files) == 0:
|
||||||
|
return nil, NoConfigsFoundError{Dir: dir}
|
||||||
|
}
|
||||||
|
sort.Strings(files)
|
||||||
|
|
||||||
|
for _, confFile := range files {
|
||||||
|
conf, err := ConfFromFile(confFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if conf.Network.Name == name {
|
||||||
|
return conf, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, NotFoundError{dir, name}
|
||||||
|
}
|
||||||
|
|
||||||
|
func LoadConfList(dir, name string) (*NetworkConfigList, error) {
|
||||||
|
files, err := ConfFiles(dir, []string{".conflist"})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sort.Strings(files)
|
||||||
|
|
||||||
|
for _, confFile := range files {
|
||||||
|
conf, err := ConfListFromFile(confFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if conf.Name == name {
|
||||||
|
return conf, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to load a network configuration file (instead of a list)
|
||||||
|
// with the same name, then upconvert it.
|
||||||
|
singleConf, err := LoadConf(dir, name)
|
||||||
|
if err != nil {
|
||||||
|
// A little extra logic so the error makes sense
|
||||||
|
if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok {
|
||||||
|
// Config lists found but no config files found
|
||||||
|
return nil, NotFoundError{dir, name}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ConfListFromConf(singleConf)
|
||||||
|
}
|
||||||
|
|
||||||
|
func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) {
|
||||||
|
config := make(map[string]interface{})
|
||||||
|
err := json.Unmarshal(original.Bytes, &config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unmarshal existing network bytes: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, value := range newValues {
|
||||||
|
if key == "" {
|
||||||
|
return nil, fmt.Errorf("keys cannot be empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
if value == nil {
|
||||||
|
return nil, fmt.Errorf("key '%s' value must not be nil", key)
|
||||||
|
}
|
||||||
|
|
||||||
|
config[key] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
newBytes, err := json.Marshal(config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ConfFromBytes(newBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConfListFromConf "upconverts" a network config in to a NetworkConfigList,
|
||||||
|
// with the single network as the only entry in the list.
|
||||||
|
func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) {
|
||||||
|
// Re-deserialize the config's json, then make a raw map configlist.
|
||||||
|
// This may seem a bit strange, but it's to make the Bytes fields
|
||||||
|
// actually make sense. Otherwise, the generated json is littered with
|
||||||
|
// golang default values.
|
||||||
|
|
||||||
|
rawConfig := make(map[string]interface{})
|
||||||
|
if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
rawConfigList := map[string]interface{}{
|
||||||
|
"name": original.Network.Name,
|
||||||
|
"cniVersion": original.Network.CNIVersion,
|
||||||
|
"plugins": []interface{}{rawConfig},
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := json.Marshal(rawConfigList)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ConfListFromBytes(b)
|
||||||
|
}
|
128
vendor/github.com/containernetworking/cni/pkg/invoke/args.go
generated
vendored
Normal file
|
@@ -0,0 +1,128 @@
|
||||||
|
// Copyright 2015 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package invoke
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type CNIArgs interface {
|
||||||
|
// For use with os/exec; i.e., return nil to inherit the
|
||||||
|
// environment from this process
|
||||||
|
// For use in delegation; inherit the environment from this
|
||||||
|
// process and allow overrides
|
||||||
|
AsEnv() []string
|
||||||
|
}
|
||||||
|
|
||||||
|
type inherited struct{}
|
||||||
|
|
||||||
|
var inheritArgsFromEnv inherited
|
||||||
|
|
||||||
|
func (_ *inherited) AsEnv() []string {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ArgsFromEnv() CNIArgs {
|
||||||
|
return &inheritArgsFromEnv
|
||||||
|
}
|
||||||
|
|
||||||
|
type Args struct {
|
||||||
|
Command string
|
||||||
|
ContainerID string
|
||||||
|
NetNS string
|
||||||
|
PluginArgs [][2]string
|
||||||
|
PluginArgsStr string
|
||||||
|
IfName string
|
||||||
|
Path string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Args implements the CNIArgs interface
|
||||||
|
var _ CNIArgs = &Args{}
|
||||||
|
|
||||||
|
func (args *Args) AsEnv() []string {
|
||||||
|
env := os.Environ()
|
||||||
|
pluginArgsStr := args.PluginArgsStr
|
||||||
|
if pluginArgsStr == "" {
|
||||||
|
pluginArgsStr = stringify(args.PluginArgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Duplicated values which come first will be overridden, so we must put the
|
||||||
|
// custom values at the end to avoid being overridden by the process environment.
|
||||||
|
env = append(env,
|
||||||
|
"CNI_COMMAND="+args.Command,
|
||||||
|
"CNI_CONTAINERID="+args.ContainerID,
|
||||||
|
"CNI_NETNS="+args.NetNS,
|
||||||
|
"CNI_ARGS="+pluginArgsStr,
|
||||||
|
"CNI_IFNAME="+args.IfName,
|
||||||
|
"CNI_PATH="+args.Path,
|
||||||
|
)
|
||||||
|
return dedupEnv(env)
|
||||||
|
}
|
||||||
|
|
||||||
|
// taken from rkt/networking/net_plugin.go
|
||||||
|
func stringify(pluginArgs [][2]string) string {
|
||||||
|
entries := make([]string, len(pluginArgs))
|
||||||
|
|
||||||
|
for i, kv := range pluginArgs {
|
||||||
|
entries[i] = strings.Join(kv[:], "=")
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(entries, ";")
|
||||||
|
}
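A small sketch (not part of the vendored file) showing how PluginArgs end up in CNI_ARGS via AsEnv; the K8S_POD_NAME key is just a conventional example.

package main

import (
	"fmt"
	"strings"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	args := &invoke.Args{
		Command:     "ADD",
		ContainerID: "example-container",
		NetNS:       "/var/run/netns/example",
		IfName:      "eth0",
		Path:        "/opt/cni/bin",
		PluginArgs:  [][2]string{{"IgnoreUnknown", "1"}, {"K8S_POD_NAME", "mypod"}},
	}

	// AsEnv appends the CNI_* variables to the process environment and dedups.
	for _, kv := range args.AsEnv() {
		if strings.HasPrefix(kv, "CNI_ARGS=") {
			fmt.Println(kv) // CNI_ARGS=IgnoreUnknown=1;K8S_POD_NAME=mypod
		}
	}
}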
|
||||||
|
|
||||||
|
// DelegateArgs implements the CNIArgs interface
|
||||||
|
// used for delegation to inherit from environments
|
||||||
|
// and allow some overrides like CNI_COMMAND
|
||||||
|
var _ CNIArgs = &DelegateArgs{}
|
||||||
|
|
||||||
|
type DelegateArgs struct {
|
||||||
|
Command string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DelegateArgs) AsEnv() []string {
|
||||||
|
env := os.Environ()
|
||||||
|
|
||||||
|
// The custom values should come in the end to override the existing
|
||||||
|
// process environment of the same key.
|
||||||
|
env = append(env,
|
||||||
|
"CNI_COMMAND="+d.Command,
|
||||||
|
)
|
||||||
|
return dedupEnv(env)
|
||||||
|
}
|
||||||
|
|
||||||
|
// dedupEnv returns a copy of env with any duplicates removed, in favor of later values.
|
||||||
|
// Items not of the normal environment "key=value" form are preserved unchanged.
|
||||||
|
func dedupEnv(env []string) []string {
|
||||||
|
out := make([]string, 0, len(env))
|
||||||
|
envMap := map[string]string{}
|
||||||
|
|
||||||
|
for _, kv := range env {
|
||||||
|
// find the first "=" in the entry; if there is none, just keep it as-is
|
||||||
|
eq := strings.Index(kv, "=")
|
||||||
|
if eq < 0 {
|
||||||
|
out = append(out, kv)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
envMap[kv[:eq]] = kv[eq+1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range envMap {
|
||||||
|
out = append(out, fmt.Sprintf("%s=%s", k, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
80
vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
generated
vendored
Normal file
|
@@ -0,0 +1,80 @@
|
||||||
|
// Copyright 2016 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package invoke
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/containernetworking/cni/pkg/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) {
|
||||||
|
if exec == nil {
|
||||||
|
exec = defaultExec
|
||||||
|
}
|
||||||
|
|
||||||
|
paths := filepath.SplitList(os.Getenv("CNI_PATH"))
|
||||||
|
pluginPath, err := exec.FindInPath(delegatePlugin, paths)
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return pluginPath, exec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelegateAdd calls the given delegate plugin with the CNI ADD action and
|
||||||
|
// JSON configuration
|
||||||
|
func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) {
|
||||||
|
pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelegateAdd will override the original "CNI_COMMAND" env from process with ADD
|
||||||
|
return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec)
|
||||||
|
}
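An illustrative sketch (not part of the vendored file) of a caller delegating to an IPAM plugin. In a real plugin the CNI_* variables are already present in the environment and the netconf bytes come from stdin rather than a literal; the plugin directory is an assumption.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	netconf := []byte(`{"cniVersion":"0.4.0","name":"mynet","type":"host-local","ipam":{"type":"host-local","subnet":"10.10.0.0/16"}}`)

	// delegateCommon above resolves the delegate binary from CNI_PATH.
	os.Setenv("CNI_PATH", "/opt/cni/bin")

	// nil exec means the package-level defaultExec is used.
	result, err := invoke.DelegateAdd(context.Background(), "host-local", netconf, nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(result)
}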
|
||||||
|
|
||||||
|
// DelegateCheck calls the given delegate plugin with the CNI CHECK action and
|
||||||
|
// JSON configuration
|
||||||
|
func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
|
||||||
|
pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelegateCheck will override the original CNI_COMMAND env from process with CHECK
|
||||||
|
return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelegateDel calls the given delegate plugin with the CNI DEL action and
|
||||||
|
// JSON configuration
|
||||||
|
func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
|
||||||
|
pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelegateDel will override the original CNI_COMMAND env from process with DEL
|
||||||
|
return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// return CNIArgs used by delegation
|
||||||
|
func delegateArgs(action string) *DelegateArgs {
|
||||||
|
return &DelegateArgs{
|
||||||
|
Command: action,
|
||||||
|
}
|
||||||
|
}
|
144
vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
generated
vendored
Normal file
|
@@ -0,0 +1,144 @@
|
||||||
|
// Copyright 2015 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package invoke
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/containernetworking/cni/pkg/types"
|
||||||
|
"github.com/containernetworking/cni/pkg/version"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Exec is an interface that encapsulates all operations that deal with finding
|
||||||
|
// and executing a CNI plugin. Tests may provide a fake implementation
|
||||||
|
// to avoid writing fake plugins to temporary directories during the test.
|
||||||
|
type Exec interface {
|
||||||
|
ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error)
|
||||||
|
FindInPath(plugin string, paths []string) (string, error)
|
||||||
|
Decode(jsonBytes []byte) (version.PluginInfo, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// For example, a testcase could pass an instance of the following fakeExec
|
||||||
|
// object to ExecPluginWithResult() to verify the incoming stdin and environment
|
||||||
|
// and provide a tailored response:
|
||||||
|
//
|
||||||
|
//import (
|
||||||
|
// "encoding/json"
|
||||||
|
// "path"
|
||||||
|
// "strings"
|
||||||
|
//)
|
||||||
|
//
|
||||||
|
//type fakeExec struct {
|
||||||
|
// version.PluginDecoder
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
|
||||||
|
// net := &types.NetConf{}
|
||||||
|
// err := json.Unmarshal(stdinData, net)
|
||||||
|
// if err != nil {
|
||||||
|
// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err)
|
||||||
|
// }
|
||||||
|
// pluginName := path.Base(pluginPath)
|
||||||
|
// if pluginName != net.Type {
|
||||||
|
// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type)
|
||||||
|
// }
|
||||||
|
// for _, e := range environ {
|
||||||
|
// // Check environment for forced failure request
|
||||||
|
// parts := strings.Split(e, "=")
|
||||||
|
// if len(parts) > 0 && parts[0] == "FAIL" {
|
||||||
|
// return nil, fmt.Errorf("failed to execute plugin %s", pluginName)
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) {
|
||||||
|
// if len(paths) > 0 {
|
||||||
|
// return path.Join(paths[0], plugin), nil
|
||||||
|
// }
|
||||||
|
// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths)
|
||||||
|
//}
|
||||||
|
|
||||||
|
func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) {
|
||||||
|
if exec == nil {
|
||||||
|
exec = defaultExec
|
||||||
|
}
|
||||||
|
|
||||||
|
stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Plugin must return result in same version as specified in netconf
|
||||||
|
versionDecoder := &version.ConfigDecoder{}
|
||||||
|
confVersion, err := versionDecoder.Decode(netconf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return version.NewResult(confVersion, stdoutBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error {
|
||||||
|
if exec == nil {
|
||||||
|
exec = defaultExec
|
||||||
|
}
|
||||||
|
_, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetVersionInfo returns the version information available about the plugin.
|
||||||
|
// For recent-enough plugins, it uses the information returned by the VERSION
|
||||||
|
// command. For older plugins which do not recognize that command, it reports
|
||||||
|
// version 0.1.0
|
||||||
|
func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) {
|
||||||
|
if exec == nil {
|
||||||
|
exec = defaultExec
|
||||||
|
}
|
||||||
|
args := &Args{
|
||||||
|
Command: "VERSION",
|
||||||
|
|
||||||
|
// set fake values required by plugins built against an older version of skel
|
||||||
|
NetNS: "dummy",
|
||||||
|
IfName: "dummy",
|
||||||
|
Path: "dummy",
|
||||||
|
}
|
||||||
|
stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current()))
|
||||||
|
stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv())
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() == "unknown CNI_COMMAND: VERSION" {
|
||||||
|
return version.PluginSupports("0.1.0"), nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return exec.Decode(stdoutBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultExec is an object that implements the Exec interface which looks
|
||||||
|
// for and executes plugins from disk.
|
||||||
|
type DefaultExec struct {
|
||||||
|
*RawExec
|
||||||
|
version.PluginDecoder
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultExec implements the Exec interface
|
||||||
|
var _ Exec = &DefaultExec{}
|
||||||
|
|
||||||
|
var defaultExec = &DefaultExec{
|
||||||
|
RawExec: &RawExec{Stderr: os.Stderr},
|
||||||
|
}
|
43
vendor/github.com/containernetworking/cni/pkg/invoke/find.go
generated
vendored
Normal file
|
@@ -0,0 +1,43 @@
|
||||||
|
// Copyright 2015 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package invoke
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FindInPath returns the full path of the plugin by searching in the provided path
|
||||||
|
func FindInPath(plugin string, paths []string) (string, error) {
|
||||||
|
if plugin == "" {
|
||||||
|
return "", fmt.Errorf("no plugin name provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(paths) == 0 {
|
||||||
|
return "", fmt.Errorf("no paths provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, path := range paths {
|
||||||
|
for _, fe := range ExecutableFileExtensions {
|
||||||
|
fullpath := filepath.Join(path, plugin) + fe
|
||||||
|
if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
|
||||||
|
return fullpath, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths)
|
||||||
|
}
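A tiny usage sketch (not part of the vendored file); the plugin name and directory are assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// Searches each directory, honoring the OS-specific executable
	// extensions, and returns the first regular file that matches.
	path, err := invoke.FindInPath("bridge", []string{"/opt/cni/bin"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path) // e.g. /opt/cni/bin/bridge
}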
|
20
vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
generated
vendored
Normal file
|
@@ -0,0 +1,20 @@
|
||||||
|
// Copyright 2016 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||||
|
|
||||||
|
package invoke
|
||||||
|
|
||||||
|
// Valid file extensions for plugin executables.
|
||||||
|
var ExecutableFileExtensions = []string{""}
|
18
vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go
generated
vendored
Normal file
|
@@ -0,0 +1,18 @@
|
||||||
|
// Copyright 2016 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package invoke
|
||||||
|
|
||||||
|
// Valid file extensions for plugin executables.
|
||||||
|
var ExecutableFileExtensions = []string{".exe", ""}
|
62
vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
generated
vendored
Normal file
|
@@ -0,0 +1,62 @@
|
||||||
|
// Copyright 2016 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package invoke
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os/exec"
|
||||||
|
|
||||||
|
"github.com/containernetworking/cni/pkg/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type RawExec struct {
|
||||||
|
Stderr io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
|
||||||
|
stdout := &bytes.Buffer{}
|
||||||
|
c := exec.CommandContext(ctx, pluginPath)
|
||||||
|
c.Env = environ
|
||||||
|
c.Stdin = bytes.NewBuffer(stdinData)
|
||||||
|
c.Stdout = stdout
|
||||||
|
c.Stderr = e.Stderr
|
||||||
|
if err := c.Run(); err != nil {
|
||||||
|
return nil, pluginErr(err, stdout.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
return stdout.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func pluginErr(err error, output []byte) error {
|
||||||
|
if _, ok := err.(*exec.ExitError); ok {
|
||||||
|
emsg := types.Error{}
|
||||||
|
if len(output) == 0 {
|
||||||
|
emsg.Msg = "netplugin failed with no error message"
|
||||||
|
} else if perr := json.Unmarshal(output, &emsg); perr != nil {
|
||||||
|
emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr)
|
||||||
|
}
|
||||||
|
return &emsg
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) {
|
||||||
|
return FindInPath(plugin, paths)
|
||||||
|
}
|
140
vendor/github.com/containernetworking/cni/pkg/types/020/types.go
generated
vendored
Normal file
|
@@ -0,0 +1,140 @@
|
||||||
|
// Copyright 2016 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package types020
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/containernetworking/cni/pkg/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
const ImplementedSpecVersion string = "0.2.0"
|
||||||
|
|
||||||
|
var SupportedVersions = []string{"", "0.1.0", ImplementedSpecVersion}
|
||||||
|
|
||||||
|
// Compatibility types for CNI version 0.1.0 and 0.2.0
|
||||||
|
|
||||||
|
func NewResult(data []byte) (types.Result, error) {
|
||||||
|
result := &Result{}
|
||||||
|
if err := json.Unmarshal(data, result); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetResult(r types.Result) (*Result, error) {
|
||||||
|
// We expect version 0.1.0/0.2.0 results
|
||||||
|
result020, err := r.GetAsVersion(ImplementedSpecVersion)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result, ok := result020.(*Result)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("failed to convert result")
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Result is what gets returned from the plugin (via stdout) to the caller
|
||||||
|
type Result struct {
|
||||||
|
CNIVersion string `json:"cniVersion,omitempty"`
|
||||||
|
IP4 *IPConfig `json:"ip4,omitempty"`
|
||||||
|
IP6 *IPConfig `json:"ip6,omitempty"`
|
||||||
|
DNS types.DNS `json:"dns,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Result) Version() string {
|
||||||
|
return ImplementedSpecVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Result) GetAsVersion(version string) (types.Result, error) {
|
||||||
|
for _, supportedVersion := range SupportedVersions {
|
||||||
|
if version == supportedVersion {
|
||||||
|
r.CNIVersion = version
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("cannot convert version %q to %s", SupportedVersions, version)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Result) Print() error {
|
||||||
|
return r.PrintTo(os.Stdout)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Result) PrintTo(writer io.Writer) error {
|
||||||
|
data, err := json.MarshalIndent(r, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = writer.Write(data)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a formatted string in the form of "[IP4: $1,][ IP6: $2,] DNS: $3" where
|
||||||
|
// $1 represents the receiver's IPv4, $2 represents the receiver's IPv6 and $3 the
|
||||||
|
// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string.
|
||||||
|
func (r *Result) String() string {
|
||||||
|
var str string
|
||||||
|
if r.IP4 != nil {
|
||||||
|
str = fmt.Sprintf("IP4:%+v, ", *r.IP4)
|
||||||
|
}
|
||||||
|
if r.IP6 != nil {
|
||||||
|
str += fmt.Sprintf("IP6:%+v, ", *r.IP6)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%sDNS:%+v", str, r.DNS)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPConfig contains values necessary to configure an interface
|
||||||
|
type IPConfig struct {
|
||||||
|
IP net.IPNet
|
||||||
|
Gateway net.IP
|
||||||
|
Routes []types.Route
|
||||||
|
}
|
||||||
|
|
||||||
|
// net.IPNet is not JSON (un)marshallable so this duality is needed
|
||||||
|
// for our custom IPNet type
|
||||||
|
|
||||||
|
// JSON (un)marshallable types
|
||||||
|
type ipConfig struct {
|
||||||
|
IP types.IPNet `json:"ip"`
|
||||||
|
Gateway net.IP `json:"gateway,omitempty"`
|
||||||
|
Routes []types.Route `json:"routes,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *IPConfig) MarshalJSON() ([]byte, error) {
|
||||||
|
ipc := ipConfig{
|
||||||
|
IP: types.IPNet(c.IP),
|
||||||
|
Gateway: c.Gateway,
|
||||||
|
Routes: c.Routes,
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Marshal(ipc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *IPConfig) UnmarshalJSON(data []byte) error {
|
||||||
|
ipc := ipConfig{}
|
||||||
|
if err := json.Unmarshal(data, &ipc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
c.IP = net.IPNet(ipc.IP)
|
||||||
|
c.Gateway = ipc.Gateway
|
||||||
|
c.Routes = ipc.Routes
|
||||||
|
return nil
|
||||||
|
}
|
25
vendor/github.com/containernetworking/cni/pkg/types/args.go
generated
vendored
|
@@ -41,6 +41,16 @@ func (b *UnmarshallableBool) UnmarshalText(data []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnmarshallableString typedef for builtin string
|
||||||
|
type UnmarshallableString string
|
||||||
|
|
||||||
|
// UnmarshalText implements the encoding.TextUnmarshaler interface.
|
||||||
|
// Returns the string
|
||||||
|
func (s *UnmarshallableString) UnmarshalText(data []byte) error {
|
||||||
|
*s = UnmarshallableString(data)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// CommonArgs contains the IgnoreUnknown argument
|
// CommonArgs contains the IgnoreUnknown argument
|
||||||
// and must be embedded by all Arg structs
|
// and must be embedded by all Arg structs
|
||||||
type CommonArgs struct {
|
type CommonArgs struct {
|
||||||
|
@@ -53,6 +63,12 @@ func GetKeyField(keyString string, v reflect.Value) reflect.Value {
|
||||||
return v.Elem().FieldByName(keyString)
|
return v.Elem().FieldByName(keyString)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnmarshalableArgsError is used to indicate error unmarshalling args
|
||||||
|
// from the args-string in the form "K=V;K2=V2;..."
|
||||||
|
type UnmarshalableArgsError struct {
|
||||||
|
error
|
||||||
|
}
|
||||||
|
|
||||||
// LoadArgs parses args from a string in the form "K=V;K2=V2;..."
|
// LoadArgs parses args from a string in the form "K=V;K2=V2;..."
|
||||||
func LoadArgs(args string, container interface{}) error {
|
func LoadArgs(args string, container interface{}) error {
|
||||||
if args == "" {
|
if args == "" {
|
||||||
|
@@ -75,8 +91,13 @@ func LoadArgs(args string, container interface{}) error {
|
||||||
unknownArgs = append(unknownArgs, pair)
|
unknownArgs = append(unknownArgs, pair)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
keyFieldIface := keyField.Addr().Interface()
|
||||||
u := keyField.Addr().Interface().(encoding.TextUnmarshaler)
|
u, ok := keyFieldIface.(encoding.TextUnmarshaler)
|
||||||
|
if !ok {
|
||||||
|
return UnmarshalableArgsError{fmt.Errorf(
|
||||||
|
"ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler",
|
||||||
|
keyString, reflect.TypeOf(keyFieldIface))}
|
||||||
|
}
|
||||||
err := u.UnmarshalText([]byte(valueString))
|
err := u.UnmarshalText([]byte(valueString))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("ARGS: error parsing value of pair %q: %v)", pair, err)
|
return fmt.Errorf("ARGS: error parsing value of pair %q: %v)", pair, err)
|
||||||
|
|
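A usage sketch for the LoadArgs/UnmarshallableString additions above (not part of the vendored diff); the K8S_POD_NAME key and the PodArgs struct are illustrative, following the convention that field names match the keys in the CNI_ARGS string.

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/types"
)

// PodArgs embeds CommonArgs so IgnoreUnknown is available; each extra field
// must implement encoding.TextUnmarshaler, which UnmarshallableString does.
type PodArgs struct {
	types.CommonArgs
	K8S_POD_NAME types.UnmarshallableString
}

func main() {
	args := &PodArgs{}
	if err := types.LoadArgs("IgnoreUnknown=1;K8S_POD_NAME=mypod", args); err != nil {
		log.Fatal(err)
	}
	fmt.Println(args.K8S_POD_NAME) // mypod
}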
293
vendor/github.com/containernetworking/cni/pkg/types/current/types.go
generated
vendored
Normal file
|
@@ -0,0 +1,293 @@
|
||||||
|
// Copyright 2016 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package current
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/containernetworking/cni/pkg/types"
|
||||||
|
"github.com/containernetworking/cni/pkg/types/020"
|
||||||
|
)
|
||||||
|
|
||||||
|
const ImplementedSpecVersion string = "0.4.0"
|
||||||
|
|
||||||
|
var SupportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion}
|
||||||
|
|
||||||
|
func NewResult(data []byte) (types.Result, error) {
|
||||||
|
result := &Result{}
|
||||||
|
if err := json.Unmarshal(data, result); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetResult(r types.Result) (*Result, error) {
|
||||||
|
resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result, ok := resultCurrent.(*Result)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("failed to convert result")
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var resultConverters = []struct {
|
||||||
|
versions []string
|
||||||
|
convert func(types.Result) (*Result, error)
|
||||||
|
}{
|
||||||
|
{types020.SupportedVersions, convertFrom020},
|
||||||
|
{SupportedVersions, convertFrom030},
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertFrom020(result types.Result) (*Result, error) {
|
||||||
|
oldResult, err := types020.GetResult(result)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
newResult := &Result{
|
||||||
|
CNIVersion: ImplementedSpecVersion,
|
||||||
|
DNS: oldResult.DNS,
|
||||||
|
Routes: []*types.Route{},
|
||||||
|
}
|
||||||
|
|
||||||
|
if oldResult.IP4 != nil {
|
||||||
|
newResult.IPs = append(newResult.IPs, &IPConfig{
|
||||||
|
Version: "4",
|
||||||
|
Address: oldResult.IP4.IP,
|
||||||
|
Gateway: oldResult.IP4.Gateway,
|
||||||
|
})
|
||||||
|
for _, route := range oldResult.IP4.Routes {
|
||||||
|
newResult.Routes = append(newResult.Routes, &types.Route{
|
||||||
|
Dst: route.Dst,
|
||||||
|
GW: route.GW,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if oldResult.IP6 != nil {
|
||||||
|
newResult.IPs = append(newResult.IPs, &IPConfig{
|
||||||
|
Version: "6",
|
||||||
|
Address: oldResult.IP6.IP,
|
||||||
|
Gateway: oldResult.IP6.Gateway,
|
||||||
|
})
|
||||||
|
for _, route := range oldResult.IP6.Routes {
|
||||||
|
newResult.Routes = append(newResult.Routes, &types.Route{
|
||||||
|
Dst: route.Dst,
|
||||||
|
GW: route.GW,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return newResult, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertFrom030(result types.Result) (*Result, error) {
|
||||||
|
newResult, ok := result.(*Result)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("failed to convert result")
|
||||||
|
}
|
||||||
|
newResult.CNIVersion = ImplementedSpecVersion
|
||||||
|
return newResult, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewResultFromResult(result types.Result) (*Result, error) {
|
||||||
|
version := result.Version()
|
||||||
|
for _, converter := range resultConverters {
|
||||||
|
for _, supportedVersion := range converter.versions {
|
||||||
|
if version == supportedVersion {
|
||||||
|
return converter.convert(result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unsupported CNI result version %q", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Result is what gets returned from the plugin (via stdout) to the caller
|
||||||
|
type Result struct {
|
||||||
|
CNIVersion string `json:"cniVersion,omitempty"`
|
||||||
|
Interfaces []*Interface `json:"interfaces,omitempty"`
|
||||||
|
IPs []*IPConfig `json:"ips,omitempty"`
|
||||||
|
Routes []*types.Route `json:"routes,omitempty"`
|
||||||
|
DNS types.DNS `json:"dns,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert to the older 0.2.0 CNI spec Result type
|
||||||
|
func (r *Result) convertTo020() (*types020.Result, error) {
|
||||||
|
oldResult := &types020.Result{
|
||||||
|
CNIVersion: types020.ImplementedSpecVersion,
|
||||||
|
DNS: r.DNS,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ip := range r.IPs {
|
||||||
|
// Only convert the first IP address of each version as 0.2.0
|
||||||
|
// and earlier cannot handle multiple IP addresses
|
||||||
|
if ip.Version == "4" && oldResult.IP4 == nil {
|
||||||
|
oldResult.IP4 = &types020.IPConfig{
|
||||||
|
IP: ip.Address,
|
||||||
|
Gateway: ip.Gateway,
|
||||||
|
}
|
||||||
|
} else if ip.Version == "6" && oldResult.IP6 == nil {
|
||||||
|
oldResult.IP6 = &types020.IPConfig{
|
||||||
|
IP: ip.Address,
|
||||||
|
Gateway: ip.Gateway,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if oldResult.IP4 != nil && oldResult.IP6 != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, route := range r.Routes {
|
||||||
|
is4 := route.Dst.IP.To4() != nil
|
||||||
|
if is4 && oldResult.IP4 != nil {
|
||||||
|
oldResult.IP4.Routes = append(oldResult.IP4.Routes, types.Route{
|
||||||
|
Dst: route.Dst,
|
||||||
|
GW: route.GW,
|
||||||
|
})
|
||||||
|
} else if !is4 && oldResult.IP6 != nil {
|
||||||
|
oldResult.IP6.Routes = append(oldResult.IP6.Routes, types.Route{
|
||||||
|
Dst: route.Dst,
|
||||||
|
GW: route.GW,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if oldResult.IP4 == nil && oldResult.IP6 == nil {
|
||||||
|
return nil, fmt.Errorf("cannot convert: no valid IP addresses")
|
||||||
|
}
|
||||||
|
|
||||||
|
return oldResult, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Result) Version() string {
|
||||||
|
return ImplementedSpecVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Result) GetAsVersion(version string) (types.Result, error) {
|
||||||
|
switch version {
|
||||||
|
case "0.3.0", "0.3.1", ImplementedSpecVersion:
|
||||||
|
r.CNIVersion = version
|
||||||
|
return r, nil
|
||||||
|
case types020.SupportedVersions[0], types020.SupportedVersions[1], types020.SupportedVersions[2]:
|
||||||
|
return r.convertTo020()
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("cannot convert version 0.3.x to %q", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Result) Print() error {
|
||||||
|
return r.PrintTo(os.Stdout)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Result) PrintTo(writer io.Writer) error {
|
||||||
|
data, err := json.MarshalIndent(r, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = writer.Write(data)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a formatted string in the form of "[Interfaces: $1,][ IP: $2,] DNS: $3" where
|
||||||
|
// $1 represents the receiver's Interfaces, $2 represents the receiver's IP addresses and $3 the
|
||||||
|
// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string.
|
||||||
|
func (r *Result) String() string {
|
||||||
|
var str string
|
||||||
|
if len(r.Interfaces) > 0 {
|
||||||
|
str += fmt.Sprintf("Interfaces:%+v, ", r.Interfaces)
|
||||||
|
}
|
||||||
|
if len(r.IPs) > 0 {
|
||||||
|
str += fmt.Sprintf("IP:%+v, ", r.IPs)
|
||||||
|
}
|
||||||
|
if len(r.Routes) > 0 {
|
||||||
|
str += fmt.Sprintf("Routes:%+v, ", r.Routes)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%sDNS:%+v", str, r.DNS)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert this old version result to the current CNI version result
|
||||||
|
func (r *Result) Convert() (*Result, error) {
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interface contains values about the created interfaces
|
||||||
|
type Interface struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Mac string `json:"mac,omitempty"`
|
||||||
|
Sandbox string `json:"sandbox,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *Interface) String() string {
|
||||||
|
return fmt.Sprintf("%+v", *i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int returns a pointer to the int value passed in. Used to
|
||||||
|
// set the IPConfig.Interface field.
|
||||||
|
func Int(v int) *int {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPConfig contains values necessary to configure an IP address on an interface
|
||||||
|
type IPConfig struct {
|
||||||
|
// IP version, either "4" or "6"
|
||||||
|
Version string
|
||||||
|
// Index into Result structs Interfaces list
|
||||||
|
Interface *int
|
||||||
|
Address net.IPNet
|
||||||
|
Gateway net.IP
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *IPConfig) String() string {
|
||||||
|
return fmt.Sprintf("%+v", *i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSON (un)marshallable types
|
||||||
|
type ipConfig struct {
|
||||||
|
Version string `json:"version"`
|
||||||
|
Interface *int `json:"interface,omitempty"`
|
||||||
|
Address types.IPNet `json:"address"`
|
||||||
|
Gateway net.IP `json:"gateway,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *IPConfig) MarshalJSON() ([]byte, error) {
|
||||||
|
ipc := ipConfig{
|
||||||
|
Version: c.Version,
|
||||||
|
Interface: c.Interface,
|
||||||
|
Address: types.IPNet(c.Address),
|
||||||
|
Gateway: c.Gateway,
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Marshal(ipc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *IPConfig) UnmarshalJSON(data []byte) error {
|
||||||
|
ipc := ipConfig{}
|
||||||
|
if err := json.Unmarshal(data, &ipc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Version = ipc.Version
|
||||||
|
c.Interface = ipc.Interface
|
||||||
|
c.Address = net.IPNet(ipc.Address)
|
||||||
|
c.Gateway = ipc.Gateway
|
||||||
|
return nil
|
||||||
|
}
|
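As an editorial aside (not part of the vendored file above), the sketch below shows one way a caller might use this package: parse raw plugin output into the current `Result` type, then re-emit it in the older 0.2.0 shape via `GetAsVersion`. The input JSON and the helper function name are invented for the example.

```go
package main

import (
	"fmt"
	"os"

	"github.com/containernetworking/cni/pkg/types/current"
)

// convertForLegacyRuntime parses raw plugin output and re-prints it in the
// 0.2.0 result format. raw is assumed to be valid plugin result JSON.
func convertForLegacyRuntime(raw []byte) error {
	res, err := current.NewResult(raw)
	if err != nil {
		return err
	}
	cur, err := current.NewResultFromResult(res)
	if err != nil {
		return err
	}
	old, err := cur.GetAsVersion("0.2.0")
	if err != nil {
		return err
	}
	return old.PrintTo(os.Stdout)
}

func main() {
	raw := []byte(`{"cniVersion":"0.4.0","ips":[{"version":"4","address":"10.0.0.2/24"}]}`)
	if err := convertForLegacyRuntime(raw); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```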
128 vendor/github.com/containernetworking/cni/pkg/types/types.go generated vendored
|
@ -16,7 +16,9 @@ package types
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
)
|
)
|
||||||
|
@ -57,44 +59,59 @@ func (n *IPNet) UnmarshalJSON(data []byte) error {
|
||||||
|
|
||||||
// NetConf describes a network.
|
// NetConf describes a network.
|
||||||
type NetConf struct {
|
type NetConf struct {
|
||||||
Name string `json:"name,omitempty"`
|
CNIVersion string `json:"cniVersion,omitempty"`
|
||||||
|
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
Type string `json:"type,omitempty"`
|
||||||
|
Capabilities map[string]bool `json:"capabilities,omitempty"`
|
||||||
|
IPAM IPAM `json:"ipam,omitempty"`
|
||||||
|
DNS DNS `json:"dns"`
|
||||||
|
|
||||||
|
RawPrevResult map[string]interface{} `json:"prevResult,omitempty"`
|
||||||
|
PrevResult Result `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type IPAM struct {
|
||||||
Type string `json:"type,omitempty"`
|
Type string `json:"type,omitempty"`
|
||||||
IPAM struct {
|
|
||||||
Type string `json:"type,omitempty"`
|
|
||||||
} `json:"ipam,omitempty"`
|
|
||||||
DNS DNS `json:"dns"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Result is what gets returned from the plugin (via stdout) to the caller
|
// NetConfList describes an ordered list of networks.
|
||||||
type Result struct {
|
type NetConfList struct {
|
||||||
IP4 *IPConfig `json:"ip4,omitempty"`
|
CNIVersion string `json:"cniVersion,omitempty"`
|
||||||
IP6 *IPConfig `json:"ip6,omitempty"`
|
|
||||||
DNS DNS `json:"dns,omitempty"`
|
Name string `json:"name,omitempty"`
|
||||||
|
DisableCheck bool `json:"disableCheck,omitempty"`
|
||||||
|
Plugins []*NetConf `json:"plugins,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Result) Print() error {
|
type ResultFactoryFunc func([]byte) (Result, error)
|
||||||
return prettyPrint(r)
|
|
||||||
|
// Result is an interface that provides the result of plugin execution
|
||||||
|
type Result interface {
|
||||||
|
// The highest CNI specification result version the result supports
|
||||||
|
// without having to convert
|
||||||
|
Version() string
|
||||||
|
|
||||||
|
// Returns the result converted into the requested CNI specification
|
||||||
|
// result version, or an error if conversion failed
|
||||||
|
GetAsVersion(version string) (Result, error)
|
||||||
|
|
||||||
|
// Prints the result in JSON format to stdout
|
||||||
|
Print() error
|
||||||
|
|
||||||
|
// Prints the result in JSON format to provided writer
|
||||||
|
PrintTo(writer io.Writer) error
|
||||||
|
|
||||||
|
// Returns a JSON string representation of the result
|
||||||
|
String() string
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns a formatted string in the form of "[IP4: $1,][ IP6: $2,] DNS: $3" where
|
func PrintResult(result Result, version string) error {
|
||||||
// $1 represents the receiver's IPv4, $2 represents the receiver's IPv6 and $3 the
|
newResult, err := result.GetAsVersion(version)
|
||||||
// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string.
|
if err != nil {
|
||||||
func (r *Result) String() string {
|
return err
|
||||||
var str string
|
|
||||||
if r.IP4 != nil {
|
|
||||||
str = fmt.Sprintf("IP4:%+v, ", *r.IP4)
|
|
||||||
}
|
}
|
||||||
if r.IP6 != nil {
|
return newResult.Print()
|
||||||
str += fmt.Sprintf("IP6:%+v, ", *r.IP6)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%sDNS:%+v", str, r.DNS)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IPConfig contains values necessary to configure an interface
|
|
||||||
type IPConfig struct {
|
|
||||||
IP net.IPNet
|
|
||||||
Gateway net.IP
|
|
||||||
Routes []Route
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DNS contains values interesting for DNS resolvers
|
// DNS contains values interesting for DNS resolvers
|
||||||
|
@ -110,6 +127,18 @@ type Route struct {
|
||||||
GW net.IP
|
GW net.IP
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *Route) String() string {
|
||||||
|
return fmt.Sprintf("%+v", *r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Well known error codes
|
||||||
|
// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes
|
||||||
|
const (
|
||||||
|
ErrUnknown uint = iota // 0
|
||||||
|
ErrIncompatibleCNIVersion // 1
|
||||||
|
ErrUnsupportedField // 2
|
||||||
|
)
|
||||||
|
|
||||||
type Error struct {
|
type Error struct {
|
||||||
Code uint `json:"code"`
|
Code uint `json:"code"`
|
||||||
Msg string `json:"msg"`
|
Msg string `json:"msg"`
|
||||||
|
@ -117,7 +146,11 @@ type Error struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Error) Error() string {
|
func (e *Error) Error() string {
|
||||||
return e.Msg
|
details := ""
|
||||||
|
if e.Details != "" {
|
||||||
|
details = fmt.Sprintf("; %v", e.Details)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%v%v", e.Msg, details)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Error) Print() error {
|
func (e *Error) Print() error {
|
||||||
|
@ -128,39 +161,11 @@ func (e *Error) Print() error {
|
||||||
// for our custom IPNet type
|
// for our custom IPNet type
|
||||||
|
|
||||||
// JSON (un)marshallable types
|
// JSON (un)marshallable types
|
||||||
type ipConfig struct {
|
|
||||||
IP IPNet `json:"ip"`
|
|
||||||
Gateway net.IP `json:"gateway,omitempty"`
|
|
||||||
Routes []Route `json:"routes,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type route struct {
|
type route struct {
|
||||||
Dst IPNet `json:"dst"`
|
Dst IPNet `json:"dst"`
|
||||||
GW net.IP `json:"gw,omitempty"`
|
GW net.IP `json:"gw,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *IPConfig) MarshalJSON() ([]byte, error) {
|
|
||||||
ipc := ipConfig{
|
|
||||||
IP: IPNet(c.IP),
|
|
||||||
Gateway: c.Gateway,
|
|
||||||
Routes: c.Routes,
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(ipc)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *IPConfig) UnmarshalJSON(data []byte) error {
|
|
||||||
ipc := ipConfig{}
|
|
||||||
if err := json.Unmarshal(data, &ipc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
c.IP = net.IPNet(ipc.IP)
|
|
||||||
c.Gateway = ipc.Gateway
|
|
||||||
c.Routes = ipc.Routes
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Route) UnmarshalJSON(data []byte) error {
|
func (r *Route) UnmarshalJSON(data []byte) error {
|
||||||
rt := route{}
|
rt := route{}
|
||||||
if err := json.Unmarshal(data, &rt); err != nil {
|
if err := json.Unmarshal(data, &rt); err != nil {
|
||||||
|
@ -172,7 +177,7 @@ func (r *Route) UnmarshalJSON(data []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Route) MarshalJSON() ([]byte, error) {
|
func (r Route) MarshalJSON() ([]byte, error) {
|
||||||
rt := route{
|
rt := route{
|
||||||
Dst: IPNet(r.Dst),
|
Dst: IPNet(r.Dst),
|
||||||
GW: r.GW,
|
GW: r.GW,
|
||||||
|
@ -189,3 +194,6 @@ func prettyPrint(obj interface{}) error {
|
||||||
_, err = os.Stdout.Write(data)
|
_, err = os.Stdout.Write(data)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NotImplementedError is used to indicate that a method is not implemented for the given platform
|
||||||
|
var NotImplementedError = errors.New("Not Implemented")
|
||||||
|
|
37 vendor/github.com/containernetworking/cni/pkg/version/conf.go generated vendored Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
// Copyright 2016 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package version
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConfigDecoder can decode the CNI version available in network config data
|
||||||
|
type ConfigDecoder struct{}
|
||||||
|
|
||||||
|
func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) {
|
||||||
|
var conf struct {
|
||||||
|
CNIVersion string `json:"cniVersion"`
|
||||||
|
}
|
||||||
|
err := json.Unmarshal(jsonBytes, &conf)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("decoding version from network config: %s", err)
|
||||||
|
}
|
||||||
|
if conf.CNIVersion == "" {
|
||||||
|
return "0.1.0", nil
|
||||||
|
}
|
||||||
|
return conf.CNIVersion, nil
|
||||||
|
}
|
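A brief usage illustration (editorial addition, not part of the vendored conf.go): `ConfigDecoder.Decode` only inspects the `cniVersion` field of a network config and falls back to "0.1.0" when the field is absent. The config literal below is made up.

```go
package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	// Minimal made-up config; only cniVersion matters to the decoder.
	conf := []byte(`{"cniVersion": "0.4.0", "name": "example-net", "type": "bridge"}`)

	confVersion, err := (&version.ConfigDecoder{}).Decode(conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("config declares CNI version:", confVersion)
}
```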
144 vendor/github.com/containernetworking/cni/pkg/version/plugin.go generated vendored Normal file
|
@ -0,0 +1,144 @@
|
||||||
|
// Copyright 2016 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package version
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PluginInfo reports information about CNI versioning
|
||||||
|
type PluginInfo interface {
|
||||||
|
// SupportedVersions returns one or more CNI spec versions that the plugin
|
||||||
|
// supports. If input is provided in one of these versions, then the plugin
|
||||||
|
// promises to use the same CNI version in its response
|
||||||
|
SupportedVersions() []string
|
||||||
|
|
||||||
|
// Encode writes this CNI version information as JSON to the given Writer
|
||||||
|
Encode(io.Writer) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type pluginInfo struct {
|
||||||
|
CNIVersion_ string `json:"cniVersion"`
|
||||||
|
SupportedVersions_ []string `json:"supportedVersions,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// pluginInfo implements the PluginInfo interface
|
||||||
|
var _ PluginInfo = &pluginInfo{}
|
||||||
|
|
||||||
|
func (p *pluginInfo) Encode(w io.Writer) error {
|
||||||
|
return json.NewEncoder(w).Encode(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *pluginInfo) SupportedVersions() []string {
|
||||||
|
return p.SupportedVersions_
|
||||||
|
}
|
||||||
|
|
||||||
|
// PluginSupports returns a new PluginInfo that will report the given versions
|
||||||
|
// as supported
|
||||||
|
func PluginSupports(supportedVersions ...string) PluginInfo {
|
||||||
|
if len(supportedVersions) < 1 {
|
||||||
|
panic("programmer error: you must support at least one version")
|
||||||
|
}
|
||||||
|
return &pluginInfo{
|
||||||
|
CNIVersion_: Current(),
|
||||||
|
SupportedVersions_: supportedVersions,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PluginDecoder can decode the response returned by a plugin's VERSION command
|
||||||
|
type PluginDecoder struct{}
|
||||||
|
|
||||||
|
func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) {
|
||||||
|
var info pluginInfo
|
||||||
|
err := json.Unmarshal(jsonBytes, &info)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("decoding version info: %s", err)
|
||||||
|
}
|
||||||
|
if info.CNIVersion_ == "" {
|
||||||
|
return nil, fmt.Errorf("decoding version info: missing field cniVersion")
|
||||||
|
}
|
||||||
|
if len(info.SupportedVersions_) == 0 {
|
||||||
|
if info.CNIVersion_ == "0.2.0" {
|
||||||
|
return PluginSupports("0.1.0", "0.2.0"), nil
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("decoding version info: missing field supportedVersions")
|
||||||
|
}
|
||||||
|
return &info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major,
|
||||||
|
// minor, and micro numbers or returns an error
|
||||||
|
func ParseVersion(version string) (int, int, int, error) {
|
||||||
|
var major, minor, micro int
|
||||||
|
if version == "" {
|
||||||
|
return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := strings.Split(version, ".")
|
||||||
|
if len(parts) >= 4 {
|
||||||
|
return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
major, err := strconv.Atoi(parts[0])
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %v", parts[0], err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(parts) >= 2 {
|
||||||
|
minor, err = strconv.Atoi(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %v", parts[1], err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(parts) >= 3 {
|
||||||
|
micro, err = strconv.Atoi(parts[2])
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %v", parts[2], err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return major, minor, micro, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro
|
||||||
|
// numbers, and compares them to determine whether the first version is greater
|
||||||
|
// than or equal to the second
|
||||||
|
func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) {
|
||||||
|
firstMajor, firstMinor, firstMicro, err := ParseVersion(version)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if firstMajor > secondMajor {
|
||||||
|
return true, nil
|
||||||
|
} else if firstMajor == secondMajor {
|
||||||
|
if firstMinor > secondMinor {
|
||||||
|
return true, nil
|
||||||
|
} else if firstMinor == secondMinor && firstMicro >= secondMicro {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
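For illustration only (not part of the vendored plugin.go), a minimal sketch of gating behavior on a negotiated CNI version with these helpers; the negotiated value is an assumption, and the "0.4.0" threshold reflects that the CHECK command was introduced in spec 0.4.0.

```go
package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	// Assume this came from version negotiation with a plugin.
	negotiated := "0.4.0"

	major, minor, micro, err := version.ParseVersion(negotiated)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("plugin speaks CNI %d.%d.%d\n", major, minor, micro)

	// Gate the CHECK command, which requires at least spec 0.4.0.
	ok, err := version.GreaterThanOrEqualTo(negotiated, "0.4.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CHECK supported:", ok)
}
```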
49 vendor/github.com/containernetworking/cni/pkg/version/reconcile.go generated vendored Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
// Copyright 2016 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package version
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
type ErrorIncompatible struct {
|
||||||
|
Config string
|
||||||
|
Supported []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ErrorIncompatible) Details() string {
|
||||||
|
return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ErrorIncompatible) Error() string {
|
||||||
|
return fmt.Sprintf("incompatible CNI versions: %s", e.Details())
|
||||||
|
}
|
||||||
|
|
||||||
|
type Reconciler struct{}
|
||||||
|
|
||||||
|
func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible {
|
||||||
|
return r.CheckRaw(configVersion, pluginInfo.SupportedVersions())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible {
|
||||||
|
for _, supportedVersion := range supportedVersions {
|
||||||
|
if configVersion == supportedVersion {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ErrorIncompatible{
|
||||||
|
Config: configVersion,
|
||||||
|
Supported: supportedVersions,
|
||||||
|
}
|
||||||
|
}
|
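As a hedged sketch (editorial addition, not part of the vendored reconcile.go), checking a config version against the versions a plugin advertises; the version lists here are invented.

```go
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	// Pretend the plugin advertised these versions via its VERSION command.
	pluginVersions := version.PluginSupports("0.3.0", "0.3.1", "0.4.0")

	r := &version.Reconciler{}
	if err := r.Check("0.2.0", pluginVersions); err != nil {
		fmt.Println("incompatible:", err.Error())
		return
	}
	fmt.Println("config version is supported by the plugin")
}
```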
83 vendor/github.com/containernetworking/cni/pkg/version/version.go generated vendored Normal file
|
@ -0,0 +1,83 @@
|
||||||
|
// Copyright 2016 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package version
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/containernetworking/cni/pkg/types"
|
||||||
|
"github.com/containernetworking/cni/pkg/types/020"
|
||||||
|
"github.com/containernetworking/cni/pkg/types/current"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Current reports the version of the CNI spec implemented by this library
|
||||||
|
func Current() string {
|
||||||
|
return "0.4.0"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Legacy PluginInfo describes a plugin that is backwards compatible with the
|
||||||
|
// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0
|
||||||
|
// library ought to work correctly with a plugin that reports support for
|
||||||
|
// Legacy versions.
|
||||||
|
//
|
||||||
|
// Any future CNI spec versions which meet this definition should be added to
|
||||||
|
// this list.
|
||||||
|
var Legacy = PluginSupports("0.1.0", "0.2.0")
|
||||||
|
var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0")
|
||||||
|
|
||||||
|
var resultFactories = []struct {
|
||||||
|
supportedVersions []string
|
||||||
|
newResult types.ResultFactoryFunc
|
||||||
|
}{
|
||||||
|
{current.SupportedVersions, current.NewResult},
|
||||||
|
{types020.SupportedVersions, types020.NewResult},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finds a Result object matching the requested version (if any) and asks
|
||||||
|
// that object to parse the plugin result, returning an error if parsing failed.
|
||||||
|
func NewResult(version string, resultBytes []byte) (types.Result, error) {
|
||||||
|
reconciler := &Reconciler{}
|
||||||
|
for _, resultFactory := range resultFactories {
|
||||||
|
err := reconciler.CheckRaw(version, resultFactory.supportedVersions)
|
||||||
|
if err == nil {
|
||||||
|
// Result supports this version
|
||||||
|
return resultFactory.newResult(resultBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("unsupported CNI result version %q", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParsePrevResult parses a prevResult in a NetConf structure and sets
|
||||||
|
// the NetConf's PrevResult member to the parsed Result object.
|
||||||
|
func ParsePrevResult(conf *types.NetConf) error {
|
||||||
|
if conf.RawPrevResult == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
resultBytes, err := json.Marshal(conf.RawPrevResult)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not serialize prevResult: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
conf.RawPrevResult = nil
|
||||||
|
conf.PrevResult, err = NewResult(conf.CNIVersion, resultBytes)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not parse prevResult: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
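As a usage sketch (editorial addition, not part of the vendored version.go), feeding a network config that carries a `prevResult`, as a CHECK or DEL call might, through `ParsePrevResult`; the JSON literal is invented.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/types"
	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	// Made-up config with an embedded prevResult.
	raw := []byte(`{
	  "cniVersion": "0.4.0",
	  "name": "example-net",
	  "type": "bridge",
	  "prevResult": {
	    "cniVersion": "0.4.0",
	    "ips": [{"version": "4", "address": "10.1.0.5/24"}]
	  }
	}`)

	conf := &types.NetConf{}
	if err := json.Unmarshal(raw, conf); err != nil {
		log.Fatal(err)
	}

	// Promotes RawPrevResult into the typed PrevResult field.
	if err := version.ParsePrevResult(conf); err != nil {
		log.Fatal(err)
	}
	fmt.Println("prevResult version:", conf.PrevResult.Version())
}
```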
201 vendor/github.com/containernetworking/plugins/LICENSE generated vendored Normal file
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright {yyyy} {name of copyright owner}
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
41 vendor/github.com/containernetworking/plugins/pkg/ns/README.md generated vendored Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
### Namespaces, Threads, and Go
|
||||||
|
On Linux each OS thread can have a different network namespace. Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code.
|
||||||
|
|
||||||
|
### Namespace Switching
|
||||||
|
Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads.
|
||||||
|
|
||||||
|
Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in.
|
||||||
|
|
||||||
|
For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly.
|
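To make the thread-pinning requirement concrete, here is a minimal sketch (editorial addition, not part of the vendored README) of guarding a direct `Set()` call with `LockOSThread()`; the helper name is invented and the namespace path is supplied by the caller.

```go
package nsexample

import (
	"runtime"

	"github.com/containernetworking/plugins/pkg/ns"
)

// runInNamespace pins the calling goroutine to its OS thread, switches that
// thread into the namespace at nsPath, runs work, and restores the original
// namespace before the thread is unlocked.
func runInNamespace(nsPath string, work func() error) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	origNS, err := ns.GetCurrentNS()
	if err != nil {
		return err
	}
	defer origNS.Close()

	targetNS, err := ns.GetNS(nsPath)
	if err != nil {
		return err
	}
	defer targetNS.Close()

	if err := targetNS.Set(); err != nil {
		return err
	}
	defer origNS.Set() // switch the locked thread back before unlocking

	return work()
}
```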
||||||
|
|
||||||
|
### Do() The Recommended Thing
|
||||||
|
The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
err = targetNs.Do(func(hostNs ns.NetNS) error {
|
||||||
|
dummy := &netlink.Dummy{
|
||||||
|
LinkAttrs: netlink.LinkAttrs{
|
||||||
|
Name: "dummy0",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return netlink.LinkAdd(dummy)
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. All goroutines spawned from within the `ns.Do` will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem.
|
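For completeness (editorial addition, not from the vendored README), the same wrapping can be driven by a namespace path using `WithNetNSPath`, which opens the namespace, runs the closure via `Do()`, and closes it again; the path is an assumption, and the snippet presumes the `fmt` and `netlink` packages are imported as in the example above.

```go
err = ns.WithNetNSPath("/var/run/netns/example", func(hostNS ns.NetNS) error {
	// Runs with the target namespace current; hostNS is the namespace we came from.
	links, err := netlink.LinkList()
	if err != nil {
		return err
	}
	fmt.Printf("%d links inside the namespace\n", len(links))
	return nil
})
```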
||||||
|
|
||||||
|
When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled.
|
||||||
|
|
||||||
|
In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use go 1.10 or greater.
|
||||||
|
|
||||||
|
|
||||||
|
### Creating network namespaces
|
||||||
|
Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration.
|
||||||
|
|
||||||
|
|
||||||
|
### Further Reading
|
||||||
|
- https://github.com/golang/go/wiki/LockOSThread
|
||||||
|
- http://morsmachine.dk/go-scheduler
|
||||||
|
- https://github.com/containernetworking/cni/issues/262
|
||||||
|
- https://golang.org/pkg/runtime/
|
||||||
|
- https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix
|
216 vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go generated vendored Normal file
|
@ -0,0 +1,216 @@
|
||||||
|
// Copyright 2015-2017 CNI authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Returns an object representing the current OS thread's network namespace
|
||||||
|
func GetCurrentNS() (NetNS, error) {
|
||||||
|
return GetNS(getCurrentThreadNetNSPath())
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCurrentThreadNetNSPath() string {
|
||||||
|
// /proc/self/ns/net returns the namespace of the main thread, not
|
||||||
|
// of whatever thread this goroutine is running on. Make sure we
|
||||||
|
// use the thread's net namespace since the thread is switching around
|
||||||
|
return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *netNS) Close() error {
|
||||||
|
if err := ns.errorIfClosed(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ns.file.Close(); err != nil {
|
||||||
|
return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err)
|
||||||
|
}
|
||||||
|
ns.closed = true
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *netNS) Set() error {
|
||||||
|
if err := ns.errorIfClosed(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil {
|
||||||
|
return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type NetNS interface {
|
||||||
|
// Executes the passed closure in this object's network namespace,
|
||||||
|
// attempting to restore the original namespace before returning.
|
||||||
|
// However, since each OS thread can have a different network namespace,
|
||||||
|
// and Go's thread scheduling is highly variable, callers cannot
|
||||||
|
// guarantee any specific namespace is set unless operations that
|
||||||
|
// require that namespace are wrapped with Do(). Also, no code called
|
||||||
|
// from Do() should call runtime.UnlockOSThread(), or the risk
|
||||||
|
// of executing code in an incorrect namespace will be greater. See
|
||||||
|
// https://github.com/golang/go/wiki/LockOSThread for further details.
|
||||||
|
Do(toRun func(NetNS) error) error
|
||||||
|
|
||||||
|
// Sets the current network namespace to this object's network namespace.
|
||||||
|
// Note that since Go's thread scheduling is highly variable, callers
|
||||||
|
// cannot guarantee the requested namespace will be the current namespace
|
||||||
|
// after this function is called; to ensure this wrap operations that
|
||||||
|
// require the namespace with Do() instead.
|
||||||
|
Set() error
|
||||||
|
|
||||||
|
// Returns the filesystem path representing this object's network namespace
|
||||||
|
Path() string
|
||||||
|
|
||||||
|
// Returns a file descriptor representing this object's network namespace
|
||||||
|
Fd() uintptr
|
||||||
|
|
||||||
|
// Cleans up this instance of the network namespace; if this instance
|
||||||
|
// is the last user the namespace will be destroyed
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
type netNS struct {
|
||||||
|
file *os.File
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// netNS implements the NetNS interface
|
||||||
|
var _ NetNS = &netNS{}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h
|
||||||
|
NSFS_MAGIC = 0x6e736673
|
||||||
|
PROCFS_MAGIC = 0x9fa0
|
||||||
|
)
|
||||||
|
|
||||||
|
type NSPathNotExistErr struct{ msg string }
|
||||||
|
|
||||||
|
func (e NSPathNotExistErr) Error() string { return e.msg }
|
||||||
|
|
||||||
|
type NSPathNotNSErr struct{ msg string }
|
||||||
|
|
||||||
|
func (e NSPathNotNSErr) Error() string { return e.msg }
|
||||||
|
|
||||||
|
func IsNSorErr(nspath string) error {
|
||||||
|
stat := syscall.Statfs_t{}
|
||||||
|
if err := syscall.Statfs(nspath, &stat); err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)}
|
||||||
|
} else {
|
||||||
|
err = fmt.Errorf("failed to Statfs %q: %v", nspath, err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch stat.Type {
|
||||||
|
case PROCFS_MAGIC, NSFS_MAGIC:
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns an object representing the namespace referred to by @path
|
||||||
|
func GetNS(nspath string) (NetNS, error) {
|
||||||
|
err := IsNSorErr(nspath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fd, err := os.Open(nspath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &netNS{file: fd}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *netNS) Path() string {
|
||||||
|
return ns.file.Name()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *netNS) Fd() uintptr {
|
||||||
|
return ns.file.Fd()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *netNS) errorIfClosed() error {
|
||||||
|
if ns.closed {
|
||||||
|
return fmt.Errorf("%q has already been closed", ns.file.Name())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *netNS) Do(toRun func(NetNS) error) error {
|
||||||
|
if err := ns.errorIfClosed(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
containedCall := func(hostNS NetNS) error {
|
||||||
|
threadNS, err := GetCurrentNS()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open current netns: %v", err)
|
||||||
|
}
|
||||||
|
defer threadNS.Close()
|
||||||
|
|
||||||
|
// switch to target namespace
|
||||||
|
if err = ns.Set(); err != nil {
|
||||||
|
return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err)
|
||||||
|
}
|
||||||
|
defer threadNS.Set() // switch back
|
||||||
|
|
||||||
|
return toRun(hostNS)
|
||||||
|
}
|
||||||
|
|
||||||
|
// save a handle to current network namespace
|
||||||
|
hostNS, err := GetCurrentNS()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to open current namespace: %v", err)
|
||||||
|
}
|
||||||
|
defer hostNS.Close()
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
var innerError error
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
runtime.LockOSThread()
|
||||||
|
innerError = containedCall(hostNS)
|
||||||
|
}()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
return innerError
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithNetNSPath executes the passed closure under the given network
|
||||||
|
// namespace, restoring the original namespace afterwards.
|
||||||
|
func WithNetNSPath(nspath string, toRun func(NetNS) error) error {
|
||||||
|
ns, err := GetNS(nspath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer ns.Close()
|
||||||
|
return ns.Do(toRun)
|
||||||
|
}
|
68 vendor/github.com/hashicorp/consul/api/README.md generated vendored
|
@ -17,27 +17,51 @@ Usage
|
||||||
Below is an example of using the Consul client:
|
Below is an example of using the Consul client:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
// Get a new client
|
package main
|
||||||
client, err := api.NewClient(api.DefaultConfig())
|
|
||||||
if err != nil {
|
import "github.com/hashicorp/consul/api"
|
||||||
panic(err)
|
import "fmt"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Get a new client
|
||||||
|
client, err := api.NewClient(api.DefaultConfig())
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get a handle to the KV API
|
||||||
|
kv := client.KV()
|
||||||
|
|
||||||
|
// PUT a new KV pair
|
||||||
|
p := &api.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")}
|
||||||
|
_, err = kv.Put(p, nil)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup the pair
|
||||||
|
pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("KV: %v %s\n", pair.Key, pair.Value)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get a handle to the KV API
|
|
||||||
kv := client.KV()
|
|
||||||
|
|
||||||
// PUT a new KV pair
|
|
||||||
p := &api.KVPair{Key: "foo", Value: []byte("test")}
|
|
||||||
_, err = kv.Put(p, nil)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lookup the pair
|
|
||||||
pair, _, err := kv.Get("foo", nil)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
fmt.Printf("KV: %v", pair)
|
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
To run this example, start a Consul server:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
consul agent -dev
|
||||||
|
```
|
||||||
|
|
||||||
|
Copy the code above into a file such as `main.go`.
|
||||||
|
|
||||||
|
Install and run. You'll see a key (`REDIS_MAXCLIENTS`) and value (`1000`) printed.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ go get
|
||||||
|
$ go run main.go
|
||||||
|
KV: REDIS_MAXCLIENTS 1000
|
||||||
|
```
|
||||||
|
|
||||||
|
After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv
|
||||||
|
|
947 vendor/github.com/hashicorp/consul/api/acl.go generated vendored
File diff suppressed because it is too large
288 vendor/github.com/hashicorp/consul/api/agent.go generated vendored
|
@@ -2,7 +2,11 @@ package api
 
 import (
 	"bufio"
+	"bytes"
 	"fmt"
+	"io"
+	"net/http"
+	"net/url"
 )
 
 // ServiceKind is the kind of service being registered.
@@ -38,6 +42,18 @@ const (
 	ProxyExecModeScript ProxyExecMode = "script"
 )
 
+// UpstreamDestType is the type of upstream discovery mechanism.
+type UpstreamDestType string
+
+const (
+	// UpstreamDestTypeService discovers instances via healthy service lookup.
+	UpstreamDestTypeService UpstreamDestType = "service"
+
+	// UpstreamDestTypePreparedQuery discovers instances via prepared query
+	// execution.
+	UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query"
+)
+
 // AgentCheck represents a check known to the agent
 type AgentCheck struct {
 	Node string
@@ -51,34 +67,64 @@ type AgentCheck struct {
 	Definition HealthCheckDefinition
 }
 
+// AgentWeights represent optional weights for a service
+type AgentWeights struct {
+	Passing int
+	Warning int
+}
+
 // AgentService represents a service known to the agent
 type AgentService struct {
-	Kind              ServiceKind
+	Kind              ServiceKind `json:",omitempty"`
 	ID                string
 	Service           string
 	Tags              []string
 	Meta              map[string]string
 	Port              int
 	Address           string
+	Weights           AgentWeights
 	EnableTagOverride bool
-	CreateIndex       uint64
-	ModifyIndex       uint64
-	ProxyDestination  string
-	Connect           *AgentServiceConnect
+	CreateIndex       uint64 `json:",omitempty" bexpr:"-"`
+	ModifyIndex       uint64 `json:",omitempty" bexpr:"-"`
+	ContentHash       string `json:",omitempty" bexpr:"-"`
+	// DEPRECATED (ProxyDestination) - remove this field
+	ProxyDestination string                          `json:",omitempty" bexpr:"-"`
+	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect          *AgentServiceConnect            `json:",omitempty"`
+}
+
+// AgentServiceChecksInfo returns information about a Service and its checks
+type AgentServiceChecksInfo struct {
+	AggregatedStatus string
+	Service          *AgentService
+	Checks           HealthChecks
 }
 
 // AgentServiceConnect represents the Connect configuration of a service.
 type AgentServiceConnect struct {
-	Native bool
-	Proxy  *AgentServiceConnectProxy
+	Native         bool                      `json:",omitempty"`
+	Proxy          *AgentServiceConnectProxy `json:",omitempty" bexpr:"-"`
+	SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
 }
 
 // AgentServiceConnectProxy represents the Connect Proxy configuration of a
 // service.
 type AgentServiceConnectProxy struct {
-	ExecMode ProxyExecMode
-	Command  []string
-	Config   map[string]interface{}
+	ExecMode  ProxyExecMode          `json:",omitempty"`
+	Command   []string               `json:",omitempty"`
+	Config    map[string]interface{} `json:",omitempty" bexpr:"-"`
+	Upstreams []Upstream             `json:",omitempty"`
+}
+
+// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
+// ServiceDefinition or response.
+type AgentServiceConnectProxyConfig struct {
+	DestinationServiceName string
+	DestinationServiceID   string                 `json:",omitempty"`
+	LocalServiceAddress    string                 `json:",omitempty"`
+	LocalServicePort       int                    `json:",omitempty"`
+	Config                 map[string]interface{} `json:",omitempty" bexpr:"-"`
+	Upstreams              []Upstream
 }
 
 // AgentMember represents a cluster member known to the agent
@@ -119,10 +165,13 @@ type AgentServiceRegistration struct {
 	Address           string            `json:",omitempty"`
 	EnableTagOverride bool              `json:",omitempty"`
 	Meta              map[string]string `json:",omitempty"`
+	Weights           *AgentWeights     `json:",omitempty"`
 	Check             *AgentServiceCheck
 	Checks            AgentServiceChecks
-	ProxyDestination  string               `json:",omitempty"`
-	Connect           *AgentServiceConnect `json:",omitempty"`
+	// DEPRECATED (ProxyDestination) - remove this field
+	ProxyDestination string                          `json:",omitempty"`
+	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect          *AgentServiceConnect            `json:",omitempty"`
 }
 
 // AgentCheckRegistration is used to register a new check
@@ -153,6 +202,8 @@ type AgentServiceCheck struct {
 	TLSSkipVerify bool   `json:",omitempty"`
 	GRPC          string `json:",omitempty"`
 	GRPCUseTLS    bool   `json:",omitempty"`
+	AliasNode     string `json:",omitempty"`
+	AliasService  string `json:",omitempty"`
 
 	// In Consul 0.7 and later, checks that are associated with a service
 	// may also contain this optional DeregisterCriticalServiceAfter field,
@@ -225,9 +276,23 @@ type ConnectProxyConfig struct {
 	TargetServiceID   string
 	TargetServiceName string
 	ContentHash       string
-	ExecMode          ProxyExecMode
-	Command           []string
-	Config            map[string]interface{}
+	// DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs
+	// but they don't need ExecMode or Command
+	ExecMode  ProxyExecMode          `json:",omitempty"`
+	Command   []string               `json:",omitempty"`
+	Config    map[string]interface{} `bexpr:"-"`
+	Upstreams []Upstream
+}
+
+// Upstream is the response structure for a proxy upstream configuration.
+type Upstream struct {
+	DestinationType      UpstreamDestType `json:",omitempty"`
+	DestinationNamespace string           `json:",omitempty"`
+	DestinationName      string
+	Datacenter           string                 `json:",omitempty"`
+	LocalBindAddress     string                 `json:",omitempty"`
+	LocalBindPort        int                    `json:",omitempty"`
+	Config               map[string]interface{} `json:",omitempty" bexpr:"-"`
 }
 
 // Agent can be used to query the Agent endpoints
@@ -260,6 +325,24 @@ func (a *Agent) Self() (map[string]map[string]interface{}, error) {
 	return out, nil
 }
 
+// Host is used to retrieve information about the host the
+// agent is running on such as CPU, memory, and disk. Requires
+// a operator:read ACL token.
+func (a *Agent) Host() (map[string]interface{}, error) {
+	r := a.c.newRequest("GET", "/v1/agent/host")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]interface{}
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 // Metrics is used to query the agent we are speaking to for
 // its current internal metric data
 func (a *Agent) Metrics() (*MetricsInfo, error) {
@@ -304,7 +387,14 @@ func (a *Agent) NodeName() (string, error) {
 
 // Checks returns the locally registered checks
 func (a *Agent) Checks() (map[string]*AgentCheck, error) {
+	return a.ChecksWithFilter("")
+}
+
+// ChecksWithFilter returns a subset of the locally registered checks that match
+// the given filter expression
+func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) {
 	r := a.c.newRequest("GET", "/v1/agent/checks")
+	r.filterQuery(filter)
 	_, resp, err := requireOK(a.c.doRequest(r))
 	if err != nil {
 		return nil, err
@@ -320,7 +410,14 @@ func (a *Agent) Checks() (map[string]*AgentCheck, error) {
 
 // Services returns the locally registered services
 func (a *Agent) Services() (map[string]*AgentService, error) {
+	return a.ServicesWithFilter("")
+}
+
+// ServicesWithFilter returns a subset of the locally registered services that match
+// the given filter expression
+func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) {
 	r := a.c.newRequest("GET", "/v1/agent/services")
+	r.filterQuery(filter)
 	_, resp, err := requireOK(a.c.doRequest(r))
 	if err != nil {
 		return nil, err
@@ -335,6 +432,100 @@ func (a *Agent) Services() (map[string]*AgentService, error) {
 	return out, nil
 }
 
+// AgentHealthServiceByID returns for a given serviceID: the aggregated health status, the service definition or an error if any
+// - If the service is not found, will return status (critical, nil, nil)
+// - If the service is found, will return (critical|passing|warning), AgentServiceChecksInfo, nil)
+// - In all other cases, will return an error
+func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceChecksInfo, error) {
+	path := fmt.Sprintf("/v1/agent/health/service/id/%v", url.PathEscape(serviceID))
+	r := a.c.newRequest("GET", path)
+	r.params.Add("format", "json")
+	r.header.Set("Accept", "application/json")
+	_, resp, err := a.c.doRequest(r)
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+	// Service not Found
+	if resp.StatusCode == http.StatusNotFound {
+		return HealthCritical, nil, nil
+	}
+	var out *AgentServiceChecksInfo
+	if err := decodeBody(resp, &out); err != nil {
+		return HealthCritical, out, err
+	}
+	switch resp.StatusCode {
+	case http.StatusOK:
+		return HealthPassing, out, nil
+	case http.StatusTooManyRequests:
+		return HealthWarning, out, nil
+	case http.StatusServiceUnavailable:
+		return HealthCritical, out, nil
+	}
+	return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
+}
+
+// AgentHealthServiceByName returns for a given service name: the aggregated health status for all services
+// having the specified name.
+// - If no service is not found, will return status (critical, [], nil)
+// - If the service is found, will return (critical|passing|warning), []api.AgentServiceChecksInfo, nil)
+// - In all other cases, will return an error
+func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentServiceChecksInfo, error) {
+	path := fmt.Sprintf("/v1/agent/health/service/name/%v", url.PathEscape(service))
+	r := a.c.newRequest("GET", path)
+	r.params.Add("format", "json")
+	r.header.Set("Accept", "application/json")
+	_, resp, err := a.c.doRequest(r)
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+	// Service not Found
+	if resp.StatusCode == http.StatusNotFound {
+		return HealthCritical, nil, nil
+	}
+	var out []AgentServiceChecksInfo
+	if err := decodeBody(resp, &out); err != nil {
+		return HealthCritical, out, err
+	}
+	switch resp.StatusCode {
+	case http.StatusOK:
+		return HealthPassing, out, nil
+	case http.StatusTooManyRequests:
+		return HealthWarning, out, nil
+	case http.StatusServiceUnavailable:
+		return HealthCritical, out, nil
+	}
+	return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
+}
+
+// Service returns a locally registered service instance and allows for
+// hash-based blocking.
+//
+// Note that this uses an unconventional blocking mechanism since it's
+// agent-local state. That means there is no persistent raft index so we block
+// based on object hash instead.
+func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *AgentService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return out, qm, nil
+}
+
 // Members returns the known gossip members. The WAN
 // flag can be used to query a server for WAN members.
 func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
@@ -751,41 +942,94 @@ func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions
 
 // UpdateACLToken updates the agent's "acl_token". See updateToken for more
 // details.
+//
+// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateDefaultACLToken for v1.4.3 and above
 func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
 	return a.updateToken("acl_token", token, q)
 }
 
 // UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken
 // for more details.
+//
+// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentACLToken for v1.4.3 and above
 func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) {
 	return a.updateToken("acl_agent_token", token, q)
 }
 
 // UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See
 // updateToken for more details.
+//
+// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentMasterACLToken for v1.4.3 and above
 func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) {
 	return a.updateToken("acl_agent_master_token", token, q)
 }
 
 // UpdateACLReplicationToken updates the agent's "acl_replication_token". See
 // updateToken for more details.
+//
+// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateReplicationACLToken for v1.4.3 and above
 func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) {
 	return a.updateToken("acl_replication_token", token, q)
 }
 
-// updateToken can be used to update an agent's ACL token after the agent has
-// started. The tokens are not persisted, so will need to be updated again if
-// the agent is restarted.
+// UpdateDefaultACLToken updates the agent's "default" token. See updateToken
+// for more details
+func (a *Agent) UpdateDefaultACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return a.updateTokenFallback("default", "acl_token", token, q)
+}
+
+// UpdateAgentACLToken updates the agent's "agent" token. See updateToken
+// for more details
+func (a *Agent) UpdateAgentACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return a.updateTokenFallback("agent", "acl_agent_token", token, q)
+}
+
+// UpdateAgentMasterACLToken updates the agent's "agent_master" token. See updateToken
+// for more details
+func (a *Agent) UpdateAgentMasterACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return a.updateTokenFallback("agent_master", "acl_agent_master_token", token, q)
+}
+
+// UpdateReplicationACLToken updates the agent's "replication" token. See updateToken
+// for more details
+func (a *Agent) UpdateReplicationACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return a.updateTokenFallback("replication", "acl_replication_token", token, q)
+}
+
+// updateToken can be used to update one of an agent's ACL tokens after the agent has
+// started. The tokens are may not be persisted, so will need to be updated again if
+// the agent is restarted unless the agent is configured to persist them.
 func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) {
+	meta, _, err := a.updateTokenOnce(target, token, q)
+	return meta, err
+}
+
+func (a *Agent) updateTokenFallback(target, fallback, token string, q *WriteOptions) (*WriteMeta, error) {
+	meta, status, err := a.updateTokenOnce(target, token, q)
+	if err != nil && status == 404 {
+		meta, _, err = a.updateTokenOnce(fallback, token, q)
+	}
+	return meta, err
+}
+
+func (a *Agent) updateTokenOnce(target, token string, q *WriteOptions) (*WriteMeta, int, error) {
 	r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target))
 	r.setWriteOptions(q)
 	r.obj = &AgentToken{Token: token}
-	rtt, resp, err := requireOK(a.c.doRequest(r))
+
+	rtt, resp, err := a.c.doRequest(r)
 	if err != nil {
-		return nil, err
+		return nil, 0, err
 	}
-	resp.Body.Close()
+	defer resp.Body.Close()
+
 	wm := &WriteMeta{RequestTime: rtt}
-	return wm, nil
+
+	if resp.StatusCode != 200 {
+		var buf bytes.Buffer
+		io.Copy(&buf, resp.Body)
+		return wm, resp.StatusCode, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+	}
+
+	return wm, resp.StatusCode, nil
 }
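For reference, a minimal sketch of calling the filter and aggregated-health helpers added to the vendored agent API above. The client setup, the service name, and the filter expression are illustrative, not taken from this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// ServicesWithFilter narrows the local service list with a go-bexpr
	// filter expression that the agent evaluates server-side.
	services, err := client.Agent().ServicesWithFilter(`Service == "redis"`)
	if err != nil {
		log.Fatal(err)
	}
	for id := range services {
		// AgentHealthServiceByID aggregates one instance's checks into
		// passing, warning, or critical.
		status, _, err := client.Agent().AgentHealthServiceByID(id)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, status)
	}
}
```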
139 vendor/github.com/hashicorp/consul/api/api.go (generated, vendored)
@@ -30,6 +30,10 @@ const (
 	// the HTTP token.
 	HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
 
+	// HTTPTokenFileEnvName defines an environment variable name which sets
+	// the HTTP token file.
+	HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE"
+
 	// HTTPAuthEnvName defines an environment variable name which sets
 	// the HTTP authentication header.
 	HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
@@ -61,6 +65,12 @@ const (
 	// HTTPSSLVerifyEnvName defines an environment variable name which sets
 	// whether or not to disable certificate checking.
 	HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY"
+
+	// GRPCAddrEnvName defines an environment variable name which sets the gRPC
+	// address for consul connect envoy. Note this isn't actually used by the api
+	// client in this package but is defined here for consistency with all the
+	// other ENV names we use.
+	GRPCAddrEnvName = "CONSUL_GRPC_ADDR"
 )
 
 // QueryOptions are used to parameterize a query
@@ -78,6 +88,27 @@ type QueryOptions struct {
 	// read.
 	RequireConsistent bool
 
+	// UseCache requests that the agent cache results locally. See
+	// https://www.consul.io/api/index.html#agent-caching for more details on the
+	// semantics.
+	UseCache bool
+
+	// MaxAge limits how old a cached value will be returned if UseCache is true.
+	// If there is a cached response that is older than the MaxAge, it is treated
+	// as a cache miss and a new fetch invoked. If the fetch fails, the error is
+	// returned. Clients that wish to allow for stale results on error can set
+	// StaleIfError to a longer duration to change this behavior. It is ignored
+	// if the endpoint supports background refresh caching. See
+	// https://www.consul.io/api/index.html#agent-caching for more details.
+	MaxAge time.Duration
+
+	// StaleIfError specifies how stale the client will accept a cached response
+	// if the servers are unavailable to fetch a fresh one. Only makes sense when
+	// UseCache is true and MaxAge is set to a lower, non-zero value. It is
+	// ignored if the endpoint supports background refresh caching. See
+	// https://www.consul.io/api/index.html#agent-caching for more details.
+	StaleIfError time.Duration
+
 	// WaitIndex is used to enable a blocking query. Waits
 	// until the timeout or the next index is reached
 	WaitIndex uint64
@@ -119,6 +150,10 @@ type QueryOptions struct {
 	// ctx is an optional context pass through to the underlying HTTP
 	// request layer. Use Context() and WithContext() to manage this.
 	ctx context.Context
+
+	// Filter requests filtering data prior to it being returned. The string
+	// is a go-bexpr compatible expression.
+	Filter string
 }
 
 func (o *QueryOptions) Context() context.Context {
@@ -196,6 +231,13 @@ type QueryMeta struct {
 
 	// Is address translation enabled for HTTP responses on this agent
 	AddressTranslationEnabled bool
+
+	// CacheHit is true if the result was served from agent-local cache.
+	CacheHit bool
+
+	// CacheAge is set if request was ?cached and indicates how stale the cached
+	// response is.
+	CacheAge time.Duration
 }
 
 // WriteMeta is used to return meta data about a write
@@ -242,6 +284,10 @@ type Config struct {
 	// which overrides the agent's default token.
 	Token string
 
+	// TokenFile is a file containing the current token to use for this client.
+	// If provided it is read once at startup and never again.
+	TokenFile string
+
 	TLSConfig TLSConfig
 }
 
@@ -276,7 +322,7 @@ type TLSConfig struct {
 // DefaultConfig returns a default configuration for the client. By default this
 // will pool and reuse idle connections to Consul. If you have a long-lived
 // client object, this is the desired behavior and should make the most efficient
-// use of the connections to Consul. If you don't reuse a client object , which
+// use of the connections to Consul. If you don't reuse a client object, which
 // is not recommended, then you may notice idle connections building up over
 // time. To avoid this, use the DefaultNonPooledConfig() instead.
 func DefaultConfig() *Config {
@@ -305,6 +351,10 @@ func defaultConfig(transportFn func() *http.Transport) *Config {
 		config.Address = addr
 	}
 
+	if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" {
+		config.TokenFile = tokenFile
+	}
+
 	if token := os.Getenv(HTTPTokenEnvName); token != "" {
 		config.Token = token
 	}
@@ -411,6 +461,7 @@ func (c *Config) GenerateEnv() []string {
 	env = append(env,
 		fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address),
 		fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token),
+		fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile),
 		fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"),
 		fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile),
 		fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath),
@@ -503,6 +554,19 @@ func NewClient(config *Config) (*Client, error) {
 		config.Address = parts[1]
 	}
 
+	// If the TokenFile is set, always use that, even if a Token is configured.
+	// This is because when TokenFile is set it is read into the Token field.
+	// We want any derived clients to have to re-read the token file.
+	if config.TokenFile != "" {
+		data, err := ioutil.ReadFile(config.TokenFile)
+		if err != nil {
+			return nil, fmt.Errorf("Error loading token file: %s", err)
+		}
+
+		if token := strings.TrimSpace(string(data)); token != "" {
+			config.Token = token
+		}
+	}
 	if config.Token == "" {
 		config.Token = defConfig.Token
 	}
@@ -580,6 +644,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
 	if q.Near != "" {
 		r.params.Set("near", q.Near)
 	}
+	if q.Filter != "" {
+		r.params.Set("filter", q.Filter)
+	}
 	if len(q.NodeMeta) > 0 {
 		for key, value := range q.NodeMeta {
 			r.params.Add("node-meta", key+":"+value)
@@ -591,6 +658,20 @@ func (r *request) setQueryOptions(q *QueryOptions) {
 	if q.Connect {
 		r.params.Set("connect", "true")
 	}
+	if q.UseCache && !q.RequireConsistent {
+		r.params.Set("cached", "")
+
+		cc := []string{}
+		if q.MaxAge > 0 {
+			cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds()))
+		}
+		if q.StaleIfError > 0 {
+			cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds()))
+		}
+		if len(cc) > 0 {
+			r.header.Set("Cache-Control", strings.Join(cc, ", "))
+		}
+	}
 	r.ctx = q.ctx
 }
 
@@ -725,7 +806,7 @@ func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
 func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
 	r := c.newRequest("GET", endpoint)
 	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(c.doRequest(r))
+	rtt, resp, err := c.doRequest(r)
 	if err != nil {
 		return nil, err
 	}
@@ -765,6 +846,8 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*
 }
 
 // parseQueryMeta is used to help parse query meta-data
+//
+// TODO(rb): bug? the error from this function is never handled
 func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
 	header := resp.Header
 
@@ -802,6 +885,18 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
 		q.AddressTranslationEnabled = false
 	}
 
+	// Parse Cache info
+	if cacheStr := header.Get("X-Cache"); cacheStr != "" {
+		q.CacheHit = strings.EqualFold(cacheStr, "HIT")
+	}
+	if ageStr := header.Get("Age"); ageStr != "" {
+		age, err := strconv.ParseUint(ageStr, 10, 64)
+		if err != nil {
+			return fmt.Errorf("Failed to parse Age Header: %v", err)
+		}
+		q.CacheAge = time.Duration(age) * time.Second
+	}
+
 	return nil
 }
 
@@ -830,10 +925,42 @@ func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *h
 		return d, nil, e
 	}
 	if resp.StatusCode != 200 {
-		var buf bytes.Buffer
-		io.Copy(&buf, resp.Body)
-		resp.Body.Close()
-		return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+		return d, nil, generateUnexpectedResponseCodeError(resp)
 	}
 	return d, resp, nil
 }
+
+func (req *request) filterQuery(filter string) {
+	if filter == "" {
+		return
+	}
+
+	req.params.Set("filter", filter)
+}
+
+// generateUnexpectedResponseCodeError consumes the rest of the body, closes
+// the body stream and generates an error indicating the status code was
+// unexpected.
+func generateUnexpectedResponseCodeError(resp *http.Response) error {
+	var buf bytes.Buffer
+	io.Copy(&buf, resp.Body)
+	resp.Body.Close()
+	return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+}
+
+func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) {
+	if e != nil {
+		if resp != nil {
+			resp.Body.Close()
+		}
+		return false, d, nil, e
+	}
+	switch resp.StatusCode {
+	case 200:
+		return true, d, resp, nil
+	case 404:
+		return false, d, resp, nil
+	default:
+		return false, d, nil, generateUnexpectedResponseCodeError(resp)
+	}
+}
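For reference, a minimal sketch of the new agent-caching query options added above. It assumes a local Consul agent and an illustrative "redis" service; neither comes from this diff:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Ask the agent to serve the read from its local cache where possible,
	// accepting results up to 30s old, or up to 5m old if the servers are
	// unreachable.
	opts := &api.QueryOptions{
		UseCache:     true,
		MaxAge:       30 * time.Second,
		StaleIfError: 5 * time.Minute,
	}
	_, meta, err := client.Catalog().Service("redis", "", opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("served from cache:", meta.CacheHit, "age:", meta.CacheAge)
}
```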
45 vendor/github.com/hashicorp/consul/api/catalog.go (generated, vendored)
@@ -1,5 +1,10 @@
 package api
 
+type Weights struct {
+	Passing int
+	Warning int
+}
+
 type Node struct {
 	ID   string
 	Node string
@@ -24,9 +29,14 @@ type CatalogService struct {
 	ServiceTags              []string
 	ServiceMeta              map[string]string
 	ServicePort              int
+	ServiceWeights           Weights
 	ServiceEnableTagOverride bool
-	CreateIndex              uint64
-	ModifyIndex              uint64
+	// DEPRECATED (ProxyDestination) - remove the next comment!
+	// We forgot to ever add ServiceProxyDestination here so no need to deprecate!
+	ServiceProxy *AgentServiceConnectProxyConfig
+	CreateIndex  uint64
+	Checks       HealthChecks
+	ModifyIndex  uint64
 }
 
 type CatalogNode struct {
@@ -43,6 +53,7 @@ type CatalogRegistration struct {
 	Datacenter     string
 	Service        *AgentService
 	Check          *AgentCheck
+	Checks         HealthChecks
 	SkipNodeUpdate bool
 }
 
@@ -156,23 +167,43 @@ func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, er
 
 // Service is used to query catalog entries for a given service
 func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
-	return c.service(service, tag, q, false)
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return c.service(service, tags, q, false)
+}
+
+// Supports multiple tags for filtering
+func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+	return c.service(service, tags, q, false)
 }
 
 // Connect is used to query catalog entries for a given Connect-enabled service
 func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
-	return c.service(service, tag, q, true)
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return c.service(service, tags, q, true)
+}
+
+// Supports multiple tags for filtering
+func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+	return c.service(service, tags, q, true)
 }
 
-func (c *Catalog) service(service, tag string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) {
+func (c *Catalog) service(service string, tags []string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) {
 	path := "/v1/catalog/service/" + service
 	if connect {
 		path = "/v1/catalog/connect/" + service
 	}
 	r := c.c.newRequest("GET", path)
 	r.setQueryOptions(q)
-	if tag != "" {
-		r.params.Set("tag", tag)
+	if len(tags) > 0 {
+		for _, tag := range tags {
+			r.params.Add("tag", tag)
+		}
 	}
 	rtt, resp, err := requireOK(c.c.doRequest(r))
 	if err != nil {
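For reference, a minimal sketch of the new multi-tag catalog lookup; each tag is sent as its own `tag` query parameter, as the change above shows. The service name and tags are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Query "web" instances filtered by more than one tag in a single call.
	services, _, err := client.Catalog().ServiceMultipleTags("web", []string{"v2", "canary"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range services {
		fmt.Println(s.ServicePort, s.ServiceTags)
	}
}
```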
255 vendor/github.com/hashicorp/consul/api/config_entry.go (generated, vendored, new file)
@@ -0,0 +1,255 @@
package api

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"strconv"
	"strings"

	"github.com/mitchellh/mapstructure"
)

const (
	ServiceDefaults string = "service-defaults"
	ProxyDefaults   string = "proxy-defaults"
	ProxyConfigGlobal string = "global"
)

type ConfigEntry interface {
	GetKind() string
	GetName() string
	GetCreateIndex() uint64
	GetModifyIndex() uint64
}

type ServiceConfigEntry struct {
	Kind        string
	Name        string
	Protocol    string
	CreateIndex uint64
	ModifyIndex uint64
}

func (s *ServiceConfigEntry) GetKind() string {
	return s.Kind
}

func (s *ServiceConfigEntry) GetName() string {
	return s.Name
}

func (s *ServiceConfigEntry) GetCreateIndex() uint64 {
	return s.CreateIndex
}

func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
	return s.ModifyIndex
}

type ProxyConfigEntry struct {
	Kind        string
	Name        string
	Config      map[string]interface{}
	CreateIndex uint64
	ModifyIndex uint64
}

func (p *ProxyConfigEntry) GetKind() string {
	return p.Kind
}

func (p *ProxyConfigEntry) GetName() string {
	return p.Name
}

func (p *ProxyConfigEntry) GetCreateIndex() uint64 {
	return p.CreateIndex
}

func (p *ProxyConfigEntry) GetModifyIndex() uint64 {
	return p.ModifyIndex
}

type rawEntryListResponse struct {
	kind    string
	Entries []map[string]interface{}
}

func makeConfigEntry(kind, name string) (ConfigEntry, error) {
	switch kind {
	case ServiceDefaults:
		return &ServiceConfigEntry{Name: name}, nil
	case ProxyDefaults:
		return &ProxyConfigEntry{Name: name}, nil
	default:
		return nil, fmt.Errorf("invalid config entry kind: %s", kind)
	}
}

func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) {
	var entry ConfigEntry

	kindVal, ok := raw["Kind"]
	if !ok {
		kindVal, ok = raw["kind"]
	}
	if !ok {
		return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level")
	}

	if kindStr, ok := kindVal.(string); ok {
		newEntry, err := makeConfigEntry(kindStr, "")
		if err != nil {
			return nil, err
		}
		entry = newEntry
	} else {
		return nil, fmt.Errorf("Kind value in payload is not a string")
	}

	decodeConf := &mapstructure.DecoderConfig{
		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
		Result:           &entry,
		WeaklyTypedInput: true,
	}

	decoder, err := mapstructure.NewDecoder(decodeConf)
	if err != nil {
		return nil, err
	}

	return entry, decoder.Decode(raw)
}

func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) {
	var raw map[string]interface{}
	if err := json.Unmarshal(data, &raw); err != nil {
		return nil, err
	}

	return DecodeConfigEntry(raw)
}

// Config can be used to query the Config endpoints
type ConfigEntries struct {
	c *Client
}

// Config returns a handle to the Config endpoints
func (c *Client) ConfigEntries() *ConfigEntries {
	return &ConfigEntries{c}
}

func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) {
	if kind == "" || name == "" {
		return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty")
	}

	entry, err := makeConfigEntry(kind, name)
	if err != nil {
		return nil, nil, err
	}

	r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name))
	r.setQueryOptions(q)
	rtt, resp, err := requireOK(conf.c.doRequest(r))
	if err != nil {
		return nil, nil, err
	}

	defer resp.Body.Close()

	qm := &QueryMeta{}
	parseQueryMeta(resp, qm)
	qm.RequestTime = rtt

	if err := decodeBody(resp, entry); err != nil {
		return nil, nil, err
	}

	return entry, qm, nil
}

func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) {
	if kind == "" {
		return nil, nil, fmt.Errorf("The kind parameter must not be empty")
	}

	r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind))
	r.setQueryOptions(q)
	rtt, resp, err := requireOK(conf.c.doRequest(r))
	if err != nil {
		return nil, nil, err
	}

	defer resp.Body.Close()

	qm := &QueryMeta{}
	parseQueryMeta(resp, qm)
	qm.RequestTime = rtt

	var raw []map[string]interface{}
	if err := decodeBody(resp, &raw); err != nil {
		return nil, nil, err
	}

	var entries []ConfigEntry
	for _, rawEntry := range raw {
		entry, err := DecodeConfigEntry(rawEntry)
		if err != nil {
			return nil, nil, err
		}
		entries = append(entries, entry)
	}

	return entries, qm, nil
}

func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) {
	return conf.set(entry, nil, w)
}

func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) {
	return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w)
}

func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) {
	r := conf.c.newRequest("PUT", "/v1/config")
	r.setWriteOptions(w)
	for param, value := range params {
		r.params.Set(param, value)
	}
	r.obj = entry
	rtt, resp, err := requireOK(conf.c.doRequest(r))
	if err != nil {
		return false, nil, err
	}
	defer resp.Body.Close()

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, resp.Body); err != nil {
		return false, nil, fmt.Errorf("Failed to read response: %v", err)
	}
	res := strings.Contains(buf.String(), "true")

	wm := &WriteMeta{RequestTime: rtt}
	return res, wm, nil
}

func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) {
	if kind == "" || name == "" {
		return nil, fmt.Errorf("Both kind and name parameters must not be empty")
	}

	r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name))
	r.setWriteOptions(w)
	rtt, resp, err := requireOK(conf.c.doRequest(r))
	if err != nil {
		return nil, err
	}
	resp.Body.Close()
	wm := &WriteMeta{RequestTime: rtt}
	return wm, nil
}
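For reference, a minimal sketch of writing and reading a config entry through the new vendored endpoints above; the "web" service name and protocol are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	entries := client.ConfigEntries()

	// Write a service-defaults entry, then read it back.
	ok, _, err := entries.Set(&api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "web",
		Protocol: "http",
	}, nil)
	if err != nil || !ok {
		log.Fatalf("set failed: %v (applied=%v)", err, ok)
	}

	entry, _, err := entries.Get(api.ServiceDefaults, "web", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(entry.GetKind(), entry.GetName())
}
```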
11 vendor/github.com/hashicorp/consul/api/connect_ca.go (generated, vendored)
@@ -21,8 +21,18 @@ type CAConfig struct {
 	ModifyIndex uint64
 }
 
+// CommonCAProviderConfig is the common options available to all CA providers.
+type CommonCAProviderConfig struct {
+	LeafCertTTL      time.Duration
+	SkipValidate     bool
+	CSRMaxPerSecond  float32
+	CSRMaxConcurrent int
+}
+
 // ConsulCAProviderConfig is the config for the built-in Consul CA provider.
 type ConsulCAProviderConfig struct {
+	CommonCAProviderConfig `mapstructure:",squash"`
+
 	PrivateKey     string
 	RootCert       string
 	RotationPeriod time.Duration
@@ -34,7 +44,6 @@ func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, e
 	var config ConsulCAProviderConfig
 	decodeConf := &mapstructure.DecoderConfig{
 		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
-		ErrorUnused:      true,
 		Result:           &config,
 		WeaklyTypedInput: true,
 	}
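For reference, a minimal sketch of parsing a CA provider config map with the newly embedded CommonCAProviderConfig; dropping ErrorUnused means unrecognized keys are now ignored rather than rejected. The values below are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// The duration strings are converted by the mapstructure duration hook;
	// LeafCertTTL is promoted from the embedded CommonCAProviderConfig.
	raw := map[string]interface{}{
		"LeafCertTTL":    "72h",
		"RotationPeriod": "2160h",
	}
	cfg, err := api.ParseConsulCAConfig(raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.LeafCertTTL, cfg.RotationPeriod)
}
```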
106 vendor/github.com/hashicorp/consul/api/debug.go (generated, vendored, new file)
@@ -0,0 +1,106 @@
package api

import (
	"fmt"
	"io/ioutil"
	"strconv"
)

// Debug can be used to query the /debug/pprof endpoints to gather
// profiling information about the target agent.Debug
//
// The agent must have enable_debug set to true for profiling to be enabled
// and for these endpoints to function.
type Debug struct {
	c *Client
}

// Debug returns a handle that exposes the internal debug endpoints.
func (c *Client) Debug() *Debug {
	return &Debug{c}
}

// Heap returns a pprof heap dump
func (d *Debug) Heap() ([]byte, error) {
	r := d.c.newRequest("GET", "/debug/pprof/heap")
	_, resp, err := d.c.doRequest(r)
	if err != nil {
		return nil, fmt.Errorf("error making request: %s", err)
	}
	defer resp.Body.Close()

	// We return a raw response because we're just passing through a response
	// from the pprof handlers
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error decoding body: %s", err)
	}

	return body, nil
}

// Profile returns a pprof CPU profile for the specified number of seconds
func (d *Debug) Profile(seconds int) ([]byte, error) {
	r := d.c.newRequest("GET", "/debug/pprof/profile")

	// Capture a profile for the specified number of seconds
	r.params.Set("seconds", strconv.Itoa(seconds))

	_, resp, err := d.c.doRequest(r)
	if err != nil {
		return nil, fmt.Errorf("error making request: %s", err)
	}
	defer resp.Body.Close()

	// We return a raw response because we're just passing through a response
	// from the pprof handlers
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error decoding body: %s", err)
	}

	return body, nil
}

// Trace returns an execution trace
func (d *Debug) Trace(seconds int) ([]byte, error) {
	r := d.c.newRequest("GET", "/debug/pprof/trace")

	// Capture a trace for the specified number of seconds
	r.params.Set("seconds", strconv.Itoa(seconds))

	_, resp, err := d.c.doRequest(r)
	if err != nil {
		return nil, fmt.Errorf("error making request: %s", err)
	}
	defer resp.Body.Close()

	// We return a raw response because we're just passing through a response
	// from the pprof handlers
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error decoding body: %s", err)
	}

	return body, nil
}

// Goroutine returns a pprof goroutine profile
func (d *Debug) Goroutine() ([]byte, error) {
	r := d.c.newRequest("GET", "/debug/pprof/goroutine")

	_, resp, err := d.c.doRequest(r)
	if err != nil {
		return nil, fmt.Errorf("error making request: %s", err)
	}
	defer resp.Body.Close()

	// We return a raw response because we're just passing through a response
	// from the pprof handlers
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error decoding body: %s", err)
	}

	return body, nil
}
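For reference, a minimal sketch of pulling a CPU profile through the new vendored Debug endpoints above. It requires an agent started with enable_debug set to true; the output filename is illustrative:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Capture a 10 second CPU profile and write it to disk for `go tool pprof`.
	profile, err := client.Debug().Profile(10)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("consul-cpu.pprof", profile, 0644); err != nil {
		log.Fatal(err)
	}
}
```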
118 vendor/github.com/hashicorp/consul/api/health.go (generated, vendored)
@ -1,8 +1,10 @@
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@@ -36,21 +38,99 @@ type HealthCheck struct {
 	ServiceTags []string
 
 	Definition HealthCheckDefinition
+
+	CreateIndex uint64
+	ModifyIndex uint64
 }
 
 // HealthCheckDefinition is used to store the details about
 // a health check's execution.
 type HealthCheckDefinition struct {
 	HTTP          string
 	Header        map[string][]string
 	Method        string
 	TLSSkipVerify bool
 	TCP           string
+	IntervalDuration                       time.Duration `json:"-"`
+	TimeoutDuration                        time.Duration `json:"-"`
+	DeregisterCriticalServiceAfterDuration time.Duration `json:"-"`
+
+	// DEPRECATED in Consul 1.4.1. Use the above time.Duration fields instead.
 	Interval                       ReadableDuration
 	Timeout                        ReadableDuration
 	DeregisterCriticalServiceAfter ReadableDuration
 }
 
+func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) {
+	type Alias HealthCheckDefinition
+	out := &struct {
+		Interval                       string
+		Timeout                        string
+		DeregisterCriticalServiceAfter string
+		*Alias
+	}{
+		Interval:                       d.Interval.String(),
+		Timeout:                        d.Timeout.String(),
+		DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(),
+		Alias:                          (*Alias)(d),
+	}
+
+	if d.IntervalDuration != 0 {
+		out.Interval = d.IntervalDuration.String()
+	} else if d.Interval != 0 {
+		out.Interval = d.Interval.String()
+	}
+	if d.TimeoutDuration != 0 {
+		out.Timeout = d.TimeoutDuration.String()
+	} else if d.Timeout != 0 {
+		out.Timeout = d.Timeout.String()
+	}
+	if d.DeregisterCriticalServiceAfterDuration != 0 {
+		out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfterDuration.String()
+	} else if d.DeregisterCriticalServiceAfter != 0 {
+		out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfter.String()
+	}
+
+	return json.Marshal(out)
+}
+
+func (d *HealthCheckDefinition) UnmarshalJSON(data []byte) error {
+	type Alias HealthCheckDefinition
+	aux := &struct {
+		Interval                       string
+		Timeout                        string
+		DeregisterCriticalServiceAfter string
+		*Alias
+	}{
+		Alias: (*Alias)(d),
+	}
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+
+	// Parse the values into both the time.Duration and old ReadableDuration fields.
+	var err error
+	if aux.Interval != "" {
+		if d.IntervalDuration, err = time.ParseDuration(aux.Interval); err != nil {
+			return err
+		}
+		d.Interval = ReadableDuration(d.IntervalDuration)
+	}
+	if aux.Timeout != "" {
+		if d.TimeoutDuration, err = time.ParseDuration(aux.Timeout); err != nil {
+			return err
+		}
+		d.Timeout = ReadableDuration(d.TimeoutDuration)
+	}
+	if aux.DeregisterCriticalServiceAfter != "" {
+		if d.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(aux.DeregisterCriticalServiceAfter); err != nil {
+			return err
+		}
+		d.DeregisterCriticalServiceAfter = ReadableDuration(d.DeregisterCriticalServiceAfterDuration)
+	}
+	return nil
+}
+
 // HealthChecks is a collection of HealthCheck structs.
 type HealthChecks []*HealthCheck
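
For reference, a minimal sketch (not part of the diff) of what the new duration handling in HealthCheckDefinition does, assuming the vendored package is imported as api; the check values are invented for illustration:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	// The new *Duration fields marshal as duration strings such as "10s".
	def := api.HealthCheckDefinition{
		HTTP:             "http://localhost:8080/health", // placeholder endpoint
		IntervalDuration: 10 * time.Second,
		TimeoutDuration:  2 * time.Second,
	}
	out, _ := json.Marshal(&def)
	fmt.Println(string(out)) // Interval and Timeout appear as "10s" and "2s"

	// Unmarshal fills both the time.Duration fields and the deprecated
	// ReadableDuration fields from the same strings.
	var back api.HealthCheckDefinition
	_ = json.Unmarshal(out, &back)
	fmt.Println(back.IntervalDuration, time.Duration(back.Interval))
}
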
@@ -159,7 +239,15 @@ func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMe
 // for a given service. It can optionally do server-side filtering on a tag
 // or nodes with passing health checks only.
 func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
-	return h.service(service, tag, passingOnly, q, false)
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return h.service(service, tags, passingOnly, q, false)
+}
+
+func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	return h.service(service, tags, passingOnly, q, false)
 }
 
 // Connect is equivalent to Service except that it will only return services
@@ -168,18 +256,28 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions)
 // passingOnly is true only instances where both the service and any proxy are
 // healthy will be returned.
 func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
-	return h.service(service, tag, passingOnly, q, true)
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return h.service(service, tags, passingOnly, q, true)
+}
+
+func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	return h.service(service, tags, passingOnly, q, true)
 }
 
-func (h *Health) service(service, tag string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) {
+func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) {
 	path := "/v1/health/service/" + service
 	if connect {
 		path = "/v1/health/connect/" + service
 	}
 	r := h.c.newRequest("GET", path)
 	r.setQueryOptions(q)
-	if tag != "" {
-		r.params.Set("tag", tag)
+	if len(tags) > 0 {
+		for _, tag := range tags {
+			r.params.Add("tag", tag)
+		}
 	}
 	if passingOnly {
 		r.params.Set(HealthPassing, "1")
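
For reference, a minimal sketch (not part of the diff) of calling the new multi-tag health lookup from the vendored client; the service name and tags are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Each tag becomes its own ?tag= parameter on /v1/health/service/<name>,
	// matching the r.params.Add loop added above.
	entries, _, err := client.Health().ServiceMultipleTags("web", []string{"primary", "v1"}, true, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Node.Node, e.Service.Port)
	}
}
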
160 vendor/github.com/hashicorp/consul/api/kv.go generated vendored
@@ -45,44 +45,6 @@ type KVPair struct {
 // KVPairs is a list of KVPair objects
 type KVPairs []*KVPair
 
-// KVOp constants give possible operations available in a KVTxn.
-type KVOp string
-
-const (
-	KVSet            KVOp = "set"
-	KVDelete         KVOp = "delete"
-	KVDeleteCAS      KVOp = "delete-cas"
-	KVDeleteTree     KVOp = "delete-tree"
-	KVCAS            KVOp = "cas"
-	KVLock           KVOp = "lock"
-	KVUnlock         KVOp = "unlock"
-	KVGet            KVOp = "get"
-	KVGetTree        KVOp = "get-tree"
-	KVCheckSession   KVOp = "check-session"
-	KVCheckIndex     KVOp = "check-index"
-	KVCheckNotExists KVOp = "check-not-exists"
-)
-
-// KVTxnOp defines a single operation inside a transaction.
-type KVTxnOp struct {
-	Verb    KVOp
-	Key     string
-	Value   []byte
-	Flags   uint64
-	Index   uint64
-	Session string
-}
-
-// KVTxnOps defines a set of operations to be performed inside a single
-// transaction.
-type KVTxnOps []*KVTxnOp
-
-// KVTxnResponse has the outcome of a transaction.
-type KVTxnResponse struct {
-	Results []*KVPair
-	Errors  TxnErrors
-}
-
 // KV is used to manipulate the K/V API
 type KV struct {
 	c *Client
@@ -300,121 +262,25 @@ func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOption
 	return res, qm, nil
 }
 
-// TxnOp is the internal format we send to Consul. It's not specific to KV,
-// though currently only KV operations are supported.
-type TxnOp struct {
-	KV *KVTxnOp
-}
-
-// TxnOps is a list of transaction operations.
-type TxnOps []*TxnOp
-
-// TxnResult is the internal format we receive from Consul.
-type TxnResult struct {
-	KV *KVPair
-}
-
-// TxnResults is a list of TxnResult objects.
-type TxnResults []*TxnResult
-
-// TxnError is used to return information about an operation in a transaction.
-type TxnError struct {
-	OpIndex int
-	What    string
-}
-
-// TxnErrors is a list of TxnError objects.
-type TxnErrors []*TxnError
-
-// TxnResponse is the internal format we receive from Consul.
-type TxnResponse struct {
-	Results TxnResults
-	Errors  TxnErrors
-}
-
-// Txn is used to apply multiple KV operations in a single, atomic transaction.
-//
-// Note that Go will perform the required base64 encoding on the values
-// automatically because the type is a byte slice. Transactions are defined as a
-// list of operations to perform, using the KVOp constants and KVTxnOp structure
-// to define operations. If any operation fails, none of the changes are applied
-// to the state store. Note that this hides the internal raw transaction interface
-// and munges the input and output types into KV-specific ones for ease of use.
-// If there are more non-KV operations in the future we may break out a new
-// transaction API client, but it will be easy to keep this KV-specific variant
-// supported.
-//
-// Even though this is generally a write operation, we take a QueryOptions input
-// and return a QueryMeta output. If the transaction contains only read ops, then
-// Consul will fast-path it to a different endpoint internally which supports
-// consistency controls, but not blocking. If there are write operations then
-// the request will always be routed through raft and any consistency settings
-// will be ignored.
-//
-// Here's an example:
-//
-//	ops := KVTxnOps{
-//		&KVTxnOp{
-//			Verb:    KVLock,
-//			Key:     "test/lock",
-//			Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
-//			Value:   []byte("hello"),
-//		},
-//		&KVTxnOp{
-//			Verb: KVGet,
-//			Key:  "another/key",
-//		},
-//	}
-//	ok, response, _, err := kv.Txn(&ops, nil)
-//
-// If there is a problem making the transaction request then an error will be
-// returned. Otherwise, the ok value will be true if the transaction succeeded
-// or false if it was rolled back. The response is a structured return value which
-// will have the outcome of the transaction. Its Results member will have entries
-// for each operation. Deleted keys will have a nil entry in the, and to save
-// space, the Value of each key in the Results will be nil unless the operation
-// is a KVGet. If the transaction was rolled back, the Errors member will have
-// entries referencing the index of the operation that failed along with an error
-// message.
+// The Txn function has been deprecated from the KV object; please see the Txn
+// object for more information about Transactions.
 func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
-	r := k.c.newRequest("PUT", "/v1/txn")
-	r.setQueryOptions(q)
-
-	// Convert into the internal format since this is an all-KV txn.
-	ops := make(TxnOps, 0, len(txn))
-	for _, kvOp := range txn {
-		ops = append(ops, &TxnOp{KV: kvOp})
+	var ops TxnOps
+	for _, op := range txn {
+		ops = append(ops, &TxnOp{KV: op})
 	}
-	r.obj = ops
-	rtt, resp, err := k.c.doRequest(r)
+
+	respOk, txnResp, qm, err := k.c.txn(ops, q)
 	if err != nil {
 		return false, nil, nil, err
 	}
-	defer resp.Body.Close()
 
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
-		var txnResp TxnResponse
-		if err := decodeBody(resp, &txnResp); err != nil {
-			return false, nil, nil, err
-		}
-
-		// Convert from the internal format.
-		kvResp := KVTxnResponse{
-			Errors: txnResp.Errors,
-		}
-		for _, result := range txnResp.Results {
-			kvResp.Results = append(kvResp.Results, result.KV)
-		}
-		return resp.StatusCode == http.StatusOK, &kvResp, qm, nil
-	}
-
-	var buf bytes.Buffer
-	if _, err := io.Copy(&buf, resp.Body); err != nil {
-		return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
-	}
-	return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
+	// Convert from the internal format.
+	kvResp := KVTxnResponse{
+		Errors: txnResp.Errors,
+	}
+	for _, result := range txnResp.Results {
+		kvResp.Results = append(kvResp.Results, result.KV)
+	}
+	return respOk, &kvResp, qm, nil
 }
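
The public KV transaction entry point keeps its old signature and now just wraps each KV operation in a TxnOp before delegating to the shared txn endpoint. A minimal usage sketch (not part of the diff; the key names are invented):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	ops := api.KVTxnOps{
		&api.KVTxnOp{Verb: api.KVSet, Key: "app/config", Value: []byte("v1")},
		&api.KVTxnOp{Verb: api.KVGet, Key: "app/config"},
	}

	// ok is true when the transaction committed, false when it was rolled back.
	ok, resp, _, err := client.KV().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok, len(resp.Results), len(resp.Errors))
}
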
3 vendor/github.com/hashicorp/consul/api/operator_area.go generated vendored
@@ -1,9 +1,10 @@
+package api
+
 // The /v1/operator/area endpoints are available only in Consul Enterprise and
 // interact with its network area subsystem. Network areas are used to link
 // together Consul servers in different Consul datacenters. With network areas,
 // Consul datacenters can be linked together in ways other than a fully-connected
 // mesh, as is required for Consul's WAN.
-package api
 
 import (
 	"net"
3 vendor/github.com/hashicorp/consul/api/operator_keyring.go generated vendored
@@ -16,6 +16,9 @@ type KeyringResponse struct {
 	// Segment has the network segment this request corresponds to.
 	Segment string
 
+	// Messages has information or errors from serf
+	Messages map[string]string `json:",omitempty"`
+
 	// A map of the encryption keys to the number of nodes they're installed on
 	Keys map[string]int
 
5 vendor/github.com/hashicorp/consul/api/prepared_query.go generated vendored
@@ -55,6 +55,11 @@ type ServiceQuery struct {
 	// service entry to be returned.
 	NodeMeta map[string]string
 
+	// ServiceMeta is a map of required service metadata fields. If a key/value
+	// pair is in this map it must be present on the node in order for the
+	// service entry to be returned.
+	ServiceMeta map[string]string
+
 	// Connect if true will filter the prepared query results to only
 	// include Connect-capable services. These include both native services
 	// and proxies for matching services. Note that if a proxy matches,
230 vendor/github.com/hashicorp/consul/api/txn.go generated vendored Normal file
@@ -0,0 +1,230 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+// Txn is used to manipulate the Txn API
+type Txn struct {
+	c *Client
+}
+
+// Txn is used to return a handle to the K/V apis
+func (c *Client) Txn() *Txn {
+	return &Txn{c}
+}
+
+// TxnOp is the internal format we send to Consul. Currently only K/V and
+// check operations are supported.
+type TxnOp struct {
+	KV      *KVTxnOp
+	Node    *NodeTxnOp
+	Service *ServiceTxnOp
+	Check   *CheckTxnOp
+}
+
+// TxnOps is a list of transaction operations.
+type TxnOps []*TxnOp
+
+// TxnResult is the internal format we receive from Consul.
+type TxnResult struct {
+	KV      *KVPair
+	Node    *Node
+	Service *CatalogService
+	Check   *HealthCheck
+}
+
+// TxnResults is a list of TxnResult objects.
+type TxnResults []*TxnResult
+
+// TxnError is used to return information about an operation in a transaction.
+type TxnError struct {
+	OpIndex int
+	What    string
+}
+
+// TxnErrors is a list of TxnError objects.
+type TxnErrors []*TxnError
+
+// TxnResponse is the internal format we receive from Consul.
+type TxnResponse struct {
+	Results TxnResults
+	Errors  TxnErrors
+}
+
+// KVOp constants give possible operations available in a transaction.
+type KVOp string
+
+const (
+	KVSet            KVOp = "set"
+	KVDelete         KVOp = "delete"
+	KVDeleteCAS      KVOp = "delete-cas"
+	KVDeleteTree     KVOp = "delete-tree"
+	KVCAS            KVOp = "cas"
+	KVLock           KVOp = "lock"
+	KVUnlock         KVOp = "unlock"
+	KVGet            KVOp = "get"
+	KVGetTree        KVOp = "get-tree"
+	KVCheckSession   KVOp = "check-session"
+	KVCheckIndex     KVOp = "check-index"
+	KVCheckNotExists KVOp = "check-not-exists"
+)
+
+// KVTxnOp defines a single operation inside a transaction.
+type KVTxnOp struct {
+	Verb    KVOp
+	Key     string
+	Value   []byte
+	Flags   uint64
+	Index   uint64
+	Session string
+}
+
+// KVTxnOps defines a set of operations to be performed inside a single
+// transaction.
+type KVTxnOps []*KVTxnOp
+
+// KVTxnResponse has the outcome of a transaction.
+type KVTxnResponse struct {
+	Results []*KVPair
+	Errors  TxnErrors
+}
+
+// NodeOp constants give possible operations available in a transaction.
+type NodeOp string
+
+const (
+	NodeGet       NodeOp = "get"
+	NodeSet       NodeOp = "set"
+	NodeCAS       NodeOp = "cas"
+	NodeDelete    NodeOp = "delete"
+	NodeDeleteCAS NodeOp = "delete-cas"
+)
+
+// NodeTxnOp defines a single operation inside a transaction.
+type NodeTxnOp struct {
+	Verb NodeOp
+	Node Node
+}
+
+// ServiceOp constants give possible operations available in a transaction.
+type ServiceOp string
+
+const (
+	ServiceGet       ServiceOp = "get"
+	ServiceSet       ServiceOp = "set"
+	ServiceCAS       ServiceOp = "cas"
+	ServiceDelete    ServiceOp = "delete"
+	ServiceDeleteCAS ServiceOp = "delete-cas"
+)
+
+// ServiceTxnOp defines a single operation inside a transaction.
+type ServiceTxnOp struct {
+	Verb    ServiceOp
+	Node    string
+	Service AgentService
+}
+
+// CheckOp constants give possible operations available in a transaction.
+type CheckOp string
+
+const (
+	CheckGet       CheckOp = "get"
+	CheckSet       CheckOp = "set"
+	CheckCAS       CheckOp = "cas"
+	CheckDelete    CheckOp = "delete"
+	CheckDeleteCAS CheckOp = "delete-cas"
+)
+
+// CheckTxnOp defines a single operation inside a transaction.
+type CheckTxnOp struct {
+	Verb  CheckOp
+	Check HealthCheck
+}
+
+// Txn is used to apply multiple Consul operations in a single, atomic transaction.
+//
+// Note that Go will perform the required base64 encoding on the values
+// automatically because the type is a byte slice. Transactions are defined as a
+// list of operations to perform, using the different fields in the TxnOp structure
+// to define operations. If any operation fails, none of the changes are applied
+// to the state store.
+//
+// Even though this is generally a write operation, we take a QueryOptions input
+// and return a QueryMeta output. If the transaction contains only read ops, then
+// Consul will fast-path it to a different endpoint internally which supports
+// consistency controls, but not blocking. If there are write operations then
+// the request will always be routed through raft and any consistency settings
+// will be ignored.
+//
+// Here's an example:
+//
+//	ops := KVTxnOps{
+//		&KVTxnOp{
+//			Verb:    KVLock,
+//			Key:     "test/lock",
+//			Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
+//			Value:   []byte("hello"),
+//		},
+//		&KVTxnOp{
+//			Verb: KVGet,
+//			Key:  "another/key",
+//		},
+//		&CheckTxnOp{
+//			Verb: CheckSet,
+//			HealthCheck: HealthCheck{
+//				Node:    "foo",
+//				CheckID: "redis:a",
+//				Name:    "Redis Health Check",
+//				Status:  "passing",
+//			},
+//		}
+//	}
+//	ok, response, _, err := kv.Txn(&ops, nil)
+//
+// If there is a problem making the transaction request then an error will be
+// returned. Otherwise, the ok value will be true if the transaction succeeded
+// or false if it was rolled back. The response is a structured return value which
+// will have the outcome of the transaction. Its Results member will have entries
+// for each operation. For KV operations, Deleted keys will have a nil entry in the
+// results, and to save space, the Value of each key in the Results will be nil
+// unless the operation is a KVGet. If the transaction was rolled back, the Errors
+// member will have entries referencing the index of the operation that failed
+// along with an error message.
+func (t *Txn) Txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
+	return t.c.txn(txn, q)
+}
+
+func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
+	r := c.newRequest("PUT", "/v1/txn")
+	r.setQueryOptions(q)
+
+	r.obj = txn
+	rtt, resp, err := c.doRequest(r)
+	if err != nil {
+		return false, nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
+		var txnResp TxnResponse
+		if err := decodeBody(resp, &txnResp); err != nil {
+			return false, nil, nil, err
+		}
+
+		return resp.StatusCode == http.StatusOK, &txnResp, qm, nil
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
+}
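
And a sketch (not part of the diff) of the new shared Txn client that the KV path now delegates to, mixing a KV operation with a check operation as the doc comment above describes; the node and check names are illustrative only:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	ops := api.TxnOps{
		&api.TxnOp{KV: &api.KVTxnOp{Verb: api.KVSet, Key: "svc/redis/state", Value: []byte("ok")}},
		&api.TxnOp{Check: &api.CheckTxnOp{
			Verb: api.CheckSet,
			Check: api.HealthCheck{
				Node:    "foo", // must be an existing node for the txn to commit
				CheckID: "redis:a",
				Name:    "Redis Health Check",
				Status:  "passing",
			},
		}},
	}

	// All operations commit atomically; any failure rolls the whole batch back.
	ok, resp, _, err := client.Txn().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok, len(resp.Results), len(resp.Errors))
}
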
10 vendor/vendor.json vendored
@@ -63,7 +63,13 @@
 	{"path":"github.com/containerd/console","checksumSHA1":"IGtuR58l2zmYRcNf8sPDlCSgovE=","origin":"github.com/opencontainers/runc/vendor/github.com/containerd/console","revision":"459bfaec1fc6c17d8bfb12d0a0f69e7e7271ed2a","revisionTime":"2018-08-23T14:46:37Z"},
 	{"path":"github.com/containerd/continuity/pathdriver","checksumSHA1":"GqIrOttKaO7k6HIaHQLPr3cY7rY=","origin":"github.com/docker/docker/vendor/github.com/containerd/continuity/pathdriver","revision":"320063a2ad06a1d8ada61c94c29dbe44e2d87473","revisionTime":"2018-08-16T08:14:46Z"},
 	{"path":"github.com/containerd/fifo","checksumSHA1":"Ur3lVmFp+HTGUzQU+/ZBolKe8FU=","revision":"3d5202aec260678c48179c56f40e6f38a095738c","revisionTime":"2018-03-07T16:51:37Z"},
-	{"path":"github.com/containernetworking/cni/pkg/types","checksumSHA1":"NeAp/3+Hedu9tnMai+LihERPj84=","revision":"5c3c17164270150467498a32c71436c7cd5501be","revisionTime":"2016-06-02T16:00:07Z"},
+	{"path":"github.com/containernetworking/cni/libcni","checksumSHA1":"3CsFN6YsShG9EU2oB9vJIqYTxq4=","revision":"dc953e2fd91f9bc624b03cf9ea3706796bfee920","revisionTime":"2019-06-12T15:24:20Z"},
+	{"path":"github.com/containernetworking/cni/pkg/invoke","checksumSHA1":"Xf2DxXUyjBO9u4LeyDzS38pdL+I=","revision":"dc953e2fd91f9bc624b03cf9ea3706796bfee920","revisionTime":"2019-06-12T15:24:20Z"},
+	{"path":"github.com/containernetworking/cni/pkg/types","checksumSHA1":"Dhi4+8X7U2oVzVkgxPrmLaN8qFI=","revision":"dc953e2fd91f9bc624b03cf9ea3706796bfee920","revisionTime":"2019-06-12T15:24:20Z"},
+	{"path":"github.com/containernetworking/cni/pkg/types/020","checksumSHA1":"6+ng8oaM9SB0TyCE7I7N940IpPY=","revision":"dc953e2fd91f9bc624b03cf9ea3706796bfee920","revisionTime":"2019-06-12T15:24:20Z"},
+	{"path":"github.com/containernetworking/cni/pkg/types/current","checksumSHA1":"X6dNZ3yc3V9ffW9vz4yIyeKXGD0=","revision":"dc953e2fd91f9bc624b03cf9ea3706796bfee920","revisionTime":"2019-06-12T15:24:20Z"},
+	{"path":"github.com/containernetworking/cni/pkg/version","checksumSHA1":"aojwjPoA9XSGB/zVtOGzWvmv/i8=","revision":"dc953e2fd91f9bc624b03cf9ea3706796bfee920","revisionTime":"2019-06-12T15:24:20Z"},
+	{"path":"github.com/containernetworking/plugins/pkg/ns","checksumSHA1":"n3dCDigZOU+eD84Cr4Kg30GO4nI=","revision":"2d6d46d308b2c45a0466324c9e3f1330ab5cacd6","revisionTime":"2019-05-01T19:17:48Z"},
 	{"path":"github.com/coreos/go-semver/semver","checksumSHA1":"97BsbXOiZ8+Kr+LIuZkQFtSj7H4=","revision":"1817cd4bea52af76542157eeabd74b057d1a199e","revisionTime":"2017-06-13T09:22:38Z"},
 	{"path":"github.com/coreos/go-systemd/dbus","checksumSHA1":"/zxxFPYjUB7Wowz33r5AhTDvoz0=","origin":"github.com/opencontainers/runc/vendor/github.com/coreos/go-systemd/dbus","revision":"459bfaec1fc6c17d8bfb12d0a0f69e7e7271ed2a","revisionTime":"2018-08-23T14:46:37Z"},
 	{"path":"github.com/coreos/go-systemd/util","checksumSHA1":"e8qgBHxXbij3RVspqrkeBzMZ564=","origin":"github.com/opencontainers/runc/vendor/github.com/coreos/go-systemd/util","revision":"459bfaec1fc6c17d8bfb12d0a0f69e7e7271ed2a","revisionTime":"2018-08-23T14:46:37Z"},
@@ -169,7 +175,7 @@
 	{"path":"github.com/hashicorp/consul-template/version","checksumSHA1":"85qK+LAbb/oAjvdDqVOLi4tMxZk=","revision":"4058b146979c4feb0551d39b8795a31409b3e6bf","revisionTime":"2019-07-17T18:51:08Z"},
 	{"path":"github.com/hashicorp/consul-template/watch","checksumSHA1":"cJxopvJKg7DBBb8tnDsfmBp5Q8I=","revision":"4058b146979c4feb0551d39b8795a31409b3e6bf","revisionTime":"2019-07-17T18:51:08Z"},
 	{"path":"github.com/hashicorp/consul/agent/consul/autopilot","checksumSHA1":"+I7fgoQlrnTUGW5krqNLadWwtjg=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
-	{"path":"github.com/hashicorp/consul/api","checksumSHA1":"sjEf6EMTPP4NT3m5a0JJmlbLZ8Y=","revision":"39f93f011e591c842acc8053a7f5972aa6e592fd","revisionTime":"2018-07-12T16:33:56Z"},
+	{"path":"github.com/hashicorp/consul/api","checksumSHA1":"7JPBtnIgLkdcJ0ldXMTEnVjNEjA=","revision":"40cec98468b829e5cdaacb0629b3e23a028db688","revisionTime":"2019-05-22T20:19:12Z"},
 	{"path":"github.com/hashicorp/consul/command/flags","checksumSHA1":"soNN4xaHTbeXFgNkZ7cX0gbFXQk=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
 	{"path":"github.com/hashicorp/consul/lib","checksumSHA1":"Nrh9BhiivRyJiuPzttstmq9xl/w=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
 	{"path":"github.com/hashicorp/consul/lib/freeport","checksumSHA1":"E28E4zR1FN2v1Xiq4FUER7KVN9M=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
@@ -136,6 +136,17 @@ driver) but will be removed in a future release.
   generated, but setting this to `false` will use the system's UUID. Before
   Nomad 0.6 the default was to use the system UUID.
 
+- `cni_path` `(string: "/opt/cni/bin")` - Sets the search path that is used for
+  CNI plugin discovery. Multiple paths can be searched using colon-delimited
+  paths.
+
+- `bridge_network_name` `(string: "nomad")` - Sets the name of the bridge to be
+  created by Nomad for allocations running with bridge networking mode on the
+  client.
+
+- `bridge_network_subnet` `(string: "172.26.66.0/23")` - Specifies the subnet
+  which the client will use to allocate IP addresses from.
+
 ### `chroot_env` Parameters
 
 Drivers based on [isolated fork/exec](/docs/drivers/exec.html) implement file
Some files were not shown because too many files have changed in this diff.