build: run gofmt on all go source files
Go 1.19 will forcefully format all your doc strings. To get this out of the way, here is one big commit with all the changes gofmt wants to make.
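For example, Go 1.19's gofmt rewrites "*" bullet items in doc comments into "-" list items (and inserts blank "//" separator lines) so that go/doc renders them as real lists instead of folding them into a single paragraph. A minimal illustrative sketch, not a file from this commit; the package and function are made up, and the comment text mirrors the GetState doc comment touched in the diff below:

// Package gofmtdemo sketches the doc comment rewrite that gofmt performs
// starting with Go 1.19. Everything here is illustrative only.
package gofmtdemo

// GetState returns the task's next state. One of the following is returned:
//   - TaskRestarting - the task should be restarted
//   - TaskNotRestarting - the task has exceeded its restart policy
//   - TaskTerminated - the task terminated successfully and needs no restart
//
// Before Go 1.19 the list above was written with "*" bullets and hand
// indentation; gofmt now normalizes such items to "- ..." list entries
// (with gofmt's own indentation) so go/doc renders them as a real list.
func GetState() string { return "TaskRestarting" }

Running the Go 1.19 gofmt over the tree produces exactly this kind of churn, which is all this commit contains.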
This commit is contained in: parent 91e32eec9b, commit b3ea68948b
@ -82,8 +82,9 @@ func TestAllocDir_BuildAlloc(t *testing.T) {
|
|||
}
|
||||
|
||||
// HACK: This function is copy/pasted from client.testutil to prevent a test
|
||||
// import cycle, due to testutil transitively importing allocdir. This
|
||||
// should be fixed after DriverManager is implemented.
|
||||
//
|
||||
// import cycle, due to testutil transitively importing allocdir. This
|
||||
// should be fixed after DriverManager is implemented.
|
||||
func MountCompatible(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Windows does not support mount")
|
||||
|
|
|
@ -258,8 +258,9 @@ func (t *Tracker) setTaskHealth(healthy, terminal bool) {
|
|||
// returns true if health is propagated and no more health monitoring is needed
|
||||
//
|
||||
// todo: this is currently being shared by watchConsulEvents and watchNomadEvents,
|
||||
// and must be split up if/when we support registering services (and thus checks)
|
||||
// of different providers.
|
||||
//
|
||||
// and must be split up if/when we support registering services (and thus checks)
|
||||
// of different providers.
|
||||
func (t *Tracker) setCheckHealth(healthy bool) bool {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
|
|
|
@ -152,8 +152,8 @@ func newEnvoyBootstrapHook(c *envoyBootstrapHookConfig) *envoyBootstrapHook {
|
|||
}
|
||||
|
||||
// getConsulNamespace will resolve the Consul namespace, choosing between
|
||||
// - agent config (low precedence)
|
||||
// - task group config (high precedence)
|
||||
// - agent config (low precedence)
|
||||
// - task group config (high precedence)
|
||||
func (h *envoyBootstrapHook) getConsulNamespace() string {
|
||||
var namespace string
|
||||
if h.consulConfig.Namespace != "" {
|
||||
|
|
|
@ -181,9 +181,9 @@ func (h *envoyVersionHook) tweakImage(configured string, supported map[string][]
|
|||
// semver sanitizes the envoy version string coming from Consul into the format
|
||||
// used by the Envoy project when publishing images (i.e. proper semver). This
|
||||
// resulting string value does NOT contain the 'v' prefix for 2 reasons:
|
||||
// 1) the version library does not include the 'v'
|
||||
// 2) its plausible unofficial images use the 3 numbers without the prefix for
|
||||
// tagging their own images
|
||||
// 1. the version library does not include the 'v'
|
||||
// 2. its plausible unofficial images use the 3 numbers without the prefix for
|
||||
// tagging their own images
|
||||
func semver(chosen string) (string, error) {
|
||||
v, err := version.NewVersion(chosen)
|
||||
if err != nil {
|
||||
|
|
|
@ -22,11 +22,11 @@ import (
|
|||
// to their requisite plugin manager.
|
||||
//
|
||||
// It provides a few things to a plugin task running inside Nomad. These are:
|
||||
// * A mount to the `csi_plugin.mount_dir` where the plugin will create its csi.sock
|
||||
// * A mount to `local/csi` that node plugins will use to stage volume mounts.
|
||||
// * When the task has started, it starts a loop of attempting to connect to the
|
||||
// plugin, to perform initial fingerprinting of the plugins capabilities before
|
||||
// notifying the plugin manager of the plugin.
|
||||
// - A mount to the `csi_plugin.mount_dir` where the plugin will create its csi.sock
|
||||
// - A mount to `local/csi` that node plugins will use to stage volume mounts.
|
||||
// - When the task has started, it starts a loop of attempting to connect to the
|
||||
// plugin, to perform initial fingerprinting of the plugins capabilities before
|
||||
// notifying the plugin manager of the plugin.
|
||||
type csiPluginSupervisorHook struct {
|
||||
logger hclog.Logger
|
||||
alloc *structs.Allocation
|
||||
|
@ -247,13 +247,13 @@ func (h *csiPluginSupervisorHook) Poststart(_ context.Context, _ *interfaces.Tas
|
|||
// the passed in context is terminated.
|
||||
//
|
||||
// The supervisor works by:
|
||||
// - Initially waiting for the plugin to become available. This loop is expensive
|
||||
// and may do things like create new gRPC Clients on every iteration.
|
||||
// - After receiving an initial healthy status, it will inform the plugin catalog
|
||||
// of the plugin, registering it with the plugins fingerprinted capabilities.
|
||||
// - We then perform a more lightweight check, simply probing the plugin on a less
|
||||
// frequent interval to ensure it is still alive, emitting task events when this
|
||||
// status changes.
|
||||
// - Initially waiting for the plugin to become available. This loop is expensive
|
||||
// and may do things like create new gRPC Clients on every iteration.
|
||||
// - After receiving an initial healthy status, it will inform the plugin catalog
|
||||
// of the plugin, registering it with the plugins fingerprinted capabilities.
|
||||
// - We then perform a more lightweight check, simply probing the plugin on a less
|
||||
// frequent interval to ensure it is still alive, emitting task events when this
|
||||
// status changes.
|
||||
//
|
||||
// Deeper fingerprinting of the plugin is implemented by the csimanager.
|
||||
func (h *csiPluginSupervisorHook) ensureSupervisorLoop(ctx context.Context) {
|
||||
|
|
|
@ -32,10 +32,10 @@ func (h *remoteTaskHook) Name() string {
|
|||
}
|
||||
|
||||
// Prestart performs 2 remote task driver related tasks:
|
||||
// 1. If there is no local handle, see if there is a handle propagated from a
|
||||
// previous alloc to be restored.
|
||||
// 2. If the alloc is lost make sure the task signal is set to detach instead
|
||||
// of kill.
|
||||
// 1. If there is no local handle, see if there is a handle propagated from a
|
||||
// previous alloc to be restored.
|
||||
// 2. If the alloc is lost make sure the task signal is set to detach instead
|
||||
// of kill.
|
||||
func (h *remoteTaskHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
|
||||
if h.tr.getDriverHandle() != nil {
|
||||
// Driver handle already exists so don't try to load remote
|
||||
|
|
|
@ -139,11 +139,11 @@ func (r *RestartTracker) GetCount() int {
|
|||
|
||||
// GetState returns the tasks next state given the set exit code and start
|
||||
// error. One of the following states are returned:
|
||||
// * TaskRestarting - Task should be restarted
|
||||
// * TaskNotRestarting - Task should not be restarted and has exceeded its
|
||||
// restart policy.
|
||||
// * TaskTerminated - Task has terminated successfully and does not need a
|
||||
// restart.
|
||||
// - TaskRestarting - Task should be restarted
|
||||
// - TaskNotRestarting - Task should not be restarted and has exceeded its
|
||||
// restart policy.
|
||||
// - TaskTerminated - Task has terminated successfully and does not need a
|
||||
// restart.
|
||||
//
|
||||
// If TaskRestarting is returned, the duration is how long to wait until
|
||||
// starting the task again.
|
||||
|
|
|
@ -1413,7 +1413,7 @@ func (tr *TaskRunner) UpdateStats(ru *cstructs.TaskResourceUsage) {
|
|||
}
|
||||
}
|
||||
|
||||
//TODO Remove Backwardscompat or use tr.Alloc()?
|
||||
// TODO Remove Backwardscompat or use tr.Alloc()?
|
||||
func (tr *TaskRunner) setGaugeForMemory(ru *cstructs.TaskResourceUsage) {
|
||||
alloc := tr.Alloc()
|
||||
var allocatedMem float32
|
||||
|
@ -1445,7 +1445,7 @@ func (tr *TaskRunner) setGaugeForMemory(ru *cstructs.TaskResourceUsage) {
|
|||
}
|
||||
}
|
||||
|
||||
//TODO Remove Backwardscompat or use tr.Alloc()?
|
||||
// TODO Remove Backwardscompat or use tr.Alloc()?
|
||||
func (tr *TaskRunner) setGaugeForCPU(ru *cstructs.TaskResourceUsage) {
|
||||
alloc := tr.Alloc()
|
||||
var allocatedCPU float32
|
||||
|
|
|
@ -136,7 +136,7 @@ type ClientStatsReporter interface {
|
|||
}
|
||||
|
||||
// AllocRunner is the interface implemented by the core alloc runner.
|
||||
//TODO Create via factory to allow testing Client with mock AllocRunners.
|
||||
// TODO Create via factory to allow testing Client with mock AllocRunners.
|
||||
type AllocRunner interface {
|
||||
Alloc() *structs.Allocation
|
||||
AllocState() *arstate.State
|
||||
|
@ -1252,8 +1252,8 @@ func (c *Client) restoreState() error {
|
|||
// wait until it gets allocs from server to launch them.
|
||||
//
|
||||
// See:
|
||||
// * https://github.com/hashicorp/nomad/pull/6207
|
||||
// * https://github.com/hashicorp/nomad/issues/5984
|
||||
// - https://github.com/hashicorp/nomad/pull/6207
|
||||
// - https://github.com/hashicorp/nomad/issues/5984
|
||||
//
|
||||
// COMPAT(0.12): remove once upgrading from 0.9.5 is no longer supported
|
||||
func (c *Client) hasLocalState(alloc *structs.Allocation) bool {
|
||||
|
|
|
@ -98,7 +98,8 @@ type PluginInfo struct {
|
|||
|
||||
// PluginConnectionInfo is the data required to connect to the plugin.
|
||||
// note: We currently only support Unix Domain Sockets, but this may be expanded
|
||||
// to support other connection modes in the future.
|
||||
//
|
||||
// to support other connection modes in the future.
|
||||
type PluginConnectionInfo struct {
|
||||
// SocketPath is the path to the plugins api socket.
|
||||
SocketPath string
|
||||
|
|
|
@ -1,15 +1,16 @@
|
|||
/**
|
||||
/*
|
||||
*
|
||||
csimanager manages locally running CSI Plugins on a Nomad host, and provides a
|
||||
few different interfaces.
|
||||
|
||||
It provides:
|
||||
- a pluginmanager.PluginManager implementation that is used to fingerprint and
|
||||
heartbeat local node plugins
|
||||
- (TODO) a csimanager.AttachmentWaiter implementation that can be used to wait for an
|
||||
external CSIVolume to be attached to the node before returning
|
||||
- (TODO) a csimanager.NodeController implementation that is used to manage the node-local
|
||||
portions of the CSI specification, and encompassess volume staging/publishing
|
||||
- (TODO) a csimanager.VolumeChecker implementation that can be used by hooks to ensure
|
||||
their volumes are healthy(ish)
|
||||
- a pluginmanager.PluginManager implementation that is used to fingerprint and
|
||||
heartbeat local node plugins
|
||||
- (TODO) a csimanager.AttachmentWaiter implementation that can be used to wait for an
|
||||
external CSIVolume to be attached to the node before returning
|
||||
- (TODO) a csimanager.NodeController implementation that is used to manage the node-local
|
||||
portions of the CSI specification, and encompassess volume staging/publishing
|
||||
- (TODO) a csimanager.VolumeChecker implementation that can be used by hooks to ensure
|
||||
their volumes are healthy(ish)
|
||||
*/
|
||||
package csimanager
|
||||
|
|
|
@ -16,7 +16,6 @@ import (
|
|||
// changed over the life-cycle of the alloc_runner in Nomad 0.8.
|
||||
//
|
||||
// https://github.com/hashicorp/nomad/blob/v0.8.6/client/alloc_runner.go#L146-L153
|
||||
//
|
||||
type allocRunnerMutableState08 struct {
|
||||
// AllocClientStatus does not need to be upgraded as it is computed
|
||||
// from task states.
|
||||
|
|
|
@ -82,15 +82,14 @@ func backupDB(bdb *bbolt.DB, dst string) error {
|
|||
|
||||
// UpgradeAllocs upgrades the boltdb schema. Example 0.8 schema:
|
||||
//
|
||||
// * allocations
|
||||
// * 15d83e8a-74a2-b4da-3f17-ed5c12895ea8
|
||||
// * echo
|
||||
// - simple-all (342 bytes)
|
||||
// - alloc (2827 bytes)
|
||||
// - alloc-dir (166 bytes)
|
||||
// - immutable (15 bytes)
|
||||
// - mutable (1294 bytes)
|
||||
//
|
||||
// allocations
|
||||
// 15d83e8a-74a2-b4da-3f17-ed5c12895ea8
|
||||
// echo
|
||||
// simple-all (342 bytes)
|
||||
// alloc (2827 bytes)
|
||||
// alloc-dir (166 bytes)
|
||||
// immutable (15 bytes)
|
||||
// mutable (1294 bytes)
|
||||
func UpgradeAllocs(logger hclog.Logger, tx *boltdd.Tx) error {
|
||||
btx := tx.BoltTx()
|
||||
allocationsBucket := btx.Bucket(allocationsBucketName)
|
||||
|
|
|
@ -903,7 +903,6 @@ func (b *Builder) SetDriverNetwork(n *drivers.DriverNetwork) *Builder {
|
|||
// Handled by setAlloc -> otherPorts:
|
||||
//
|
||||
// Task: NOMAD_TASK_{IP,PORT,ADDR}_<task>_<label> # Always host values
|
||||
//
|
||||
func buildNetworkEnv(envMap map[string]string, nets structs.Networks, driverNet *drivers.DriverNetwork) {
|
||||
for _, n := range nets {
|
||||
for _, p := range n.ReservedPorts {
|
||||
|
|
|
@ -31,9 +31,9 @@ import (
|
|||
// Config is the configuration for the Nomad agent.
|
||||
//
|
||||
// time.Duration values have two parts:
|
||||
// - a string field tagged with an hcl:"foo" and json:"-"
|
||||
// - a time.Duration field in the same struct and a call to duration
|
||||
// in config_parse.go ParseConfigFile
|
||||
// - a string field tagged with an hcl:"foo" and json:"-"
|
||||
// - a time.Duration field in the same struct and a call to duration
|
||||
// in config_parse.go ParseConfigFile
|
||||
//
|
||||
// All config structs should have an ExtraKeysHCL field to check for
|
||||
// unexpected keys
|
||||
|
|
|
@ -1126,7 +1126,7 @@ func TestConfig_templateNetworkInterface(t *testing.T) {
|
|||
{
|
||||
name: "insignificant whitespace",
|
||||
clientConfig: &ClientConfig{
|
||||
Enabled: true,
|
||||
Enabled: true,
|
||||
NetworkInterface: ` {{GetAllInterfaces | attr "name" }}`,
|
||||
},
|
||||
expectedInterface: iface.Name,
|
||||
|
|
|
@ -57,7 +57,6 @@ func newFakeCheckRestarter(w *checkWatcher, allocID, taskName, checkName string,
|
|||
// watching and is normally fulfilled by a TaskRunner.
|
||||
//
|
||||
// Restarts are recorded in the []restarts field and re-Watch the check.
|
||||
//func (c *fakeCheckRestarter) Restart(source, reason string, failure bool) {
|
||||
func (c *fakeCheckRestarter) Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
|
|
@ -67,8 +67,7 @@ func (ns *NamespacesClient) allowable(now time.Time) bool {
|
|||
|
||||
// List returns a list of Consul Namespaces.
|
||||
//
|
||||
// TODO(shoenig): return empty string instead of "default" when namespaces are not
|
||||
// enabled. (Coming in followup PR).
|
||||
// TODO(shoenig): return empty string instead of "default" when namespaces are not enabled. (Coming in followup PR).
|
||||
func (ns *NamespacesClient) List() ([]string, error) {
|
||||
if !ns.allowable(time.Now()) {
|
||||
// TODO(shoenig): lets return the empty string instead, that way we do not
|
||||
|
|
|
@ -37,9 +37,10 @@ func Namespaces(info Self) bool {
|
|||
// feature returns whether the indicated feature is enabled by Consul and the
|
||||
// associated License.
|
||||
// possible values as of v1.9.5+ent:
|
||||
// Automated Backups, Automated Upgrades, Enhanced Read Scalability,
|
||||
// Network Segments, Redundancy Zone, Advanced Network Federation,
|
||||
// Namespaces, SSO, Audit Logging
|
||||
//
|
||||
// Automated Backups, Automated Upgrades, Enhanced Read Scalability,
|
||||
// Network Segments, Redundancy Zone, Advanced Network Federation,
|
||||
// Namespaces, SSO, Audit Logging
|
||||
func feature(name string, info Self) bool {
|
||||
lic, licOK := info["Stats"]["license"].(map[string]interface{})
|
||||
if !licOK {
|
||||
|
|
|
@ -143,15 +143,15 @@ type ACLsAPI interface {
|
|||
// of a service definition are different from the existing service definition as
|
||||
// known by Consul.
|
||||
//
|
||||
// reason - The syncReason that triggered this synchronization with the consul
|
||||
// agent API.
|
||||
// wanted - Nomad's view of what the service definition is intended to be.
|
||||
// Not nil.
|
||||
// existing - Consul's view (agent, not catalog) of the actual service definition.
|
||||
// Not nil.
|
||||
// sidecar - Consul's view (agent, not catalog) of the service definition of the sidecar
|
||||
// associated with existing that may or may not exist.
|
||||
// May be nil.
|
||||
// reason - The syncReason that triggered this synchronization with the consul
|
||||
// agent API.
|
||||
// wanted - Nomad's view of what the service definition is intended to be.
|
||||
// Not nil.
|
||||
// existing - Consul's view (agent, not catalog) of the actual service definition.
|
||||
// Not nil.
|
||||
// sidecar - Consul's view (agent, not catalog) of the service definition of the sidecar
|
||||
// associated with existing that may or may not exist.
|
||||
// May be nil.
|
||||
func agentServiceUpdateRequired(reason syncReason, wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool {
|
||||
switch reason {
|
||||
case syncPeriodic:
|
||||
|
@ -1499,14 +1499,13 @@ func (c *ServiceClient) removeRegistration(allocID, taskName string) {
|
|||
// {nomadServicePrefix}-{ROLE}-b32(sha1({Service.Name}-{Service.Tags...})
|
||||
// Example Server ID: _nomad-server-fbbk265qn4tmt25nd4ep42tjvmyj3hr4
|
||||
// Example Client ID: _nomad-client-ggnjpgl7yn7rgmvxzilmpvrzzvrszc7l
|
||||
//
|
||||
func makeAgentServiceID(role string, service *structs.Service) string {
|
||||
return fmt.Sprintf("%s-%s-%s", nomadServicePrefix, role, service.Hash(role, "", false))
|
||||
}
|
||||
|
||||
// MakeCheckID creates a unique ID for a check.
|
||||
//
|
||||
// Example Check ID: _nomad-check-434ae42f9a57c5705344974ac38de2aee0ee089d
|
||||
// Example Check ID: _nomad-check-434ae42f9a57c5705344974ac38de2aee0ee089d
|
||||
func MakeCheckID(serviceID string, check *structs.ServiceCheck) string {
|
||||
return fmt.Sprintf("%s%s", nomadCheckPrefix, check.Hash(serviceID))
|
||||
}
|
||||
|
@ -1597,7 +1596,6 @@ func isNomadService(id string) bool {
|
|||
//
|
||||
// {nomadServicePrefix}-executor-{ALLOC_ID}-{Service.Name}-{Service.Tags...}
|
||||
// Example Service ID: _nomad-executor-1234-echo-http-tag1-tag2-tag3
|
||||
//
|
||||
func isOldNomadService(id string) bool {
|
||||
const prefix = nomadServicePrefix + "-executor"
|
||||
return strings.HasPrefix(id, prefix)
|
||||
|
@ -1615,7 +1613,6 @@ const (
|
|||
// It is important not to reference the parent service, which may or may not still
|
||||
// be tracked by Nomad internally.
|
||||
//
|
||||
//
|
||||
// For example if you have a Connect enabled service with the ID:
|
||||
//
|
||||
// _nomad-task-5229c7f8-376b-3ccc-edd9-981e238f7033-cache-redis-cache-db
|
||||
|
@ -1623,7 +1620,6 @@ const (
|
|||
// Consul will create a service for the sidecar proxy with the ID:
|
||||
//
|
||||
// _nomad-task-5229c7f8-376b-3ccc-edd9-981e238f7033-cache-redis-cache-db-sidecar-proxy
|
||||
//
|
||||
func maybeConnectSidecar(id string) bool {
|
||||
return strings.HasSuffix(id, sidecarSuffix)
|
||||
}
|
||||
|
|
|
@ -197,11 +197,11 @@ func (s *HTTPServer) FileCatRequest(resp http.ResponseWriter, req *http.Request)
|
|||
|
||||
// Stream streams the content of a file blocking on EOF.
|
||||
// The parameters are:
|
||||
// * path: path to file to stream.
|
||||
// * follow: A boolean of whether to follow the file, defaults to true.
|
||||
// * offset: The offset to start streaming data at, defaults to zero.
|
||||
// * origin: Either "start" or "end" and defines from where the offset is
|
||||
// applied. Defaults to "start".
|
||||
// - path: path to file to stream.
|
||||
// - follow: A boolean of whether to follow the file, defaults to true.
|
||||
// - offset: The offset to start streaming data at, defaults to zero.
|
||||
// - origin: Either "start" or "end" and defines from where the offset is
|
||||
// applied. Defaults to "start".
|
||||
func (s *HTTPServer) Stream(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var allocID, path string
|
||||
var err error
|
||||
|
@ -255,12 +255,12 @@ func (s *HTTPServer) Stream(resp http.ResponseWriter, req *http.Request) (interf
|
|||
}
|
||||
|
||||
// Logs streams the content of a log blocking on EOF. The parameters are:
|
||||
// * task: task name to stream logs for.
|
||||
// * type: stdout/stderr to stream.
|
||||
// * follow: A boolean of whether to follow the logs.
|
||||
// * offset: The offset to start streaming data at, defaults to zero.
|
||||
// * origin: Either "start" or "end" and defines from where the offset is
|
||||
// applied. Defaults to "start".
|
||||
// - task: task name to stream logs for.
|
||||
// - type: stdout/stderr to stream.
|
||||
// - follow: A boolean of whether to follow the logs.
|
||||
// - offset: The offset to start streaming data at, defaults to zero.
|
||||
// - origin: Either "start" or "end" and defines from where the offset is
|
||||
// applied. Defaults to "start".
|
||||
func (s *HTTPServer) Logs(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var allocID, task, logType string
|
||||
var plain, follow bool
|
||||
|
|
|
@ -3486,6 +3486,7 @@ func TestJobs_ApiJobToStructsJobUpdate(t *testing.T) {
|
|||
}
|
||||
|
||||
// TestJobs_Matching_Resources asserts:
|
||||
//
|
||||
// api.{Default,Min}Resources == structs.{Default,Min}Resources
|
||||
//
|
||||
// While this is an odd place to test that, this is where both are imported,
|
||||
|
|
|
@ -16,7 +16,7 @@ var (
|
|||
}
|
||||
)
|
||||
|
||||
//DataFormatter is a transformer of the data.
|
||||
// DataFormatter is a transformer of the data.
|
||||
type DataFormatter interface {
|
||||
// TransformData should return transformed string data.
|
||||
TransformData(interface{}) (string, error)
|
||||
|
|
|
@ -4,7 +4,7 @@ package docker
|
|||
|
||||
import docker "github.com/fsouza/go-dockerclient"
|
||||
|
||||
//Currently Windows containers don't support host ip in port binding.
|
||||
// Currently Windows containers don't support host ip in port binding.
|
||||
func getPortBinding(ip string, port string) docker.PortBinding {
|
||||
return docker.PortBinding{HostIP: "", HostPort: port}
|
||||
}
|
||||
|
|
|
@ -167,7 +167,8 @@ func TestQemuDriver_User(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
// Verifies getting resource usage stats
|
||||
// Verifies getting resource usage stats
|
||||
//
|
||||
// TODO(preetha) this test needs random sleeps to pass
|
||||
func TestQemuDriver_Stats(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
|
|
@ -136,8 +136,9 @@ func LegacySupported() *Set {
|
|||
//
|
||||
// cap_add takes precedence over cap_drop, enabling the common pattern of dropping
|
||||
// all capabilities, then adding back the desired smaller set. e.g.
|
||||
// cap_drop = ["all"]
|
||||
// cap_add = ["chown", "kill"]
|
||||
//
|
||||
// cap_drop = ["all"]
|
||||
// cap_add = ["chown", "kill"]
|
||||
//
|
||||
// Note that the resulting capability names are upper-cased and prefixed with
|
||||
// "CAP_", which is the expected input for the exec/java driver implementation.
|
||||
|
|
|
@ -89,7 +89,7 @@ func (tc *ConsulTemplateTest) AfterEach(f *framework.F) {
|
|||
// - missing keys block allocations from starting
|
||||
// - key updates trigger re-render
|
||||
// - service updates trigger re-render
|
||||
// - 'noop' vs ''restart' configuration
|
||||
// - 'noop' vs ”restart' configuration
|
||||
func (tc *ConsulTemplateTest) TestTemplateUpdateTriggers(f *framework.F) {
|
||||
|
||||
wc := &e2eutil.WaitConfig{}
|
||||
|
|
|
@ -5,7 +5,7 @@ include conditions under which the suite will run and a list of TestCase
|
|||
implementations to run. TestCases can be implemented with methods that run
|
||||
before/after each and all tests.
|
||||
|
||||
Writing Tests
|
||||
# Writing Tests
|
||||
|
||||
Tests follow a similar patterns as go tests. They are functions that must start
|
||||
with 'Test' and instead of a *testing.T argument, a *framework.F is passed and
|
||||
|
@ -78,7 +78,7 @@ As demonstrated in the previous example, TC also exposes functions that return
|
|||
configured api clients including Nomad, Consul and Vault. If Consul or Vault
|
||||
are not provisioned their respective getter functions will return nil.
|
||||
|
||||
Testify Integration
|
||||
# Testify Integration
|
||||
|
||||
Test cases expose a T() function to fetch the current *testing.T context.
|
||||
While this means the author is able to most other testing libraries,
|
||||
|
@ -93,7 +93,7 @@ yields a testify Require if that flavor is desired.
|
|||
// Or tc.Require().NoError(err)
|
||||
}
|
||||
|
||||
Parallelism
|
||||
# Parallelism
|
||||
|
||||
The test framework honors go test's parallel feature under certain conditions.
|
||||
A TestSuite can be created with the Parallel field set to true to enable
|
||||
|
@ -119,6 +119,5 @@ each test case. The framework.F struct exposes an ID() function that will return
|
|||
string that is unique with in a test. Therefore, multiple tests with in the case
|
||||
can reliably create unique IDs between tests and setup/teardown. The string
|
||||
returned is 8 alpha numeric characters.
|
||||
|
||||
*/
|
||||
package framework
|
||||
|
|
|
@ -16,10 +16,10 @@ type Handler func(c byte) bool
|
|||
// For illustrative purposes, we use `~` in documentation as a shorthand for escaping character.
|
||||
//
|
||||
// If following a new line, reader sees:
|
||||
// * `~~`, only one is emitted
|
||||
// * `~.` (or any character), the handler is invoked with the character.
|
||||
// - `~~`, only one is emitted
|
||||
// - `~.` (or any character), the handler is invoked with the character.
|
||||
// If handler returns true, `~.` will be skipped; otherwise, it's propagated.
|
||||
// * `~` and it's the last character in stream, it's propagated
|
||||
// - `~` and it's the last character in stream, it's propagated
|
||||
//
|
||||
// Appearances of `~` when not preceded by a new line are propagated unmodified.
|
||||
func NewReader(r io.Reader, c byte, h Handler) io.Reader {
|
||||
|
|
|
@ -40,12 +40,8 @@ func (b *HCLParser) WithVars(vars map[string]cty.Value) *HCLParser {
|
|||
// out parameter should be a golang reference to a driver specific TaskConfig reference.
|
||||
// The function terminates and reports errors if any is found during conversion.
|
||||
//
|
||||
// Sample invocation would be
|
||||
//
|
||||
// ```
|
||||
// var tc *TaskConfig
|
||||
// hclutils.NewConfigParser(spec).ParseJson(t, configString, &tc)
|
||||
// ```
|
||||
// var tc *TaskConfig
|
||||
// hclutils.NewConfigParser(spec).ParseJson(t, configString, &tc)
|
||||
func (b *HCLParser) ParseJson(t *testing.T, configStr string, out interface{}) {
|
||||
config := JsonConfigToInterface(t, configStr)
|
||||
b.parse(t, config, out)
|
||||
|
@ -55,7 +51,7 @@ func (b *HCLParser) ParseJson(t *testing.T, configStr string, out interface{}) {
|
|||
// out parameter should be a golang reference to a driver specific TaskConfig reference.
|
||||
// The function terminates and reports errors if any is found during conversion.
|
||||
//
|
||||
// Sample invocation would be
|
||||
// # Sample invocation would be
|
||||
//
|
||||
// ```
|
||||
// var tc *TaskConfig
|
||||
|
|
|
@ -22,7 +22,6 @@ import (
|
|||
// basedir is used with file functions and allows a user to reference a file
|
||||
// using local path. Usually basedir is the directory in which the config file
|
||||
// is located
|
||||
//
|
||||
func Functions(basedir string, allowFS bool) map[string]function.Function {
|
||||
funcs := map[string]function.Function{
|
||||
"abs": stdlib.AbsoluteFunc,
|
||||
|
|
|
@ -341,9 +341,10 @@ func decodeTask(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagno
|
|||
//
|
||||
// ```hcl
|
||||
// # block assignment
|
||||
// env {
|
||||
// ENV = "production"
|
||||
// }
|
||||
//
|
||||
// env {
|
||||
// ENV = "production"
|
||||
// }
|
||||
//
|
||||
// # as attribute
|
||||
// env = { ENV: "production" }
|
||||
|
@ -357,7 +358,6 @@ func decodeTask(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagno
|
|||
// found map, the remaining body and diagnostics. If the named field is found
|
||||
// with block syntax, it returns a nil map, and caller falls back to reading
|
||||
// with block syntax.
|
||||
//
|
||||
func decodeAsAttribute(body hcl.Body, ctx *hcl.EvalContext, name string) (map[string]string, hcl.Body, hcl.Diagnostics) {
|
||||
b, remain, diags := body.PartialContent(&hcl.BodySchema{
|
||||
Attributes: []hcl.AttributeSchema{
|
||||
|
|
|
@ -14,17 +14,21 @@ import (
|
|||
// More concretely, it changes the following:
|
||||
//
|
||||
// ```
|
||||
// config {
|
||||
// meta { ... }
|
||||
// }
|
||||
//
|
||||
// config {
|
||||
// meta { ... }
|
||||
// }
|
||||
//
|
||||
// ```
|
||||
//
|
||||
// to
|
||||
//
|
||||
// ```
|
||||
// config {
|
||||
// meta = { ... } # <- attribute now
|
||||
// }
|
||||
//
|
||||
// config {
|
||||
// meta = { ... } # <- attribute now
|
||||
// }
|
||||
//
|
||||
// ```
|
||||
func BlocksAsAttrs(body hcl.Body) hcl.Body {
|
||||
if hclb, ok := body.(*hcls.Body); ok {
|
||||
|
|
|
@ -1033,7 +1033,7 @@ func TestParseServiceCheck(t *testing.T) {
|
|||
|
||||
func TestWaitConfig(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
hclBytes, err := os.ReadFile("test-fixtures/template-wait-config.hcl")
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
|
@ -78,7 +78,6 @@ func (v *Variable) GoString() string {
|
|||
|
||||
// validateValue ensures that all of the configured custom validations for a
|
||||
// variable value are passing.
|
||||
//
|
||||
func (v *Variable) validateValue(val VariableAssignment) (diags hcl.Diagnostics) {
|
||||
if len(v.Validations) == 0 {
|
||||
return nil
|
||||
|
|
|
@ -70,7 +70,7 @@ func TestWriter_BlockingWrite(t *testing.T) {
|
|||
|
||||
func TestWriter_CloseClose(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
require := require.New(t)
|
||||
w := New(ioutil.Discard, 64)
|
||||
require.NoError(w.Close())
|
||||
|
|
|
@ -76,7 +76,7 @@ func TestDelayHeap_PushPop(t *testing.T) {
|
|||
|
||||
func TestDelayHeap_Update(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
delayHeap := NewDelayHeap()
|
||||
now := time.Now()
|
||||
require := require.New(t)
|
||||
|
|
|
@ -461,7 +461,7 @@ func TestConsulPolicy_namespaceCheck(t *testing.T) {
|
|||
|
||||
func TestConsulPolicy_allowKeystoreRead(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
require.False(t, new(ConsulPolicy).allowsKeystoreRead(true, "default"))
|
||||
})
|
||||
|
|
|
@ -46,7 +46,7 @@ func assertDrainingNode(t *testing.T, dn *drainingNode, isDone bool, remaining,
|
|||
|
||||
func TestDrainingNode_Table(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
isDone bool
|
||||
|
|
|
@ -549,7 +549,7 @@ func TestHandeTaskGroup_Table(t *testing.T) {
|
|||
|
||||
func testHandleTaskGroup(t *testing.T, tc handleTaskGroupTestCase) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
require := require.New(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
|
|
|
@ -200,9 +200,9 @@ func (b *EvalBroker) Enqueue(eval *structs.Evaluation) {
|
|||
// enqueued. The evaluation is handled in one of the following ways:
|
||||
// * Evaluation not outstanding: Process as a normal Enqueue
|
||||
// * Evaluation outstanding: Do not allow the evaluation to be dequeued til:
|
||||
// * Ack received: Unblock the evaluation allowing it to be dequeued
|
||||
// * Nack received: Drop the evaluation as it was created as a result of a
|
||||
// scheduler run that was Nack'd
|
||||
// - Ack received: Unblock the evaluation allowing it to be dequeued
|
||||
// - Nack received: Drop the evaluation as it was created as a result of a
|
||||
// scheduler run that was Nack'd
|
||||
func (b *EvalBroker) EnqueueAll(evals map[*structs.Evaluation]string) {
|
||||
// The lock needs to be held until all evaluations are enqueued. This is so
|
||||
// that when Dequeue operations are unblocked they will pick the highest
|
||||
|
|
|
@ -51,9 +51,9 @@ func (jobExposeCheckHook) Mutate(job *structs.Job) (_ *structs.Job, warnings []e
|
|||
}
|
||||
|
||||
// Validate will ensure:
|
||||
// - The job contains valid network configuration for each task group in which
|
||||
// an expose path is configured. The network must be of type bridge mode.
|
||||
// - The check Expose field is configured only for connect-enabled group-services.
|
||||
// - The job contains valid network configuration for each task group in which
|
||||
// an expose path is configured. The network must be of type bridge mode.
|
||||
// - The check Expose field is configured only for connect-enabled group-services.
|
||||
func (jobExposeCheckHook) Validate(job *structs.Job) (warnings []error, err error) {
|
||||
for _, tg := range job.TaskGroups {
|
||||
// Make sure any group that contains a group-service that enables expose
|
||||
|
|
|
@ -317,7 +317,7 @@ func TestJobExposeCheckHook_exposePathForCheck(t *testing.T) {
|
|||
Name: "group1",
|
||||
Services: []*structs.Service{s},
|
||||
Networks: structs.Networks{{
|
||||
Mode: "bridge",
|
||||
Mode: "bridge",
|
||||
DynamicPorts: []structs.Port{
|
||||
// service declares "sPort", but does not exist
|
||||
},
|
||||
|
|
|
@ -4959,7 +4959,6 @@ func TestJobEndpoint_ListJobs(t *testing.T) {
|
|||
|
||||
// TestJobEndpoint_ListJobs_AllNamespaces_OSS asserts that server
|
||||
// returns all jobs across namespace.
|
||||
//
|
||||
func TestJobEndpoint_ListJobs_AllNamespaces_OSS(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
|
|
@ -1764,10 +1764,10 @@ func (s *Server) generateClusterID() (string, error) {
|
|||
//
|
||||
// The function checks the server is the leader and uses a mutex to avoid any
|
||||
// potential timings problems. Consider the following timings:
|
||||
// - operator updates the configuration via the API
|
||||
// - the RPC handler applies the change via Raft
|
||||
// - leadership transitions with write barrier
|
||||
// - the RPC handler call this function to enact the change
|
||||
// - operator updates the configuration via the API
|
||||
// - the RPC handler applies the change via Raft
|
||||
// - leadership transitions with write barrier
|
||||
// - the RPC handler call this function to enact the change
|
||||
//
|
||||
// The mutex also protects against a situation where leadership is revoked
|
||||
// while this function is being called. Ensuring the correct series of actions
|
||||
|
|
|
@ -92,7 +92,6 @@ func newPlanner(s *Server) (*planner, error) {
|
|||
// in anticipation of this case we cannot respond to the plan until
|
||||
// the Raft log is updated. This means our schedulers will stall,
|
||||
// but there are many of those and only a single plan verifier.
|
||||
//
|
||||
func (p *planner) planApply() {
|
||||
// planIndexCh is used to track an outstanding application and receive
|
||||
// its committed index while snap holds an optimistic state which
|
||||
|
|
|
@ -186,8 +186,9 @@ func (s *Search) getFuzzyMatches(iter memdb.ResultIterator, text string) (map[st
|
|||
}
|
||||
|
||||
// fuzzyIndex returns the index of text in name, ignoring case.
|
||||
// text is assumed to be lower case.
|
||||
// -1 is returned if name does not contain text.
|
||||
//
|
||||
// text is assumed to be lower case.
|
||||
// -1 is returned if name does not contain text.
|
||||
func fuzzyIndex(name, text string) int {
|
||||
lower := strings.ToLower(name)
|
||||
return strings.Index(lower, text)
|
||||
|
@ -238,12 +239,12 @@ func (s *Search) fuzzyMatchSingle(raw interface{}, text string) (structs.Context
|
|||
// of matchable Context. Results are categorized by Context and paired with their
|
||||
// score, but are unsorted.
|
||||
//
|
||||
// job.name
|
||||
// job|group.name
|
||||
// job|group|service.name
|
||||
// job|group|task.name
|
||||
// job|group|task|service.name
|
||||
// job|group|task|driver.{image,command,class}
|
||||
// job.name
|
||||
// job|group.name
|
||||
// job|group|service.name
|
||||
// job|group|task.name
|
||||
// job|group|task|service.name
|
||||
// job|group|task|driver.{image,command,class}
|
||||
func (*Search) fuzzyMatchesJob(j *structs.Job, text string) map[structs.Context][]fuzzyMatch {
|
||||
sm := make(map[structs.Context][]fuzzyMatch)
|
||||
ns := j.Namespace
|
||||
|
@ -654,17 +655,20 @@ func sufficientSearchPerms(aclObj *acl.ACL, namespace string, context structs.Co
|
|||
// results are limited to policies of the provided ACL token.
|
||||
//
|
||||
// These types are limited to prefix UUID searching:
|
||||
// Evals, Deployments, ScalingPolicies, Volumes
|
||||
//
|
||||
// Evals, Deployments, ScalingPolicies, Volumes
|
||||
//
|
||||
// These types are available for fuzzy searching:
|
||||
// Nodes, Namespaces, Jobs, Allocs, Plugins
|
||||
//
|
||||
// Nodes, Namespaces, Jobs, Allocs, Plugins
|
||||
//
|
||||
// Jobs are a special case that expand into multiple types, and whose return
|
||||
// values include Scope which is a descending list of IDs of parent objects,
|
||||
// starting with the Namespace. The subtypes of jobs are fuzzy searchable.
|
||||
//
|
||||
// The Jobs type expands into these sub types:
|
||||
// Jobs, Groups, Services, Tasks, Images, Commands, Classes
|
||||
//
|
||||
// Jobs, Groups, Services, Tasks, Images, Commands, Classes
|
||||
//
|
||||
// The results are in descending order starting with strongest match, per Context type.
|
||||
func (s *Search) FuzzySearch(args *structs.FuzzySearchRequest, reply *structs.FuzzySearchResponse) error {
|
||||
|
|
|
@ -33,27 +33,28 @@ type CreateIndexGetter interface {
|
|||
// StructsTokenizerOptions is the configuration provided to a StructsTokenizer.
|
||||
//
|
||||
// These are some of the common use cases:
|
||||
// - Structs that can be uniquely identified with only its own ID:
|
||||
//
|
||||
// StructsTokenizerOptions {
|
||||
// WithID: true,
|
||||
// }
|
||||
// Structs that can be uniquely identified with only its own ID:
|
||||
//
|
||||
// - Structs that are only unique within their namespace:
|
||||
// StructsTokenizerOptions {
|
||||
// WithID: true,
|
||||
// }
|
||||
//
|
||||
// StructsTokenizerOptions {
|
||||
// WithID: true,
|
||||
// WithNamespace: true,
|
||||
// }
|
||||
// Structs that are only unique within their namespace:
|
||||
//
|
||||
// - Structs that can be sorted by their create index should also set
|
||||
// `WithCreateIndex` to `true` along with the other options:
|
||||
// StructsTokenizerOptions {
|
||||
// WithID: true,
|
||||
// WithNamespace: true,
|
||||
// }
|
||||
//
|
||||
// StructsTokenizerOptions {
|
||||
// WithID: true,
|
||||
// WithNamespace: true,
|
||||
// WithCreateIndex: true,
|
||||
// }
|
||||
// Structs that can be sorted by their create index should also set
|
||||
// `WithCreateIndex` to `true` along with the other options:
|
||||
//
|
||||
// StructsTokenizerOptions {
|
||||
// WithID: true,
|
||||
// WithNamespace: true,
|
||||
// WithCreateIndex: true,
|
||||
// }
|
||||
type StructsTokenizerOptions struct {
|
||||
WithCreateIndex bool
|
||||
WithNamespace bool
|
||||
|
|
|
@ -190,7 +190,7 @@ func TestFilter_NamespaceAll(t *testing.T) {
|
|||
|
||||
func TestFilter_FilterKeys(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
events := make([]structs.Event, 0, 5)
|
||||
events = append(events, structs.Event{Topic: "Test", Key: "One", FilterKeys: []string{"extra-key"}}, structs.Event{Topic: "Test", Key: "Two"}, structs.Event{Topic: "Test", Key: "Two"})
|
||||
|
||||
|
|
|
@ -16,11 +16,11 @@ import (
|
|||
//
|
||||
// - Register services and their checks with Consul
|
||||
//
|
||||
// - Bootstrap this Nomad Client with the list of Nomad Servers registered
|
||||
// with Consul
|
||||
// - Bootstrap this Nomad Client with the list of Nomad Servers registered
|
||||
// with Consul
|
||||
//
|
||||
// - Establish how this Nomad Client will resolve Envoy Connect Sidecar
|
||||
// images.
|
||||
// - Establish how this Nomad Client will resolve Envoy Connect Sidecar
|
||||
// images.
|
||||
//
|
||||
// Both the Agent and the executor need to be able to import ConsulConfig.
|
||||
type ConsulConfig struct {
|
||||
|
|
|
@ -204,7 +204,7 @@ func TestTLS_Copy(t *testing.T) {
|
|||
// object
|
||||
func TestTLS_GetKeyloader(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
require := require.New(t)
|
||||
a := &TLSConfig{}
|
||||
require.NotNil(a.GetKeyLoader())
|
||||
|
|
|
@ -211,7 +211,7 @@ func TestDeviceAccounter_AddReserved(t *testing.T) {
|
|||
// Test that collision detection works
|
||||
func TestDeviceAccounter_AddReserved_Collision(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
require := require.New(t)
|
||||
n := devNode()
|
||||
d := NewDeviceAccounter(n)
|
||||
|
|
|
@ -12,29 +12,29 @@ import (
|
|||
// "zone", "rack", etc.
|
||||
//
|
||||
// According to CSI, there are a few requirements for the keys within this map:
|
||||
// - Valid keys have two segments: an OPTIONAL prefix and name, separated
|
||||
// by a slash (/), for example: "com.company.example/zone".
|
||||
// - The key name segment is REQUIRED. The prefix is OPTIONAL.
|
||||
// - The key name MUST be 63 characters or less, begin and end with an
|
||||
// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
|
||||
// underscores (_), dots (.), or alphanumerics in between, for example
|
||||
// "zone".
|
||||
// - The key prefix MUST be 63 characters or less, begin and end with a
|
||||
// lower-case alphanumeric character ([a-z0-9]), contain only
|
||||
// dashes (-), dots (.), or lower-case alphanumerics in between, and
|
||||
// follow domain name notation format
|
||||
// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
|
||||
// - The key prefix SHOULD include the plugin's host company name and/or
|
||||
// the plugin name, to minimize the possibility of collisions with keys
|
||||
// from other plugins.
|
||||
// - If a key prefix is specified, it MUST be identical across all
|
||||
// topology keys returned by the SP (across all RPCs).
|
||||
// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
|
||||
// MUST not both exist.
|
||||
// - Each value (topological segment) MUST contain 1 or more strings.
|
||||
// - Each string MUST be 63 characters or less and begin and end with an
|
||||
// alphanumeric character with '-', '_', '.', or alphanumerics in
|
||||
// between.
|
||||
// - Valid keys have two segments: an OPTIONAL prefix and name, separated
|
||||
// by a slash (/), for example: "com.company.example/zone".
|
||||
// - The key name segment is REQUIRED. The prefix is OPTIONAL.
|
||||
// - The key name MUST be 63 characters or less, begin and end with an
|
||||
// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
|
||||
// underscores (_), dots (.), or alphanumerics in between, for example
|
||||
// "zone".
|
||||
// - The key prefix MUST be 63 characters or less, begin and end with a
|
||||
// lower-case alphanumeric character ([a-z0-9]), contain only
|
||||
// dashes (-), dots (.), or lower-case alphanumerics in between, and
|
||||
// follow domain name notation format
|
||||
// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
|
||||
// - The key prefix SHOULD include the plugin's host company name and/or
|
||||
// the plugin name, to minimize the possibility of collisions with keys
|
||||
// from other plugins.
|
||||
// - If a key prefix is specified, it MUST be identical across all
|
||||
// topology keys returned by the SP (across all RPCs).
|
||||
// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
|
||||
// MUST not both exist.
|
||||
// - Each value (topological segment) MUST contain 1 or more strings.
|
||||
// - Each string MUST be 63 characters or less and begin and end with an
|
||||
// alphanumeric character with '-', '_', '.', or alphanumerics in
|
||||
// between.
|
||||
//
|
||||
// However, Nomad applies lighter restrictions to these, as they are already
|
||||
// only referenced by plugin within the scheduler and as such collisions and
|
||||
|
|
|
@ -217,7 +217,7 @@ func TestNode_ComputedClass_Meta(t *testing.T) {
|
|||
|
||||
func TestNode_EscapedConstraints(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
// Non-escaped constraints
|
||||
ne1 := &Constraint{
|
||||
LTarget: "${attr.kernel.name}",
|
||||
|
|
|
@ -94,7 +94,8 @@ type SearchRequest struct {
|
|||
// ID.
|
||||
//
|
||||
// e.g. A Task-level service would have scope like,
|
||||
// ["<namespace>", "<job>", "<group>", "<task>"]
|
||||
//
|
||||
// ["<namespace>", "<job>", "<group>", "<task>"]
|
||||
type FuzzyMatch struct {
|
||||
ID string // ID is UUID or Name of object
|
||||
Scope []string `json:",omitempty"` // IDs of parent objects
|
||||
|
|
|
@ -225,7 +225,7 @@ func TestPeriodicConfig_DSTChange_Transitions(t *testing.T) {
|
|||
|
||||
func TestPeriodConfig_DSTSprintForward_Property(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
|
||||
locName := "America/Los_Angeles"
|
||||
loc, err := time.LoadLocation(locName)
|
||||
require.NoError(t, err)
|
||||
|
|
|
@ -605,9 +605,10 @@ func (v *vaultClient) renewalLoop() {
|
|||
//
|
||||
// It should increase the amount of backoff each time, with the following rules:
|
||||
//
|
||||
// * If token expired already despite earlier renewal attempts,
|
||||
// back off for 1 minute + jitter
|
||||
// * If we have an existing authentication that is going to expire,
|
||||
// - If token expired already despite earlier renewal attempts,
|
||||
// back off for 1 minute + jitter
|
||||
// - If we have an existing authentication that is going to expire,
|
||||
//
|
||||
// never back off more than half of the amount of time remaining
|
||||
// until expiration (with 5s floor)
|
||||
// * Never back off more than 30 seconds multiplied by a random
|
||||
|
@ -1241,13 +1242,13 @@ func (v *vaultClient) parallelRevoke(ctx context.Context, accessors []*structs.V
|
|||
// and purge at any given time.
|
||||
//
|
||||
// Limiting the revocation batch size is beneficial for few reasons:
|
||||
// * A single revocation failure of any entry in batch result into retrying the whole batch;
|
||||
// the larger the batch is the higher likelihood of such failure
|
||||
// * Smaller batch sizes result into more co-operativeness: provides hooks for
|
||||
// reconsidering token TTL and leadership steps down.
|
||||
// * Batches limit the size of the Raft message purging tokens. Due to bugs
|
||||
// pre-0.11.3, expired tokens were not properly purged, so users upgrading from
|
||||
// older versions may have huge numbers (millions) of expired tokens to purge.
|
||||
// - A single revocation failure of any entry in batch result into retrying the whole batch;
|
||||
// the larger the batch is the higher likelihood of such failure
|
||||
// - Smaller batch sizes result into more co-operativeness: provides hooks for
|
||||
// reconsidering token TTL and leadership steps down.
|
||||
// - Batches limit the size of the Raft message purging tokens. Due to bugs
|
||||
// pre-0.11.3, expired tokens were not properly purged, so users upgrading from
|
||||
// older versions may have huge numbers (millions) of expired tokens to purge.
|
||||
const maxVaultRevokeBatchSize = 1000
|
||||
|
||||
// revokeDaemon should be called in a goroutine and is used to periodically
|
||||
|
|
|
@ -35,29 +35,29 @@ type NodeGetInfoResponse struct {
|
|||
// "zone", "rack", etc.
|
||||
//
|
||||
// According to CSI, there are a few requirements for the keys within this map:
|
||||
// - Valid keys have two segments: an OPTIONAL prefix and name, separated
|
||||
// by a slash (/), for example: "com.company.example/zone".
|
||||
// - The key name segment is REQUIRED. The prefix is OPTIONAL.
|
||||
// - The key name MUST be 63 characters or less, begin and end with an
|
||||
// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
|
||||
// underscores (_), dots (.), or alphanumerics in between, for example
|
||||
// "zone".
|
||||
// - The key prefix MUST be 63 characters or less, begin and end with a
|
||||
// lower-case alphanumeric character ([a-z0-9]), contain only
|
||||
// dashes (-), dots (.), or lower-case alphanumerics in between, and
|
||||
// follow domain name notation format
|
||||
// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
|
||||
// - The key prefix SHOULD include the plugin's host company name and/or
|
||||
// the plugin name, to minimize the possibility of collisions with keys
|
||||
// from other plugins.
|
||||
// - If a key prefix is specified, it MUST be identical across all
|
||||
// topology keys returned by the SP (across all RPCs).
|
||||
// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
|
||||
// MUST not both exist.
|
||||
// - Each value (topological segment) MUST contain 1 or more strings.
|
||||
// - Each string MUST be 63 characters or less and begin and end with an
|
||||
// alphanumeric character with '-', '_', '.', or alphanumerics in
|
||||
// between.
|
||||
// - Valid keys have two segments: an OPTIONAL prefix and name, separated
|
||||
// by a slash (/), for example: "com.company.example/zone".
|
||||
// - The key name segment is REQUIRED. The prefix is OPTIONAL.
|
||||
// - The key name MUST be 63 characters or less, begin and end with an
|
||||
// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
|
||||
// underscores (_), dots (.), or alphanumerics in between, for example
|
||||
// "zone".
|
||||
// - The key prefix MUST be 63 characters or less, begin and end with a
|
||||
// lower-case alphanumeric character ([a-z0-9]), contain only
|
||||
// dashes (-), dots (.), or lower-case alphanumerics in between, and
|
||||
// follow domain name notation format
|
||||
// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
|
||||
// - The key prefix SHOULD include the plugin's host company name and/or
|
||||
// the plugin name, to minimize the possibility of collisions with keys
|
||||
// from other plugins.
|
||||
// - If a key prefix is specified, it MUST be identical across all
|
||||
// topology keys returned by the SP (across all RPCs).
|
||||
// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
|
||||
// MUST not both exist.
|
||||
// - Each value (topological segment) MUST contain 1 or more strings.
|
||||
// - Each string MUST be 63 characters or less and begin and end with an
|
||||
// alphanumeric character with '-', '_', '.', or alphanumerics in
|
||||
// between.
|
||||
type Topology struct {
|
||||
Segments map[string]string
|
||||
}
|
||||
|
|
|
@ -130,7 +130,7 @@ func (c *Client) PluginProbe(ctx context.Context) (bool, error) {
|
|||
|
||||
// PluginGetInfo is used to return semantic data about the plugin.
|
||||
// Response:
|
||||
// - string: name, the name of the plugin in domain notation format.
|
||||
// - string: name, the name of the plugin in domain notation format.
|
||||
func (c *Client) PluginGetInfo(ctx context.Context) (string, string, error) {
|
||||
c.Mu.Lock()
|
||||
defer c.Mu.Unlock()
|
||||
|
|
|
@ -458,10 +458,10 @@ type FingerprintResponse struct {
|
|||
Attributes map[string]*proto1.Attribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// Health is used to determine the state of the health the driver is in.
|
||||
// Health can be one of the following states:
|
||||
// * UNDETECTED: driver dependencies are not met and the driver can not start
|
||||
// * UNHEALTHY: driver dependencies are met but the driver is unable to
|
||||
// perform operations due to some other problem
|
||||
// * HEALTHY: driver is able to perform all operations
|
||||
// - UNDETECTED: driver dependencies are not met and the driver can not start
|
||||
// - UNHEALTHY: driver dependencies are met but the driver is unable to
|
||||
// perform operations due to some other problem
|
||||
// - HEALTHY: driver is able to perform all operations
|
||||
Health FingerprintResponse_HealthState `protobuf:"varint,2,opt,name=health,proto3,enum=hashicorp.nomad.plugins.drivers.proto.FingerprintResponse_HealthState" json:"health,omitempty"`
|
||||
// HealthDescription is a human readable message describing the current
|
||||
// state of driver health
|
||||
|
@ -641,9 +641,9 @@ type StartTaskResponse struct {
|
|||
// Result is set depending on the type of error that occurred while starting
|
||||
// a task:
|
||||
//
|
||||
// * SUCCESS: No error occurred, handle is set
|
||||
// * RETRY: An error occurred, but is recoverable and the RPC should be retried
|
||||
// * FATAL: A fatal error occurred and is not likely to succeed if retried
|
||||
// - SUCCESS: No error occurred, handle is set
|
||||
// - RETRY: An error occurred, but is recoverable and the RPC should be retried
|
||||
// - FATAL: A fatal error occurred and is not likely to succeed if retried
|
||||
//
|
||||
// If Result is not successful, the DriverErrorMsg will be set.
|
||||
Result StartTaskResponse_Result `protobuf:"varint,1,opt,name=result,proto3,enum=hashicorp.nomad.plugins.drivers.proto.StartTaskResponse_Result" json:"result,omitempty"`
|
||||
|
@ -2842,9 +2842,9 @@ type Device struct {
|
|||
HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"`
|
||||
// CgroupPermissions defines the Cgroup permissions of the device.
|
||||
// One or more of the following options can be set:
|
||||
// * r - allows the task to read from the specified device.
|
||||
// * w - allows the task to write to the specified device.
|
||||
// * m - allows the task to create device files that do not yet exist.
|
||||
// - r - allows the task to read from the specified device.
|
||||
// - w - allows the task to write to the specified device.
|
||||
// - m - allows the task to create device files that do not yet exist.
|
||||
//
|
||||
// Example: "rw"
|
||||
CgroupPermissions string `protobuf:"bytes,3,opt,name=cgroup_permissions,json=cgroupPermissions,proto3" json:"cgroup_permissions,omitempty"`
|
||||
|
|
|
@ -74,6 +74,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|||
// Spec defines the available specification types.
|
||||
type Spec struct {
|
||||
// Types that are valid to be assigned to Block:
|
||||
//
|
||||
// *Spec_Object
|
||||
// *Spec_Array
|
||||
// *Spec_Attr
|
||||
|
@ -273,18 +274,18 @@ func (*Spec) XXX_OneofWrappers() []interface{} {
|
|||
}
|
||||
|
||||
// Attr spec type reads the value of an attribute in the current body
|
||||
//and returns that value as its result. It also creates validation constraints
|
||||
//for the given attribute name and its value.
|
||||
// and returns that value as its result. It also creates validation constraints
|
||||
// for the given attribute name and its value.
|
||||
//
|
||||
//```hcl
|
||||
//Attr {
|
||||
// ```hcl
|
||||
// Attr {
|
||||
// name = "document_root"
|
||||
// type = string
|
||||
// required = true
|
||||
//}
|
||||
//```
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
//`Attr` spec blocks accept the following arguments:
|
||||
// `Attr` spec blocks accept the following arguments:
|
||||
//
|
||||
// `name` (required) - The attribute name to expect within the HCL input file.
|
||||
// This may be omitted when a default name selector is created by a parent
|
||||
|
@ -299,7 +300,7 @@ func (*Spec) XXX_OneofWrappers() []interface{} {
|
|||
// `required` (optional) - If set to `true`, `hcldec` will produce an error
|
||||
// if a value is not provided for the source attribute.
|
||||
//
|
||||
//`Attr` is a leaf spec type, so no nested spec blocks are permitted.
|
||||
// `Attr` is a leaf spec type, so no nested spec blocks are permitted.
|
||||
type Attr struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
|
||||
|
@ -356,11 +357,11 @@ func (m *Attr) GetRequired() bool {
}

// Block spec type applies one nested spec block to the contents of a
//block within the current body and returns the result of that spec. It also
//creates validation constraints for the given block type name.
// block within the current body and returns the result of that spec. It also
// creates validation constraints for the given block type name.
//
//```hcl
//Block {
// ```hcl
// Block {
//   name = "logging"
//
//   Object {
@ -371,10 +372,10 @@ func (m *Attr) GetRequired() bool {
//       type = string
//     }
//   }
//}
//```
// }
// ```
//
//`Block` spec blocks accept the following arguments:
// `Block` spec blocks accept the following arguments:
//
// `name` (required) - The block type name to expect within the HCL
// input file. This may be omitted when a default name selector is created
@ -384,12 +385,11 @@ func (m *Attr) GetRequired() bool {
// `required` (optional) - If set to `true`, `hcldec` will produce an error
// if a block of the specified type is not present in the current body.
//
//`Block` creates a validation constraint that there must be zero or one blocks
//of the given type name, or exactly one if `required` is set.
//
//`Block` expects a single nested spec block, which is applied to the body of
//the block of the given type when it is present.
// `Block` creates a validation constraint that there must be zero or one blocks
// of the given type name, or exactly one if `required` is set.
//
// `Block` expects a single nested spec block, which is applied to the body of
// the block of the given type when it is present.
type Block struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Required bool `protobuf:"varint,2,opt,name=required,proto3" json:"required,omitempty"`
@ -445,7 +445,6 @@ func (m *Block) GetNested() *Spec {
return nil
}

//
// The BlockAttrs spec type is similar to an Attr spec block of a map type,
// but it produces a map from the attributes of a block rather than from an
// attribute's expression.
@ -533,11 +532,11 @@ func (m *BlockAttrs) GetRequired() bool {
}

// BlockList spec type is similar to `Block`, but it accepts zero or
//more blocks of a specified type rather than requiring zero or one. The
//result is a JSON array with one entry per block of the given type.
// more blocks of a specified type rather than requiring zero or one. The
// result is a JSON array with one entry per block of the given type.
//
//```hcl
//BlockList {
// ```hcl
// BlockList {
//   name = "log_file"
//
//   Object {
@ -549,10 +548,10 @@ func (m *BlockAttrs) GetRequired() bool {
//       required = true
//     }
//   }
//}
//```
// }
// ```
//
//`BlockList` spec blocks accept the following arguments:
// `BlockList` spec blocks accept the following arguments:
//
// `name` (required) - The block type name to expect within the HCL
// input file. This may be omitted when a default name selector is created
@ -566,12 +565,11 @@ func (m *BlockAttrs) GetRequired() bool {
// produce an error if more than the given number of blocks are present. This
// attribute must be greater than or equal to `min_items` if both are set.
//
//`Block` creates a validation constraint on the number of blocks of the given
//type that must be present.
//
//`Block` expects a single nested spec block, which is applied to the body of
//each matching block to produce the resulting list items.
// `Block` creates a validation constraint on the number of blocks of the given
// type that must be present.
//
// `Block` expects a single nested spec block, which is applied to the body of
// each matching block to produce the resulting list items.
type BlockList struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
MinItems uint64 `protobuf:"varint,2,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
@ -636,10 +634,10 @@ func (m *BlockList) GetNested() *Spec {
}

// BlockSet spec type behaves the same as BlockList except that
//the result is in no specific order and any duplicate items are removed.
// the result is in no specific order and any duplicate items are removed.
//
//```hcl
//BlockSet {
// ```hcl
// BlockSet {
//   name = "log_file"
//
//   Object {
@ -651,11 +649,10 @@ func (m *BlockList) GetNested() *Spec {
//       required = true
//     }
//   }
//}
//```
//
//The contents of `BlockSet` are the same as for `BlockList`.
// }
// ```
//
// The contents of `BlockSet` are the same as for `BlockList`.
type BlockSet struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
MinItems uint64 `protobuf:"varint,2,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
@ -720,12 +717,12 @@ func (m *BlockSet) GetNested() *Spec {
}

// BlockMap spec type is similar to `Block`, but it accepts zero or
//more blocks of a specified type rather than requiring zero or one. The
//result is a JSON object, or possibly multiple nested JSON objects, whose
//properties are derived from the labels set on each matching block.
// more blocks of a specified type rather than requiring zero or one. The
// result is a JSON object, or possibly multiple nested JSON objects, whose
// properties are derived from the labels set on each matching block.
//
//```hcl
//BlockMap {
// ```hcl
// BlockMap {
//   name = "log_file"
//   labels = ["filename"]
//
@ -735,10 +732,10 @@ func (m *BlockSet) GetNested() *Spec {
//       required = true
//     }
//   }
//}
//```
// }
// ```
//
//`BlockMap` spec blocks accept the following arguments:
// `BlockMap` spec blocks accept the following arguments:
//
// `name` (required) - The block type name to expect within the HCL
// input file. This may be omitted when a default name selector is created
@ -751,12 +748,11 @@ func (m *BlockSet) GetNested() *Spec {
// Block header labels are the quoted strings that appear after the block type
// name but before the opening `{`.
//
//`Block` creates a validation constraint on the number of labels that blocks
//of the given type must have.
//
//`Block` expects a single nested spec block, which is applied to the body of
//each matching block to produce the resulting map items.
// `Block` creates a validation constraint on the number of labels that blocks
// of the given type must have.
//
// `Block` expects a single nested spec block, which is applied to the body of
// each matching block to produce the resulting map items.
type BlockMap struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"`
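A minimal sketch tying the `Block` and `BlockList` docs above to the same package's constructors; `NewBlock` and `NewBlockList` are assumed to have the signatures used by Nomad's built-in drivers:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/plugins/shared/hclspec"
)

func main() {
	// Spec applied to the body of each matching block.
	logFile := hclspec.NewObject(map[string]*hclspec.Spec{
		"path":     hclspec.NewAttr("path", "string", true),
		"required": hclspec.NewAttr("required", "bool", false),
	})

	spec := hclspec.NewObject(map[string]*hclspec.Spec{
		// Zero or one "logging" block, per the Block docs.
		"logging": hclspec.NewBlock("logging", false, logFile),
		// Zero or more "log_file" blocks, per the BlockList docs.
		"log_file": hclspec.NewBlockList("log_file", logFile),
	})
	fmt.Println(spec)
}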
@ -813,23 +809,23 @@ func (m *BlockMap) GetNested() *Spec {
}

// Literal spec type returns a given literal value, and creates no
//validation constraints. It is most commonly used with the `Default` spec
//type to create a fallback value, but can also be used e.g. to fill out
//required properties in an `Object` spec that do not correspond to any
//construct in the input configuration.
// validation constraints. It is most commonly used with the `Default` spec
// type to create a fallback value, but can also be used e.g. to fill out
// required properties in an `Object` spec that do not correspond to any
// construct in the input configuration.
//
//```hcl
//Literal {
// ```hcl
// Literal {
//   value = "hello world"
//}
//```
// }
// ```
//
//`Literal` spec blocks accept the following argument:
// `Literal` spec blocks accept the following argument:
//
// `value` (required) - The value to return. This attribute may be an expression
// that uses [functions](#spec-definition-functions).
//
//`Literal` is a leaf spec type, so no nested spec blocks are permitted.
// `Literal` is a leaf spec type, so no nested spec blocks are permitted.
type Literal struct {
Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -870,12 +866,12 @@ func (m *Literal) GetValue() string {
}

// Default spec type evaluates a sequence of nested specs in turn and
//returns the result of the first one that produces a non-null value.
//It creates no validation constraints of its own, but passes on the validation
//constraints from its first nested block.
// returns the result of the first one that produces a non-null value.
// It creates no validation constraints of its own, but passes on the validation
// constraints from its first nested block.
//
//```hcl
//Default {
// ```hcl
// Default {
//   Attr {
//     name = "private"
//     type = bool
@ -883,17 +879,16 @@ func (m *Literal) GetValue() string {
//   Literal {
//     value = false
//   }
//}
//```
// }
// ```
//
//A `Default` spec block must have at least one nested spec block, and should
//generally have at least two since otherwise the `Default` wrapper is a no-op.
//
//The second and any subsequent spec blocks are _fallback_ specs. These exhibit
//their usual behavior but are not able to impose validation constraints on the
//current body since they are not evaluated unless all prior specs produce
//`null` as their result.
// A `Default` spec block must have at least one nested spec block, and should
// generally have at least two since otherwise the `Default` wrapper is a no-op.
//
// The second and any subsequent spec blocks are _fallback_ specs. These exhibit
// their usual behavior but are not able to impose validation constraints on the
// current body since they are not evaluated unless all prior specs produce
// `null` as their result.
type Default struct {
Primary *Spec `protobuf:"bytes,1,opt,name=primary,proto3" json:"primary,omitempty"`
Default *Spec `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"`
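The `Default`/`Literal` pairing described above is the usual way to give an optional attribute a fallback value. A minimal sketch, again assuming the `hclspec` helper constructors:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/plugins/shared/hclspec"
)

func main() {
	// Read the optional "private" bool attribute; if the input leaves it
	// unset, fall back to the literal false, as in the comment above.
	private := hclspec.NewDefault(
		hclspec.NewAttr("private", "bool", false),
		hclspec.NewLiteral("false"),
	)
	fmt.Println(private)
}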
@ -942,11 +937,11 @@ func (m *Default) GetDefault() *Spec {
}

// Object spec type is the most commonly used at the root of a spec file.
//Its result is a JSON object whose properties are set based on any nested
//spec blocks:
// Its result is a JSON object whose properties are set based on any nested
// spec blocks:
//
//```hcl
//Object {
// ```hcl
// Object {
//   Attr "name" {
//     type = "string"
//   }
@ -958,18 +953,18 @@ func (m *Default) GetDefault() *Spec {
//       # ...
//     }
//   }
//}
//```
// }
// ```
//
//Nested spec blocks inside `Object` must always have an extra block label
//`"name"`, `"address"` and `"street"` in the above example) that specifies
//the name of the property that should be created in the JSON object result.
//This label also acts as a default name selector for the nested spec, allowing
//the `Attr` blocks in the above example to omit the usually-required `name`
//argument in cases where the HCL input name and JSON output name are the same.
// Nested spec blocks inside `Object` must always have an extra block label
// `"name"`, `"address"` and `"street"` in the above example) that specifies
// the name of the property that should be created in the JSON object result.
// This label also acts as a default name selector for the nested spec, allowing
// the `Attr` blocks in the above example to omit the usually-required `name`
// argument in cases where the HCL input name and JSON output name are the same.
//
//An `Object` spec block creates no validation constraints, but it passes on
//any validation constraints created by the nested specs.
// An `Object` spec block creates no validation constraints, but it passes on
// any validation constraints created by the nested specs.
type Object struct {
Attributes map[string]*Spec `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -1010,10 +1005,10 @@ func (m *Object) GetAttributes() map[string]*Spec {
}

// Array spec type produces a JSON array whose elements are set based on
//any nested spec blocks:
// any nested spec blocks:
//
//```hcl
//Array {
// ```hcl
// Array {
//   Attr {
//     name = "first_element"
//     type = "string"
@ -1022,11 +1017,11 @@ func (m *Object) GetAttributes() map[string]*Spec {
//     name = "second_element"
//     type = "string"
//   }
//}
//```
// }
// ```
//
//An `Array` spec block creates no validation constraints, but it passes on
//any validation constraints created by the nested specs.
// An `Array` spec block creates no validation constraints, but it passes on
// any validation constraints created by the nested specs.
type Array struct {
Values []*Spec `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -24,6 +24,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// specifying units
type Attribute struct {
// Types that are valid to be assigned to Value:
//
// *Attribute_FloatVal
// *Attribute_IntVal
// *Attribute_StringVal
@ -30,11 +30,12 @@ const (
//
// Currently the things that are annotated are:
// * Task group changes will be annotated with:
//    * Count up and count down changes
//    * Update counts (creates, destroys, migrates, etc)
//   - Count up and count down changes
//   - Update counts (creates, destroys, migrates, etc)
//
// * Task changes will be annotated with:
//    * forces create/destroy update
//    * forces in-place update
//   - forces create/destroy update
//   - forces in-place update
func Annotate(diff *structs.JobDiff, annotations *structs.PlanAnnotations) error {
tgDiffs := diff.TaskGroups
if len(tgDiffs) == 0 {
@ -16,9 +16,9 @@ import (
// benchmark for the Nomad scheduler. The starting state for your
// implementation will depend on the following environment variables:
//
// - NOMAD_BENCHMARK_DATADIR: path to data directory
// - NOMAD_BENCHMARK_SNAPSHOT: path to raft snapshot
// - neither: empty starting state
//   - NOMAD_BENCHMARK_DATADIR: path to data directory
//   - NOMAD_BENCHMARK_SNAPSHOT: path to raft snapshot
//   - neither: empty starting state
//
// You can run a profile for this benchmark with the usual -cpuprofile
// -memprofile flags.
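A minimal sketch of the environment-variable selection described above; the function and the precedence between the two variables are assumptions for illustration, only the variable names come from the comment:

package main

import (
	"fmt"
	"os"
)

// startingState mirrors the three cases listed in the comment above.
// The order of the checks is an assumption.
func startingState() string {
	if dir := os.Getenv("NOMAD_BENCHMARK_DATADIR"); dir != "" {
		return "data directory: " + dir
	}
	if snap := os.Getenv("NOMAD_BENCHMARK_SNAPSHOT"); snap != "" {
		return "raft snapshot: " + snap
	}
	return "empty starting state"
}

func main() {
	fmt.Println(startingState())
}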
@ -157,7 +157,6 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
// Preempted allocs are removed from the allocs propsed for a node.
//
// See https://github.com/hashicorp/nomad/issues/6787
//
func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) {
ci.Parallel(t)
state, ctx := testContext(t)