Merge branch 'master' into refactor-parser

Alex Dadgar 2017-02-20 15:13:10 -08:00
commit b67c59f03c
57 changed files with 1019 additions and 282 deletions


@ -11,12 +11,16 @@ IMPROVEMENTS:
[GH-2277]
* jobspec: Add leader task to allow graceful shutdown of other tasks within
the task group [GH-2308]
* periodic: Allow specification of timezones in Periodic Jobs [GH-2321]
* server/vault: Vault Client on Server handles SIGHUP to reload configs
[GH-2270]
* telemetry: Clients report allocated/unallocated resources [GH-2327]
* template: Permissions can be set on template destination file [GH-2262]
* vault: Server side Vault telemetry [GH-2318]
* vault: Disallow root policy from being specified [GH-2309]
BUG FIXES:
* api: Fix escaping of HTML characters [GH-2322]
* client: Fix race condition with deriving vault tokens [GH-2275]
* config: Fix Consul Config Merging/Copying [GH-2278]
* config: Fix Client reserved resource merging panic [GH-2281]

Vagrantfile

@ -6,7 +6,7 @@ VAGRANTFILE_API_VERSION = "2"
DEFAULT_CPU_COUNT = 2
$script = <<SCRIPT
GO_VERSION="1.7.5"
GO_VERSION="1.8.0"
export DEBIAN_FRONTEND=noninteractive


@ -227,6 +227,7 @@ type PeriodicConfig struct {
Spec *string
SpecType *string
ProhibitOverlap *bool
TimeZone *string
}
func (p *PeriodicConfig) Canonicalize() {
@ -239,6 +240,9 @@ func (p *PeriodicConfig) Canonicalize() {
if p.ProhibitOverlap == nil {
p.ProhibitOverlap = helper.BoolToPtr(false)
}
if p.TimeZone == nil || *p.TimeZone == "" {
p.TimeZone = helper.StringToPtr("UTC")
}
}
// Next returns the closest time instant matching the spec that is after the
@ -255,6 +259,14 @@ func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
return time.Time{}
}
func (p *PeriodicConfig) GetLocation() (*time.Location, error) {
if p.TimeZone == nil || *p.TimeZone == "" {
return time.UTC, nil
}
return time.LoadLocation(*p.TimeZone)
}
// ParameterizedJobConfig is used to configure the parameterized job.
type ParameterizedJobConfig struct {
Payload string
@ -307,7 +319,6 @@ func (j *Job) Canonicalize() {
if j.ParentID == nil {
j.ParentID = helper.StringToPtr("")
}
if j.Priority == nil {
j.Priority = helper.IntToPtr(50)
}
@ -601,5 +612,5 @@ type JobDispatchResponse struct {
EvalID string
EvalCreateIndex uint64
JobCreateIndex uint64
QueryMeta
WriteMeta
}
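Taken together, the hunks above give periodic jobs an optional IANA time zone on the client API. A minimal usage sketch, assuming only the names shown above (the zone string is illustrative):

    // An unset time_zone canonicalizes to "UTC"; a set one is resolved
    // against the IANA database by GetLocation via time.LoadLocation.
    p := &PeriodicConfig{TimeZone: helper.StringToPtr("Europe/Minsk")}
    p.Canonicalize()
    loc, err := p.GetLocation() // unknown IANA zone names surface here
    if err == nil {
        _ = p.Next(time.Now().In(loc))
    }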


@ -80,7 +80,6 @@ func TestJobs_Validate(t *testing.T) {
}
func TestJobs_Canonicalize(t *testing.T) {
testCases := []struct {
name string
expected *Job
@ -195,13 +194,41 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
},
{
name: "periodic",
input: &Job{
ID: helper.StringToPtr("bar"),
Periodic: &PeriodicConfig{},
},
expected: &Job{
ID: helper.StringToPtr("bar"),
ParentID: helper.StringToPtr(""),
Name: helper.StringToPtr("bar"),
Region: helper.StringToPtr("global"),
Type: helper.StringToPtr("service"),
Priority: helper.IntToPtr(50),
AllAtOnce: helper.BoolToPtr(false),
VaultToken: helper.StringToPtr(""),
Status: helper.StringToPtr(""),
StatusDescription: helper.StringToPtr(""),
CreateIndex: helper.Uint64ToPtr(0),
ModifyIndex: helper.Uint64ToPtr(0),
JobModifyIndex: helper.Uint64ToPtr(0),
Periodic: &PeriodicConfig{
Enabled: helper.BoolToPtr(true),
SpecType: helper.StringToPtr(PeriodicSpecCron),
ProhibitOverlap: helper.BoolToPtr(false),
TimeZone: helper.StringToPtr("UTC"),
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
tc.input.Canonicalize()
if !reflect.DeepEqual(tc.input, tc.expected) {
t.Fatalf("Name: %v, expected: %#v, actual: %#v", tc.name, tc.expected, tc.input)
t.Fatalf("Name: %v, expected:\n%#v\nactual:\n%#v", tc.name, tc.expected, tc.input)
}
})
}


@ -135,6 +135,10 @@ type Client struct {
blockedAllocations map[string]*structs.Allocation
blockedAllocsLock sync.RWMutex
// migratingAllocs is the set of allocs whose data migration is in flight
migratingAllocs map[string]*migrateAllocCtrl
migratingAllocsLock sync.Mutex
// allocUpdates stores allocations that need to be synced to the server.
allocUpdates chan *structs.Allocation
@ -151,10 +155,6 @@ type Client struct {
// vaultClient is used to interact with Vault for token and secret renewals
vaultClient vaultclient.VaultClient
// migratingAllocs is the set of allocs whose data migration is in flight
migratingAllocs map[string]*migrateAllocCtrl
migratingAllocsLock sync.Mutex
// garbageCollector is used to garbage collect terminal allocations present
// in the node automatically
garbageCollector *AllocGarbageCollector
@ -162,14 +162,16 @@ type Client struct {
// migrateAllocCtrl indicates whether migration is complete
type migrateAllocCtrl struct {
alloc *structs.Allocation
ch chan struct{}
closed bool
chLock sync.Mutex
}
func newMigrateAllocCtrl() *migrateAllocCtrl {
func newMigrateAllocCtrl(alloc *structs.Allocation) *migrateAllocCtrl {
return &migrateAllocCtrl{
ch: make(chan struct{}),
ch: make(chan struct{}),
alloc: alloc,
}
}
@ -1501,7 +1503,7 @@ func (c *Client) runAllocs(update *allocUpdates) {
// prevents a race between a finishing blockForRemoteAlloc and
// another invocation of runAllocs
if _, ok := c.getAllocRunners()[add.PreviousAllocation]; !ok {
c.migratingAllocs[add.ID] = newMigrateAllocCtrl()
c.migratingAllocs[add.ID] = newMigrateAllocCtrl(add)
go c.blockForRemoteAlloc(add)
}
}
@ -2220,6 +2222,111 @@ func (c *Client) emitStats(hStats *stats.HostStats) {
metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "used_percent"}, float32(disk.UsedPercent))
metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "inodes_percent"}, float32(disk.InodesUsedPercent))
}
// Get all the resources for the node
c.configLock.RLock()
node := c.configCopy.Node
c.configLock.RUnlock()
total := node.Resources
res := node.Reserved
allocated := c.getAllocatedResources(node)
// Emit allocated
metrics.SetGauge([]string{"client", "allocated", "memory", nodeID}, float32(allocated.MemoryMB))
metrics.SetGauge([]string{"client", "allocated", "disk", nodeID}, float32(allocated.DiskMB))
metrics.SetGauge([]string{"client", "allocated", "cpu", nodeID}, float32(allocated.CPU))
metrics.SetGauge([]string{"client", "allocated", "iops", nodeID}, float32(allocated.IOPS))
for _, n := range allocated.Networks {
metrics.SetGauge([]string{"client", "allocated", "network", n.Device, nodeID}, float32(n.MBits))
}
// Emit unallocated
unallocatedMem := total.MemoryMB - res.MemoryMB - allocated.MemoryMB
unallocatedDisk := total.DiskMB - res.DiskMB - allocated.DiskMB
unallocatedCpu := total.CPU - res.CPU - allocated.CPU
unallocatedIops := total.IOPS - res.IOPS - allocated.IOPS
metrics.SetGauge([]string{"client", "unallocated", "memory", nodeID}, float32(unallocatedMem))
metrics.SetGauge([]string{"client", "unallocated", "disk", nodeID}, float32(unallocatedDisk))
metrics.SetGauge([]string{"client", "unallocated", "cpu", nodeID}, float32(unallocatedCpu))
metrics.SetGauge([]string{"client", "unallocated", "iops", nodeID}, float32(unallocatedIops))
for _, n := range allocated.Networks {
totalMbits := 0
totalIdx := total.NetIndex(n)
if totalIdx == -1 {
continue
}
totalMbits = total.Networks[totalIdx].MBits
unallocatedMbits := totalMbits - n.MBits
metrics.SetGauge([]string{"client", "unallocated", "network", n.Device, nodeID}, float32(unallocatedMbits))
}
}
func (c *Client) getAllocatedResources(selfNode *structs.Node) *structs.Resources {
// Unfortunately the allocs only have IP so we need to match them to the
// device
cidrToDevice := make(map[*net.IPNet]string, len(selfNode.Resources.Networks))
for _, n := range selfNode.Resources.Networks {
_, ipnet, err := net.ParseCIDR(n.CIDR)
if err != nil {
continue
}
cidrToDevice[ipnet] = n.Device
}
// Sum the allocated resources
allocs := c.allAllocs()
var allocated structs.Resources
allocatedDeviceMbits := make(map[string]int)
for _, alloc := range allocs {
if !alloc.TerminalStatus() {
allocated.Add(alloc.Resources)
for _, allocatedNetwork := range alloc.Resources.Networks {
for cidr, dev := range cidrToDevice {
ip := net.ParseIP(allocatedNetwork.IP)
if cidr.Contains(ip) {
allocatedDeviceMbits[dev] += allocatedNetwork.MBits
break
}
}
}
}
}
// Clear the networks
allocated.Networks = nil
for dev, speed := range allocatedDeviceMbits {
net := &structs.NetworkResource{
Device: dev,
MBits: speed,
}
allocated.Networks = append(allocated.Networks, net)
}
return &allocated
}
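The CIDR-to-device matching above is plain net package usage; a self-contained sketch with illustrative addresses:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // Map each device's CIDR to its name, then test membership of an alloc IP.
        _, ipnet, _ := net.ParseCIDR("10.0.1.0/24")
        cidrToDevice := map[*net.IPNet]string{ipnet: "eth0"}
        ip := net.ParseIP("10.0.1.17")
        for cidr, dev := range cidrToDevice {
            if cidr.Contains(ip) {
                fmt.Println(dev) // eth0
            }
        }
    }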
// allAllocs returns all the allocations managed by the client
func (c *Client) allAllocs() []*structs.Allocation {
var allocs []*structs.Allocation
for _, ar := range c.getAllocRunners() {
allocs = append(allocs, ar.Alloc())
}
c.blockedAllocsLock.Lock()
for _, alloc := range c.blockedAllocations {
allocs = append(allocs, alloc)
}
c.blockedAllocsLock.Unlock()
c.migratingAllocsLock.Lock()
for _, ctrl := range c.migratingAllocs {
allocs = append(allocs, ctrl.alloc)
}
c.migratingAllocsLock.Unlock()
return allocs
}
// resolveServer given a server's address as a string, return its resolved


@ -903,7 +903,7 @@ func TestClient_UnarchiveAllocDir(t *testing.T) {
rc := ioutil.NopCloser(buf)
c1.migratingAllocs["123"] = newMigrateAllocCtrl()
c1.migratingAllocs["123"] = newMigrateAllocCtrl(mock.Alloc())
if err := c1.unarchiveAllocDir(rc, "123", dir1); err != nil {
t.Fatalf("err: %v", err)
}


@ -249,7 +249,13 @@ func (c *vaultClient) DeriveToken(alloc *structs.Allocation, taskNames []string)
// Use the token supplied to interact with vault
c.client.SetToken("")
return c.tokenDeriver(alloc, taskNames, c.client)
tokens, err := c.tokenDeriver(alloc, taskNames, c.client)
if err != nil {
c.logger.Printf("[ERR] client.vault: failed to derive token for allocation %q and tasks %v: %v", alloc.ID, taskNames, err)
return nil, err
}
return tokens, nil
}
// GetConsulACL creates a vault API client and reads from vault a consul ACL


@ -33,8 +33,13 @@ const (
var (
// jsonHandle and jsonHandlePretty are the codec handles to JSON encode
// structs. The pretty handle will add indents for easier human consumption.
jsonHandle = &codec.JsonHandle{}
jsonHandlePretty = &codec.JsonHandle{Indent: 4}
jsonHandle = &codec.JsonHandle{
HTMLCharsAsIs: true,
}
jsonHandlePretty = &codec.JsonHandle{
HTMLCharsAsIs: true,
Indent: 4,
}
)
// HTTPServer is used to wrap an Agent and expose it over an HTTP interface
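For reference, HTMLCharsAsIs is what closes GH-2322: it makes the ugorji codec JSON encoder emit <, >, and & literally rather than as \u003c-style escapes. A small sketch of the effect:

    var buf bytes.Buffer
    jh := codec.JsonHandle{HTMLCharsAsIs: true}
    // With HTMLCharsAsIs set, the output keeps "<script>" literal;
    // without it, the encoder writes "\u003cscript\u003e".
    if err := codec.NewEncoder(&buf, &jh).Encode("<script>"); err == nil {
        fmt.Println(buf.String())
    }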


@ -2,10 +2,18 @@ package command
import (
"bytes"
"encoding/json"
"fmt"
"io"
"text/template"
"github.com/ugorji/go/codec"
)
var (
jsonHandlePretty = &codec.JsonHandle{
HTMLCharsAsIs: true,
Indent: 4,
}
)
// DataFormatter is a transformer of the data.
@ -33,12 +41,14 @@ type JSONFormat struct {
// TransformData returns JSON format string data.
func (p *JSONFormat) TransformData(data interface{}) (string, error) {
out, err := json.MarshalIndent(&data, "", " ")
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, jsonHandlePretty)
err := enc.Encode(data)
if err != nil {
return "", err
}
return string(out), nil
return buf.String(), nil
}
type TemplateFormat struct {


@ -12,9 +12,9 @@ type testData struct {
}
const expectJSON = `{
"Region": "global",
"ID": "1",
"Name": "example"
"Name": "example",
"Region": "global"
}`
var (
@ -36,7 +36,7 @@ func TestDataFormat(t *testing.T) {
}
if result != expectOutput[k] {
t.Fatalf("expected output: %s, actual: %s", expectOutput[k], result)
t.Fatalf("expected output:\n%s\nactual:\n%s", expectOutput[k], result)
}
}
}


@ -1,7 +1,6 @@
package command
import (
"encoding/json"
"fmt"
"strings"
@ -158,12 +157,17 @@ func (c *InspectCommand) Run(args []string) int {
// Print the contents of the job
req := api.RegisterJobRequest{Job: job}
buf, err := json.MarshalIndent(req, "", " ")
f, err := DataFormat("json", "")
if err != nil {
c.Ui.Error(fmt.Sprintf("Error converting job: %s", err))
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
return 1
}
c.Ui.Output(string(buf))
out, err := f.TransformData(req)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
return 1
}
c.Ui.Output(out)
return 0
}


@ -201,8 +201,14 @@ func formatDryRun(resp *api.JobPlanResponse, job *api.Job) string {
}
if next := resp.NextPeriodicLaunch; !next.IsZero() {
out += fmt.Sprintf("[green]- If submitted now, next periodic launch would be at %s (%s from now).\n",
formatTime(next), formatTimeDifference(time.Now().UTC(), next, time.Second))
loc, err := job.Periodic.GetLocation()
if err != nil {
out += fmt.Sprintf("[yellow]- Invalid time zone: %v", err)
} else {
now := time.Now().In(loc)
out += fmt.Sprintf("[green]- If submitted now, next periodic launch would be at %s (%s from now).\n",
formatTime(next), formatTimeDifference(now, next, time.Second))
}
}
out = strings.TrimSuffix(out, "\n")


@ -224,10 +224,13 @@ OUTSIDE:
if detach || periodic || paramjob {
c.Ui.Output("Job registration successful")
if periodic {
now := time.Now().UTC()
next := job.Periodic.Next(now)
c.Ui.Output(fmt.Sprintf("Approximate next launch time: %s (%s from now)",
formatTime(next), formatTimeDifference(now, next, time.Second)))
loc, err := job.Periodic.GetLocation()
if err == nil {
now := time.Now().In(loc)
next := job.Periodic.Next(now)
c.Ui.Output(fmt.Sprintf("Approximate next launch time: %s (%s from now)",
formatTime(next), formatTimeDifference(now, next, time.Second)))
}
} else if !paramjob {
c.Ui.Output("Evaluation ID: " + evalID)
}


@ -147,11 +147,14 @@ func (c *StatusCommand) Run(args []string) int {
}
if periodic {
now := time.Now().UTC()
next := job.Periodic.Next(now)
basic = append(basic, fmt.Sprintf("Next Periodic Launch|%s",
fmt.Sprintf("%s (%s from now)",
formatTime(next), formatTimeDifference(now, next, time.Second))))
location, err := job.Periodic.GetLocation()
if err == nil {
now := time.Now().In(location)
next := job.Periodic.Next(now)
basic = append(basic, fmt.Sprintf("Next Periodic Launch|%s",
fmt.Sprintf("%s (%s from now)",
formatTime(next), formatTimeDifference(now, next, time.Second))))
}
}
c.Ui.Output(formatKV(basic))


@ -1197,6 +1197,7 @@ func parsePeriodic(result **api.PeriodicConfig, list *ast.ObjectList) error {
"enabled",
"cron",
"prohibit_overlap",
"time_zone",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return err


@ -333,6 +333,7 @@ func TestParse(t *testing.T) {
SpecType: structs.PeriodicSpecCron,
Spec: "*/5 * * *",
ProhibitOverlap: true,
TimeZone: "Europe/Minsk",
},
},
false,


@ -2,5 +2,6 @@ job "foo" {
periodic {
cron = "*/5 * * *"
prohibit_overlap = true
time_zone = "Europe/Minsk"
}
}


@ -706,7 +706,7 @@ func (j *Job) Plan(args *structs.JobPlanRequest, reply *structs.JobPlanResponse)
// If it is a periodic job calculate the next launch
if args.Job.IsPeriodic() && args.Job.Periodic.Enabled {
reply.NextPeriodicLaunch = args.Job.Periodic.Next(time.Now().UTC())
reply.NextPeriodicLaunch = args.Job.Periodic.Next(time.Now().In(args.Job.Periodic.GetLocation()))
}
reply.FailedTGAllocs = updatedEval.FailedTGAllocs


@ -1061,7 +1061,7 @@ func (n *Node) DeriveVaultToken(args *structs.DeriveVaultTokenRequest,
secret, err := n.srv.vault.CreateToken(ctx, alloc, task)
if err != nil {
wrapped := fmt.Errorf("failed to create token for task %q: %v", task, err)
wrapped := fmt.Errorf("failed to create token for task %q on alloc %q: %v", task, alloc.ID, err)
if rerr, ok := err.(*structs.RecoverableError); ok && rerr.Recoverable {
// If the error is recoverable, propagate it
return structs.NewRecoverableError(wrapped, true)
@ -1117,10 +1117,10 @@ func (n *Node) DeriveVaultToken(args *structs.DeriveVaultTokenRequest,
// If there was an error revoke the created tokens
if createErr != nil {
n.srv.logger.Printf("[ERR] nomad.node: Vault token creation failed: %v", createErr)
n.srv.logger.Printf("[ERR] nomad.node: Vault token creation for alloc %q failed: %v", alloc.ID, createErr)
if revokeErr := n.srv.vault.RevokeTokens(context.Background(), accessors, false); revokeErr != nil {
n.srv.logger.Printf("[ERR] nomad.node: Vault token revocation failed: %v", revokeErr)
n.srv.logger.Printf("[ERR] nomad.node: Vault token revocation for alloc %q failed: %v", alloc.ID, revokeErr)
}
if rerr, ok := createErr.(*structs.RecoverableError); ok {
@ -1136,7 +1136,7 @@ func (n *Node) DeriveVaultToken(args *structs.DeriveVaultTokenRequest,
req := structs.VaultAccessorsRequest{Accessors: accessors}
_, index, err := n.srv.raftApply(structs.VaultAccessorRegisterRequestType, &req)
if err != nil {
n.srv.logger.Printf("[ERR] nomad.client: Register Vault accessors failed: %v", err)
n.srv.logger.Printf("[ERR] nomad.client: Register Vault accessors for alloc %q failed: %v", alloc.ID, err)
// Determine if we can recover from the error
retry := false


@ -209,7 +209,7 @@ func (p *PeriodicDispatch) Add(job *structs.Job) error {
// Add or update the job.
p.tracked[job.ID] = job
next := job.Periodic.Next(time.Now().UTC())
next := job.Periodic.Next(time.Now().In(job.Periodic.GetLocation()))
if tracked {
if err := p.heap.Update(job, next); err != nil {
return fmt.Errorf("failed to update job %v launch time: %v", job.ID, err)
@ -289,7 +289,7 @@ func (p *PeriodicDispatch) ForceRun(jobID string) (*structs.Evaluation, error) {
}
p.l.Unlock()
return p.createEval(job, time.Now().UTC())
return p.createEval(job, time.Now().In(job.Periodic.GetLocation()))
}
// shouldRun returns whether the long lived run function should run.
@ -309,7 +309,7 @@ func (p *PeriodicDispatch) run() {
if launch.IsZero() {
launchCh = nil
} else {
launchDur := launch.Sub(time.Now().UTC())
launchDur := launch.Sub(time.Now().In(job.Periodic.GetLocation()))
launchCh = time.After(launchDur)
p.logger.Printf("[DEBUG] nomad.periodic: launching job %q in %s", job.ID, launchDur)
}


@ -286,6 +286,9 @@ func NewServer(config *Config, consulSyncer *consul.Syncer, logger *log.Logger)
// Emit metrics for the blocked eval tracker.
go blockedEvals.EmitStats(time.Second, s.shutdownCh)
// Emit metrics for the Vault client.
go s.vault.EmitStats(time.Second, s.shutdownCh)
// Emit metrics
go s.heartbeatStats()


@ -515,6 +515,7 @@ func TestJobDiff(t *testing.T) {
Spec: "*/15 * * * * *",
SpecType: "foo",
ProhibitOverlap: false,
TimeZone: "Europe/Minsk",
},
},
Expected: &JobDiff{
@ -548,6 +549,12 @@ func TestJobDiff(t *testing.T) {
Old: "",
New: "foo",
},
{
Type: DiffTypeAdded,
Name: "TimeZone",
Old: "",
New: "Europe/Minsk",
},
},
},
},
@ -561,6 +568,7 @@ func TestJobDiff(t *testing.T) {
Spec: "*/15 * * * * *",
SpecType: "foo",
ProhibitOverlap: false,
TimeZone: "Europe/Minsk",
},
},
New: &Job{},
@ -595,6 +603,12 @@ func TestJobDiff(t *testing.T) {
Old: "foo",
New: "",
},
{
Type: DiffTypeDeleted,
Name: "TimeZone",
Old: "Europe/Minsk",
New: "",
},
},
},
},
@ -608,6 +622,7 @@ func TestJobDiff(t *testing.T) {
Spec: "*/15 * * * * *",
SpecType: "foo",
ProhibitOverlap: false,
TimeZone: "Europe/Minsk",
},
},
New: &Job{
@ -616,6 +631,7 @@ func TestJobDiff(t *testing.T) {
Spec: "* * * * * *",
SpecType: "cron",
ProhibitOverlap: true,
TimeZone: "America/Los_Angeles",
},
},
Expected: &JobDiff{
@ -649,6 +665,12 @@ func TestJobDiff(t *testing.T) {
Old: "foo",
New: "cron",
},
{
Type: DiffTypeEdited,
Name: "TimeZone",
Old: "Europe/Minsk",
New: "America/Los_Angeles",
},
},
},
},
@ -663,6 +685,7 @@ func TestJobDiff(t *testing.T) {
Spec: "*/15 * * * * *",
SpecType: "foo",
ProhibitOverlap: false,
TimeZone: "Europe/Minsk",
},
},
New: &Job{
@ -671,6 +694,7 @@ func TestJobDiff(t *testing.T) {
Spec: "* * * * * *",
SpecType: "foo",
ProhibitOverlap: false,
TimeZone: "Europe/Minsk",
},
},
Expected: &JobDiff{
@ -704,6 +728,12 @@ func TestJobDiff(t *testing.T) {
Old: "foo",
New: "foo",
},
{
Type: DiffTypeNone,
Name: "TimeZone",
Old: "Europe/Minsk",
New: "Europe/Minsk",
},
},
},
},


@ -556,7 +556,7 @@ type JobDispatchResponse struct {
EvalID string
EvalCreateIndex uint64
JobCreateIndex uint64
QueryMeta
WriteMeta
}
// JobListResponse is used for a list request
@ -1191,6 +1191,10 @@ func (j *Job) Canonicalize() {
if j.ParameterizedJob != nil {
j.ParameterizedJob.Canonicalize()
}
if j.Periodic != nil {
j.Periodic.Canonicalize()
}
}
// Copy returns a deep copy of the Job. It is expected that callers use recover.
@ -1559,6 +1563,16 @@ type PeriodicConfig struct {
// ProhibitOverlap enforces that spawned jobs do not run in parallel.
ProhibitOverlap bool `mapstructure:"prohibit_overlap"`
// TimeZone is the user specified string that determines the time zone to
// launch against. The time zones must be specified from IANA Time Zone
// database, such as "America/New_York".
// Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
// Reference: https://www.iana.org/time-zones
TimeZone string `mapstructure:"time_zone"`
// location is the time zone to evaluate the launch time against
location *time.Location
}
func (p *PeriodicConfig) Copy() *PeriodicConfig {
@ -1575,23 +1589,41 @@ func (p *PeriodicConfig) Validate() error {
return nil
}
var mErr multierror.Error
if p.Spec == "" {
return fmt.Errorf("Must specify a spec")
multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
}
// Check if we got a valid time zone
if p.TimeZone != "" {
if _, err := time.LoadLocation(p.TimeZone); err != nil {
multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err))
}
}
switch p.SpecType {
case PeriodicSpecCron:
// Validate the cron spec
if _, err := cronexpr.Parse(p.Spec); err != nil {
return fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err)
multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
}
case PeriodicSpecTest:
// No-op
default:
return fmt.Errorf("Unknown periodic specification type %q", p.SpecType)
multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
}
return nil
return mErr.ErrorOrNil()
}
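Validate now accumulates every problem with hashicorp/go-multierror instead of returning on the first one. A minimal sketch of the pattern (function name and checks illustrative):

    func validatePeriodic(spec, tz string) error {
        var mErr multierror.Error
        if spec == "" {
            multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
        }
        if tz != "" {
            if _, err := time.LoadLocation(tz); err != nil {
                multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", tz, err))
            }
        }
        // nil when nothing was appended; otherwise one error listing every problem
        return mErr.ErrorOrNil()
    }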
func (p *PeriodicConfig) Canonicalize() {
// Load the location
l, err := time.LoadLocation(p.TimeZone)
if err != nil {
p.location = time.UTC
} else {
p.location = l
}
}
// Next returns the closest time instant matching the spec that is after the
@ -1632,6 +1664,17 @@ func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
return time.Time{}
}
// GetLocation returns the location to use for determining the time zone to run
// the periodic job against.
func (p *PeriodicConfig) GetLocation() *time.Location {
// Jobs pre 0.5.5 will not have this
if p.location != nil {
return p.location
}
return time.UTC
}
const (
// PeriodicLaunchSuffix is the string appended to the periodic jobs ID
// when launching derived instances of it.


@ -1217,12 +1217,19 @@ func TestPeriodicConfig_EnabledInvalid(t *testing.T) {
if err := p.Validate(); err == nil {
t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid")
}
// Create a config that is enabled, with a bad time zone.
p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"}
if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") {
t.Fatal("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err)
}
}
func TestPeriodicConfig_InvalidCron(t *testing.T) {
specs := []string{"foo", "* *", "@foo"}
for _, spec := range specs {
p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
p.Canonicalize()
if err := p.Validate(); err == nil {
t.Fatal("Invalid cron spec")
}
@ -1233,6 +1240,7 @@ func TestPeriodicConfig_ValidCron(t *testing.T) {
specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"}
for _, spec := range specs {
p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
p.Canonicalize()
if err := p.Validate(); err != nil {
t.Fatal("Passed valid cron")
}
@ -1245,6 +1253,7 @@ func TestPeriodicConfig_NextCron(t *testing.T) {
expected := []time.Time{time.Time{}, time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC)}
for i, spec := range specs {
p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
p.Canonicalize()
n := p.Next(from)
if expected[i] != n {
t.Fatalf("Next(%v) returned %v; want %v", from, n, expected[i])
@ -1252,6 +1261,45 @@ func TestPeriodicConfig_NextCron(t *testing.T) {
}
}
func TestPeriodicConfig_ValidTimeZone(t *testing.T) {
zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"}
for _, zone := range zones {
p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone}
p.Canonicalize()
if err := p.Validate(); err != nil {
t.Fatal("Valid tz errored: %v", err)
}
}
}
func TestPeriodicConfig_DST(t *testing.T) {
// On Sun, Mar 12, 2:00 am 2017: +1 hour UTC
p := &PeriodicConfig{
Enabled: true,
SpecType: PeriodicSpecCron,
Spec: "0 2 11-12 3 * 2017",
TimeZone: "America/Los_Angeles",
}
p.Canonicalize()
t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location)
t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location)
// E1 is an 8 hour adjustment, E2 is a 7 hour adjustment
e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC)
e2 := time.Date(2017, time.March, 12, 9, 0, 0, 0, time.UTC)
n1 := p.Next(t1).UTC()
n2 := p.Next(t2).UTC()
if !reflect.DeepEqual(e1, n1) {
t.Fatalf("Got %v; want %v", n1, e1)
}
if !reflect.DeepEqual(e2, n2) {
t.Fatalf("Got %v; want %v", n1, e1)
}
}
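The expected values follow from the zone data: America/Los_Angeles is UTC-8 until the spring-forward at 2:00 am on March 12, 2017 and UTC-7 afterwards. A standalone sketch:

    loc, _ := time.LoadLocation("America/Los_Angeles")
    // 2am PST on Mar 11 is 10:00 UTC (UTC-8) ...
    fmt.Println(time.Date(2017, time.March, 11, 2, 0, 0, 0, loc).UTC())
    // ... while 3am PDT on Mar 12 is 10:00 UTC (UTC-7); 2am does not exist that day.
    fmt.Println(time.Date(2017, time.March, 12, 3, 0, 0, 0, loc).UTC())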
func TestRestartPolicy_Validate(t *testing.T) {
// Policy with acceptable restart options passes
p := &RestartPolicy{


@ -13,6 +13,7 @@ import (
"gopkg.in/tomb.v2"
metrics "github.com/armon/go-metrics"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
@ -135,6 +136,21 @@ type VaultClient interface {
// Running returns whether the Vault client is running
Running() bool
// Stats returns the Vault client's statistics
Stats() *VaultStats
// EmitStats emits the client's statistics at the given period until stopCh
// is closed.
EmitStats(period time.Duration, stopCh chan struct{})
}
// VaultStats holds all the stats about Vault tokens created and managed by
// Nomad.
type VaultStats struct {
// TrackedForRevoke is the count of tokens that are being tracked to be
// revoked since they could not be immediately revoked.
TrackedForRevoke int
}
// PurgeVaultAccessor is called to remove VaultAccessors from the system. If
@ -204,6 +220,10 @@ type vaultClient struct {
tomb *tomb.Tomb
logger *log.Logger
// stats stores the stats
stats *VaultStats
statsLock sync.RWMutex
// l is used to lock the configuration aspects of the client such that
// multiple callers can't cause conflicting config updates
l sync.Mutex
@ -227,6 +247,7 @@ func NewVaultClient(c *config.VaultConfig, logger *log.Logger, purgeFn PurgeVaul
revoking: make(map[*structs.VaultAccessor]time.Time),
purgeFn: purgeFn,
tomb: &tomb.Tomb{},
stats: new(VaultStats),
}
if v.config.IsEnabled() {
@ -821,7 +842,6 @@ func (v *vaultClient) CreateToken(ctx context.Context, a *structs.Allocation, ta
if !v.Enabled() {
return nil, fmt.Errorf("Vault integration disabled")
}
if !v.Active() {
return nil, structs.NewRecoverableError(fmt.Errorf("Vault client not active"), true)
}
@ -833,6 +853,9 @@ func (v *vaultClient) CreateToken(ctx context.Context, a *structs.Allocation, ta
return nil, fmt.Errorf("Connection to Vault failed: %v", err)
}
// Track how long the request takes
defer metrics.MeasureSince([]string{"nomad", "vault", "create_token"}, time.Now())
// Retrieve the Vault block for the task
policies := a.Job.VaultPolicies()
if policies == nil {
@ -908,6 +931,9 @@ func (v *vaultClient) LookupToken(ctx context.Context, token string) (*vapi.Secr
return nil, fmt.Errorf("Connection to Vault failed: %v", err)
}
// Track how long the request takes
defer metrics.MeasureSince([]string{"nomad", "vault", "lookup_token"}, time.Now())
// Ensure we are under our rate limit
if err := v.limiter.Wait(ctx); err != nil {
return nil, err
@ -943,6 +969,9 @@ func (v *vaultClient) RevokeTokens(ctx context.Context, accessors []*structs.Vau
return fmt.Errorf("Vault client not active")
}
// Track how long the request takes
defer metrics.MeasureSince([]string{"nomad", "vault", "revoke_tokens"}, time.Now())
// Check if we have established a connection with Vault. If not just add it
// to the queue
if established, err := v.ConnectionEstablished(); !established && err == nil {
@ -952,22 +981,29 @@ func (v *vaultClient) RevokeTokens(ctx context.Context, accessors []*structs.Vau
v.storeForRevocation(accessors)
}
// Track that we are abandoning these accessors.
metrics.IncrCounter([]string{"nomad", "vault", "undistributed_tokens_abandoned"}, float32(len(accessors)))
return nil
}
// Attempt to revoke immediately and if it fails, add it to the revoke queue
err := v.parallelRevoke(ctx, accessors)
if !committed {
if err != nil {
// If it is uncommitted, it is a best effort revoke as it will shortly
// TTL within the cubbyhole and has not been leaked to any outside
// system
return nil
}
if !committed {
metrics.IncrCounter([]string{"nomad", "vault", "undistributed_tokens_abandoned"}, float32(len(accessors)))
return nil
}
if err != nil {
v.logger.Printf("[WARN] vault: failed to revoke tokens. Will reattempt til TTL: %v", err)
v.storeForRevocation(accessors)
return nil
} else if !committed {
// Mark that it was revoked but there is nothing to purge so exit
metrics.IncrCounter([]string{"nomad", "vault", "undistributed_tokens_revoked"}, float32(len(accessors)))
return nil
}
if err := v.purgeFn(accessors); err != nil {
@ -976,6 +1012,9 @@ func (v *vaultClient) RevokeTokens(ctx context.Context, accessors []*structs.Vau
return nil
}
// Track that it was revoked successfully
metrics.IncrCounter([]string{"nomad", "vault", "distributed_tokens_revoked"}, float32(len(accessors)))
return nil
}
@ -984,10 +1023,13 @@ func (v *vaultClient) RevokeTokens(ctx context.Context, accessors []*structs.Vau
// time.
func (v *vaultClient) storeForRevocation(accessors []*structs.VaultAccessor) {
v.revLock.Lock()
v.statsLock.Lock()
now := time.Now()
for _, a := range accessors {
v.revoking[a] = now.Add(time.Duration(a.CreationTTL) * time.Second)
}
v.stats.TrackedForRevoke = len(v.revoking)
v.statsLock.Unlock()
v.revLock.Unlock()
}
@ -1103,12 +1145,19 @@ func (v *vaultClient) revokeDaemon() {
continue
}
// Track that tokens were revoked successfully
metrics.IncrCounter([]string{"nomad", "vault", "distributed_tokens_revoked"}, float32(len(revoking)))
// Can delete from the tracked list now that we have purged
v.revLock.Lock()
v.statsLock.Lock()
for _, va := range revoking {
delete(v.revoking, va)
}
v.stats.TrackedForRevoke = len(v.revoking)
v.statsLock.Unlock()
v.revLock.Unlock()
}
}
}
@ -1137,3 +1186,30 @@ func (v *vaultClient) setLimit(l rate.Limit) {
defer v.l.Unlock()
v.limiter = rate.NewLimiter(l, int(l))
}
// Stats is used to query the stats of the Vault client.
func (v *vaultClient) Stats() *VaultStats {
// Allocate a new stats struct
stats := new(VaultStats)
v.statsLock.RLock()
defer v.statsLock.RUnlock()
// Copy all the stats
stats.TrackedForRevoke = v.stats.TrackedForRevoke
return stats
}
// EmitStats is used to export metrics about the Vault client while enabled
func (v *vaultClient) EmitStats(period time.Duration, stopCh chan struct{}) {
for {
select {
case <-time.After(period):
stats := v.Stats()
metrics.SetGauge([]string{"nomad", "vault", "distributed_tokens_revoking"}, float32(stats.TrackedForRevoke))
case <-stopCh:
return
}
}
}
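EmitStats follows the same period/stopCh shape as the other emitters started in NewServer. A sketch of how a caller drives it, where v is the *vaultClient above (channel name illustrative):

    stopCh := make(chan struct{})
    // Publish the revocation-tracking gauge once per second until stopCh closes.
    go v.EmitStats(time.Second, stopCh)
    // ... later, on shutdown:
    close(stopCh)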


@ -993,6 +993,10 @@ func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) {
if len(client.revoking) != 2 {
t.Fatalf("didn't add to revoke loop")
}
if client.Stats().TrackedForRevoke != 2 {
t.Fatalf("didn't add to revoke loop")
}
}
func TestVaultClient_RevokeTokens_Root(t *testing.T) {


@ -2,6 +2,7 @@ package nomad
import (
"context"
"time"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
@ -134,7 +135,9 @@ func (v *TestVaultClient) RevokeTokens(ctx context.Context, accessors []*structs
return nil
}
func (v *TestVaultClient) Stop() {}
func (v *TestVaultClient) SetActive(enabled bool) {}
func (v *TestVaultClient) SetConfig(config *config.VaultConfig) error { return nil }
func (v *TestVaultClient) Running() bool { return true }
func (v *TestVaultClient) Stop() {}
func (v *TestVaultClient) SetActive(enabled bool) {}
func (v *TestVaultClient) SetConfig(config *config.VaultConfig) error { return nil }
func (v *TestVaultClient) Running() bool { return true }
func (v *TestVaultClient) Stats() *VaultStats { return new(VaultStats) }
func (v *TestVaultClient) EmitStats(period time.Duration, stopCh chan struct{}) {}


@ -68,7 +68,7 @@ Rich Feature Set includes:
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosynchracies of codecs e.g.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:
@ -181,7 +181,7 @@ package codec
// - Decoding using a chan is good, but incurs concurrency costs.
// This is because there's no fast way to use a channel without it
// having to switch goroutines constantly.
// Callback pattern is still the best. Maybe cnsider supporting something like:
// Callback pattern is still the best. Maybe consider supporting something like:
// type X struct {
// Name string
// Ys []Y


@ -68,7 +68,7 @@ Rich Feature Set includes:
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosynchracies of codecs e.g.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:


@ -348,6 +348,13 @@ func (d *bincDecDriver) readNextBd() {
d.bdRead = true
}
func (d *bincDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *bincDecDriver) ContainerType() (vt valueType) {
if d.vd == bincVdSpecial && d.vs == bincSpNil {
return valueTypeNil
@ -632,12 +639,12 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
if d.br {
bs2 = d.r.readx(slen)
} else if len(bs) == 0 {
bs2 = decByteSlice(d.r, slen, d.b[:])
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:])
} else {
bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
}
} else {
bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
}
if withString {
s = string(bs2)
@ -689,7 +696,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
// since using symbols, do not store any part of
// the parameter bs in the map, as it might be a shared buffer.
// bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, nil)
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil)
if withString {
s = string(bs2)
}
@ -705,7 +712,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
}
func (d *bincDecDriver) DecodeString() (s string) {
// DecodeBytes does not accomodate symbols, whose impl stores string version in map.
// DecodeBytes does not accommodate symbols, whose impl stores string version in map.
// Use decStringAndBytes directly.
// return string(d.DecodeBytes(d.b[:], true, true))
_, s = d.decStringAndBytes(d.b[:], true, true)
@ -740,7 +747,7 @@ func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {


@ -188,6 +188,13 @@ func (d *cborDecDriver) readNextBd() {
d.bdRead = true
}
func (d *cborDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *cborDecDriver) ContainerType() (vt valueType) {
if d.bd == cborBdNil {
return valueTypeNil
@ -414,7 +421,7 @@ func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
func (d *cborDecDriver) DecodeString() (s string) {


@ -91,10 +91,12 @@ type decDriver interface {
uncacheRead()
}
type decNoSeparator struct{}
type decNoSeparator struct {
}
func (_ decNoSeparator) ReadEnd() {}
func (_ decNoSeparator) uncacheRead() {}
func (_ decNoSeparator) ReadEnd() {}
// func (_ decNoSeparator) uncacheRead() {}
type DecodeOptions struct {
// MapType specifies type to use during schema-less decoding of a map in the stream.
@ -105,10 +107,10 @@ type DecodeOptions struct {
// If nil, we use []interface{}
SliceType reflect.Type
// MaxInitLen defines the initial length that we "make" a collection (slice, chan or map) with.
// MaxInitLen defines the maximum initial length that we "make" a collection (string, slice, map, chan).
// If 0 or negative, we default to a sensible value based on the size of an element in the collection.
//
// For example, when decoding, a stream may say that it has MAX_UINT elements.
// For example, when decoding, a stream may say that it has 2^64 elements.
// We should not automatically provision a slice of that length, to prevent Out-Of-Memory crash.
// Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
MaxInitLen int
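A sketch of capping initial allocations via MaxInitLen on a handle (limit and payload illustrative):

    var jh codec.JsonHandle
    // Even if a stream claims a huge element count, provision at most this many
    // up front; decoding then grows the collection as real data arrives.
    jh.MaxInitLen = 1024
    var out []byte
    data := []byte(`"aGVsbG8="`) // JSON string, base64 for "hello"
    _ = codec.NewDecoderBytes(data, &jh).Decode(&out)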
@ -161,6 +163,15 @@ type DecodeOptions struct {
// Note: Handles will be smart when using the intern functionality.
// So everything will not be interned.
InternString bool
// PreferArrayOverSlice controls whether to decode to an array or a slice.
//
// This only impacts decoding into a nil interface{}.
// Consequently, it has no effect on codecgen.
//
// *Note*: This only applies if using go1.5 and above,
// as it requires reflect.ArrayOf support which was absent before go1.5.
PreferArrayOverSlice bool
}
// ------------------------------------
@ -433,6 +444,10 @@ func (f *decFnInfo) rawExt(rv reflect.Value) {
f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil)
}
func (f *decFnInfo) raw(rv reflect.Value) {
rv.SetBytes(f.d.raw())
}
func (f *decFnInfo) ext(rv reflect.Value) {
f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn)
}
@ -605,8 +620,11 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
n.ss = append(n.ss, nil)
var v2 interface{} = &n.ss[l]
d.decode(v2)
rvn = reflect.ValueOf(v2).Elem()
n.ss = n.ss[:l]
rvn = reflect.ValueOf(v2).Elem()
if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice {
rvn = reflectArrayOf(rvn)
}
} else {
rvn = reflect.New(d.h.SliceType).Elem()
d.decodeValue(rvn, nil)
@ -1169,7 +1187,7 @@ type decRtidFn struct {
// primitives are being decoded.
//
// maps and arrays are not handled by this mechanism.
// However, RawExt is, and we accomodate for extensions that decode
// However, RawExt is, and we accommodate for extensions that decode
// RawExt from DecodeNaked, but need to decode the value subsequently.
// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
//
@ -1507,6 +1525,8 @@ func (d *Decoder) decode(iv interface{}) {
*v = 0
case *[]uint8:
*v = nil
case *Raw:
*v = nil
case reflect.Value:
if v.Kind() != reflect.Ptr || v.IsNil() {
d.errNotValidPtrValue(v)
@ -1576,6 +1596,9 @@ func (d *Decoder) decode(iv interface{}) {
case *[]uint8:
*v = d.d.DecodeBytes(*v, false, false)
case *Raw:
*v = d.raw()
case *interface{}:
d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil)
@ -1697,6 +1720,8 @@ func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool
fn.f = (*decFnInfo).selferUnmarshal
} else if rtid == rawExtTypId {
fn.f = (*decFnInfo).rawExt
} else if rtid == rawTypId {
fn.f = (*decFnInfo).raw
} else if d.d.IsBuiltinType(rtid) {
fn.f = (*decFnInfo).builtin
} else if xfFn := d.h.getExt(rtid); xfFn != nil {
@ -1873,6 +1898,15 @@ func (d *Decoder) nextValueBytes() []byte {
return d.r.stopTrack()
}
func (d *Decoder) raw() []byte {
// ensure that this is not a view into the bytes
// i.e. make new copy always.
bs := d.nextValueBytes()
bs2 := make([]byte, len(bs))
copy(bs2, bs)
return bs2
}
// --------------------------------------------------
// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
@ -1927,18 +1961,31 @@ func (x decSliceHelper) ElemContainerState(index int) {
}
}
func decByteSlice(r decReader, clen int, bs []byte) (bsOut []byte) {
func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) {
if clen == 0 {
return zeroByteSlice
}
if len(bs) == clen {
bsOut = bs
r.readb(bsOut)
} else if cap(bs) >= clen {
bsOut = bs[:clen]
r.readb(bsOut)
} else {
bsOut = make([]byte, clen)
// bsOut = make([]byte, clen)
len2, _ := decInferLen(clen, maxInitLen, 1)
bsOut = make([]byte, len2)
r.readb(bsOut)
for len2 < clen {
len3, _ := decInferLen(clen-len2, maxInitLen, 1)
// fmt.Printf(">>>>> TESTING: in loop: clen: %v, maxInitLen: %v, len2: %v, len3: %v\n", clen, maxInitLen, len2, len3)
bs3 := bsOut
bsOut = make([]byte, len2+len3)
copy(bsOut, bs3)
r.readb(bsOut[len2:])
len2 += len3
}
}
r.readb(bsOut)
return
}

vendor/github.com/ugorji/go/codec/decode_go.go (generated, vendored, new file)

@ -0,0 +1,16 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5
package codec
import "reflect"
const reflectArrayOfSupported = true
func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem()
reflect.Copy(rvn2, rvn)
return
}
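reflect.ArrayOf (go1.5 and later) is what this helper builds on; a standalone sketch of the slice-to-array conversion it performs:

    rv := reflect.ValueOf([]interface{}{1, "two", 3.0})
    // Allocate a [3]interface{} and copy the slice's elements into it.
    arr := reflect.New(reflect.ArrayOf(rv.Len(), rv.Type().Elem())).Elem()
    reflect.Copy(arr, rv)
    fmt.Println(arr.Interface()) // [1 two 3]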

vendor/github.com/ugorji/go/codec/decode_go14.go (generated, vendored, new file)

@ -0,0 +1,14 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5
package codec
import "reflect"
const reflectArrayOfSupported = false
func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
panic("reflect.ArrayOf unsupported")
}


@ -119,6 +119,19 @@ type EncodeOptions struct {
// This is opt-in, as there may be a performance hit to checking circular references.
CheckCircularRef bool
// RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
// when checking if a value is empty.
//
// Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
RecursiveEmptyCheck bool
// Raw controls whether we encode Raw values.
// This is a "dangerous" option and must be explicitly set.
// If set, we blindly encode Raw values as-is, without checking
// if they are a correct representation of a value in that format.
// If unset, we error out.
Raw bool
// AsSymbols defines what should be encoded as symbols.
//
// Encoding as symbols can reduce the encoded size significantly.
@ -222,45 +235,57 @@ type bytesEncWriter struct {
}
func (z *bytesEncWriter) writeb(s []byte) {
if len(s) > 0 {
c := z.grow(len(s))
copy(z.b[c:], s)
if len(s) == 0 {
return
}
oc, a := z.growNoAlloc(len(s))
if a {
z.growAlloc(len(s), oc)
}
copy(z.b[oc:], s)
}
func (z *bytesEncWriter) writestr(s string) {
if len(s) > 0 {
c := z.grow(len(s))
copy(z.b[c:], s)
if len(s) == 0 {
return
}
oc, a := z.growNoAlloc(len(s))
if a {
z.growAlloc(len(s), oc)
}
copy(z.b[oc:], s)
}
func (z *bytesEncWriter) writen1(b1 byte) {
c := z.grow(1)
z.b[c] = b1
oc, a := z.growNoAlloc(1)
if a {
z.growAlloc(1, oc)
}
z.b[oc] = b1
}
func (z *bytesEncWriter) writen2(b1 byte, b2 byte) {
c := z.grow(2)
z.b[c] = b1
z.b[c+1] = b2
oc, a := z.growNoAlloc(2)
if a {
z.growAlloc(2, oc)
}
z.b[oc+1] = b2
z.b[oc] = b1
}
func (z *bytesEncWriter) atEndOfEncode() {
*(z.out) = z.b[:z.c]
}
func (z *bytesEncWriter) grow(n int) (oldcursor int) {
// have a growNoAlloc(n int), which can be inlined.
// if allocation is needed, then call growAlloc(n int)
func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) {
oldcursor = z.c
z.c = oldcursor + n
z.c = z.c + n
if z.c > len(z.b) {
if z.c > cap(z.b) {
// appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls.
// bytes.Buffer model (2*cap + n): much better
// bs := make([]byte, 2*cap(z.b)+n)
bs := make([]byte, growCap(cap(z.b), 1, n))
copy(bs, z.b[:oldcursor])
z.b = bs
allocNeeded = true
} else {
z.b = z.b[:cap(z.b)]
}
@ -268,6 +293,15 @@ func (z *bytesEncWriter) grow(n int) (oldcursor int) {
return
}
func (z *bytesEncWriter) growAlloc(n int, oldcursor int) {
// appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls.
// bytes.Buffer model (2*cap + n): much better
// bs := make([]byte, 2*cap(z.b)+n)
bs := make([]byte, growCap(cap(z.b), 1, n))
copy(bs, z.b[:oldcursor])
z.b = bs
}
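The grow split is an inlining optimization, per the comment above: the hot path contains no make call, so the compiler can inline it at every write site, while the rare reallocation lives in a separate function. The pattern in miniature (type and names illustrative):

    type buf struct {
        b []byte
        c int
    }

    // Hot path: allocation-free, small enough to inline.
    func (z *buf) growNoAlloc(n int) (oc int, allocNeeded bool) {
        oc = z.c
        z.c += n
        if z.c > len(z.b) {
            if z.c > cap(z.b) {
                return oc, true // caller must take the slow path
            }
            z.b = z.b[:cap(z.b)]
        }
        return oc, false
    }

    // Cold path: the single allocation site, kept out of the inlined function.
    func (z *buf) growAlloc(n, oc int) {
        bs := make([]byte, 2*cap(z.b)+n)
        copy(bs, z.b[:oc])
        z.b = bs
    }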
// ---------------------------------------------
type encFnInfo struct {
@ -282,6 +316,10 @@ func (f *encFnInfo) builtin(rv reflect.Value) {
f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface())
}
func (f *encFnInfo) raw(rv reflect.Value) {
f.e.raw(rv.Interface().(Raw))
}
func (f *encFnInfo) rawExt(rv reflect.Value) {
// rev := rv.Interface().(RawExt)
// f.e.e.EncodeRawExt(&rev, f.e)
@ -308,7 +346,7 @@ func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v
v = rv.Interface()
} else if indir == -1 {
// If a non-pointer was passed to Encode(), then that value is not addressable.
// Take addr if addresable, else copy value to an addressable value.
// Take addr if addressable, else copy value to an addressable value.
if rv.CanAddr() {
v = rv.Addr().Interface()
} else {
@ -524,20 +562,20 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {
}
newlen = 0
var kv stringRv
recur := e.h.RecursiveEmptyCheck
for _, si := range tisfi {
kv.r = si.field(rv, false)
if toMap {
if si.omitEmpty && isEmptyValue(kv.r) {
if si.omitEmpty && isEmptyValue(kv.r, recur, recur) {
continue
}
kv.v = si.encName
} else {
// use the zero value.
// if a reference or struct, set to nil (so you do not output too much)
if si.omitEmpty && isEmptyValue(kv.r) {
if si.omitEmpty && isEmptyValue(kv.r, recur, recur) {
switch kv.r.Kind() {
case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array,
reflect.Map, reflect.Slice:
case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice:
kv.r = reflect.Value{} //encode as nil
}
}
@ -548,7 +586,7 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {
// debugf(">>>> kStruct: newlen: %v", newlen)
// sep := !e.be
ee := e.e //don't dereference everytime
ee := e.e //don't dereference every time
if toMap {
ee.EncodeMapStart(newlen)
@ -935,7 +973,7 @@ func newEncoder(h Handle) *Encoder {
// Reset the Encoder with a new output stream.
//
// This accomodates using the state of the Encoder,
// This accommodates using the state of the Encoder,
// where it has "cached" information about sub-engines.
func (e *Encoder) Reset(w io.Writer) {
ww, ok := w.(ioEncWriterWriter)
@ -1042,20 +1080,6 @@ func (e *Encoder) MustEncode(v interface{}) {
e.w.atEndOfEncode()
}
// comment out these (Must)Write methods. They were only put there to support cbor.
// However, users already have access to the streams, and can write directly.
//
// // Write allows users write to the Encoder stream directly.
// func (e *Encoder) Write(bs []byte) (err error) {
// defer panicToErr(&err)
// e.w.writeb(bs)
// return
// }
// // MustWrite is like write, but panics if unable to Write.
// func (e *Encoder) MustWrite(bs []byte) {
// e.w.writeb(bs)
// }
func (e *Encoder) encode(iv interface{}) {
// if ics, ok := iv.(Selfer); ok {
// ics.CodecEncodeSelf(e)
@ -1067,7 +1091,8 @@ func (e *Encoder) encode(iv interface{}) {
e.e.EncodeNil()
case Selfer:
v.CodecEncodeSelf(e)
case Raw:
e.raw(v)
case reflect.Value:
e.encodeValue(v, nil)
@ -1252,6 +1277,8 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo
if checkCodecSelfer && ti.cs {
fn.f = (*encFnInfo).selferMarshal
} else if rtid == rawTypId {
fn.f = (*encFnInfo).raw
} else if rtid == rawExtTypId {
fn.f = (*encFnInfo).rawExt
} else if e.e.IsBuiltinType(rtid) {
@ -1356,6 +1383,18 @@ func (e *Encoder) asis(v []byte) {
}
}
func (e *Encoder) raw(vv Raw) {
v := []byte(vv)
if !e.h.Raw {
e.errorf("Raw values cannot be encoded: %v", v)
}
if e.as == nil {
e.w.writeb(v)
} else {
e.as.EncodeAsis(v)
}
}
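Raw is the new pass-through type: a []byte holding an already-encoded value that the encoder writes verbatim, gated behind the explicit EncodeOptions.Raw flag checked above. A sketch (struct and payload illustrative):

    type envelope struct {
        Kind string
        Body codec.Raw // pre-encoded bytes, written as-is
    }

    func encodeEnvelope() ([]byte, error) {
        var jh codec.JsonHandle
        jh.Raw = true // opt in; otherwise encoding a Raw value errors out
        var buf bytes.Buffer
        err := codec.NewEncoder(&buf, &jh).Encode(envelope{
            Kind: "event",
            Body: codec.Raw(`{"id":1}`),
        })
        return buf.Bytes(), err // {"Kind":"event","Body":{"id":1}}
    }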
func (e *Encoder) errorf(format string, params ...interface{}) {
err := fmt.Errorf(format, params...)
panic(err)


@ -23,7 +23,7 @@ package codec
// Currently support
// - slice of all builtin types,
// - map of all builtin types to string or interface value
// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.
@ -38,6 +38,8 @@ import (
"sort"
)
const fastpathEnabled = true
const fastpathCheckNilFalse = false // for reflect
const fastpathCheckNilTrue = true // for type switch
@ -81,9 +83,6 @@ var fastpathAV fastpathA
// due to possible initialization loop error, make fastpath in an init()
func init() {
if !fastpathEnabled {
return
}
i := 0
fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
xrt := reflect.TypeOf(v)
@ -373,9 +372,6 @@ func init() {
// -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
case []interface{}:
@ -1741,9 +1737,6 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
}
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
case []interface{}:
@ -1829,9 +1822,6 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
}
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
case map[interface{}]interface{}:
@ -15954,9 +15944,6 @@ func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) {
// -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
case []interface{}:


@ -23,7 +23,7 @@ package codec
// Currently support
// - slice of all builtin types,
// - map of all builtin types to string or interface value
// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.
@ -38,6 +38,8 @@ import (
"sort"
)
const fastpathEnabled = true
const fastpathCheckNilFalse = false // for reflect
const fastpathCheckNilTrue = true // for type switch
@ -81,9 +83,6 @@ var fastpathAV fastpathA
// due to possible initialization loop error, make fastpath in an init()
func init() {
if !fastpathEnabled {
return
}
i := 0
fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
xrt := reflect.TypeOf(v)
@ -106,9 +105,6 @@ func init() {
// -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:{{else}}
@ -126,9 +122,6 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
}
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:
@ -144,9 +137,6 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
}
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
case map[{{ .MapKey }}]{{ .Elem }}:
@ -286,9 +276,6 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele
// -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:{{else}}


@ -4,6 +4,8 @@ package codec
import "reflect"
const fastpathEnabled = false
// The generated fast-path code is very large, and adds a few seconds to the build time.
// This causes test execution, execution of small tools which use codec, etc
// to take a long time.


@ -1,4 +1,4 @@
// //+build ignore
/* // +build ignore */
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
@ -17,7 +17,7 @@ import (
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continously and without notice.
// library users. They WILL change continuously and without notice.
//
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).
@ -83,6 +83,11 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
f.e.marshal(bs, fnerr, false, c_RAW)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) {
f.e.raw(iv)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
if _, ok := f.e.hh.(*BincHandle); ok {
@ -191,6 +196,11 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte {
return f.d.raw()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
if _, ok := f.d.hh.(*BincHandle); ok {


@ -1,4 +1,4 @@
// //+build ignore
/* // +build ignore */
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
@ -17,7 +17,7 @@ import (
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continously and without notice.
// library users. They WILL change continuously and without notice.
//
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).
@ -79,6 +79,10 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
f.e.marshal(bs, fnerr, false, c_RAW)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) {
f.e.raw(iv)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
if _, ok := f.e.hh.(*BincHandle); ok {
return timeTypId
@ -172,6 +176,10 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte {
return f.d.raw()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
if _, ok := f.d.hh.(*BincHandle); ok {
return timeTypId


@ -27,6 +27,7 @@ import (
// ---------------------------------------------------
// codecgen supports the full cycle of reflection-based codec:
// - RawExt
// - Raw
// - Builtins
// - Extensions
// - (Binary|Text|JSON)(Unm|M)arshal
@ -77,7 +78,7 @@ import (
// codecgen will panic if the file was generated with an old version of the library in use.
//
// Note:
// It was a concious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
// This way, there isn't a function call overhead just to see that we should not enter a block of code.
// GenVersion is the current version of codecgen.
@ -164,15 +165,9 @@ type genRunner struct {
//
// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.*
func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) {
// trim out all types which already implement Selfer
typ2 := make([]reflect.Type, 0, len(typ))
for _, t := range typ {
if reflect.PtrTo(t).Implements(selferTyp) || t.Implements(selferTyp) {
continue
}
typ2 = append(typ2, t)
}
typ = typ2
// All types passed to this method do not have a codec.Selfer method implemented directly.
// codecgen already checks the AST and skips any types that define the codec.Selfer methods.
// Consequently, there's no need to check and trim them if they implement codec.Selfer
if len(typ) == 0 {
return
@ -211,7 +206,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
x.genRefPkgs(t)
}
if buildTags != "" {
x.line("//+build " + buildTags)
x.line("// +build " + buildTags)
x.line("")
}
x.line(`
@ -701,13 +696,17 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
}
// check if
// - type is RawExt
// - type is RawExt, Raw
// - the type implements (Text|JSON|Binary)(Unm|M)arshal
x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi)
x.linef("_ = %sm%s", genTempVarPfx, mi)
x.line("if false {") //start if block
defer func() { x.line("}") }() //end if block
if t == rawTyp {
x.linef("} else { z.EncRaw(%v)", varname)
return
}
if t == rawExtTyp {
x.linef("} else { r.EncodeRawExt(%v, e)", varname)
return
@ -987,6 +986,14 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
}
func (x *genRunner) encListFallback(varname string, t reflect.Type) {
if t.AssignableTo(uint8SliceTyp) {
x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname)
return
}
if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname)
return
}
i := x.varsfx()
g := genTempVarPfx
x.line("r.EncodeArrayStart(len(" + varname + "))")
@ -1123,7 +1130,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
}
// check if
// - type is RawExt
// - type is Raw, RawExt
// - the type implements (Text|JSON|Binary)(Unm|M)arshal
mi := x.varsfx()
x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi)
@ -1131,6 +1138,10 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
x.line("if false {") //start if block
defer func() { x.line("}") }() //end if block
if t == rawTyp {
x.linef("} else { *%v = z.DecRaw()", varname)
return
}
if t == rawExtTyp {
x.linef("} else { r.DecodeExt(%v, 0, nil)", varname)
return
@ -1306,6 +1317,14 @@ func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAs
}
func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
if t.AssignableTo(uint8SliceTyp) {
x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)")
return
}
if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname)
return
}
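// A hedged illustration (type names invented here): with the two special
// cases above, values such as
//
//	type Blob []byte  // assignable to []uint8
//	type MAC  [6]byte // fixed-size byte array
//
// are decoded as a single byte string instead of element by element,
// mirroring the matching fast path added to encListFallback above.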
type tstruc struct {
TempVar string
Rand string
@ -1421,7 +1440,7 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
//we must accomodate anonymous fields, where the embedded field is a nil pointer in the value.
//we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname
@ -1509,7 +1528,7 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
//we must accomodate anonymous fields, where the embedded field is a nil pointer in the value.
//we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname
@ -1931,7 +1950,7 @@ func genInternalInit() {
}
var gt genInternal
// For each slice or map type, there must be a (symetrical) Encode and Decode fast-path function
// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
for _, s := range types {
gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.

10
vendor/github.com/ugorji/go/codec/gen_17.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.7
package codec
func init() {
genCheckVendor = true
}

View File

@ -38,10 +38,6 @@ package codec
// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
// it has to be binary, and we do not even try to read separators.
//
// The only codec that may suffer (slightly) is cbor, and only when decoding indefinite-length.
// It may suffer because we treat it like a text-based codec, and read separators.
// However, this read is a no-op and the cost is insignificant.
//
// Philosophy
// ------------
// On decode, this codec will update containers appropriately:
@ -137,17 +133,6 @@ const (
// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
recoverPanicToErr = true
// Fast path functions try to create a fast path encode or decode implementation
// for common maps and slices, by by-passing reflection altogether.
fastpathEnabled = true
// if checkStructForEmptyValue, check structs fields to see if an empty value.
// This could be an expensive call, so possibly disable it.
checkStructForEmptyValue = false
// if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue
derefForIsEmptyValue = false
// if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
// Only concern is that, if the slice already contained some garbage, we will decode into that garbage.
// The chances of this are slim, so leave this "optimization".
@ -217,16 +202,21 @@ const (
containerArrayEnd
)
type rgetPoolT struct {
encNames [8]string
fNames [8]string
etypes [8]uintptr
sfis [8]*structFieldInfo
// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
type sfiIdx struct {
name string
index int
}
var rgetPool = sync.Pool{
New: func() interface{} { return new(rgetPoolT) },
}
// do not recurse if a containing type refers to an embedded type
// which refers back to its containing type (via a pointer).
// The second time this back-reference happens, break out,
// so as not to cause an infinite loop.
const rgetMaxRecursion = 2
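// A hedged sketch (types invented here) of the back-reference this cap
// guards against:
//
//	type A struct{ *B }
//	type B struct{ *A }
//
// Walking A descends into the embedded B, which embeds A again; once a
// type has been seen rgetMaxRecursion times, rget stops descending
// rather than looping forever.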
// Anecdotally, we believe most types have <= 12 fields.
// Java's PMD rules set TooManyFields threshold to 15.
const rgetPoolTArrayLen = 12
type rgetT struct {
fNames []string
@ -235,6 +225,18 @@ type rgetT struct {
sfis []*structFieldInfo
}
type rgetPoolT struct {
fNames [rgetPoolTArrayLen]string
encNames [rgetPoolTArrayLen]string
etypes [rgetPoolTArrayLen]uintptr
sfis [rgetPoolTArrayLen]*structFieldInfo
sfiidx [rgetPoolTArrayLen]sfiIdx
}
var rgetPool = sync.Pool{
New: func() interface{} { return new(rgetPoolT) },
}
type containerStateRecv interface {
sendContainerState(containerState)
}
@ -260,6 +262,7 @@ var (
stringTyp = reflect.TypeOf("")
timeTyp = reflect.TypeOf(time.Time{})
rawExtTyp = reflect.TypeOf(RawExt{})
rawTyp = reflect.TypeOf(Raw{})
uint8SliceTyp = reflect.TypeOf([]uint8(nil))
mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
@ -277,6 +280,7 @@ var (
uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
rawTypId = reflect.ValueOf(rawTyp).Pointer()
intfTypId = reflect.ValueOf(intfTyp).Pointer()
timeTypId = reflect.ValueOf(timeTyp).Pointer()
stringTypId = reflect.ValueOf(stringTyp).Pointer()
@ -357,6 +361,11 @@ type Handle interface {
isBinary() bool
}
// Raw represents raw formatted bytes.
// We "blindly" store it during encode and store the raw bytes during decode.
// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set.
type Raw []byte
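// A hedged usage sketch from a caller's point of view (type and field
// names assumed, not part of this package):
//
//	type msg struct {
//		Pre Raw // pre-encoded bytes, written to the stream verbatim
//	}
//	var out []byte
//	err := NewEncoderBytes(&out, new(JsonHandle)).Encode(msg{Pre: Raw(`{"a":1}`)})
//	// out should contain {"Pre":{"a":1}}, not a base64-encoded string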
// RawExt represents raw unprocessed extension data.
// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag.
//
@ -367,7 +376,7 @@ type RawExt struct {
// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types
Data []byte
// Value represents the extension, if Data is nil.
// Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
// Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types.
Value interface{}
}
@ -545,7 +554,7 @@ func (o *extHandle) AddExt(
func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
// o is a pointer, because we may need to initialize it
if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T",
err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T",
reflect.Zero(rt).Interface())
return
}
@ -588,7 +597,8 @@ func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn {
}
type structFieldInfo struct {
encName string // encode name
encName string // encode name
fieldName string // field name
// only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.
@ -849,21 +859,18 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
}
if rt.Kind() == reflect.Struct {
var siInfo *structFieldInfo
var omitEmpty bool
if f, ok := rt.FieldByName(structInfoFieldName); ok {
siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
ti.toArray = siInfo.toArray
omitEmpty = siInfo.omitEmpty
}
pi := rgetPool.Get()
pv := pi.(*rgetPoolT)
pv.etypes[0] = ti.baseId
vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
x.rget(rt, rtid, nil, &vv, siInfo)
ti.sfip = make([]*structFieldInfo, len(vv.sfis))
ti.sfi = make([]*structFieldInfo, len(vv.sfis))
copy(ti.sfip, vv.sfis)
sort.Sort(sfiSortedByEncName(vv.sfis))
copy(ti.sfi, vv.sfis)
x.rget(rt, rtid, omitEmpty, nil, &vv)
ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0])
rgetPool.Put(pi)
}
// sfi = sfip
@ -877,26 +884,17 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
return
}
func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr,
indexstack []int, pv *rgetT, siInfo *structFieldInfo,
func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
indexstack []int, pv *rgetT,
) {
// This will read up the fields and store how to access the value.
// It uses the go language's rules for embedding, as below:
// - if a field has been seen while traversing, skip it
// - if an encName has been seen while traversing, skip it
// - if an embedded type has been seen, skip it
// Read up fields and store how to access the value.
//
// Also, per Go's rules, embedded fields must be analyzed AFTER all top-level fields.
// It uses go's rules for message selectors,
// which say that the field with the shallowest depth is selected.
//
// Note: we consciously use slices, not a map, to simulate a set.
// Typically, types have < 16 fields, and iteration using equals is faster than maps there
type anonField struct {
ft reflect.Type
idx int
}
var anonFields []anonField
// Typically, types have < 16 fields,
// and iteration using equals is faster than maps there
LOOP:
for j, jlen := 0, rt.NumField(); j < jlen; j++ {
@ -908,7 +906,8 @@ LOOP:
continue LOOP
}
// if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
// if r1, _ := utf8.DecodeRuneInString(f.Name);
// r1 == utf8.RuneError || !unicode.IsUpper(r1) {
if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded
continue
}
@ -917,7 +916,8 @@ LOOP:
continue
}
var si *structFieldInfo
// if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it.
// if anonymous and no struct tag (or it's blank),
// and a struct (or pointer to struct), inline it.
if f.Anonymous && fkind != reflect.Interface {
doInline := stag == ""
if !doInline {
@ -931,8 +931,31 @@ LOOP:
ft = ft.Elem()
}
if ft.Kind() == reflect.Struct {
// handle anonymous fields after handling all the non-anon fields
anonFields = append(anonFields, anonField{ft, j})
// if etypes contains this, don't call rget again (as fields are already seen here)
ftid := reflect.ValueOf(ft).Pointer()
// We cannot recurse forever, but we need to track other field depths.
// So - we break if we see a type twice (not the first time).
// This should be sufficient to handle an embedded type that refers to its
// owning type, which then refers to its embedded type.
processIt := true
numk := 0
for _, k := range pv.etypes {
if k == ftid {
numk++
if numk == rgetMaxRecursion {
processIt = false
break
}
}
}
if processIt {
pv.etypes = append(pv.etypes, ftid)
indexstack2 := make([]int, len(indexstack)+1)
copy(indexstack2, indexstack)
indexstack2[len(indexstack)] = j
// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
x.rget(ft, ftid, omitEmpty, indexstack2, pv)
}
continue
}
}
@ -947,11 +970,6 @@ LOOP:
panic(noFieldNameToStructFieldInfoErr)
}
for _, k := range pv.fNames {
if k == f.Name {
continue LOOP
}
}
pv.fNames = append(pv.fNames, f.Name)
if si == nil {
@ -959,12 +977,8 @@ LOOP:
} else if si.encName == "" {
si.encName = f.Name
}
si.fieldName = f.Name
for _, k := range pv.encNames {
if k == si.encName {
continue LOOP
}
}
pv.encNames = append(pv.encNames, si.encName)
// si.ikind = int(f.Type.Kind())
@ -978,32 +992,60 @@ LOOP:
// si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
}
if siInfo != nil {
if siInfo.omitEmpty {
si.omitEmpty = true
}
if omitEmpty {
si.omitEmpty = true
}
pv.sfis = append(pv.sfis, si)
}
}
// now handle anonymous fields
LOOP2:
for _, af := range anonFields {
// if etypes contains this, then do not call rget again (as the fields are already seen here)
ftid := reflect.ValueOf(af.ft).Pointer()
for _, k := range pv.etypes {
if k == ftid {
continue LOOP2
// resolves the struct field info obtained from a call to rget.
// Returns a trimmed unsorted []*structFieldInfo and a sorted copy of it.
func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) {
var n int
for i, v := range x {
xn := v.encName //TODO: fieldName or encName? use encName for now.
var found bool
for j, k := range pv {
if k.name == xn {
// one of them must be reset to nil, and the index updated appropriately to the other one
if len(v.is) == len(x[k.index].is) {
} else if len(v.is) < len(x[k.index].is) {
pv[j].index = i
if x[k.index] != nil {
x[k.index] = nil
n++
}
} else {
if x[i] != nil {
x[i] = nil
n++
}
}
found = true
break
}
}
pv.etypes = append(pv.etypes, ftid)
indexstack2 := make([]int, len(indexstack)+1)
copy(indexstack2, indexstack)
indexstack2[len(indexstack)] = af.idx
// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
x.rget(af.ft, ftid, indexstack2, pv, siInfo)
if !found {
pv = append(pv, sfiIdx{xn, i})
}
}
// remove all the nils
y = make([]*structFieldInfo, len(x)-n)
n = 0
for _, v := range x {
if v == nil {
continue
}
y[n] = v
n++
}
z = make([]*structFieldInfo, len(y))
copy(z, y)
sort.Sort(sfiSortedByEncName(z))
return
}
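// A hedged example (types invented here) of the rule implemented above:
// when two fields resolve to the same encode name, the one at the
// shallower depth wins and the deeper one is nilled out.
//
//	type Base struct{ ID string } // depth 2 once embedded
//	type Item struct {
//		Base
//		ID string // depth 1: kept; Base.ID is dropped
//	}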
func panicToErr(err *error) {

View File

@ -70,8 +70,8 @@ func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
return false
}
func isEmptyValue(v reflect.Value) bool {
return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue)
func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
return hIsEmptyValue(v, deref, checkStruct)
}
func pruneSignExt(v []byte, pos bool) (n int) {

View File

@ -1,4 +1,4 @@
//+build !unsafe
// +build !unsafe
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

View File

@ -1,4 +1,4 @@
//+build unsafe
// +build unsafe
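// Hedged note on the one-character fix above: the Go toolchain only
// honors a build constraint written exactly as "// +build tag" (comment
// marker, space, then +build); "//+build tag" is parsed as an ordinary
// comment, so the constraint silently never applied.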
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

View File

@ -197,12 +197,20 @@ func (e *jsonEncDriver) EncodeBool(b bool) {
}
func (e *jsonEncDriver) EncodeFloat32(f float32) {
e.w.writeb(strconv.AppendFloat(e.b[:0], float64(f), 'E', -1, 32))
e.encodeFloat(float64(f), 32)
}
func (e *jsonEncDriver) EncodeFloat64(f float64) {
// e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
e.w.writeb(strconv.AppendFloat(e.b[:0], f, 'E', -1, 64))
e.encodeFloat(f, 64)
}
func (e *jsonEncDriver) encodeFloat(f float64, numbits int) {
x := strconv.AppendFloat(e.b[:0], f, 'G', -1, numbits)
e.w.writeb(x)
if bytes.IndexByte(x, 'E') == -1 && bytes.IndexByte(x, '.') == -1 {
e.w.writen2('.', '0')
}
}
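// Hedged illustration of the branch above: strconv.AppendFloat(nil, 2,
// 'G', -1, 64) yields "2" with no '.' and no exponent, so ".0" is
// appended and float64(2) is emitted as "2.0", keeping it a float on
// round-trip instead of collapsing into a JSON integer.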
func (e *jsonEncDriver) EncodeInt(v int64) {
@ -302,6 +310,8 @@ func (e *jsonEncDriver) quoteStr(s string) {
w.writen1('"')
start := 0
for i := 0; i < len(s); {
// encode all bytes < 0x20 (except \r, \n).
// also encode < > & to prevent security holes when served to some browsers.
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
i++
@ -323,9 +333,14 @@ func (e *jsonEncDriver) quoteStr(s string) {
w.writen2('\\', 'f')
case '\t':
w.writen2('\\', 't')
case '<', '>', '&':
if e.h.HTMLCharsAsIs {
w.writen1(b)
} else {
w.writestr(`\u00`)
w.writen2(hex[b>>4], hex[b&0xF])
}
default:
// encode all bytes < 0x20 (except \r, \n).
// also encode < > & to prevent security holes when served to some browsers.
w.writestr(`\u00`)
w.writen2(hex[b>>4], hex[b&0xF])
}
@ -344,7 +359,7 @@ func (e *jsonEncDriver) quoteStr(s string) {
continue
}
// U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
// Both technically valid JSON, but bomb on JSONP, so fix here.
// Both technically valid JSON, but bomb on JSONP, so fix here unconditionally.
if c == '\u2028' || c == '\u2029' {
if start < i {
w.writestr(s[start:i])
@ -923,6 +938,11 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
if isstring {
return d.bs
}
// if appendStringAsBytes returned a zero-len slice, then treat as nil.
// This should only happen for null, and "".
if len(d.bs) == 0 {
return nil
}
bs0 := d.bs
slen := base64.StdEncoding.DecodedLen(len(bs0))
if slen <= cap(bs) {
@ -960,6 +980,14 @@ func (d *jsonDecDriver) appendStringAsBytes() {
}
d.tok = b
}
// handle null as a string
if d.tok == 'n' {
d.readStrIdx(10, 13) // ull
d.bs = d.bs[:0]
return
}
if d.tok != '"' {
d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
}
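// A hedged usage sketch of the null handling above (caller's view):
//
//	var bs []byte
//	err := NewDecoderBytes([]byte(`null`), new(JsonHandle)).Decode(&bs)
//	// expected: err == nil and bs == nil, not a decode error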
@ -1152,6 +1180,12 @@ type JsonHandle struct {
// containing the exact integer representation as a decimal.
// - else encode all integers as a json number (default)
IntegerAsString uint8
// HTMLCharsAsIs controls how to encode some special characters to html: < > &
//
// By default, we encode them as \uXXX
// to prevent security holes when served from some browsers.
HTMLCharsAsIs bool
}
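// A hedged usage sketch (inputs assumed): by default the angle brackets
// in "<b>" are written as \u escapes (e.g. \u003c); opting out:
//
//	h := new(JsonHandle)
//	h.HTMLCharsAsIs = true
//	var b []byte
//	err := NewEncoderBytes(&b, h).Encode("<b>") // b holds "<b>" verbatim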
func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {

View File

@ -549,7 +549,7 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOu
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
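// Hedged note: MaxInitLen is read off the shared handle options here
// (d.d.h.MaxInitLen), so, assuming it is an exported field on the
// handle as that access suggests, a caller would bound it like:
//
//	h := new(MsgpackHandle)
//	h.MaxInitLen = 1 << 20 // cap allocations driven by untrusted length prefixes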
func (d *msgpackDecDriver) DecodeString() (s string) {
@ -561,6 +561,13 @@ func (d *msgpackDecDriver) readNextBd() {
d.bdRead = true
}
func (d *msgpackDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *msgpackDecDriver) ContainerType() (vt valueType) {
bd := d.bd
if bd == mpNil {

View File

@ -25,7 +25,7 @@ type Rpc interface {
}
// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
// used by the rpc connection. It accomodates use-cases where the connection
// used by the rpc connection. It accommodates use-cases where the connection
// should be used by rpc and non-rpc functions, e.g. streaming a file after
// sending an rpc response.
type RpcCodecBuffered interface {

View File

@ -166,6 +166,13 @@ func (d *simpleDecDriver) readNextBd() {
d.bdRead = true
}
func (d *simpleDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *simpleDecDriver) ContainerType() (vt valueType) {
if d.bd == simpleVdNil {
return valueTypeNil
@ -340,7 +347,7 @@ func (d *simpleDecDriver) decLen() int {
}
return int(ui)
}
d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8)
d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
return -1
}
@ -365,7 +372,7 @@ func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
@ -474,7 +481,7 @@ func (d *simpleDecDriver) DecodeNaked() {
// SimpleHandle is a Handle for a very simple encoding format.
//
// simple is a simplistic codec similar to binc, but not as compact.
// - Encoding of a value is always preceeded by the descriptor byte (bd)
// - Encoding of a value is always preceded by the descriptor byte (bd)
// - True, false, nil are encoded fully in 1 byte (the descriptor)
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.

View File

@ -34,7 +34,7 @@ def get_test_data_list():
True,
u"null",
None,
u"someday",
u"some&day>some<day",
1328176922000002000,
u"",
-2206187877999998000,

View File

@ -17,7 +17,8 @@ _run() {
zargs=""
local OPTIND
OPTIND=1
while getopts "_xurtcinsvgzmefdl" flag
# "_xurtcinsvgzmefdl" === "_cdefgilmnrtsuvxz"
while getopts "_cdefgilmnrtsuvwxz" flag
do
case "x$flag" in
'xr') ;;
@ -29,6 +30,7 @@ _run() {
'xz') zargs="$zargs -tr" ;;
'xm') zargs="$zargs -tm" ;;
'xl') zargs="$zargs -tl" ;;
'xw') zargs="$zargs -tx=10" ;;
*) ;;
esac
done
@ -37,7 +39,7 @@ _run() {
# echo ">>>>>>> TAGS: $ztags"
OPTIND=1
while getopts "_xurtcinsvgzmefdl" flag
while getopts "_cdefgilmnrtsuvwxz" flag
do
case "x$flag" in
'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;;
@ -59,10 +61,11 @@ _run() {
}
# echo ">>>>>>> RUNNING VARIATIONS OF TESTS"
if [[ "x$@" = "x" ]]; then
if [[ "x$@" = "x" || "x$@" = "x-A" ]]; then
# All: r, x, g, gu
_run "-_tcinsed_ml" # regular
_run "-_tcinsed_ml_z" # regular with reset
_run "-w_tcinsed_ml" # regular with max init len
_run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath)
_run "-x_tcinsed_ml" # external
_run "-gx_tcinsed_ml" # codecgen: requires external
@ -75,6 +78,30 @@ elif [[ "x$@" = "x-F" ]]; then
# regular with notfastpath
_run "-_tcinsed_ml_f" # regular
_run "-_tcinsed_ml_zf" # regular with reset
elif [[ "x$@" = "x-C" ]]; then
# codecgen
_run "-gx_tcinsed_ml" # codecgen: requires external
_run "-gxu_tcinsed_ml" # codecgen + unsafe
_run "-gxuw_tcinsed_ml" # codecgen + unsafe + maxinitlen
elif [[ "x$@" = "x-X" ]]; then
# external
_run "-x_tcinsed_ml" # external
elif [[ "x$@" = "x-h" || "x$@" = "x-?" ]]; then
cat <<EOF
Usage: tests.sh [options...]
-A run through all tests (regular, external, codecgen)
-Z regular tests only
-F regular tests only (without fastpath, so they run quickly)
-C codecgen only
-X external only
-h show help (usage)
-? same as -h
(no options)
same as -A
(unrecognized options)
just pass on the options from the command line
EOF
else
# e.g. ./tests.sh "-w_tcinsed_ml"
_run "$@"
fi

10
vendor/vendor.json vendored
View File

@ -1125,16 +1125,16 @@
"revisionTime": "2016-09-30T03:27:40Z"
},
{
"checksumSHA1": "tHm2SMtuRfrwh6NnnymsuoJ6e0Q=",
"checksumSHA1": "CoxdaTYdPZNJXr8mJfLxye428N0=",
"path": "github.com/ugorji/go/codec",
"revision": "a396ed22fc049df733440d90efe17475e3929ccb",
"revisionTime": "2016-03-28T06:07:40Z"
"revision": "c88ee250d0221a57af388746f5cf03768c21d6e2",
"revisionTime": "2017-02-15T20:11:44Z"
},
{
"checksumSHA1": "CjQp8RP9SgJGltsq01UVaU4UYK0=",
"path": "github.com/ugorji/go/codec/codecgen",
"revision": "a396ed22fc049df733440d90efe17475e3929ccb",
"revisionTime": "2016-03-28T06:07:40Z"
"revision": "c88ee250d0221a57af388746f5cf03768c21d6e2",
"revisionTime": "2017-02-15T20:11:44Z"
},
{
"checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=",

View File

@ -239,13 +239,75 @@ page for more details.
<th>Type</th>
</tr>
<tr>
<td>`nomad.client.host.memmory.<HostID>.total`</td>
<td>`nomad.client.allocated.cpu.<HostID>`</td>
<td>Total amount of CPU shares the scheduler has allocated to tasks</td>
<td>MHz</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.unallocated.cpu.<HostID>`</td>
<td>Total amount of CPU shares free for the scheduler to allocate to tasks</td>
<td>MHz</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.allocated.memory.<HostID>`</td>
<td>Total amount of memory the scheduler has allocated to tasks</td>
<td>Megabytes</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.unallocated.memory.<HostID>`</td>
<td>Total amount of memory free for the scheduler to allocate to tasks</td>
<td>Megabytes</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.allocated.disk.<HostID>`</td>
<td>Total amount of disk space the scheduler has allocated to tasks</td>
<td>Megabytes</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.unallocated.disk.<HostID>`</td>
<td>Total amount of disk space free for the scheduler to allocate to tasks</td>
<td>Megabytes</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.allocated.iops.<HostID>`</td>
<td>Total amount of IOPS the scheduler has allocated to tasks</td>
<td>IOPS</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.unallocated.iops.<HostID>`</td>
<td>Total amount of IOPS free for the scheduler to allocate to tasks</td>
<td>IOPS</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.allocated.network.<Device-Name>.<HostID>`</td>
<td>Total amount of bandwidth the scheduler has allocated to tasks on the
given device</td>
<td>Megabits</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.unallocated.network.<Device-Name>.<HostID>`</td>
<td>Total amount of bandwidth free for the scheduler to allocate to tasks on
the given device</td>
<td>Megabits</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.host.memory.<HostID>.total`</td>
<td>Total amount of physical memory on the node</td>
<td>Bytes</td>
<td>Gauge</td>
</tr>
<tr>
<td>`nomad.client.host.memmory.<HostID>.available`</td>
<td>`nomad.client.host.memory.<HostID>.available`</td>
<td>Total amount of memory available to processes which includes free and
cached memory</td>
<td>Bytes</td>

View File

@ -370,8 +370,6 @@ region is used; another region can be specified using the `?region=` query param
```javascript
{
"KnownLeader": false,
"LastContact": 0,
"Index": 13,
"JobCreateIndex": 12,
"EvalCreateIndex": 13,

View File

@ -265,6 +265,12 @@ The `Job` object supports the following keys:
* `Enabled` - `Enabled` determines whether the periodic job will spawn child
jobs.
* `time_zone` - Specifies the time zone to evaluate the next launch interval
against. This is useful for accounting for daylight saving time in various
time zones. The time zone must be parsable by Golang's
[LoadLocation](https://golang.org/pkg/time/#LoadLocation). The default is
UTC. See the example after this list.
* `SpecType` - `SpecType` determines how Nomad is going to interpret the
periodic expression. `cron` is the only supported `SpecType` currently.
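A hedged illustration (zone names are examples only) of what "parsable by
Golang's LoadLocation" means; an unparsable value is presumably rejected at
job submission:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	if loc, err := time.LoadLocation("America/New_York"); err == nil {
		fmt.Println("accepted:", loc) // IANA zone names load cleanly
	}
	if _, err := time.LoadLocation("Not/A_Zone"); err != nil {
		fmt.Println("rejected:", err) // this value would fail to parse
	}
}
```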

View File

@ -49,6 +49,11 @@ consistent evaluation when Nomad spans multiple time zones.
previous instances of this job have completed. This only applies to this job;
it does not prevent other periodic jobs from running at the same time.
- `time_zone` `(string: "UTC")` - Specifies the time zone to evaluate the next
launch interval against. This is useful for accounting for daylight saving
time in various time zones. The time zone must be parsable by Golang's
[LoadLocation](https://golang.org/pkg/time/#LoadLocation). See the example
below.
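A minimal sketch, using only Go's standard library, of why the time zone
matters around a daylight saving transition (dates and zone chosen purely
for illustration):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	loc, _ := time.LoadLocation("America/New_York")
	before := time.Date(2017, 3, 11, 12, 0, 0, 0, loc) // noon, the day before spring-forward
	after := before.Add(24 * time.Hour)                // same absolute duration later
	fmt.Println(before.Format(time.RFC3339))           // 2017-03-11T12:00:00-05:00
	fmt.Println(after.Format(time.RFC3339))            // 2017-03-12T13:00:00-04:00
}
```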
## `periodic` Examples
The following examples only show the `periodic` stanzas. Remember that the