Merge pull request #3401 from hashicorp/f-nomad
This commit is contained in:
commit
ff23426e98
|
@ -0,0 +1,68 @@
|
|||
package nomad
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/nomad/api"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/hashicorp/vault/logical/framework"
|
||||
)
|
||||
|
||||
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
|
||||
b := Backend()
|
||||
if err := b.Setup(conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Backend constructs the Nomad secret backend, wiring up its config,
// role, and credential paths plus the "token" secret type.
func Backend() *backend {
	var b backend
	b.Backend = &framework.Backend{
		PathsSpecial: &logical.Paths{
			// config/access holds the Nomad management token, so it is
			// stored seal-wrapped.
			SealWrapStorage: []string{
				"config/access",
			},
		},

		Paths: []*framework.Path{
			pathConfigAccess(&b),
			pathConfigLease(&b),
			pathListRoles(&b),
			pathRoles(&b),
			pathCredsCreate(&b),
		},

		Secrets: []*framework.Secret{
			secretToken(&b),
		},
		BackendType: logical.TypeLogical,
	}

	return &b
}
|
||||
|
||||
// backend embeds framework.Backend and adds Nomad-specific helpers
// (see client and the path handlers).
type backend struct {
	*framework.Backend
}
|
||||
|
||||
func (b *backend) client(s logical.Storage) (*api.Client, error) {
|
||||
conf, err := b.readConfigAccess(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nomadConf := api.DefaultConfig()
|
||||
if conf != nil {
|
||||
if conf.Address != "" {
|
||||
nomadConf.Address = conf.Address
|
||||
}
|
||||
if conf.Token != "" {
|
||||
nomadConf.SecretID = conf.Token
|
||||
}
|
||||
}
|
||||
|
||||
client, err := api.NewClient(nomadConf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
|
@ -0,0 +1,302 @@
|
|||
package nomad
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
nomadapi "github.com/hashicorp/nomad/api"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
dockertest "gopkg.in/ory-am/dockertest.v3"
|
||||
)
|
||||
|
||||
func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, nomadToken string) {
|
||||
nomadToken = os.Getenv("NOMAD_TOKEN")
|
||||
|
||||
retAddress = os.Getenv("NOMAD_ADDR")
|
||||
|
||||
if retAddress != "" {
|
||||
return func() {}, retAddress, nomadToken
|
||||
}
|
||||
|
||||
pool, err := dockertest.NewPool("")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to connect to docker: %s", err)
|
||||
}
|
||||
|
||||
dockerOptions := &dockertest.RunOptions{
|
||||
Repository: "djenriquez/nomad",
|
||||
Tag: "latest",
|
||||
Cmd: []string{"agent", "-dev"},
|
||||
Env: []string{`NOMAD_LOCAL_CONFIG=bind_addr = "0.0.0.0" acl { enabled = true }`},
|
||||
}
|
||||
resource, err := pool.RunWithOptions(dockerOptions)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not start local Nomad docker container: %s", err)
|
||||
}
|
||||
|
||||
cleanup = func() {
|
||||
err := pool.Purge(resource)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to cleanup local container: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
retAddress = fmt.Sprintf("http://localhost:%s/", resource.GetPort("4646/tcp"))
|
||||
// Give Nomad time to initialize
|
||||
|
||||
time.Sleep(5000 * time.Millisecond)
|
||||
// exponential backoff-retry
|
||||
if err = pool.Retry(func() error {
|
||||
var err error
|
||||
nomadapiConfig := nomadapi.DefaultConfig()
|
||||
nomadapiConfig.Address = retAddress
|
||||
nomad, err := nomadapi.NewClient(nomadapiConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
aclbootstrap, _, err := nomad.ACLTokens().Bootstrap(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
nomadToken = aclbootstrap.SecretID
|
||||
t.Logf("[WARN] Generated Master token: %s", nomadToken)
|
||||
policy := &nomadapi.ACLPolicy{
|
||||
Name: "test",
|
||||
Description: "test",
|
||||
Rules: `namespace "default" {
|
||||
policy = "read"
|
||||
}
|
||||
`,
|
||||
}
|
||||
anonPolicy := &nomadapi.ACLPolicy{
|
||||
Name: "anonymous",
|
||||
Description: "Deny all access for anonymous requests",
|
||||
Rules: `namespace "default" {
|
||||
policy = "deny"
|
||||
}
|
||||
agent {
|
||||
policy = "deny"
|
||||
}
|
||||
node {
|
||||
policy = "deny"
|
||||
}
|
||||
`,
|
||||
}
|
||||
nomadAuthConfig := nomadapi.DefaultConfig()
|
||||
nomadAuthConfig.Address = retAddress
|
||||
nomadAuthConfig.SecretID = nomadToken
|
||||
nomadAuth, err := nomadapi.NewClient(nomadAuthConfig)
|
||||
_, err = nomadAuth.ACLPolicies().Upsert(policy, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = nomadAuth.ACLPolicies().Upsert(anonPolicy, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return err
|
||||
}); err != nil {
|
||||
cleanup()
|
||||
t.Fatalf("Could not connect to docker: %s", err)
|
||||
}
|
||||
return cleanup, retAddress, nomadToken
|
||||
}
|
||||
|
||||
func TestBackend_config_access(t *testing.T) {
|
||||
config := logical.TestBackendConfig()
|
||||
config.StorageView = &logical.InmemStorage{}
|
||||
b, err := Factory(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cleanup, connURL, connToken := prepareTestContainer(t)
|
||||
defer cleanup()
|
||||
|
||||
connData := map[string]interface{}{
|
||||
"address": connURL,
|
||||
"token": connToken,
|
||||
}
|
||||
|
||||
confReq := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/access",
|
||||
Storage: config.StorageView,
|
||||
Data: connData,
|
||||
}
|
||||
|
||||
resp, err := b.HandleRequest(confReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) || resp != nil {
|
||||
t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
|
||||
}
|
||||
|
||||
confReq.Operation = logical.ReadOperation
|
||||
resp, err = b.HandleRequest(confReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
|
||||
}
|
||||
|
||||
expected := map[string]interface{}{
|
||||
"address": connData["address"].(string),
|
||||
}
|
||||
if !reflect.DeepEqual(expected, resp.Data) {
|
||||
t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data)
|
||||
}
|
||||
if resp.Data["token"] != nil {
|
||||
t.Fatalf("token should not be set in the response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackend_renew_revoke(t *testing.T) {
|
||||
config := logical.TestBackendConfig()
|
||||
config.StorageView = &logical.InmemStorage{}
|
||||
b, err := Factory(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cleanup, connURL, connToken := prepareTestContainer(t)
|
||||
defer cleanup()
|
||||
connData := map[string]interface{}{
|
||||
"address": connURL,
|
||||
"token": connToken,
|
||||
}
|
||||
|
||||
req := &logical.Request{
|
||||
Storage: config.StorageView,
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/access",
|
||||
Data: connData,
|
||||
}
|
||||
resp, err := b.HandleRequest(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req.Path = "role/test"
|
||||
req.Data = map[string]interface{}{
|
||||
"policies": []string{"policy"},
|
||||
"lease": "6h",
|
||||
}
|
||||
resp, err = b.HandleRequest(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req.Operation = logical.ReadOperation
|
||||
req.Path = "creds/test"
|
||||
resp, err = b.HandleRequest(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp == nil {
|
||||
t.Fatal("resp nil")
|
||||
}
|
||||
if resp.IsError() {
|
||||
t.Fatalf("resp is error: %v", resp.Error())
|
||||
}
|
||||
|
||||
generatedSecret := resp.Secret
|
||||
generatedSecret.IssueTime = time.Now()
|
||||
generatedSecret.TTL = 6 * time.Hour
|
||||
|
||||
var d struct {
|
||||
Token string `mapstructure:"secret_id"`
|
||||
Accessor string `mapstructure:"accessor_id"`
|
||||
}
|
||||
if err := mapstructure.Decode(resp.Data, &d); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("[WARN] Generated token: %s with accesor %s", d.Token, d.Accessor)
|
||||
|
||||
// Build a client and verify that the credentials work
|
||||
nomadapiConfig := nomadapi.DefaultConfig()
|
||||
nomadapiConfig.Address = connData["address"].(string)
|
||||
nomadapiConfig.SecretID = d.Token
|
||||
client, err := nomadapi.NewClient(nomadapiConfig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Log("[WARN] Verifying that the generated token works...")
|
||||
_, err = client.Agent().Members, nil
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req.Operation = logical.RenewOperation
|
||||
req.Secret = generatedSecret
|
||||
resp, err = b.HandleRequest(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp == nil {
|
||||
t.Fatal("got nil response from renew")
|
||||
}
|
||||
|
||||
req.Operation = logical.RevokeOperation
|
||||
resp, err = b.HandleRequest(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Build a management client and verify that the token does not exist anymore
|
||||
nomadmgmtConfig := nomadapi.DefaultConfig()
|
||||
nomadmgmtConfig.Address = connData["address"].(string)
|
||||
nomadmgmtConfig.SecretID = connData["token"].(string)
|
||||
mgmtclient, err := nomadapi.NewClient(nomadmgmtConfig)
|
||||
|
||||
q := &nomadapi.QueryOptions{
|
||||
Namespace: "default",
|
||||
}
|
||||
|
||||
t.Log("[WARN] Verifying that the generated token does not exist...")
|
||||
_, _, err = mgmtclient.ACLTokens().Info(d.Accessor, q)
|
||||
if err == nil {
|
||||
t.Fatal("err: expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackend_CredsCreateEnvVar(t *testing.T) {
|
||||
config := logical.TestBackendConfig()
|
||||
config.StorageView = &logical.InmemStorage{}
|
||||
b, err := Factory(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cleanup, connURL, connToken := prepareTestContainer(t)
|
||||
defer cleanup()
|
||||
|
||||
req := logical.TestRequest(t, logical.UpdateOperation, "role/test")
|
||||
req.Data = map[string]interface{}{
|
||||
"policies": []string{"policy"},
|
||||
"lease": "6h",
|
||||
}
|
||||
resp, err := b.HandleRequest(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
os.Setenv("NOMAD_TOKEN", connToken)
|
||||
defer os.Unsetenv("NOMAD_TOKEN")
|
||||
os.Setenv("NOMAD_ADDR", connURL)
|
||||
defer os.Unsetenv("NOMAD_ADDR")
|
||||
|
||||
req.Operation = logical.ReadOperation
|
||||
req.Path = "creds/test"
|
||||
resp, err = b.HandleRequest(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp == nil {
|
||||
t.Fatal("resp nil")
|
||||
}
|
||||
if resp.IsError() {
|
||||
t.Fatalf("resp is error: %v", resp.Error())
|
||||
}
|
||||
}
|
|
@ -0,0 +1,121 @@
|
|||
package nomad
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/hashicorp/vault/logical/framework"
|
||||
)
|
||||
|
||||
// configAccessKey is the storage path of the access configuration.
const configAccessKey = "config/access"
|
||||
|
||||
// pathConfigAccess defines the config/access endpoint used to store
// the Nomad server address and management token.
func pathConfigAccess(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "config/access",
		Fields: map[string]*framework.FieldSchema{
			"address": &framework.FieldSchema{
				Type:        framework.TypeString,
				Description: "Nomad server address",
			},

			"token": &framework.FieldSchema{
				Type:        framework.TypeString,
				Description: "Token for API calls",
			},
		},

		Callbacks: map[logical.Operation]framework.OperationFunc{
			logical.ReadOperation:   b.pathConfigAccessRead,
			logical.CreateOperation: b.pathConfigAccessWrite,
			logical.UpdateOperation: b.pathConfigAccessWrite,
			logical.DeleteOperation: b.pathConfigAccessDelete,
		},

		// Decides between the Create and Update callbacks above.
		ExistenceCheck: b.configExistenceCheck,
	}
}
|
||||
|
||||
func (b *backend) configExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
|
||||
entry, err := b.readConfigAccess(req.Storage)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return entry != nil, nil
|
||||
}
|
||||
|
||||
func (b *backend) readConfigAccess(storage logical.Storage) (*accessConfig, error) {
|
||||
entry, err := storage.Get(configAccessKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
conf := &accessConfig{}
|
||||
if err := entry.DecodeJSON(conf); err != nil {
|
||||
return nil, errwrap.Wrapf("error reading nomad access configuration: {{err}}", err)
|
||||
}
|
||||
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathConfigAccessRead(
|
||||
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
conf, err := b.readConfigAccess(req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if conf == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"address": conf.Address,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathConfigAccessWrite(
|
||||
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
conf, err := b.readConfigAccess(req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if conf == nil {
|
||||
conf = &accessConfig{}
|
||||
}
|
||||
|
||||
address, ok := data.GetOk("address")
|
||||
if ok {
|
||||
conf.Address = address.(string)
|
||||
}
|
||||
token, ok := data.GetOk("token")
|
||||
if ok {
|
||||
conf.Token = token.(string)
|
||||
}
|
||||
|
||||
entry, err := logical.StorageEntryJSON("config/access", conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := req.Storage.Put(entry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathConfigAccessDelete(
|
||||
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
if err := req.Storage.Delete(configAccessKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// accessConfig is the stored Nomad connection configuration: the
// server address and the management token used for API calls.
type accessConfig struct {
	Address string `json:"address"`
	Token   string `json:"token"`
}
|
|
@ -0,0 +1,109 @@
|
|||
package nomad
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/hashicorp/vault/logical/framework"
|
||||
)
|
||||
|
||||
// leaseConfigKey is the storage path of the lease configuration.
const leaseConfigKey = "config/lease"
|
||||
|
||||
// pathConfigLease defines the config/lease endpoint used to tune the
// TTL and max TTL of issued tokens.
func pathConfigLease(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "config/lease",
		Fields: map[string]*framework.FieldSchema{
			"ttl": &framework.FieldSchema{
				Type:        framework.TypeDurationSecond,
				Description: "Duration before which the issued token needs renewal",
			},
			"max_ttl": &framework.FieldSchema{
				Type:        framework.TypeDurationSecond,
				Description: `Duration after which the issued token should not be allowed to be renewed`,
			},
		},

		Callbacks: map[logical.Operation]framework.OperationFunc{
			logical.ReadOperation:   b.pathLeaseRead,
			logical.UpdateOperation: b.pathLeaseUpdate,
			logical.DeleteOperation: b.pathLeaseDelete,
		},

		HelpSynopsis:    pathConfigLeaseHelpSyn,
		HelpDescription: pathConfigLeaseHelpDesc,
	}
}
|
||||
|
||||
// Sets the lease configuration parameters
|
||||
func (b *backend) pathLeaseUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
entry, err := logical.StorageEntryJSON("config/lease", &configLease{
|
||||
TTL: time.Second * time.Duration(d.Get("ttl").(int)),
|
||||
MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := req.Storage.Put(entry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathLeaseDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
if err := req.Storage.Delete(leaseConfigKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Returns the lease configuration parameters
|
||||
func (b *backend) pathLeaseRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
lease, err := b.LeaseConfig(req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if lease == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"ttl": int64(lease.TTL.Seconds()),
|
||||
"max_ttl": int64(lease.MaxTTL.Seconds()),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Lease returns the lease information
|
||||
func (b *backend) LeaseConfig(s logical.Storage) (*configLease, error) {
|
||||
entry, err := s.Get(leaseConfigKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var result configLease
|
||||
if err := entry.DecodeJSON(&result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// configLease holds the lease configuration for the secrets issued by
// this backend: the default TTL and the renewal cap.
type configLease struct {
	TTL    time.Duration `json:"ttl" mapstructure:"ttl"`
	MaxTTL time.Duration `json:"max_ttl" mapstructure:"max_ttl"`
}
|
||||
|
||||
// Help text surfaced by the framework for the config/lease endpoint.
var pathConfigLeaseHelpSyn = "Configure the lease parameters for generated tokens"

var pathConfigLeaseHelpDesc = `
Sets the ttl and max_ttl values for the secrets to be issued by this backend.
Both ttl and max_ttl takes in an integer number of seconds as input as well as
inputs like "1h".
`
|
|
@ -0,0 +1,80 @@
|
|||
package nomad
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/nomad/api"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/hashicorp/vault/logical/framework"
|
||||
)
|
||||
|
||||
// pathCredsCreate defines the creds/<name> endpoint, which issues a
// Nomad ACL token for the named role on read.
func pathCredsCreate(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "creds/" + framework.GenericNameRegex("name"),
		Fields: map[string]*framework.FieldSchema{
			"name": &framework.FieldSchema{
				Type:        framework.TypeString,
				Description: "Name of the role",
			},
		},

		Callbacks: map[logical.Operation]framework.OperationFunc{
			logical.ReadOperation: b.pathTokenRead,
		},
	}
}
|
||||
|
||||
func (b *backend) pathTokenRead(
|
||||
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
name := d.Get("name").(string)
|
||||
|
||||
role, err := b.Role(req.Storage, name)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf("error retrieving role: {{err}}", err)
|
||||
}
|
||||
if role == nil {
|
||||
return logical.ErrorResponse(fmt.Sprintf("role %q not found", name)), nil
|
||||
}
|
||||
|
||||
// Determine if we have a lease configuration
|
||||
leaseConfig, err := b.LeaseConfig(req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if leaseConfig == nil {
|
||||
leaseConfig = &configLease{}
|
||||
}
|
||||
|
||||
// Get the nomad client
|
||||
c, err := b.client(req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Generate a name for the token
|
||||
tokenName := fmt.Sprintf("vault-%s-%s-%d", name, req.DisplayName, time.Now().UnixNano())
|
||||
|
||||
// Create it
|
||||
token, _, err := c.ACLTokens().Create(&api.ACLToken{
|
||||
Name: tokenName,
|
||||
Type: role.TokenType,
|
||||
Policies: role.Policies,
|
||||
Global: role.Global,
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Use the helper to create the secret
|
||||
resp := b.Secret(SecretTokenType).Response(map[string]interface{}{
|
||||
"secret_id": token.SecretID,
|
||||
"accessor_id": token.AccessorID,
|
||||
}, map[string]interface{}{
|
||||
"accessor_id": token.AccessorID,
|
||||
})
|
||||
resp.Secret.TTL = leaseConfig.TTL
|
||||
|
||||
return resp, nil
|
||||
}
|
|
@ -0,0 +1,189 @@
|
|||
package nomad
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/hashicorp/vault/logical/framework"
|
||||
)
|
||||
|
||||
// pathListRoles defines the role/ listing endpoint.
func pathListRoles(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "role/?$",

		Callbacks: map[logical.Operation]framework.OperationFunc{
			logical.ListOperation: b.pathRoleList,
		},
	}
}
|
||||
|
||||
// pathRoles defines the role/<name> CRUD endpoint for managing the
// role definitions that map to Nomad ACL token parameters.
func pathRoles(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "role/" + framework.GenericNameRegex("name"),
		Fields: map[string]*framework.FieldSchema{
			"name": &framework.FieldSchema{
				Type:        framework.TypeString,
				Description: "Name of the role",
			},

			"policies": &framework.FieldSchema{
				Type:        framework.TypeCommaStringSlice,
				Description: "Comma-separated string or list of policies as previously created in Nomad. Required for 'client' token.",
			},

			"global": &framework.FieldSchema{
				Type:        framework.TypeBool,
				Description: "Boolean value describing if the token should be global or not. Defaults to false.",
			},

			"type": &framework.FieldSchema{
				Type:    framework.TypeString,
				Default: "client",
				Description: `Which type of token to create: 'client'
or 'management'. If a 'management' token,
the "policies" parameter is not required.
Defaults to 'client'.`,
			},
		},

		Callbacks: map[logical.Operation]framework.OperationFunc{
			logical.ReadOperation:   b.pathRolesRead,
			logical.CreateOperation: b.pathRolesWrite,
			logical.UpdateOperation: b.pathRolesWrite,
			logical.DeleteOperation: b.pathRolesDelete,
		},

		// Decides between the Create and Update callbacks above.
		ExistenceCheck: b.rolesExistenceCheck,
	}
}
|
||||
|
||||
// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
|
||||
// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
|
||||
func (b *backend) rolesExistenceCheck(req *logical.Request, d *framework.FieldData) (bool, error) {
|
||||
name := d.Get("name").(string)
|
||||
entry, err := b.Role(req.Storage, name)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return entry != nil, nil
|
||||
}
|
||||
|
||||
func (b *backend) Role(storage logical.Storage, name string) (*roleConfig, error) {
|
||||
if name == "" {
|
||||
return nil, errors.New("invalid role name")
|
||||
}
|
||||
|
||||
entry, err := storage.Get("role/" + name)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf("error retrieving role: {{err}}", err)
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var result roleConfig
|
||||
if err := entry.DecodeJSON(&result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathRoleList(
|
||||
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
entries, err := req.Storage.List("role/")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return logical.ListResponse(entries), nil
|
||||
}
|
||||
|
||||
func (b *backend) pathRolesRead(
|
||||
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
name := d.Get("name").(string)
|
||||
|
||||
role, err := b.Role(req.Storage, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if role == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Generate the response
|
||||
resp := &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"type": role.TokenType,
|
||||
"global": role.Global,
|
||||
"policies": role.Policies,
|
||||
},
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathRolesWrite(
|
||||
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
name := d.Get("name").(string)
|
||||
|
||||
role, err := b.Role(req.Storage, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if role == nil {
|
||||
role = new(roleConfig)
|
||||
}
|
||||
|
||||
policies, ok := d.GetOk("policies")
|
||||
if ok {
|
||||
role.Policies = policies.([]string)
|
||||
}
|
||||
|
||||
role.TokenType = d.Get("type").(string)
|
||||
switch role.TokenType {
|
||||
case "client":
|
||||
if len(role.Policies) == 0 {
|
||||
return logical.ErrorResponse(
|
||||
"policies cannot be empty when using client tokens"), nil
|
||||
}
|
||||
case "management":
|
||||
if len(role.Policies) != 0 {
|
||||
return logical.ErrorResponse(
|
||||
"policies should be empty when using management tokens"), nil
|
||||
}
|
||||
default:
|
||||
return logical.ErrorResponse(
|
||||
`type must be "client" or "management"`), nil
|
||||
}
|
||||
|
||||
global, ok := d.GetOk("global")
|
||||
if ok {
|
||||
role.Global = global.(bool)
|
||||
}
|
||||
|
||||
entry, err := logical.StorageEntryJSON("role/"+name, role)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := req.Storage.Put(entry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathRolesDelete(
|
||||
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
name := d.Get("name").(string)
|
||||
if err := req.Storage.Delete("role/" + name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// roleConfig is the stored definition of a role: the Nomad ACL
// policies to attach, the token type ("client" or "management"),
// and whether issued tokens are global.
type roleConfig struct {
	Policies  []string `json:"policies"`
	TokenType string   `json:"type"`
	Global    bool     `json:"global"`
}
|
|
@ -0,0 +1,68 @@
|
|||
package nomad
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/hashicorp/vault/logical/framework"
|
||||
)
|
||||
|
||||
const (
	// SecretTokenType is the secret type name for issued Nomad ACL tokens.
	SecretTokenType = "token"
)
|
||||
|
||||
// secretToken defines the "token" secret type and its lifecycle
// hooks used to renew and revoke issued Nomad ACL tokens.
func secretToken(b *backend) *framework.Secret {
	return &framework.Secret{
		Type: SecretTokenType,
		Fields: map[string]*framework.FieldSchema{
			"token": &framework.FieldSchema{
				Type:        framework.TypeString,
				Description: "Request token",
			},
		},

		Renew:  b.secretTokenRenew,
		Revoke: b.secretTokenRevoke,
	}
}
|
||||
|
||||
func (b *backend) secretTokenRenew(
|
||||
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
lease, err := b.LeaseConfig(req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if lease == nil {
|
||||
lease = &configLease{}
|
||||
}
|
||||
|
||||
return framework.LeaseExtend(lease.TTL, lease.MaxTTL, b.System())(req, d)
|
||||
}
|
||||
|
||||
func (b *backend) secretTokenRevoke(
|
||||
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
c, err := b.client(req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if c == nil {
|
||||
return nil, fmt.Errorf("error getting Nomad client")
|
||||
}
|
||||
|
||||
accessorIDRaw, ok := req.Secret.InternalData["accessor_id"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("accessor_id is missing on the lease")
|
||||
}
|
||||
accessorID, ok := accessorIDRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("unable to convert accessor_id")
|
||||
}
|
||||
_, err = c.ACLTokens().Delete(accessorID, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
|
@ -45,6 +45,7 @@ import (
|
|||
"github.com/hashicorp/vault/builtin/logical/mongodb"
|
||||
"github.com/hashicorp/vault/builtin/logical/mssql"
|
||||
"github.com/hashicorp/vault/builtin/logical/mysql"
|
||||
"github.com/hashicorp/vault/builtin/logical/nomad"
|
||||
"github.com/hashicorp/vault/builtin/logical/pki"
|
||||
"github.com/hashicorp/vault/builtin/logical/postgresql"
|
||||
"github.com/hashicorp/vault/builtin/logical/rabbitmq"
|
||||
|
@ -107,6 +108,7 @@ func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory {
|
|||
LogicalBackends: map[string]logical.Factory{
|
||||
"aws": aws.Factory,
|
||||
"consul": consul.Factory,
|
||||
"nomad": nomad.Factory,
|
||||
"postgresql": postgresql.Factory,
|
||||
"cassandra": cassandra.Factory,
|
||||
"pki": pki.Factory,
|
||||
|
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,674 @@
|
|||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
{one line to give the program's name and a brief idea of what it does.}
|
||||
Copyright (C) {year} {name of author}
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see {http://www.gnu.org/licenses/}.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
cronexpr Copyright (C) 2013 Raymond Hill
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
{http://www.gnu.org/licenses/}.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
{http://www.gnu.org/philosophy/why-not-lgpl.html}.
|
|
@ -0,0 +1,134 @@
|
|||
Golang Cron expression parser
|
||||
=============================
|
||||
Given a cron expression and a time stamp, you can get the next time stamp which satisfies the cron expression.
|
||||
|
||||
In another project, I decided to use cron expression syntax to encode scheduling information. Thus this standalone library to parse and apply time stamps to cron expressions.
|
||||
|
||||
The time-matching algorithm in this implementation is efficient, it avoids as much as possible to guess the next matching time stamp, a common technique seen in a number of implementations out there.
|
||||
|
||||
There is also a companion command-line utility to evaluate cron time expressions: <https://github.com/gorhill/cronexpr/tree/master/cronexpr> (which of course uses this library).
|
||||
|
||||
Implementation
|
||||
--------------
|
||||
The reference documentation for this implementation is found at
|
||||
<https://en.wikipedia.org/wiki/Cron#CRON_expression>, which I copy/pasted here (laziness!) with modifications where this implementation differs:
|
||||
|
||||
Field name Mandatory? Allowed values Allowed special characters
|
||||
---------- ---------- -------------- --------------------------
|
||||
Seconds No 0-59 * / , -
|
||||
Minutes Yes 0-59 * / , -
|
||||
Hours Yes 0-23 * / , -
|
||||
Day of month Yes 1-31 * / , - L W
|
||||
Month Yes 1-12 or JAN-DEC * / , -
|
||||
Day of week Yes 0-6 or SUN-SAT * / , - L #
|
||||
Year No 1970–2099 * / , -
|
||||
|
||||
#### Asterisk ( * )
|
||||
The asterisk indicates that the cron expression matches for all values of the field. E.g., using an asterisk in the 4th field (month) indicates every month.
|
||||
|
||||
#### Slash ( / )
|
||||
Slashes describe increments of ranges. For example `3-59/15` in the minute field indicate the third minute of the hour and every 15 minutes thereafter. The form `*/...` is equivalent to the form "first-last/...", that is, an increment over the largest possible range of the field.
|
||||
|
||||
#### Comma ( , )
|
||||
Commas are used to separate items of a list. For example, using `MON,WED,FRI` in the 5th field (day of week) means Mondays, Wednesdays and Fridays.
|
||||
|
||||
#### Hyphen ( - )
|
||||
Hyphens define ranges. For example, 2000-2010 indicates every year between 2000 and 2010 AD, inclusive.
|
||||
|
||||
#### L
|
||||
`L` stands for "last". When used in the day-of-week field, it allows you to specify constructs such as "the last Friday" (`5L`) of a given month. In the day-of-month field, it specifies the last day of the month.
|
||||
|
||||
#### W
|
||||
The `W` character is allowed for the day-of-month field. This character is used to specify the business day (Monday-Friday) nearest the given day. As an example, if you were to specify `15W` as the value for the day-of-month field, the meaning is: "the nearest business day to the 15th of the month."
|
||||
|
||||
So, if the 15th is a Saturday, the trigger fires on Friday the 14th. If the 15th is a Sunday, the trigger fires on Monday the 16th. If the 15th is a Tuesday, then it fires on Tuesday the 15th. However if you specify `1W` as the value for day-of-month, and the 1st is a Saturday, the trigger fires on Monday the 3rd, as it does not 'jump' over the boundary of a month's days.
|
||||
|
||||
The `W` character can be specified only when the day-of-month is a single day, not a range or list of days.
|
||||
|
||||
The `W` character can also be combined with `L`, i.e. `LW` to mean "the last business day of the month."
|
||||
|
||||
#### Hash ( # )
|
||||
`#` is allowed for the day-of-week field, and must be followed by a number between one and five. It allows you to specify constructs such as "the second Friday" of a given month.
|
||||
|
||||
Predefined cron expressions
|
||||
---------------------------
|
||||
(Copied from <https://en.wikipedia.org/wiki/Cron#Predefined_scheduling_definitions>, with text modified according to this implementation)
|
||||
|
||||
Entry Description Equivalent to
|
||||
@annually Run once a year at midnight in the morning of January 1 0 0 0 1 1 * *
|
||||
@yearly Run once a year at midnight in the morning of January 1 0 0 0 1 1 * *
|
||||
@monthly Run once a month at midnight in the morning of the first of the month 0 0 0 1 * * *
|
||||
@weekly Run once a week at midnight in the morning of Sunday 0 0 0 * * 0 *
|
||||
@daily Run once a day at midnight 0 0 0 * * * *
|
||||
@hourly Run once an hour at the beginning of the hour 0 0 * * * * *
|
||||
@reboot Not supported
|
||||
|
||||
Other details
|
||||
-------------
|
||||
* If only six fields are present, a `0` second field is prepended, that is, `* * * * * 2013` internally becomes `0 * * * * * 2013`.
|
||||
* If only five fields are present, a `0` second field is prepended and a wildcard year field is appended, that is, `* * * * Mon` internally becomes `0 * * * * Mon *`.
|
||||
* Domain for day-of-week field is [0-7] instead of [0-6], 7 being Sunday (like 0). This is to comply with http://linux.die.net/man/5/crontab#.
|
||||
* As of now, the behavior of the code is undetermined if a malformed cron expression is supplied
|
||||
|
||||
Install
|
||||
-------
|
||||
go get github.com/gorhill/cronexpr
|
||||
|
||||
Usage
|
||||
-----
|
||||
Import the library:
|
||||
|
||||
import "github.com/gorhill/cronexpr"
|
||||
import "time"
|
||||
|
||||
Simplest way:
|
||||
|
||||
nextTime := cronexpr.MustParse("0 0 29 2 *").Next(time.Now())
|
||||
|
||||
Assuming `time.Now()` is "2013-08-29 09:28:00", then `nextTime` will be "2016-02-29 00:00:00".
|
||||
|
||||
You can keep the returned Expression pointer around if you want to reuse it:
|
||||
|
||||
expr := cronexpr.MustParse("0 0 29 2 *")
|
||||
nextTime := expr.Next(time.Now())
|
||||
...
|
||||
nextTime = expr.Next(nextTime)
|
||||
|
||||
Use `time.IsZero()` to find out whether a valid time was returned. For example,
|
||||
|
||||
cronexpr.MustParse("* * * * * 1980").Next(time.Now()).IsZero()
|
||||
|
||||
will return `true`, whereas
|
||||
|
||||
cronexpr.MustParse("* * * * * 2050").Next(time.Now()).IsZero()
|
||||
|
||||
will return `false` (as of 2013-08-29...)
|
||||
|
||||
You may also query for `n` next time stamps:
|
||||
|
||||
cronexpr.MustParse("0 0 29 2 *").NextN(time.Now(), 5)
|
||||
|
||||
which returns a slice of time.Time objects, containing the following time stamps (as of 2013-08-30):
|
||||
|
||||
2016-02-29 00:00:00
|
||||
2020-02-29 00:00:00
|
||||
2024-02-29 00:00:00
|
||||
2028-02-29 00:00:00
|
||||
2032-02-29 00:00:00
|
||||
|
||||
The time zone of time values returned by `Next` and `NextN` is always the
|
||||
time zone of the time value passed as argument, unless a zero time value is
|
||||
returned.
|
||||
|
||||
API
|
||||
---
|
||||
<http://godoc.org/github.com/gorhill/cronexpr>
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
License: pick the one which suits you best:
|
||||
|
||||
- GPL v3 see <https://www.gnu.org/licenses/gpl.html>
|
||||
- APL v2 see <http://www.apache.org/licenses/LICENSE-2.0>
|
||||
|
|
@ -0,0 +1,266 @@
|
|||
/*!
|
||||
* Copyright 2013 Raymond Hill
|
||||
*
|
||||
* Project: github.com/gorhill/cronexpr
|
||||
* File: cronexpr.go
|
||||
* Version: 1.0
|
||||
* License: pick the one which suits you :
|
||||
* GPL v3 see <https://www.gnu.org/licenses/gpl.html>
|
||||
* APL v2 see <http://www.apache.org/licenses/LICENSE-2.0>
|
||||
*
|
||||
*/
|
||||
|
||||
// Package cronexpr parses cron time expressions.
|
||||
package cronexpr
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// An Expression represents a specific cron time expression as defined at
// <https://github.com/gorhill/cronexpr#implementation>
type Expression struct {
	expression             string       // source expression text; NOTE(review): not populated by Parse in this file — presumably set elsewhere
	secondList             []int        // matching second values (0-59)
	minuteList             []int        // matching minute values (0-59)
	hourList               []int        // matching hour values (0-23)
	daysOfMonth            map[int]bool // explicit day-of-month entries (1-31)
	workdaysOfMonth        map[int]bool // day-of-month entries using the W ("nearest weekday") modifier
	lastDayOfMonth         bool         // day-of-month field contained L (last day of the month)
	lastWorkdayOfMonth     bool         // day-of-month field contained LW (last weekday of the month)
	daysOfMonthRestricted  bool         // day-of-month field restricts which days match
	actualDaysOfMonthList  []int        // presumably the concrete matching days computed per month — verify against the matcher
	monthList              []int        // matching month values (1-12)
	daysOfWeek             map[int]bool // plain day-of-week entries (0-6, 0 = Sunday)
	specificWeekDaysOfWeek map[int]bool // day-of-week entries using # ("nth weekday of month")
	lastWeekDaysOfWeek     map[int]bool // day-of-week entries using L ("last weekday of month")
	daysOfWeekRestricted   bool         // day-of-week field restricts which days match
	yearList               []int        // matching year values
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// MustParse returns a new Expression pointer. It expects a well-formed cron
|
||||
// expression. If a malformed cron expression is supplied, it will `panic`.
|
||||
// See <https://github.com/gorhill/cronexpr#implementation> for documentation
|
||||
// about what is a well-formed cron expression from this library's point of
|
||||
// view.
|
||||
func MustParse(cronLine string) *Expression {
|
||||
expr, err := Parse(cronLine)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return expr
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// Parse returns a new Expression pointer. An error is returned if a malformed
// cron expression is supplied.
// See <https://github.com/gorhill/cronexpr#implementation> for documentation
// about what is a well-formed cron expression from this library's point of
// view.
func Parse(cronLine string) (*Expression, error) {

	// Maybe one of the built-in aliases is being used
	cron := cronNormalizer.Replace(cronLine)

	// Split the line into whitespace-separated fields. Valid layouts have
	// 5 (standard), 6 (+seconds or +year) or 7 (+seconds and +year) fields.
	indices := fieldFinder.FindAllStringIndex(cron, -1)
	fieldCount := len(indices)
	if fieldCount < 5 {
		return nil, fmt.Errorf("missing field(s)")
	}
	// ignore fields beyond 7th
	if fieldCount > 7 {
		fieldCount = 7
	}

	var expr = Expression{}
	var field = 0
	var err error

	// second field (optional)
	if fieldCount == 7 {
		err = expr.secondFieldHandler(cron[indices[field][0]:indices[field][1]])
		if err != nil {
			return nil, err
		}
		field += 1
	} else {
		// Seconds omitted: the expression fires at second 0.
		expr.secondList = []int{0}
	}

	// minute field
	err = expr.minuteFieldHandler(cron[indices[field][0]:indices[field][1]])
	if err != nil {
		return nil, err
	}
	field += 1

	// hour field
	err = expr.hourFieldHandler(cron[indices[field][0]:indices[field][1]])
	if err != nil {
		return nil, err
	}
	field += 1

	// day of month field
	err = expr.domFieldHandler(cron[indices[field][0]:indices[field][1]])
	if err != nil {
		return nil, err
	}
	field += 1

	// month field
	err = expr.monthFieldHandler(cron[indices[field][0]:indices[field][1]])
	if err != nil {
		return nil, err
	}
	field += 1

	// day of week field
	err = expr.dowFieldHandler(cron[indices[field][0]:indices[field][1]])
	if err != nil {
		return nil, err
	}
	field += 1

	// year field
	if field < fieldCount {
		err = expr.yearFieldHandler(cron[indices[field][0]:indices[field][1]])
		if err != nil {
			return nil, err
		}
	} else {
		// Year omitted: match every supported year.
		expr.yearList = yearDescriptor.defaultList
	}

	return &expr, nil
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// Next returns the closest time instant immediately following `fromTime` which
// matches the cron expression `expr`.
//
// The `time.Location` of the returned time instant is the same as that of
// `fromTime`.
//
// The zero value of time.Time is returned if no matching time instant exists
// or if a `fromTime` is itself a zero value.
func (expr *Expression) Next(fromTime time.Time) time.Time {
	// Special case
	if fromTime.IsZero() {
		return fromTime
	}

	// Since expr.nextSecond()-expr.nextMonth() expects that the
	// supplied time stamp is a perfect match to the underlying cron
	// expression, and since this function is an entry point where `fromTime`
	// does not necessarily matches the underlying cron expression,
	// we first need to ensure supplied time stamp matches
	// the cron expression. If not, this means the supplied time
	// stamp falls in between matching time stamps, thus we move
	// to closest future matching immediately upon encountering a mismatching
	// time stamp.

	// year
	v := fromTime.Year()
	i := sort.SearchInts(expr.yearList, v)
	if i == len(expr.yearList) {
		// No scheduled year at or after fromTime: no future match exists.
		return time.Time{}
	}
	if v != expr.yearList[i] {
		return expr.nextYear(fromTime)
	}
	// month
	v = int(fromTime.Month())
	i = sort.SearchInts(expr.monthList, v)
	if i == len(expr.monthList) {
		return expr.nextYear(fromTime)
	}
	if v != expr.monthList[i] {
		return expr.nextMonth(fromTime)
	}

	// Resolve the day constraints (L, W, day-of-week, etc.) into concrete
	// day numbers for the candidate year/month.
	expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(fromTime.Year(), int(fromTime.Month()))
	if len(expr.actualDaysOfMonthList) == 0 {
		return expr.nextMonth(fromTime)
	}

	// day of month
	v = fromTime.Day()
	i = sort.SearchInts(expr.actualDaysOfMonthList, v)
	if i == len(expr.actualDaysOfMonthList) {
		return expr.nextMonth(fromTime)
	}
	if v != expr.actualDaysOfMonthList[i] {
		return expr.nextDayOfMonth(fromTime)
	}
	// hour
	v = fromTime.Hour()
	i = sort.SearchInts(expr.hourList, v)
	if i == len(expr.hourList) {
		return expr.nextDayOfMonth(fromTime)
	}
	if v != expr.hourList[i] {
		return expr.nextHour(fromTime)
	}
	// minute
	v = fromTime.Minute()
	i = sort.SearchInts(expr.minuteList, v)
	if i == len(expr.minuteList) {
		return expr.nextHour(fromTime)
	}
	if v != expr.minuteList[i] {
		return expr.nextMinute(fromTime)
	}
	// second
	v = fromTime.Second()
	i = sort.SearchInts(expr.secondList, v)
	if i == len(expr.secondList) {
		return expr.nextMinute(fromTime)
	}

	// If we reach this point, there is nothing better to do
	// than to move to the next second

	return expr.nextSecond(fromTime)
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// NextN returns a slice of `n` closest time instants immediately following
|
||||
// `fromTime` which match the cron expression `expr`.
|
||||
//
|
||||
// The time instants in the returned slice are in chronological ascending order.
|
||||
// The `time.Location` of the returned time instants is the same as that of
|
||||
// `fromTime`.
|
||||
//
|
||||
// A slice with len between [0-`n`] is returned, that is, if not enough existing
|
||||
// matching time instants exist, the number of returned entries will be less
|
||||
// than `n`.
|
||||
func (expr *Expression) NextN(fromTime time.Time, n uint) []time.Time {
|
||||
nextTimes := make([]time.Time, 0, n)
|
||||
if n > 0 {
|
||||
fromTime = expr.Next(fromTime)
|
||||
for {
|
||||
if fromTime.IsZero() {
|
||||
break
|
||||
}
|
||||
nextTimes = append(nextTimes, fromTime)
|
||||
n -= 1
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
fromTime = expr.nextSecond(fromTime)
|
||||
}
|
||||
}
|
||||
return nextTimes
|
||||
}
|
|
@ -0,0 +1,292 @@
|
|||
/*!
|
||||
* Copyright 2013 Raymond Hill
|
||||
*
|
||||
* Project: github.com/gorhill/cronexpr
|
||||
* File: cronexpr_next.go
|
||||
* Version: 1.0
|
||||
* License: pick the one which suits you :
|
||||
* GPL v3 see <https://www.gnu.org/licenses/gpl.html>
|
||||
* APL v2 see <http://www.apache.org/licenses/LICENSE-2.0>
|
||||
*
|
||||
*/
|
||||
|
||||
package cronexpr
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// dowNormalizedOffsets maps a normalized weekday offset (0-6) to every
// day-of-month number that can fall on that weekday: row r holds days
// 1+r, 8+r, 15+r, 22+r and, where it exists, 29+r. Rows may list days
// (29-31) beyond a short month; callers filter against the month length.
var dowNormalizedOffsets = [][]int{
	{1, 8, 15, 22, 29},
	{2, 9, 16, 23, 30},
	{3, 10, 17, 24, 31},
	{4, 11, 18, 25},
	{5, 12, 19, 26},
	{6, 13, 20, 27},
	{7, 14, 21, 28},
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// nextYear advances t to the first matching instant in the next scheduled
// year, or returns the zero time.Time when no later year is scheduled.
// It also refreshes actualDaysOfMonthList for the new year/month.
func (expr *Expression) nextYear(t time.Time) time.Time {
	// Find index at which item in list is greater or equal to
	// candidate year
	i := sort.SearchInts(expr.yearList, t.Year()+1)
	if i == len(expr.yearList) {
		return time.Time{}
	}
	// Year changed, need to recalculate actual days of month
	expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(expr.yearList[i], expr.monthList[0])
	if len(expr.actualDaysOfMonthList) == 0 {
		// First scheduled month of that year has no eligible day
		// (e.g. Feb 30): keep searching from that month onward.
		return expr.nextMonth(time.Date(
			expr.yearList[i],
			time.Month(expr.monthList[0]),
			1,
			expr.hourList[0],
			expr.minuteList[0],
			expr.secondList[0],
			0,
			t.Location()))
	}
	return time.Date(
		expr.yearList[i],
		time.Month(expr.monthList[0]),
		expr.actualDaysOfMonthList[0],
		expr.hourList[0],
		expr.minuteList[0],
		expr.secondList[0],
		0,
		t.Location())
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// nextMonth advances t to the first matching instant in the next scheduled
// month of the same year, falling through to nextYear when the year is
// exhausted. It also refreshes actualDaysOfMonthList for the new month.
func (expr *Expression) nextMonth(t time.Time) time.Time {
	// Find index at which item in list is greater or equal to
	// candidate month
	i := sort.SearchInts(expr.monthList, int(t.Month())+1)
	if i == len(expr.monthList) {
		return expr.nextYear(t)
	}
	// Month changed, need to recalculate actual days of month
	expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(t.Year(), expr.monthList[i])
	if len(expr.actualDaysOfMonthList) == 0 {
		// No eligible day in that month: recurse to the following month.
		return expr.nextMonth(time.Date(
			t.Year(),
			time.Month(expr.monthList[i]),
			1,
			expr.hourList[0],
			expr.minuteList[0],
			expr.secondList[0],
			0,
			t.Location()))
	}

	return time.Date(
		t.Year(),
		time.Month(expr.monthList[i]),
		expr.actualDaysOfMonthList[0],
		expr.hourList[0],
		expr.minuteList[0],
		expr.secondList[0],
		0,
		t.Location())
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
func (expr *Expression) nextDayOfMonth(t time.Time) time.Time {
|
||||
// Find index at which item in list is greater or equal to
|
||||
// candidate day of month
|
||||
i := sort.SearchInts(expr.actualDaysOfMonthList, t.Day()+1)
|
||||
if i == len(expr.actualDaysOfMonthList) {
|
||||
return expr.nextMonth(t)
|
||||
}
|
||||
|
||||
return time.Date(
|
||||
t.Year(),
|
||||
t.Month(),
|
||||
expr.actualDaysOfMonthList[i],
|
||||
expr.hourList[0],
|
||||
expr.minuteList[0],
|
||||
expr.secondList[0],
|
||||
0,
|
||||
t.Location())
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
func (expr *Expression) nextHour(t time.Time) time.Time {
|
||||
// Find index at which item in list is greater or equal to
|
||||
// candidate hour
|
||||
i := sort.SearchInts(expr.hourList, t.Hour()+1)
|
||||
if i == len(expr.hourList) {
|
||||
return expr.nextDayOfMonth(t)
|
||||
}
|
||||
|
||||
return time.Date(
|
||||
t.Year(),
|
||||
t.Month(),
|
||||
t.Day(),
|
||||
expr.hourList[i],
|
||||
expr.minuteList[0],
|
||||
expr.secondList[0],
|
||||
0,
|
||||
t.Location())
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
func (expr *Expression) nextMinute(t time.Time) time.Time {
|
||||
// Find index at which item in list is greater or equal to
|
||||
// candidate minute
|
||||
i := sort.SearchInts(expr.minuteList, t.Minute()+1)
|
||||
if i == len(expr.minuteList) {
|
||||
return expr.nextHour(t)
|
||||
}
|
||||
|
||||
return time.Date(
|
||||
t.Year(),
|
||||
t.Month(),
|
||||
t.Day(),
|
||||
t.Hour(),
|
||||
expr.minuteList[i],
|
||||
expr.secondList[0],
|
||||
0,
|
||||
t.Location())
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// nextSecond advances t to the next scheduled second within the same
// minute, or rolls over to the next minute when none remains.
func (expr *Expression) nextSecond(t time.Time) time.Time {
	// nextSecond() assumes all other fields are exactly matched
	// to the cron expression

	// Find index at which item in list is greater or equal to
	// candidate second
	i := sort.SearchInts(expr.secondList, t.Second()+1)
	if i == len(expr.secondList) {
		return expr.nextMinute(t)
	}

	return time.Date(
		t.Year(),
		t.Month(),
		t.Day(),
		t.Hour(),
		t.Minute(),
		expr.secondList[i],
		0,
		t.Location())
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// calculateActualDaysOfMonth resolves the day-of-month and day-of-week
// constraints of the expression into the sorted list of concrete day
// numbers (1-31) that match in the given year/month. The result may be
// empty (e.g. `31` in February).
func (expr *Expression) calculateActualDaysOfMonth(year, month int) []int {
	actualDaysOfMonthMap := make(map[int]bool)
	firstDayOfMonth := time.Date(year, time.Month(month), 1, 0, 0, 0, 0, time.UTC)
	lastDayOfMonth := firstDayOfMonth.AddDate(0, 1, -1)

	// As per crontab man page (http://linux.die.net/man/5/crontab#):
	// "The day of a command's execution can be specified by two
	// "fields - day of month, and day of week. If both fields are
	// "restricted (ie, aren't *), the command will be run when
	// "either field matches the current time"

	// If both fields are not restricted, all days of the month are a hit
	if expr.daysOfMonthRestricted == false && expr.daysOfWeekRestricted == false {
		return genericDefaultList[1 : lastDayOfMonth.Day()+1]
	}

	// day-of-month != `*`
	if expr.daysOfMonthRestricted {
		// Last day of month
		if expr.lastDayOfMonth {
			actualDaysOfMonthMap[lastDayOfMonth.Day()] = true
		}
		// Last work day of month
		if expr.lastWorkdayOfMonth {
			actualDaysOfMonthMap[workdayOfMonth(lastDayOfMonth, lastDayOfMonth)] = true
		}
		// Days of month
		for v := range expr.daysOfMonth {
			// Ignore days beyond end of month
			if v <= lastDayOfMonth.Day() {
				actualDaysOfMonthMap[v] = true
			}
		}
		// Work days of month
		// As per Wikipedia: month boundaries are not crossed.
		for v := range expr.workdaysOfMonth {
			// Ignore days beyond end of month
			if v <= lastDayOfMonth.Day() {
				actualDaysOfMonthMap[workdayOfMonth(firstDayOfMonth.AddDate(0, 0, v-1), lastDayOfMonth)] = true
			}
		}
	}

	// day-of-week != `*`
	if expr.daysOfWeekRestricted {
		// How far first sunday is from first day of month
		offset := 7 - int(firstDayOfMonth.Weekday())
		// days of week
		//  offset : (7 - day_of_week_of_1st_day_of_month)
		//  target : 1 + (7 * week_of_month) + (offset + day_of_week) % 7
		for v := range expr.daysOfWeek {
			// Look up the precomputed day numbers for this weekday;
			// the 5th occurrence (w[4]) may not exist in short months.
			w := dowNormalizedOffsets[(offset+v)%7]
			actualDaysOfMonthMap[w[0]] = true
			actualDaysOfMonthMap[w[1]] = true
			actualDaysOfMonthMap[w[2]] = true
			actualDaysOfMonthMap[w[3]] = true
			if len(w) > 4 && w[4] <= lastDayOfMonth.Day() {
				actualDaysOfMonthMap[w[4]] = true
			}
		}
		// days of week of specific week in the month
		//  offset : (7 - day_of_week_of_1st_day_of_month)
		//  target : 1 + (7 * week_of_month) + (offset + day_of_week) % 7
		for v := range expr.specificWeekDaysOfWeek {
			v = 1 + 7*(v/7) + (offset+v)%7
			if v <= lastDayOfMonth.Day() {
				actualDaysOfMonthMap[v] = true
			}
		}
		// Last days of week of the month
		lastWeekOrigin := firstDayOfMonth.AddDate(0, 1, -7)
		offset = 7 - int(lastWeekOrigin.Weekday())
		for v := range expr.lastWeekDaysOfWeek {
			v = lastWeekOrigin.Day() + (offset+v)%7
			if v <= lastDayOfMonth.Day() {
				actualDaysOfMonthMap[v] = true
			}
		}
	}

	return toList(actualDaysOfMonthMap)
}
|
||||
|
||||
func workdayOfMonth(targetDom, lastDom time.Time) int {
|
||||
// If saturday, then friday
|
||||
// If sunday, then monday
|
||||
dom := targetDom.Day()
|
||||
dow := targetDom.Weekday()
|
||||
if dow == time.Saturday {
|
||||
if dom > 1 {
|
||||
dom -= 1
|
||||
} else {
|
||||
dom += 2
|
||||
}
|
||||
} else if dow == time.Sunday {
|
||||
if dom < lastDom.Day() {
|
||||
dom += 1
|
||||
} else {
|
||||
dom -= 2
|
||||
}
|
||||
}
|
||||
return dom
|
||||
}
|
|
@ -0,0 +1,498 @@
|
|||
/*!
|
||||
* Copyright 2013 Raymond Hill
|
||||
*
|
||||
* Project: github.com/gorhill/cronexpr
|
||||
* File: cronexpr_parse.go
|
||||
* Version: 1.0
|
||||
* License: pick the one which suits you best:
|
||||
* GPL v3 see <https://www.gnu.org/licenses/gpl.html>
|
||||
* APL v2 see <http://www.apache.org/licenses/LICENSE-2.0>
|
||||
*
|
||||
*/
|
||||
|
||||
package cronexpr
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// Default (unrestricted) value lists shared by the field descriptors.
// genericDefaultList doubles as a day-number lookup table: slicing it as
// [1:n+1] yields the days 1..n of a month.
var (
	genericDefaultList = []int{
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
		10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
		20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
		30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
		50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
	}
	// yearDefaultList covers the full range of years this library supports.
	yearDefaultList = []int{
		1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979,
		1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989,
		1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
		2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
		2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,
		2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029,
		2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039,
		2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049,
		2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059,
		2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069,
		2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079,
		2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089,
		2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099,
	}
)
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// Token lookup tables used by the per-field atoi callbacks. Inputs have
// already been validated against each descriptor's valuePattern and
// lowercased, so a plain map lookup suffices (unknown keys yield 0).
var (
	// numberTokens accepts bare and zero-padded numbers plus 4-digit years.
	numberTokens = map[string]int{
		"0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9,
		"00": 0, "01": 1, "02": 2, "03": 3, "04": 4, "05": 5, "06": 6, "07": 7, "08": 8, "09": 9,
		"10": 10, "11": 11, "12": 12, "13": 13, "14": 14, "15": 15, "16": 16, "17": 17, "18": 18, "19": 19,
		"20": 20, "21": 21, "22": 22, "23": 23, "24": 24, "25": 25, "26": 26, "27": 27, "28": 28, "29": 29,
		"30": 30, "31": 31, "32": 32, "33": 33, "34": 34, "35": 35, "36": 36, "37": 37, "38": 38, "39": 39,
		"40": 40, "41": 41, "42": 42, "43": 43, "44": 44, "45": 45, "46": 46, "47": 47, "48": 48, "49": 49,
		"50": 50, "51": 51, "52": 52, "53": 53, "54": 54, "55": 55, "56": 56, "57": 57, "58": 58, "59": 59,
		"1970": 1970, "1971": 1971, "1972": 1972, "1973": 1973, "1974": 1974, "1975": 1975, "1976": 1976, "1977": 1977, "1978": 1978, "1979": 1979,
		"1980": 1980, "1981": 1981, "1982": 1982, "1983": 1983, "1984": 1984, "1985": 1985, "1986": 1986, "1987": 1987, "1988": 1988, "1989": 1989,
		"1990": 1990, "1991": 1991, "1992": 1992, "1993": 1993, "1994": 1994, "1995": 1995, "1996": 1996, "1997": 1997, "1998": 1998, "1999": 1999,
		"2000": 2000, "2001": 2001, "2002": 2002, "2003": 2003, "2004": 2004, "2005": 2005, "2006": 2006, "2007": 2007, "2008": 2008, "2009": 2009,
		"2010": 2010, "2011": 2011, "2012": 2012, "2013": 2013, "2014": 2014, "2015": 2015, "2016": 2016, "2017": 2017, "2018": 2018, "2019": 2019,
		"2020": 2020, "2021": 2021, "2022": 2022, "2023": 2023, "2024": 2024, "2025": 2025, "2026": 2026, "2027": 2027, "2028": 2028, "2029": 2029,
		"2030": 2030, "2031": 2031, "2032": 2032, "2033": 2033, "2034": 2034, "2035": 2035, "2036": 2036, "2037": 2037, "2038": 2038, "2039": 2039,
		"2040": 2040, "2041": 2041, "2042": 2042, "2043": 2043, "2044": 2044, "2045": 2045, "2046": 2046, "2047": 2047, "2048": 2048, "2049": 2049,
		"2050": 2050, "2051": 2051, "2052": 2052, "2053": 2053, "2054": 2054, "2055": 2055, "2056": 2056, "2057": 2057, "2058": 2058, "2059": 2059,
		"2060": 2060, "2061": 2061, "2062": 2062, "2063": 2063, "2064": 2064, "2065": 2065, "2066": 2066, "2067": 2067, "2068": 2068, "2069": 2069,
		"2070": 2070, "2071": 2071, "2072": 2072, "2073": 2073, "2074": 2074, "2075": 2075, "2076": 2076, "2077": 2077, "2078": 2078, "2079": 2079,
		"2080": 2080, "2081": 2081, "2082": 2082, "2083": 2083, "2084": 2084, "2085": 2085, "2086": 2086, "2087": 2087, "2088": 2088, "2089": 2089,
		"2090": 2090, "2091": 2091, "2092": 2092, "2093": 2093, "2094": 2094, "2095": 2095, "2096": 2096, "2097": 2097, "2098": 2098, "2099": 2099,
	}
	// monthTokens accepts numeric, 3-letter and full month names.
	monthTokens = map[string]int{
		`1`: 1, `jan`: 1, `january`: 1,
		`2`: 2, `feb`: 2, `february`: 2,
		`3`: 3, `mar`: 3, `march`: 3,
		`4`: 4, `apr`: 4, `april`: 4,
		`5`: 5, `may`: 5,
		`6`: 6, `jun`: 6, `june`: 6,
		`7`: 7, `jul`: 7, `july`: 7,
		`8`: 8, `aug`: 8, `august`: 8,
		`9`: 9, `sep`: 9, `september`: 9,
		`10`: 10, `oct`: 10, `october`: 10,
		`11`: 11, `nov`: 11, `november`: 11,
		`12`: 12, `dec`: 12, `december`: 12,
	}
	// dowTokens accepts numeric, 3-letter and full weekday names;
	// both 0 and 7 mean Sunday.
	dowTokens = map[string]int{
		`0`: 0, `sun`: 0, `sunday`: 0,
		`1`: 1, `mon`: 1, `monday`: 1,
		`2`: 2, `tue`: 2, `tuesday`: 2,
		`3`: 3, `wed`: 3, `wednesday`: 3,
		`4`: 4, `thu`: 4, `thursday`: 4,
		`5`: 5, `fri`: 5, `friday`: 5,
		`6`: 6, `sat`: 6, `saturday`: 6,
		`7`: 0,
	}
)
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
func atoi(s string) int {
|
||||
return numberTokens[s]
|
||||
}
|
||||
|
||||
// fieldDescriptor describes how one cron field is parsed: its value
// bounds, the regexp fragment that recognizes a single value, the token
// converter, and the list used when the field is unrestricted (`*`).
type fieldDescriptor struct {
	name         string           // field name used in error messages
	min, max     int              // inclusive bounds for values in this field
	defaultList  []int            // full value list returned for `*`
	valuePattern string           // regexp alternation matching one valid value token
	atoi         func(string) int // converts a matched token to its integer value
}
|
||||
|
||||
var (
|
||||
secondDescriptor = fieldDescriptor{
|
||||
name: "second",
|
||||
min: 0,
|
||||
max: 59,
|
||||
defaultList: genericDefaultList[0:60],
|
||||
valuePattern: `0?[0-9]|[1-5][0-9]`,
|
||||
atoi: atoi,
|
||||
}
|
||||
minuteDescriptor = fieldDescriptor{
|
||||
name: "minute",
|
||||
min: 0,
|
||||
max: 59,
|
||||
defaultList: genericDefaultList[0:60],
|
||||
valuePattern: `0?[0-9]|[1-5][0-9]`,
|
||||
atoi: atoi,
|
||||
}
|
||||
hourDescriptor = fieldDescriptor{
|
||||
name: "hour",
|
||||
min: 0,
|
||||
max: 23,
|
||||
defaultList: genericDefaultList[0:24],
|
||||
valuePattern: `0?[0-9]|1[0-9]|2[0-3]`,
|
||||
atoi: atoi,
|
||||
}
|
||||
domDescriptor = fieldDescriptor{
|
||||
name: "day-of-month",
|
||||
min: 1,
|
||||
max: 31,
|
||||
defaultList: genericDefaultList[1:32],
|
||||
valuePattern: `0?[1-9]|[12][0-9]|3[01]`,
|
||||
atoi: atoi,
|
||||
}
|
||||
monthDescriptor = fieldDescriptor{
|
||||
name: "month",
|
||||
min: 1,
|
||||
max: 12,
|
||||
defaultList: genericDefaultList[1:13],
|
||||
valuePattern: `0?[1-9]|1[012]|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec|january|february|march|april|march|april|june|july|august|september|october|november|december`,
|
||||
atoi: func(s string) int {
|
||||
return monthTokens[s]
|
||||
},
|
||||
}
|
||||
dowDescriptor = fieldDescriptor{
|
||||
name: "day-of-week",
|
||||
min: 0,
|
||||
max: 6,
|
||||
defaultList: genericDefaultList[0:7],
|
||||
valuePattern: `0?[0-7]|sun|mon|tue|wed|thu|fri|sat|sunday|monday|tuesday|wednesday|thursday|friday|saturday`,
|
||||
atoi: func(s string) int {
|
||||
return dowTokens[s]
|
||||
},
|
||||
}
|
||||
yearDescriptor = fieldDescriptor{
|
||||
name: "year",
|
||||
min: 1970,
|
||||
max: 2099,
|
||||
defaultList: yearDefaultList[:],
|
||||
valuePattern: `19[789][0-9]|20[0-9]{2}`,
|
||||
atoi: atoi,
|
||||
}
|
||||
)
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// Regexp layouts for the directive shapes a cron field entry can take.
// `%value%` is substituted with a descriptor's valuePattern by
// makeLayoutRegexp, and compiled results are cached in layoutRegexp.
var (
	layoutWildcard            = `^\*$|^\?$`                     // `*` or `?`
	layoutValue               = `^(%value%)$`                   // single value, e.g. `5`
	layoutRange               = `^(%value%)-(%value%)$`         // range, e.g. `5-20`
	layoutWildcardAndInterval = `^\*/(\d+)$`                    // `*/2`
	layoutValueAndInterval    = `^(%value%)/(\d+)$`             // `5/2`
	layoutRangeAndInterval    = `^(%value%)-(%value%)/(\d+)$`   // `5-20/2`
	layoutLastDom             = `^l$`                           // `L`: last day of month
	layoutWorkdom             = `^(%value%)w$`                  // `15W`: nearest workday
	layoutLastWorkdom         = `^lw$`                          // `LW`: last workday of month
	layoutDowOfLastWeek       = `^(%value%)l$`                  // `5L`: weekday of last week
	layoutDowOfSpecificWeek   = `^(%value%)#([1-5])$`           // `5#3`: weekday of nth week
	fieldFinder               = regexp.MustCompile(`\S+`)       // whitespace-separated fields
	entryFinder               = regexp.MustCompile(`[^,]+`)     // comma-separated entries
	layoutRegexp              = make(map[string]*regexp.Regexp) // compiled-layout cache
)
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// cronNormalizer rewrites the predefined @-aliases into their equivalent
// seven-field (seconds + year) cron expressions before parsing.
var cronNormalizer = strings.NewReplacer(
	"@yearly", "0 0 0 1 1 * *",
	"@annually", "0 0 0 1 1 * *",
	"@monthly", "0 0 0 1 * * *",
	"@weekly", "0 0 0 * * 0 *",
	"@daily", "0 0 0 * * * *",
	"@hourly", "0 0 * * * * *")
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
func (expr *Expression) secondFieldHandler(s string) error {
|
||||
var err error
|
||||
expr.secondList, err = genericFieldHandler(s, secondDescriptor)
|
||||
return err
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
func (expr *Expression) minuteFieldHandler(s string) error {
|
||||
var err error
|
||||
expr.minuteList, err = genericFieldHandler(s, minuteDescriptor)
|
||||
return err
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
func (expr *Expression) hourFieldHandler(s string) error {
|
||||
var err error
|
||||
expr.hourList, err = genericFieldHandler(s, hourDescriptor)
|
||||
return err
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
func (expr *Expression) monthFieldHandler(s string) error {
|
||||
var err error
|
||||
expr.monthList, err = genericFieldHandler(s, monthDescriptor)
|
||||
return err
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
func (expr *Expression) yearFieldHandler(s string) error {
|
||||
var err error
|
||||
expr.yearList, err = genericFieldHandler(s, yearDescriptor)
|
||||
return err
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// Kinds of parsed field directives.
const (
	none = 0 // not matched by the generic layouts; field-specific syntax (L, W, #, ...)
	one  = 1 // a single value
	span = 2 // a range of values with a step
	all  = 3 // the `*` wildcard: every value the field allows
)
|
||||
|
||||
// cronDirective is the parsed form of one comma-separated entry of a
// cron field.
type cronDirective struct {
	kind  int // none, one, span or all
	first int // first value covered (kind one/span)
	last  int // last value covered (kind span)
	step  int // increment between values (kind span)
	sbeg  int // start offset of this entry within the original field string
	send  int // end offset of this entry within the original field string
}
|
||||
|
||||
// genericFieldHandler parses one cron field according to desc and returns
// the sorted list of values it selects. A `*` entry short-circuits to the
// field's full default list; an entry the generic parser cannot classify
// (kind none) is a syntax error for plain numeric fields.
func genericFieldHandler(s string, desc fieldDescriptor) ([]int, error) {
	directives, err := genericFieldParse(s, desc)
	if err != nil {
		return nil, err
	}
	values := make(map[int]bool)
	for _, directive := range directives {
		switch directive.kind {
		case none:
			return nil, fmt.Errorf("syntax error in %s field: '%s'", desc.name, s[directive.sbeg:directive.send])
		case one:
			populateOne(values, directive.first)
		case span:
			populateMany(values, directive.first, directive.last, directive.step)
		case all:
			// `*` selects everything; no need to look at further entries.
			return desc.defaultList, nil
		}
	}
	return toList(values), nil
}
|
||||
|
||||
// dowFieldHandler parses the day-of-week field, including the extended
// `dL` (weekday of last week) and `d#n` (weekday of nth week) forms that
// the generic parser reports as kind none.
func (expr *Expression) dowFieldHandler(s string) error {
	expr.daysOfWeekRestricted = true
	expr.daysOfWeek = make(map[int]bool)
	expr.lastWeekDaysOfWeek = make(map[int]bool)
	expr.specificWeekDaysOfWeek = make(map[int]bool)

	directives, err := genericFieldParse(s, dowDescriptor)
	if err != nil {
		return err
	}

	for _, directive := range directives {
		switch directive.kind {
		case none:
			sdirective := s[directive.sbeg:directive.send]
			snormal := strings.ToLower(sdirective)
			// `5L`
			pairs := makeLayoutRegexp(layoutDowOfLastWeek, dowDescriptor.valuePattern).FindStringSubmatchIndex(snormal)
			if len(pairs) > 0 {
				populateOne(expr.lastWeekDaysOfWeek, dowDescriptor.atoi(snormal[pairs[2]:pairs[3]]))
			} else {
				// `5#3` — encoded as (week-1)*7 + weekday%7 so a single
				// int carries both the week and the day.
				pairs := makeLayoutRegexp(layoutDowOfSpecificWeek, dowDescriptor.valuePattern).FindStringSubmatchIndex(snormal)
				if len(pairs) > 0 {
					populateOne(expr.specificWeekDaysOfWeek, (dowDescriptor.atoi(snormal[pairs[4]:pairs[5]])-1)*7+(dowDescriptor.atoi(snormal[pairs[2]:pairs[3]])%7))
				} else {
					return fmt.Errorf("syntax error in day-of-week field: '%s'", sdirective)
				}
			}
		case one:
			populateOne(expr.daysOfWeek, directive.first)
		case span:
			populateMany(expr.daysOfWeek, directive.first, directive.last, directive.step)
		case all:
			// `*`: record all days but mark the field as unrestricted.
			populateMany(expr.daysOfWeek, directive.first, directive.last, directive.step)
			expr.daysOfWeekRestricted = false
		}
	}
	return nil
}
|
||||
|
||||
// domFieldHandler parses the day-of-month field, including the extended
// `L` (last day), `LW` (last workday) and `nW` (nearest workday) forms
// that the generic parser reports as kind none.
func (expr *Expression) domFieldHandler(s string) error {
	expr.daysOfMonthRestricted = true
	expr.lastDayOfMonth = false
	expr.lastWorkdayOfMonth = false
	expr.daysOfMonth = make(map[int]bool)     // days of month map
	expr.workdaysOfMonth = make(map[int]bool) // work days of month map

	directives, err := genericFieldParse(s, domDescriptor)
	if err != nil {
		return err
	}

	for _, directive := range directives {
		switch directive.kind {
		case none:
			sdirective := s[directive.sbeg:directive.send]
			snormal := strings.ToLower(sdirective)
			// `L`
			if makeLayoutRegexp(layoutLastDom, domDescriptor.valuePattern).MatchString(snormal) {
				expr.lastDayOfMonth = true
			} else {
				// `LW`
				if makeLayoutRegexp(layoutLastWorkdom, domDescriptor.valuePattern).MatchString(snormal) {
					expr.lastWorkdayOfMonth = true
				} else {
					// `15W`
					pairs := makeLayoutRegexp(layoutWorkdom, domDescriptor.valuePattern).FindStringSubmatchIndex(snormal)
					if len(pairs) > 0 {
						populateOne(expr.workdaysOfMonth, domDescriptor.atoi(snormal[pairs[2]:pairs[3]]))
					} else {
						return fmt.Errorf("syntax error in day-of-month field: '%s'", sdirective)
					}
				}
			}
		case one:
			populateOne(expr.daysOfMonth, directive.first)
		case span:
			populateMany(expr.daysOfMonth, directive.first, directive.last, directive.step)
		case all:
			// `*`: record all days but mark the field as unrestricted.
			populateMany(expr.daysOfMonth, directive.first, directive.last, directive.step)
			expr.daysOfMonthRestricted = false
		}
	}
	return nil
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// populateOne marks a single value as present in the set.
func populateOne(values map[int]bool, v int) {
	values[v] = true
}
|
||||
|
||||
// populateMany marks every step-th value in the inclusive range
// [min, max] as present in the set.
func populateMany(values map[int]bool, min, max, step int) {
	for v := min; v <= max; v += step {
		values[v] = true
	}
}
|
||||
|
||||
// toList flattens the set into a sorted slice of its member values.
func toList(set map[int]bool) []int {
	list := make([]int, 0, len(set))
	for k := range set {
		list = append(list, k)
	}
	sort.Ints(list)
	return list
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
// genericFieldParse parses one cron field into a list of directives.
// Each entry found by entryFinder is matched, in order, against the
// known layouts: `*`, a bare value, a range `a-b`, and the interval
// forms `*/n`, `a/n`, `a-b/n`. Entries matching none of these are
// emitted with kind none so the caller can apply field-specific
// handling (or reject them). Returns an error when the field is empty
// or an interval is out of range.
func genericFieldParse(s string, desc fieldDescriptor) ([]*cronDirective, error) {
	// At least one entry must be present
	indices := entryFinder.FindAllStringIndex(s, -1)
	if len(indices) == 0 {
		return nil, fmt.Errorf("%s field: missing directive", desc.name)
	}

	directives := make([]*cronDirective, 0, len(indices))

	for i := range indices {
		// Track the entry's byte offsets in the original string so the
		// caller can report precise positions for unhandled directives.
		directive := cronDirective{
			sbeg: indices[i][0],
			send: indices[i][1],
		}
		snormal := strings.ToLower(s[indices[i][0]:indices[i][1]])

		// `*`
		if makeLayoutRegexp(layoutWildcard, desc.valuePattern).MatchString(snormal) {
			directive.kind = all
			directive.first = desc.min
			directive.last = desc.max
			directive.step = 1
			directives = append(directives, &directive)
			continue
		}
		// `5`
		if makeLayoutRegexp(layoutValue, desc.valuePattern).MatchString(snormal) {
			directive.kind = one
			directive.first = desc.atoi(snormal)
			directives = append(directives, &directive)
			continue
		}
		// `5-20`
		pairs := makeLayoutRegexp(layoutRange, desc.valuePattern).FindStringSubmatchIndex(snormal)
		if len(pairs) > 0 {
			directive.kind = span
			directive.first = desc.atoi(snormal[pairs[2]:pairs[3]])
			directive.last = desc.atoi(snormal[pairs[4]:pairs[5]])
			directive.step = 1
			directives = append(directives, &directive)
			continue
		}
		// `*/2` — full range of the field, stepped by the interval.
		pairs = makeLayoutRegexp(layoutWildcardAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal)
		if len(pairs) > 0 {
			directive.kind = span
			directive.first = desc.min
			directive.last = desc.max
			directive.step = atoi(snormal[pairs[2]:pairs[3]])
			if directive.step < 1 || directive.step > desc.max {
				return nil, fmt.Errorf("invalid interval %s", snormal)
			}
			directives = append(directives, &directive)
			continue
		}
		// `5/2` — from the given value to the field maximum, stepped.
		pairs = makeLayoutRegexp(layoutValueAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal)
		if len(pairs) > 0 {
			directive.kind = span
			directive.first = desc.atoi(snormal[pairs[2]:pairs[3]])
			directive.last = desc.max
			directive.step = atoi(snormal[pairs[4]:pairs[5]])
			if directive.step < 1 || directive.step > desc.max {
				return nil, fmt.Errorf("invalid interval %s", snormal)
			}
			directives = append(directives, &directive)
			continue
		}
		// `5-20/2` — explicit range, stepped.
		pairs = makeLayoutRegexp(layoutRangeAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal)
		if len(pairs) > 0 {
			directive.kind = span
			directive.first = desc.atoi(snormal[pairs[2]:pairs[3]])
			directive.last = desc.atoi(snormal[pairs[4]:pairs[5]])
			directive.step = atoi(snormal[pairs[6]:pairs[7]])
			if directive.step < 1 || directive.step > desc.max {
				return nil, fmt.Errorf("invalid interval %s", snormal)
			}
			directives = append(directives, &directive)
			continue
		}
		// No behavior for this one, let caller deal with it
		directive.kind = none
		directives = append(directives, &directive)
	}
	return directives, nil
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
func makeLayoutRegexp(layout, value string) *regexp.Regexp {
|
||||
layout = strings.Replace(layout, `%value%`, value, -1)
|
||||
re := layoutRegexp[layout]
|
||||
if re == nil {
|
||||
re = regexp.MustCompile(layout)
|
||||
layoutRegexp[layout] = re
|
||||
}
|
||||
return re
|
||||
}
|
|
@ -0,0 +1,363 @@
|
|||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
|
@ -0,0 +1,186 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ACLPolicies is used to query the ACL Policy endpoints.
type ACLPolicies struct {
	// client issues the underlying HTTP requests.
	client *Client
}

// ACLPolicies returns a new handle on the ACL policies.
func (c *Client) ACLPolicies() *ACLPolicies {
	return &ACLPolicies{client: c}
}
|
||||
|
||||
// List is used to dump all of the policies.
|
||||
func (a *ACLPolicies) List(q *QueryOptions) ([]*ACLPolicyListStub, *QueryMeta, error) {
|
||||
var resp []*ACLPolicyListStub
|
||||
qm, err := a.client.query("/v1/acl/policies", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// Upsert is used to create or update a policy
|
||||
func (a *ACLPolicies) Upsert(policy *ACLPolicy, q *WriteOptions) (*WriteMeta, error) {
|
||||
if policy == nil || policy.Name == "" {
|
||||
return nil, fmt.Errorf("missing policy name")
|
||||
}
|
||||
wm, err := a.client.write("/v1/acl/policy/"+policy.Name, policy, nil, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return wm, nil
|
||||
}
|
||||
|
||||
// Delete is used to delete a policy
|
||||
func (a *ACLPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) {
|
||||
if policyName == "" {
|
||||
return nil, fmt.Errorf("missing policy name")
|
||||
}
|
||||
wm, err := a.client.delete("/v1/acl/policy/"+policyName, nil, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return wm, nil
|
||||
}
|
||||
|
||||
// Info is used to query a specific policy
|
||||
func (a *ACLPolicies) Info(policyName string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) {
|
||||
if policyName == "" {
|
||||
return nil, nil, fmt.Errorf("missing policy name")
|
||||
}
|
||||
var resp ACLPolicy
|
||||
wm, err := a.client.query("/v1/acl/policy/"+policyName, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// ACLTokens is used to query the ACL token endpoints.
type ACLTokens struct {
	// client issues the underlying HTTP requests.
	client *Client
}

// ACLTokens returns a new handle on the ACL tokens.
func (c *Client) ACLTokens() *ACLTokens {
	return &ACLTokens{client: c}
}
|
||||
|
||||
// Bootstrap is used to get the initial bootstrap token
|
||||
func (a *ACLTokens) Bootstrap(q *WriteOptions) (*ACLToken, *WriteMeta, error) {
|
||||
var resp ACLToken
|
||||
wm, err := a.client.write("/v1/acl/bootstrap", nil, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// List is used to dump all of the tokens.
|
||||
func (a *ACLTokens) List(q *QueryOptions) ([]*ACLTokenListStub, *QueryMeta, error) {
|
||||
var resp []*ACLTokenListStub
|
||||
qm, err := a.client.query("/v1/acl/tokens", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// Create is used to create a token
|
||||
func (a *ACLTokens) Create(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
|
||||
if token.AccessorID != "" {
|
||||
return nil, nil, fmt.Errorf("cannot specify Accessor ID")
|
||||
}
|
||||
var resp ACLToken
|
||||
wm, err := a.client.write("/v1/acl/token", token, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// Update is used to update an existing token
|
||||
func (a *ACLTokens) Update(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
|
||||
if token.AccessorID == "" {
|
||||
return nil, nil, fmt.Errorf("missing accessor ID")
|
||||
}
|
||||
var resp ACLToken
|
||||
wm, err := a.client.write("/v1/acl/token/"+token.AccessorID,
|
||||
token, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// Delete is used to delete a token
|
||||
func (a *ACLTokens) Delete(accessorID string, q *WriteOptions) (*WriteMeta, error) {
|
||||
if accessorID == "" {
|
||||
return nil, fmt.Errorf("missing accessor ID")
|
||||
}
|
||||
wm, err := a.client.delete("/v1/acl/token/"+accessorID, nil, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return wm, nil
|
||||
}
|
||||
|
||||
// Info is used to query a token
|
||||
func (a *ACLTokens) Info(accessorID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) {
|
||||
if accessorID == "" {
|
||||
return nil, nil, fmt.Errorf("missing accessor ID")
|
||||
}
|
||||
var resp ACLToken
|
||||
wm, err := a.client.query("/v1/acl/token/"+accessorID, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// ACLPolicyListStub is used for listing ACL policies. It carries the
// summary fields returned by the list endpoint, without the rules.
type ACLPolicyListStub struct {
	Name        string
	Description string
	CreateIndex uint64 // server-assigned index at creation
	ModifyIndex uint64 // server-assigned index of last modification
}

// ACLPolicy is used to represent an ACL policy
type ACLPolicy struct {
	Name        string
	Description string
	// Rules holds the policy rule text; format is server-defined —
	// NOTE(review): confirm expected syntax against Nomad docs.
	Rules       string
	CreateIndex uint64
	ModifyIndex uint64
}

// ACLToken represents a client token which is used to Authenticate
type ACLToken struct {
	AccessorID string // public identifier, used to address the token
	SecretID   string // private identifier, presented to authenticate
	Name       string
	Type       string
	Policies   []string // names of the policies attached to the token
	Global     bool
	CreateTime time.Time
	CreateIndex uint64
	ModifyIndex uint64
}

// ACLTokenListStub is the summary form of ACLToken returned by the
// list endpoint; it omits the SecretID.
type ACLTokenListStub struct {
	AccessorID string
	Name       string
	Type       string
	Policies   []string
	Global     bool
	CreateTime time.Time
	CreateIndex uint64
	ModifyIndex uint64
}
|
|
@ -0,0 +1,267 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// Agent encapsulates an API client which talks to Nomad's
// agent endpoints for a specific node.
type Agent struct {
	client *Client

	// Cache static agent info so repeated NodeName/Datacenter/Region
	// lookups avoid another round trip to /v1/agent/self.
	nodeName   string
	datacenter string
	region     string
}

// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
	Messages map[string]string // per-node status messages
	// Keys maps each installed key to a count — presumably the number
	// of nodes reporting it; confirm against the server implementation.
	Keys     map[string]int
	NumNodes int
}

// KeyringRequest is the request object for serf key operations.
type KeyringRequest struct {
	Key string
}
|
||||
|
||||
// Agent returns a new agent which can be used to query
// the agent-specific endpoints. Each call returns a fresh handle
// with an empty info cache.
func (c *Client) Agent() *Agent {
	return &Agent{client: c}
}
|
||||
|
||||
// Self is used to query the /v1/agent/self endpoint and
|
||||
// returns information specific to the running agent.
|
||||
func (a *Agent) Self() (*AgentSelf, error) {
|
||||
var out *AgentSelf
|
||||
|
||||
// Query the self endpoint on the agent
|
||||
_, err := a.client.query("/v1/agent/self", &out, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed querying self endpoint: %s", err)
|
||||
}
|
||||
|
||||
// Populate the cache for faster queries
|
||||
a.populateCache(out)
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// populateCache is used to insert various pieces of static
|
||||
// data into the agent handle. This is used during subsequent
|
||||
// lookups for the same data later on to save the round trip.
|
||||
func (a *Agent) populateCache(self *AgentSelf) {
|
||||
if a.nodeName == "" {
|
||||
a.nodeName = self.Member.Name
|
||||
}
|
||||
if a.datacenter == "" {
|
||||
if val, ok := self.Config["Datacenter"]; ok {
|
||||
a.datacenter, _ = val.(string)
|
||||
}
|
||||
}
|
||||
if a.region == "" {
|
||||
if val, ok := self.Config["Region"]; ok {
|
||||
a.region, _ = val.(string)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NodeName is used to query the Nomad agent for its node name.
|
||||
func (a *Agent) NodeName() (string, error) {
|
||||
// Return from cache if we have it
|
||||
if a.nodeName != "" {
|
||||
return a.nodeName, nil
|
||||
}
|
||||
|
||||
// Query the node name
|
||||
_, err := a.Self()
|
||||
return a.nodeName, err
|
||||
}
|
||||
|
||||
// Datacenter is used to return the name of the datacenter which
|
||||
// the agent is a member of.
|
||||
func (a *Agent) Datacenter() (string, error) {
|
||||
// Return from cache if we have it
|
||||
if a.datacenter != "" {
|
||||
return a.datacenter, nil
|
||||
}
|
||||
|
||||
// Query the agent for the DC
|
||||
_, err := a.Self()
|
||||
return a.datacenter, err
|
||||
}
|
||||
|
||||
// Region is used to look up the region the agent is in.
|
||||
func (a *Agent) Region() (string, error) {
|
||||
// Return from cache if we have it
|
||||
if a.region != "" {
|
||||
return a.region, nil
|
||||
}
|
||||
|
||||
// Query the agent for the region
|
||||
_, err := a.Self()
|
||||
return a.region, err
|
||||
}
|
||||
|
||||
// Join is used to instruct a server node to join another server
|
||||
// via the gossip protocol. Multiple addresses may be specified.
|
||||
// We attempt to join all of the hosts in the list. Returns the
|
||||
// number of nodes successfully joined and any error. If one or
|
||||
// more nodes have a successful result, no error is returned.
|
||||
func (a *Agent) Join(addrs ...string) (int, error) {
|
||||
// Accumulate the addresses
|
||||
v := url.Values{}
|
||||
for _, addr := range addrs {
|
||||
v.Add("address", addr)
|
||||
}
|
||||
|
||||
// Send the join request
|
||||
var resp joinResponse
|
||||
_, err := a.client.write("/v1/agent/join?"+v.Encode(), nil, &resp, nil)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed joining: %s", err)
|
||||
}
|
||||
if resp.Error != "" {
|
||||
return 0, fmt.Errorf("failed joining: %s", resp.Error)
|
||||
}
|
||||
return resp.NumJoined, nil
|
||||
}
|
||||
|
||||
// Members is used to query all of the known server members
|
||||
func (a *Agent) Members() (*ServerMembers, error) {
|
||||
var resp *ServerMembers
|
||||
|
||||
// Query the known members
|
||||
_, err := a.client.query("/v1/agent/members", &resp, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ForceLeave is used to eject an existing node from the cluster.
|
||||
func (a *Agent) ForceLeave(node string) error {
|
||||
_, err := a.client.write("/v1/agent/force-leave?node="+node, nil, nil, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// Servers is used to query the list of servers on a client node.
|
||||
func (a *Agent) Servers() ([]string, error) {
|
||||
var resp []string
|
||||
_, err := a.client.query("/v1/agent/servers", &resp, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// SetServers is used to update the list of servers on a client node.
|
||||
func (a *Agent) SetServers(addrs []string) error {
|
||||
// Accumulate the addresses
|
||||
v := url.Values{}
|
||||
for _, addr := range addrs {
|
||||
v.Add("address", addr)
|
||||
}
|
||||
|
||||
_, err := a.client.write("/v1/agent/servers?"+v.Encode(), nil, nil, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// ListKeys returns the list of installed keys
|
||||
func (a *Agent) ListKeys() (*KeyringResponse, error) {
|
||||
var resp KeyringResponse
|
||||
_, err := a.client.query("/v1/agent/keyring/list", &resp, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// InstallKey installs a key in the keyrings of all the serf members
|
||||
func (a *Agent) InstallKey(key string) (*KeyringResponse, error) {
|
||||
args := KeyringRequest{
|
||||
Key: key,
|
||||
}
|
||||
var resp KeyringResponse
|
||||
_, err := a.client.write("/v1/agent/keyring/install", &args, &resp, nil)
|
||||
return &resp, err
|
||||
}
|
||||
|
||||
// UseKey uses a key from the keyring of serf members
|
||||
func (a *Agent) UseKey(key string) (*KeyringResponse, error) {
|
||||
args := KeyringRequest{
|
||||
Key: key,
|
||||
}
|
||||
var resp KeyringResponse
|
||||
_, err := a.client.write("/v1/agent/keyring/use", &args, &resp, nil)
|
||||
return &resp, err
|
||||
}
|
||||
|
||||
// RemoveKey removes a particular key from keyrings of serf members
|
||||
func (a *Agent) RemoveKey(key string) (*KeyringResponse, error) {
|
||||
args := KeyringRequest{
|
||||
Key: key,
|
||||
}
|
||||
var resp KeyringResponse
|
||||
_, err := a.client.write("/v1/agent/keyring/remove", &args, &resp, nil)
|
||||
return &resp, err
|
||||
}
|
||||
|
||||
// joinResponse is used to decode the response we get while
|
||||
// sending a member join request.
|
||||
type joinResponse struct {
|
||||
NumJoined int `json:"num_joined"`
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
type ServerMembers struct {
|
||||
ServerName string
|
||||
ServerRegion string
|
||||
ServerDC string
|
||||
Members []*AgentMember
|
||||
}
|
||||
|
||||
type AgentSelf struct {
|
||||
Config map[string]interface{} `json:"config"`
|
||||
Member AgentMember `json:"member"`
|
||||
Stats map[string]map[string]string `json:"stats"`
|
||||
}
|
||||
|
||||
// AgentMember represents a cluster member known to the agent.
type AgentMember struct {
	Name string
	Addr string
	Port uint16
	Tags map[string]string
	// Status is the member's gossip status string.
	Status string
	// Protocol/delegate version ranges advertised by the member.
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
	DelegateMin uint8
	DelegateMax uint8
	DelegateCur uint8
}

// AgentMembersNameSort implements sort.Interface for []*AgentMember,
// ordering by region, then datacenter, then name (all via the member's
// tags for region/dc).
type AgentMembersNameSort []*AgentMember

// Len reports the number of members.
func (a AgentMembersNameSort) Len() int { return len(a) }

// Swap exchanges two members in place.
func (a AgentMembersNameSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less orders members by region, then dc, then name.
func (a AgentMembersNameSort) Less(i, j int) bool {
	left, right := a[i], a[j]

	if r1, r2 := left.Tags["region"], right.Tags["region"]; r1 != r2 {
		return r1 < r2
	}

	if d1, d2 := left.Tags["dc"], right.Tags["dc"]; d1 != d2 {
		return d1 < d2
	}

	return left.Name < right.Name
}
|
|
@ -0,0 +1,157 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// NodeDownErr marks an operation as not able to complete since the node is
|
||||
// down.
|
||||
NodeDownErr = fmt.Errorf("node down")
|
||||
)
|
||||
|
||||
// Allocations is used to query the alloc-related endpoints.
|
||||
type Allocations struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
// Allocations returns a handle on the allocs endpoints.
|
||||
func (c *Client) Allocations() *Allocations {
|
||||
return &Allocations{client: c}
|
||||
}
|
||||
|
||||
// List returns a list of all of the allocations.
|
||||
func (a *Allocations) List(q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) {
|
||||
var resp []*AllocationListStub
|
||||
qm, err := a.client.query("/v1/allocations", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(AllocIndexSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
func (a *Allocations) PrefixList(prefix string) ([]*AllocationListStub, *QueryMeta, error) {
|
||||
return a.List(&QueryOptions{Prefix: prefix})
|
||||
}
|
||||
|
||||
// Info is used to retrieve a single allocation.
|
||||
func (a *Allocations) Info(allocID string, q *QueryOptions) (*Allocation, *QueryMeta, error) {
|
||||
var resp Allocation
|
||||
qm, err := a.client.query("/v1/allocation/"+allocID, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, qm, nil
|
||||
}
|
||||
|
||||
func (a *Allocations) Stats(alloc *Allocation, q *QueryOptions) (*AllocResourceUsage, error) {
|
||||
nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var resp AllocResourceUsage
|
||||
_, err = nodeClient.query("/v1/client/allocation/"+alloc.ID+"/stats", &resp, nil)
|
||||
return &resp, err
|
||||
}
|
||||
|
||||
func (a *Allocations) GC(alloc *Allocation, q *QueryOptions) error {
|
||||
nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var resp struct{}
|
||||
_, err = nodeClient.query("/v1/client/allocation/"+alloc.ID+"/gc", &resp, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// Allocation is used for serialization of allocations.
|
||||
type Allocation struct {
|
||||
ID string
|
||||
Namespace string
|
||||
EvalID string
|
||||
Name string
|
||||
NodeID string
|
||||
JobID string
|
||||
Job *Job
|
||||
TaskGroup string
|
||||
Resources *Resources
|
||||
TaskResources map[string]*Resources
|
||||
Services map[string]string
|
||||
Metrics *AllocationMetric
|
||||
DesiredStatus string
|
||||
DesiredDescription string
|
||||
ClientStatus string
|
||||
ClientDescription string
|
||||
TaskStates map[string]*TaskState
|
||||
DeploymentID string
|
||||
DeploymentStatus *AllocDeploymentStatus
|
||||
PreviousAllocation string
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
AllocModifyIndex uint64
|
||||
CreateTime int64
|
||||
}
|
||||
|
||||
// AllocationMetric is used to deserialize allocation metrics.
|
||||
type AllocationMetric struct {
|
||||
NodesEvaluated int
|
||||
NodesFiltered int
|
||||
NodesAvailable map[string]int
|
||||
ClassFiltered map[string]int
|
||||
ConstraintFiltered map[string]int
|
||||
NodesExhausted int
|
||||
ClassExhausted map[string]int
|
||||
DimensionExhausted map[string]int
|
||||
Scores map[string]float64
|
||||
AllocationTime time.Duration
|
||||
CoalescedFailures int
|
||||
}
|
||||
|
||||
// AllocationListStub is used to return a subset of an allocation
|
||||
// during list operations.
|
||||
type AllocationListStub struct {
|
||||
ID string
|
||||
EvalID string
|
||||
Name string
|
||||
NodeID string
|
||||
JobID string
|
||||
JobVersion uint64
|
||||
TaskGroup string
|
||||
DesiredStatus string
|
||||
DesiredDescription string
|
||||
ClientStatus string
|
||||
ClientDescription string
|
||||
TaskStates map[string]*TaskState
|
||||
DeploymentStatus *AllocDeploymentStatus
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
CreateTime int64
|
||||
}
|
||||
|
||||
// AllocDeploymentStatus captures the status of the allocation as part of the
|
||||
// deployment. This can include things like if the allocation has been marked as
|
||||
// heatlhy.
|
||||
type AllocDeploymentStatus struct {
|
||||
Healthy *bool
|
||||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
// AllocIndexSort reverse sorts allocs by CreateIndex.
|
||||
type AllocIndexSort []*AllocationListStub
|
||||
|
||||
func (a AllocIndexSort) Len() int {
|
||||
return len(a)
|
||||
}
|
||||
|
||||
func (a AllocIndexSort) Less(i, j int) bool {
|
||||
return a[i].CreateIndex > a[j].CreateIndex
|
||||
}
|
||||
|
||||
func (a AllocIndexSort) Swap(i, j int) {
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
|
@ -0,0 +1,777 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
rootcerts "github.com/hashicorp/go-rootcerts"
|
||||
)
|
||||
|
||||
// QueryOptions are used to parameterize a query
|
||||
type QueryOptions struct {
|
||||
// Providing a datacenter overwrites the region provided
|
||||
// by the Config
|
||||
Region string
|
||||
|
||||
// Namespace is the target namespace for the query.
|
||||
Namespace string
|
||||
|
||||
// AllowStale allows any Nomad server (non-leader) to service
|
||||
// a read. This allows for lower latency and higher throughput
|
||||
AllowStale bool
|
||||
|
||||
// WaitIndex is used to enable a blocking query. Waits
|
||||
// until the timeout or the next index is reached
|
||||
WaitIndex uint64
|
||||
|
||||
// WaitTime is used to bound the duration of a wait.
|
||||
// Defaults to that of the Config, but can be overridden.
|
||||
WaitTime time.Duration
|
||||
|
||||
// If set, used as prefix for resource list searches
|
||||
Prefix string
|
||||
|
||||
// Set HTTP parameters on the query.
|
||||
Params map[string]string
|
||||
|
||||
// SecretID is the secret ID of an ACL token
|
||||
SecretID string
|
||||
}
|
||||
|
||||
// WriteOptions are used to parameterize a write
|
||||
type WriteOptions struct {
|
||||
// Providing a datacenter overwrites the region provided
|
||||
// by the Config
|
||||
Region string
|
||||
|
||||
// Namespace is the target namespace for the write.
|
||||
Namespace string
|
||||
|
||||
// SecretID is the secret ID of an ACL token
|
||||
SecretID string
|
||||
}
|
||||
|
||||
// QueryMeta is used to return meta data about a query
|
||||
type QueryMeta struct {
|
||||
// LastIndex. This can be used as a WaitIndex to perform
|
||||
// a blocking query
|
||||
LastIndex uint64
|
||||
|
||||
// Time of last contact from the leader for the
|
||||
// server servicing the request
|
||||
LastContact time.Duration
|
||||
|
||||
// Is there a known leader
|
||||
KnownLeader bool
|
||||
|
||||
// How long did the request take
|
||||
RequestTime time.Duration
|
||||
}
|
||||
|
||||
// WriteMeta is used to return meta data about a write
|
||||
type WriteMeta struct {
|
||||
// LastIndex. This can be used as a WaitIndex to perform
|
||||
// a blocking query
|
||||
LastIndex uint64
|
||||
|
||||
// How long did the request take
|
||||
RequestTime time.Duration
|
||||
}
|
||||
|
||||
// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication
|
||||
type HttpBasicAuth struct {
|
||||
// Username to use for HTTP Basic Authentication
|
||||
Username string
|
||||
|
||||
// Password to use for HTTP Basic Authentication
|
||||
Password string
|
||||
}
|
||||
|
||||
// Config is used to configure the creation of a client
|
||||
type Config struct {
|
||||
// Address is the address of the Nomad agent
|
||||
Address string
|
||||
|
||||
// Region to use. If not provided, the default agent region is used.
|
||||
Region string
|
||||
|
||||
// SecretID to use. This can be overwritten per request.
|
||||
SecretID string
|
||||
|
||||
// Namespace to use. If not provided the default namespace is used.
|
||||
Namespace string
|
||||
|
||||
// httpClient is the client to use. Default will be used if not provided.
|
||||
httpClient *http.Client
|
||||
|
||||
// HttpAuth is the auth info to use for http access.
|
||||
HttpAuth *HttpBasicAuth
|
||||
|
||||
// WaitTime limits how long a Watch will block. If not provided,
|
||||
// the agent default values will be used.
|
||||
WaitTime time.Duration
|
||||
|
||||
// TLSConfig provides the various TLS related configurations for the http
|
||||
// client
|
||||
TLSConfig *TLSConfig
|
||||
}
|
||||
|
||||
// ClientConfig copies the configuration with a new client address, region, and
|
||||
// whether the client has TLS enabled.
|
||||
func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config {
|
||||
scheme := "http"
|
||||
if tlsEnabled {
|
||||
scheme = "https"
|
||||
}
|
||||
defaultConfig := DefaultConfig()
|
||||
config := &Config{
|
||||
Address: fmt.Sprintf("%s://%s", scheme, address),
|
||||
Region: region,
|
||||
Namespace: c.Namespace,
|
||||
httpClient: defaultConfig.httpClient,
|
||||
SecretID: c.SecretID,
|
||||
HttpAuth: c.HttpAuth,
|
||||
WaitTime: c.WaitTime,
|
||||
TLSConfig: c.TLSConfig.Copy(),
|
||||
}
|
||||
if tlsEnabled && config.TLSConfig != nil {
|
||||
config.TLSConfig.TLSServerName = fmt.Sprintf("client.%s.nomad", region)
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
// TLSConfig contains the parameters needed to configure TLS on the HTTP
// client used to communicate with Nomad.
type TLSConfig struct {
	// CACert is the path to a PEM-encoded CA cert file to use to verify
	// the Nomad server SSL certificate.
	CACert string

	// CAPath is the path to a directory of PEM-encoded CA cert files to
	// verify the Nomad server SSL certificate.
	CAPath string

	// ClientCert is the path to the certificate for Nomad communication.
	ClientCert string

	// ClientKey is the path to the private key for Nomad communication.
	ClientKey string

	// TLSServerName, if set, is used to set the SNI host when connecting
	// via TLS.
	TLSServerName string

	// Insecure enables or disables SSL verification.
	Insecure bool
}

// Copy returns a shallow duplicate of the TLSConfig, or nil when called on
// a nil receiver.
func (t *TLSConfig) Copy() *TLSConfig {
	if t == nil {
		return nil
	}

	dup := *t
	return &dup
}
|
||||
|
||||
// DefaultConfig returns a default configuration for the client
|
||||
func DefaultConfig() *Config {
|
||||
config := &Config{
|
||||
Address: "http://127.0.0.1:4646",
|
||||
httpClient: cleanhttp.DefaultClient(),
|
||||
TLSConfig: &TLSConfig{},
|
||||
}
|
||||
transport := config.httpClient.Transport.(*http.Transport)
|
||||
transport.TLSHandshakeTimeout = 10 * time.Second
|
||||
transport.TLSClientConfig = &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
}
|
||||
|
||||
if addr := os.Getenv("NOMAD_ADDR"); addr != "" {
|
||||
config.Address = addr
|
||||
}
|
||||
if v := os.Getenv("NOMAD_REGION"); v != "" {
|
||||
config.Region = v
|
||||
}
|
||||
if v := os.Getenv("NOMAD_NAMESPACE"); v != "" {
|
||||
config.Namespace = v
|
||||
}
|
||||
if auth := os.Getenv("NOMAD_HTTP_AUTH"); auth != "" {
|
||||
var username, password string
|
||||
if strings.Contains(auth, ":") {
|
||||
split := strings.SplitN(auth, ":", 2)
|
||||
username = split[0]
|
||||
password = split[1]
|
||||
} else {
|
||||
username = auth
|
||||
}
|
||||
|
||||
config.HttpAuth = &HttpBasicAuth{
|
||||
Username: username,
|
||||
Password: password,
|
||||
}
|
||||
}
|
||||
|
||||
// Read TLS specific env vars
|
||||
if v := os.Getenv("NOMAD_CACERT"); v != "" {
|
||||
config.TLSConfig.CACert = v
|
||||
}
|
||||
if v := os.Getenv("NOMAD_CAPATH"); v != "" {
|
||||
config.TLSConfig.CAPath = v
|
||||
}
|
||||
if v := os.Getenv("NOMAD_CLIENT_CERT"); v != "" {
|
||||
config.TLSConfig.ClientCert = v
|
||||
}
|
||||
if v := os.Getenv("NOMAD_CLIENT_KEY"); v != "" {
|
||||
config.TLSConfig.ClientKey = v
|
||||
}
|
||||
if v := os.Getenv("NOMAD_SKIP_VERIFY"); v != "" {
|
||||
if insecure, err := strconv.ParseBool(v); err == nil {
|
||||
config.TLSConfig.Insecure = insecure
|
||||
}
|
||||
}
|
||||
if v := os.Getenv("NOMAD_TOKEN"); v != "" {
|
||||
config.SecretID = v
|
||||
}
|
||||
return config
|
||||
}
|
||||
|
||||
// ConfigureTLS applies a set of TLS configurations to the the HTTP client.
|
||||
func (c *Config) ConfigureTLS() error {
|
||||
if c.TLSConfig == nil {
|
||||
return nil
|
||||
}
|
||||
if c.httpClient == nil {
|
||||
return fmt.Errorf("config HTTP Client must be set")
|
||||
}
|
||||
|
||||
var clientCert tls.Certificate
|
||||
foundClientCert := false
|
||||
if c.TLSConfig.ClientCert != "" || c.TLSConfig.ClientKey != "" {
|
||||
if c.TLSConfig.ClientCert != "" && c.TLSConfig.ClientKey != "" {
|
||||
var err error
|
||||
clientCert, err = tls.LoadX509KeyPair(c.TLSConfig.ClientCert, c.TLSConfig.ClientKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
foundClientCert = true
|
||||
} else {
|
||||
return fmt.Errorf("Both client cert and client key must be provided")
|
||||
}
|
||||
}
|
||||
|
||||
clientTLSConfig := c.httpClient.Transport.(*http.Transport).TLSClientConfig
|
||||
rootConfig := &rootcerts.Config{
|
||||
CAFile: c.TLSConfig.CACert,
|
||||
CAPath: c.TLSConfig.CAPath,
|
||||
}
|
||||
if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientTLSConfig.InsecureSkipVerify = c.TLSConfig.Insecure
|
||||
|
||||
if foundClientCert {
|
||||
clientTLSConfig.Certificates = []tls.Certificate{clientCert}
|
||||
}
|
||||
if c.TLSConfig.TLSServerName != "" {
|
||||
clientTLSConfig.ServerName = c.TLSConfig.TLSServerName
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Client provides a client to the Nomad API
|
||||
type Client struct {
|
||||
config Config
|
||||
}
|
||||
|
||||
// NewClient returns a new client
|
||||
func NewClient(config *Config) (*Client, error) {
|
||||
// bootstrap the config
|
||||
defConfig := DefaultConfig()
|
||||
|
||||
if config.Address == "" {
|
||||
config.Address = defConfig.Address
|
||||
} else if _, err := url.Parse(config.Address); err != nil {
|
||||
return nil, fmt.Errorf("invalid address '%s': %v", config.Address, err)
|
||||
}
|
||||
|
||||
if config.httpClient == nil {
|
||||
config.httpClient = defConfig.httpClient
|
||||
}
|
||||
|
||||
// Configure the TLS cofigurations
|
||||
if err := config.ConfigureTLS(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := &Client{
|
||||
config: *config,
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// SetRegion sets the region to forward API requests to.
|
||||
func (c *Client) SetRegion(region string) {
|
||||
c.config.Region = region
|
||||
}
|
||||
|
||||
// SetNamespace sets the namespace to forward API requests to.
|
||||
func (c *Client) SetNamespace(namespace string) {
|
||||
c.config.Namespace = namespace
|
||||
}
|
||||
|
||||
// GetNodeClient returns a new Client that will dial the specified node. If the
|
||||
// QueryOptions is set, its region will be used.
|
||||
func (c *Client) GetNodeClient(nodeID string, q *QueryOptions) (*Client, error) {
|
||||
return c.getNodeClientImpl(nodeID, q, c.Nodes().Info)
|
||||
}
|
||||
|
||||
// nodeLookup is the definition of a function used to lookup a node. This is
|
||||
// largely used to mock the lookup in tests.
|
||||
type nodeLookup func(nodeID string, q *QueryOptions) (*Node, *QueryMeta, error)
|
||||
|
||||
// getNodeClientImpl is the implementation of creating a API client for
|
||||
// contacting a node. It takes a function to lookup the node such that it can be
|
||||
// mocked during tests.
|
||||
func (c *Client) getNodeClientImpl(nodeID string, q *QueryOptions, lookup nodeLookup) (*Client, error) {
|
||||
node, _, err := lookup(nodeID, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if node.Status == "down" {
|
||||
return nil, NodeDownErr
|
||||
}
|
||||
if node.HTTPAddr == "" {
|
||||
return nil, fmt.Errorf("http addr of node %q (%s) is not advertised", node.Name, nodeID)
|
||||
}
|
||||
|
||||
var region string
|
||||
switch {
|
||||
case q != nil && q.Region != "":
|
||||
// Prefer the region set in the query parameter
|
||||
region = q.Region
|
||||
case c.config.Region != "":
|
||||
// If the client is configured for a particular region use that
|
||||
region = c.config.Region
|
||||
default:
|
||||
// No region information is given so use the default.
|
||||
region = "global"
|
||||
}
|
||||
|
||||
// Get an API client for the node
|
||||
conf := c.config.ClientConfig(region, node.HTTPAddr, node.TLSEnabled)
|
||||
return NewClient(conf)
|
||||
}
|
||||
|
||||
// SetSecretID sets the ACL token secret for API requests.
|
||||
func (c *Client) SetSecretID(secretID string) {
|
||||
c.config.SecretID = secretID
|
||||
}
|
||||
|
||||
// request is used to help build up a request
|
||||
type request struct {
|
||||
config *Config
|
||||
method string
|
||||
url *url.URL
|
||||
params url.Values
|
||||
token string
|
||||
body io.Reader
|
||||
obj interface{}
|
||||
}
|
||||
|
||||
// setQueryOptions is used to annotate the request with
|
||||
// additional query options
|
||||
func (r *request) setQueryOptions(q *QueryOptions) {
|
||||
if q == nil {
|
||||
return
|
||||
}
|
||||
if q.Region != "" {
|
||||
r.params.Set("region", q.Region)
|
||||
}
|
||||
if q.Namespace != "" {
|
||||
r.params.Set("namespace", q.Namespace)
|
||||
}
|
||||
if q.SecretID != "" {
|
||||
r.token = q.SecretID
|
||||
}
|
||||
if q.AllowStale {
|
||||
r.params.Set("stale", "")
|
||||
}
|
||||
if q.WaitIndex != 0 {
|
||||
r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
|
||||
}
|
||||
if q.WaitTime != 0 {
|
||||
r.params.Set("wait", durToMsec(q.WaitTime))
|
||||
}
|
||||
if q.Prefix != "" {
|
||||
r.params.Set("prefix", q.Prefix)
|
||||
}
|
||||
for k, v := range q.Params {
|
||||
r.params.Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// durToMsec converts a duration to a millisecond specified string
|
||||
func durToMsec(dur time.Duration) string {
|
||||
return fmt.Sprintf("%dms", dur/time.Millisecond)
|
||||
}
|
||||
|
||||
// setWriteOptions is used to annotate the request with
|
||||
// additional write options
|
||||
func (r *request) setWriteOptions(q *WriteOptions) {
|
||||
if q == nil {
|
||||
return
|
||||
}
|
||||
if q.Region != "" {
|
||||
r.params.Set("region", q.Region)
|
||||
}
|
||||
if q.Namespace != "" {
|
||||
r.params.Set("namespace", q.Namespace)
|
||||
}
|
||||
if q.SecretID != "" {
|
||||
r.token = q.SecretID
|
||||
}
|
||||
}
|
||||
|
||||
// toHTTP converts the request to an HTTP request
|
||||
func (r *request) toHTTP() (*http.Request, error) {
|
||||
// Encode the query parameters
|
||||
r.url.RawQuery = r.params.Encode()
|
||||
|
||||
// Check if we should encode the body
|
||||
if r.body == nil && r.obj != nil {
|
||||
if b, err := encodeBody(r.obj); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
r.body = b
|
||||
}
|
||||
}
|
||||
|
||||
// Create the HTTP request
|
||||
req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Optionally configure HTTP basic authentication
|
||||
if r.url.User != nil {
|
||||
username := r.url.User.Username()
|
||||
password, _ := r.url.User.Password()
|
||||
req.SetBasicAuth(username, password)
|
||||
} else if r.config.HttpAuth != nil {
|
||||
req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
|
||||
}
|
||||
|
||||
req.Header.Add("Accept-Encoding", "gzip")
|
||||
if r.token != "" {
|
||||
req.Header.Set("X-Nomad-Token", r.token)
|
||||
}
|
||||
|
||||
req.URL.Host = r.url.Host
|
||||
req.URL.Scheme = r.url.Scheme
|
||||
req.Host = r.url.Host
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// newRequest is used to create a new request
|
||||
func (c *Client) newRequest(method, path string) (*request, error) {
|
||||
base, _ := url.Parse(c.config.Address)
|
||||
u, err := url.Parse(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r := &request{
|
||||
config: &c.config,
|
||||
method: method,
|
||||
url: &url.URL{
|
||||
Scheme: base.Scheme,
|
||||
User: base.User,
|
||||
Host: base.Host,
|
||||
Path: u.Path,
|
||||
},
|
||||
params: make(map[string][]string),
|
||||
}
|
||||
if c.config.Region != "" {
|
||||
r.params.Set("region", c.config.Region)
|
||||
}
|
||||
if c.config.Namespace != "" {
|
||||
r.params.Set("namespace", c.config.Namespace)
|
||||
}
|
||||
if c.config.WaitTime != 0 {
|
||||
r.params.Set("wait", durToMsec(r.config.WaitTime))
|
||||
}
|
||||
if c.config.SecretID != "" {
|
||||
r.token = r.config.SecretID
|
||||
}
|
||||
|
||||
// Add in the query parameters, if any
|
||||
for key, values := range u.Query() {
|
||||
for _, value := range values {
|
||||
r.params.Add(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// multiCloser is to wrap a ReadCloser such that when close is called, multiple
|
||||
// Closes occur.
|
||||
type multiCloser struct {
|
||||
reader io.Reader
|
||||
inorderClose []io.Closer
|
||||
}
|
||||
|
||||
func (m *multiCloser) Close() error {
|
||||
for _, c := range m.inorderClose {
|
||||
if err := c.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *multiCloser) Read(p []byte) (int, error) {
|
||||
return m.reader.Read(p)
|
||||
}
|
||||
|
||||
// doRequest runs a request with our client
|
||||
func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
|
||||
req, err := r.toHTTP()
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
start := time.Now()
|
||||
resp, err := c.config.httpClient.Do(req)
|
||||
diff := time.Now().Sub(start)
|
||||
|
||||
// If the response is compressed, we swap the body's reader.
|
||||
if resp != nil && resp.Header != nil {
|
||||
var reader io.ReadCloser
|
||||
switch resp.Header.Get("Content-Encoding") {
|
||||
case "gzip":
|
||||
greader, err := gzip.NewReader(resp.Body)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
// The gzip reader doesn't close the wrapped reader so we use
|
||||
// multiCloser.
|
||||
reader = &multiCloser{
|
||||
reader: greader,
|
||||
inorderClose: []io.Closer{greader, resp.Body},
|
||||
}
|
||||
default:
|
||||
reader = resp.Body
|
||||
}
|
||||
resp.Body = reader
|
||||
}
|
||||
|
||||
return diff, resp, err
|
||||
}
|
||||
|
||||
// rawQuery makes a GET request to the specified endpoint but returns just the
|
||||
// response body.
|
||||
func (c *Client) rawQuery(endpoint string, q *QueryOptions) (io.ReadCloser, error) {
|
||||
r, err := c.newRequest("GET", endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.setQueryOptions(q)
|
||||
_, resp, err := requireOK(c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
// query is used to do a GET request against an endpoint
|
||||
// and deserialize the response into an interface using
|
||||
// standard Nomad conventions.
|
||||
func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
|
||||
r, err := c.newRequest("GET", endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.setQueryOptions(q)
|
||||
rtt, resp, err := requireOK(c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
qm := &QueryMeta{}
|
||||
parseQueryMeta(resp, qm)
|
||||
qm.RequestTime = rtt
|
||||
|
||||
if err := decodeBody(resp, out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return qm, nil
|
||||
}
|
||||
|
||||
// putQuery is used to do a PUT request when doing a read against an endpoint
|
||||
// and deserialize the response into an interface using standard Nomad
|
||||
// conventions.
|
||||
func (c *Client) putQuery(endpoint string, in, out interface{}, q *QueryOptions) (*QueryMeta, error) {
|
||||
r, err := c.newRequest("PUT", endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.setQueryOptions(q)
|
||||
r.obj = in
|
||||
rtt, resp, err := requireOK(c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
qm := &QueryMeta{}
|
||||
parseQueryMeta(resp, qm)
|
||||
qm.RequestTime = rtt
|
||||
|
||||
if err := decodeBody(resp, out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return qm, nil
|
||||
}
|
||||
|
||||
// write is used to do a PUT request against an endpoint
|
||||
// and serialize/deserialized using the standard Nomad conventions.
|
||||
func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
|
||||
r, err := c.newRequest("PUT", endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.setWriteOptions(q)
|
||||
r.obj = in
|
||||
rtt, resp, err := requireOK(c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
wm := &WriteMeta{RequestTime: rtt}
|
||||
parseWriteMeta(resp, wm)
|
||||
|
||||
if out != nil {
|
||||
if err := decodeBody(resp, &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return wm, nil
|
||||
}
|
||||
|
||||
// delete is used to do a DELETE request against an endpoint
|
||||
// and serialize/deserialized using the standard Nomad conventions.
|
||||
func (c *Client) delete(endpoint string, out interface{}, q *WriteOptions) (*WriteMeta, error) {
|
||||
r, err := c.newRequest("DELETE", endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.setWriteOptions(q)
|
||||
rtt, resp, err := requireOK(c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
wm := &WriteMeta{RequestTime: rtt}
|
||||
parseWriteMeta(resp, wm)
|
||||
|
||||
if out != nil {
|
||||
if err := decodeBody(resp, &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return wm, nil
|
||||
}
|
||||
|
||||
// parseQueryMeta is used to help parse query meta-data
|
||||
func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
|
||||
header := resp.Header
|
||||
|
||||
// Parse the X-Nomad-Index
|
||||
index, err := strconv.ParseUint(header.Get("X-Nomad-Index"), 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to parse X-Nomad-Index: %v", err)
|
||||
}
|
||||
q.LastIndex = index
|
||||
|
||||
// Parse the X-Nomad-LastContact
|
||||
last, err := strconv.ParseUint(header.Get("X-Nomad-LastContact"), 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to parse X-Nomad-LastContact: %v", err)
|
||||
}
|
||||
q.LastContact = time.Duration(last) * time.Millisecond
|
||||
|
||||
// Parse the X-Nomad-KnownLeader
|
||||
switch header.Get("X-Nomad-KnownLeader") {
|
||||
case "true":
|
||||
q.KnownLeader = true
|
||||
default:
|
||||
q.KnownLeader = false
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseWriteMeta is used to help parse write meta-data
|
||||
func parseWriteMeta(resp *http.Response, q *WriteMeta) error {
|
||||
header := resp.Header
|
||||
|
||||
// Parse the X-Nomad-Index
|
||||
index, err := strconv.ParseUint(header.Get("X-Nomad-Index"), 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to parse X-Nomad-Index: %v", err)
|
||||
}
|
||||
q.LastIndex = index
|
||||
return nil
|
||||
}
|
||||
|
||||
// decodeBody is used to JSON decode a body
|
||||
func decodeBody(resp *http.Response, out interface{}) error {
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
return dec.Decode(out)
|
||||
}
|
||||
|
||||
// encodeBody is used to encode a request body
|
||||
func encodeBody(obj interface{}) (io.Reader, error) {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
enc := json.NewEncoder(buf)
|
||||
if err := enc.Encode(obj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// requireOK is used to wrap doRequest and check for a 200
|
||||
func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
|
||||
if e != nil {
|
||||
if resp != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
return d, nil, e
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
var buf bytes.Buffer
|
||||
io.Copy(&buf, resp.Body)
|
||||
resp.Body.Close()
|
||||
return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
|
||||
}
|
||||
return d, resp, nil
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
package api
|
||||
|
||||
// Constraint is used to serialize a job placement constraint.
type Constraint struct {
	// LTarget and RTarget are the left and right operands; Operand is the
	// comparison between them.
	LTarget string
	RTarget string
	Operand string
}

// NewConstraint generates a new job placement constraint.
func NewConstraint(left, operand, right string) *Constraint {
	return &Constraint{
		LTarget: left,
		Operand: operand,
		RTarget: right,
	}
}
|
|
@ -0,0 +1,14 @@
|
|||
package contexts
|
||||
|
||||
// Context defines the scope in which a search for Nomad object operates.
type Context string

// The named contexts a search may be scoped to.
const (
	Allocs      Context = "allocs"
	Deployments Context = "deployment"
	Evals       Context = "evals"
	Jobs        Context = "jobs"
	Nodes       Context = "nodes"
	Namespaces  Context = "namespaces"
	All         Context = "all"
)
|
|
@ -0,0 +1,234 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Deployments is used to query the deployments endpoints.
|
||||
type Deployments struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
// Deployments returns a new handle on the deployments.
|
||||
func (c *Client) Deployments() *Deployments {
|
||||
return &Deployments{client: c}
|
||||
}
|
||||
|
||||
// List is used to dump all of the deployments.
|
||||
func (d *Deployments) List(q *QueryOptions) ([]*Deployment, *QueryMeta, error) {
|
||||
var resp []*Deployment
|
||||
qm, err := d.client.query("/v1/deployments", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(DeploymentIndexSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
func (d *Deployments) PrefixList(prefix string) ([]*Deployment, *QueryMeta, error) {
|
||||
return d.List(&QueryOptions{Prefix: prefix})
|
||||
}
|
||||
|
||||
// Info is used to query a single deployment by its ID.
|
||||
func (d *Deployments) Info(deploymentID string, q *QueryOptions) (*Deployment, *QueryMeta, error) {
|
||||
var resp Deployment
|
||||
qm, err := d.client.query("/v1/deployment/"+deploymentID, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, qm, nil
|
||||
}
|
||||
|
||||
// Allocations is used to retrieve a set of allocations that are part of the
|
||||
// deployment
|
||||
func (d *Deployments) Allocations(deploymentID string, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) {
|
||||
var resp []*AllocationListStub
|
||||
qm, err := d.client.query("/v1/deployment/allocations/"+deploymentID, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(AllocIndexSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// Fail is used to fail the given deployment.
|
||||
func (d *Deployments) Fail(deploymentID string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) {
|
||||
var resp DeploymentUpdateResponse
|
||||
req := &DeploymentFailRequest{
|
||||
DeploymentID: deploymentID,
|
||||
}
|
||||
wm, err := d.client.write("/v1/deployment/fail/"+deploymentID, req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// Pause is used to pause or unpause the given deployment.
|
||||
func (d *Deployments) Pause(deploymentID string, pause bool, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) {
|
||||
var resp DeploymentUpdateResponse
|
||||
req := &DeploymentPauseRequest{
|
||||
DeploymentID: deploymentID,
|
||||
Pause: pause,
|
||||
}
|
||||
wm, err := d.client.write("/v1/deployment/pause/"+deploymentID, req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// PromoteAll is used to promote all canaries in the given deployment
|
||||
func (d *Deployments) PromoteAll(deploymentID string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) {
|
||||
var resp DeploymentUpdateResponse
|
||||
req := &DeploymentPromoteRequest{
|
||||
DeploymentID: deploymentID,
|
||||
All: true,
|
||||
}
|
||||
wm, err := d.client.write("/v1/deployment/promote/"+deploymentID, req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// PromoteGroups is used to promote canaries in the passed groups in the given deployment
|
||||
func (d *Deployments) PromoteGroups(deploymentID string, groups []string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) {
|
||||
var resp DeploymentUpdateResponse
|
||||
req := &DeploymentPromoteRequest{
|
||||
DeploymentID: deploymentID,
|
||||
Groups: groups,
|
||||
}
|
||||
wm, err := d.client.write("/v1/deployment/promote/"+deploymentID, req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// SetAllocHealth is used to set allocation health for allocs that are part of
|
||||
// the given deployment
|
||||
func (d *Deployments) SetAllocHealth(deploymentID string, healthy, unhealthy []string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) {
|
||||
var resp DeploymentUpdateResponse
|
||||
req := &DeploymentAllocHealthRequest{
|
||||
DeploymentID: deploymentID,
|
||||
HealthyAllocationIDs: healthy,
|
||||
UnhealthyAllocationIDs: unhealthy,
|
||||
}
|
||||
wm, err := d.client.write("/v1/deployment/allocation-health/"+deploymentID, req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// Deployment is used to serialize a deployment.
type Deployment struct {
	ID        string
	Namespace string

	// Job identity fields the deployment was created from.
	JobID          string
	JobVersion     uint64
	JobModifyIndex uint64
	JobCreateIndex uint64

	// TaskGroups maps task group name to its deployment state.
	TaskGroups map[string]*DeploymentState

	Status            string
	StatusDescription string

	CreateIndex uint64
	ModifyIndex uint64
}

// DeploymentState tracks the state of a deployment for a given task group.
type DeploymentState struct {
	PlacedCanaries  []string
	AutoRevert      bool
	Promoted        bool
	DesiredCanaries int
	DesiredTotal    int
	PlacedAllocs    int
	HealthyAllocs   int
	UnhealthyAllocs int
}
|
||||
|
||||
// DeploymentIndexSort is a wrapper to sort deployments by CreateIndex. We
|
||||
// reverse the test so that we get the highest index first.
|
||||
type DeploymentIndexSort []*Deployment
|
||||
|
||||
func (d DeploymentIndexSort) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
|
||||
func (d DeploymentIndexSort) Less(i, j int) bool {
|
||||
return d[i].CreateIndex > d[j].CreateIndex
|
||||
}
|
||||
|
||||
func (d DeploymentIndexSort) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
||||
|
||||
// DeploymentUpdateResponse is used to respond to a deployment change. The
|
||||
// response will include the modify index of the deployment as well as details
|
||||
// of any triggered evaluation.
|
||||
type DeploymentUpdateResponse struct {
|
||||
EvalID string
|
||||
EvalCreateIndex uint64
|
||||
DeploymentModifyIndex uint64
|
||||
RevertedJobVersion *uint64
|
||||
WriteMeta
|
||||
}
|
||||
|
||||
// DeploymentAllocHealthRequest is used to set the health of a set of
|
||||
// allocations as part of a deployment.
|
||||
type DeploymentAllocHealthRequest struct {
|
||||
DeploymentID string
|
||||
|
||||
// Marks these allocations as healthy, allow further allocations
|
||||
// to be rolled.
|
||||
HealthyAllocationIDs []string
|
||||
|
||||
// Any unhealthy allocations fail the deployment
|
||||
UnhealthyAllocationIDs []string
|
||||
|
||||
WriteRequest
|
||||
}
|
||||
|
||||
// DeploymentPromoteRequest is used to promote task groups in a deployment
|
||||
type DeploymentPromoteRequest struct {
|
||||
DeploymentID string
|
||||
|
||||
// All is to promote all task groups
|
||||
All bool
|
||||
|
||||
// Groups is used to set the promotion status per task group
|
||||
Groups []string
|
||||
|
||||
WriteRequest
|
||||
}
|
||||
|
||||
// DeploymentPauseRequest is used to pause a deployment
|
||||
type DeploymentPauseRequest struct {
|
||||
DeploymentID string
|
||||
|
||||
// Pause sets the pause status
|
||||
Pause bool
|
||||
|
||||
WriteRequest
|
||||
}
|
||||
|
||||
// DeploymentSpecificRequest is used to make a request specific to a particular
|
||||
// deployment
|
||||
type DeploymentSpecificRequest struct {
|
||||
DeploymentID string
|
||||
QueryOptions
|
||||
}
|
||||
|
||||
// DeploymentFailRequest is used to fail a particular deployment
|
||||
type DeploymentFailRequest struct {
|
||||
DeploymentID string
|
||||
WriteRequest
|
||||
}
|
||||
|
||||
// SingleDeploymentResponse is used to respond with a single deployment
|
||||
type SingleDeploymentResponse struct {
|
||||
Deployment *Deployment
|
||||
QueryMeta
|
||||
}
|
|
@ -0,0 +1,97 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Evaluations is used to query the evaluation endpoints.
|
||||
type Evaluations struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
// Evaluations returns a new handle on the evaluations.
|
||||
func (c *Client) Evaluations() *Evaluations {
|
||||
return &Evaluations{client: c}
|
||||
}
|
||||
|
||||
// List is used to dump all of the evaluations.
|
||||
func (e *Evaluations) List(q *QueryOptions) ([]*Evaluation, *QueryMeta, error) {
|
||||
var resp []*Evaluation
|
||||
qm, err := e.client.query("/v1/evaluations", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(EvalIndexSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
func (e *Evaluations) PrefixList(prefix string) ([]*Evaluation, *QueryMeta, error) {
|
||||
return e.List(&QueryOptions{Prefix: prefix})
|
||||
}
|
||||
|
||||
// Info is used to query a single evaluation by its ID.
|
||||
func (e *Evaluations) Info(evalID string, q *QueryOptions) (*Evaluation, *QueryMeta, error) {
|
||||
var resp Evaluation
|
||||
qm, err := e.client.query("/v1/evaluation/"+evalID, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, qm, nil
|
||||
}
|
||||
|
||||
// Allocations is used to retrieve a set of allocations given
|
||||
// an evaluation ID.
|
||||
func (e *Evaluations) Allocations(evalID string, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) {
|
||||
var resp []*AllocationListStub
|
||||
qm, err := e.client.query("/v1/evaluation/"+evalID+"/allocations", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(AllocIndexSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// Evaluation is used to serialize an evaluation.
|
||||
type Evaluation struct {
|
||||
ID string
|
||||
Priority int
|
||||
Type string
|
||||
TriggeredBy string
|
||||
Namespace string
|
||||
JobID string
|
||||
JobModifyIndex uint64
|
||||
NodeID string
|
||||
NodeModifyIndex uint64
|
||||
DeploymentID string
|
||||
Status string
|
||||
StatusDescription string
|
||||
Wait time.Duration
|
||||
NextEval string
|
||||
PreviousEval string
|
||||
BlockedEval string
|
||||
FailedTGAllocs map[string]*AllocationMetric
|
||||
ClassEligibility map[string]bool
|
||||
EscapedComputedClass bool
|
||||
AnnotatePlan bool
|
||||
QueuedAllocations map[string]int
|
||||
SnapshotIndex uint64
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
// EvalIndexSort is a wrapper to sort evaluations by CreateIndex.
|
||||
// We reverse the test so that we get the highest index first.
|
||||
type EvalIndexSort []*Evaluation
|
||||
|
||||
func (e EvalIndexSort) Len() int {
|
||||
return len(e)
|
||||
}
|
||||
|
||||
func (e EvalIndexSort) Less(i, j int) bool {
|
||||
return e[i].CreateIndex > e[j].CreateIndex
|
||||
}
|
||||
|
||||
func (e EvalIndexSort) Swap(i, j int) {
|
||||
e[i], e[j] = e[j], e[i]
|
||||
}
|
|
@ -0,0 +1,398 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// OriginStart and OriginEnd are the available parameters for the origin
	// argument when streaming a file. They respectively offset from the start
	// and end of a file.
	OriginStart = "start"
	OriginEnd   = "end"
)

// AllocFileInfo holds information about a file inside the AllocDir.
type AllocFileInfo struct {
	Name     string
	IsDir    bool
	Size     int64
	FileMode string
	ModTime  time.Time
}

// StreamFrame is used to frame data of a file when streaming.
type StreamFrame struct {
	Offset    int64  `json:",omitempty"`
	Data      []byte `json:",omitempty"`
	File      string `json:",omitempty"`
	FileEvent string `json:",omitempty"`
}

// IsHeartbeat returns whether the frame is a heartbeat frame, i.e. a frame
// whose every field holds its zero value.
func (s *StreamFrame) IsHeartbeat() bool {
	return s.Offset == 0 && len(s.Data) == 0 && s.File == "" && s.FileEvent == ""
}
|
||||
|
||||
// AllocFS is used to introspect an allocation directory on a Nomad client
|
||||
type AllocFS struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
// AllocFS returns an handle to the AllocFS endpoints
|
||||
func (c *Client) AllocFS() *AllocFS {
|
||||
return &AllocFS{client: c}
|
||||
}
|
||||
|
||||
// List is used to list the files at a given path of an allocation directory
|
||||
func (a *AllocFS) List(alloc *Allocation, path string, q *QueryOptions) ([]*AllocFileInfo, *QueryMeta, error) {
|
||||
nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
q.Params["path"] = path
|
||||
|
||||
var resp []*AllocFileInfo
|
||||
qm, err := nodeClient.query(fmt.Sprintf("/v1/client/fs/ls/%s", alloc.ID), &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// Stat is used to stat a file at a given path of an allocation directory
|
||||
func (a *AllocFS) Stat(alloc *Allocation, path string, q *QueryOptions) (*AllocFileInfo, *QueryMeta, error) {
|
||||
nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
q.Params["path"] = path
|
||||
|
||||
var resp AllocFileInfo
|
||||
qm, err := nodeClient.query(fmt.Sprintf("/v1/client/fs/stat/%s", alloc.ID), &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, qm, nil
|
||||
}
|
||||
|
||||
// ReadAt is used to read bytes at a given offset until limit at the given path
|
||||
// in an allocation directory. If limit is <= 0, there is no limit.
|
||||
func (a *AllocFS) ReadAt(alloc *Allocation, path string, offset int64, limit int64, q *QueryOptions) (io.ReadCloser, error) {
|
||||
nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
q.Params["path"] = path
|
||||
q.Params["offset"] = strconv.FormatInt(offset, 10)
|
||||
q.Params["limit"] = strconv.FormatInt(limit, 10)
|
||||
|
||||
r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/readat/%s", alloc.ID), q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Cat is used to read contents of a file at the given path in an allocation
|
||||
// directory
|
||||
func (a *AllocFS) Cat(alloc *Allocation, path string, q *QueryOptions) (io.ReadCloser, error) {
|
||||
nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
q.Params["path"] = path
|
||||
|
||||
r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/cat/%s", alloc.ID), q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Stream streams the content of a file blocking on EOF.
|
||||
// The parameters are:
|
||||
// * path: path to file to stream.
|
||||
// * offset: The offset to start streaming data at.
|
||||
// * origin: Either "start" or "end" and defines from where the offset is applied.
|
||||
// * cancel: A channel that when closed, streaming will end.
|
||||
//
|
||||
// The return value is a channel that will emit StreamFrames as they are read.
|
||||
func (a *AllocFS) Stream(alloc *Allocation, path, origin string, offset int64,
|
||||
cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
|
||||
|
||||
errCh := make(chan error, 1)
|
||||
nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
q.Params["path"] = path
|
||||
q.Params["offset"] = strconv.FormatInt(offset, 10)
|
||||
q.Params["origin"] = origin
|
||||
|
||||
r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/stream/%s", alloc.ID), q)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
|
||||
// Create the output channel
|
||||
frames := make(chan *StreamFrame, 10)
|
||||
|
||||
go func() {
|
||||
// Close the body
|
||||
defer r.Close()
|
||||
|
||||
// Create a decoder
|
||||
dec := json.NewDecoder(r)
|
||||
|
||||
for {
|
||||
// Check if we have been cancelled
|
||||
select {
|
||||
case <-cancel:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Decode the next frame
|
||||
var frame StreamFrame
|
||||
if err := dec.Decode(&frame); err != nil {
|
||||
errCh <- err
|
||||
close(frames)
|
||||
return
|
||||
}
|
||||
|
||||
// Discard heartbeat frames
|
||||
if frame.IsHeartbeat() {
|
||||
continue
|
||||
}
|
||||
|
||||
frames <- &frame
|
||||
}
|
||||
}()
|
||||
|
||||
return frames, errCh
|
||||
}
|
||||
|
||||
// Logs streams the content of a tasks logs blocking on EOF.
|
||||
// The parameters are:
|
||||
// * allocation: the allocation to stream from.
|
||||
// * follow: Whether the logs should be followed.
|
||||
// * task: the tasks name to stream logs for.
|
||||
// * logType: Either "stdout" or "stderr"
|
||||
// * origin: Either "start" or "end" and defines from where the offset is applied.
|
||||
// * offset: The offset to start streaming data at.
|
||||
// * cancel: A channel that when closed, streaming will end.
|
||||
//
|
||||
// The return value is a channel that will emit StreamFrames as they are read.
|
||||
func (a *AllocFS) Logs(alloc *Allocation, follow bool, task, logType, origin string,
|
||||
offset int64, cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
|
||||
|
||||
errCh := make(chan error, 1)
|
||||
nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
q.Params["follow"] = strconv.FormatBool(follow)
|
||||
q.Params["task"] = task
|
||||
q.Params["type"] = logType
|
||||
q.Params["origin"] = origin
|
||||
q.Params["offset"] = strconv.FormatInt(offset, 10)
|
||||
|
||||
r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/logs/%s", alloc.ID), q)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
|
||||
// Create the output channel
|
||||
frames := make(chan *StreamFrame, 10)
|
||||
|
||||
go func() {
|
||||
// Close the body
|
||||
defer r.Close()
|
||||
|
||||
// Create a decoder
|
||||
dec := json.NewDecoder(r)
|
||||
|
||||
for {
|
||||
// Check if we have been cancelled
|
||||
select {
|
||||
case <-cancel:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Decode the next frame
|
||||
var frame StreamFrame
|
||||
if err := dec.Decode(&frame); err != nil {
|
||||
errCh <- err
|
||||
close(frames)
|
||||
return
|
||||
}
|
||||
|
||||
// Discard heartbeat frames
|
||||
if frame.IsHeartbeat() {
|
||||
continue
|
||||
}
|
||||
|
||||
frames <- &frame
|
||||
}
|
||||
}()
|
||||
|
||||
return frames, errCh
|
||||
}
|
||||
|
||||
// FrameReader is used to convert a stream of frames into a read closer.
|
||||
type FrameReader struct {
|
||||
frames <-chan *StreamFrame
|
||||
errCh <-chan error
|
||||
cancelCh chan struct{}
|
||||
|
||||
closedLock sync.Mutex
|
||||
closed bool
|
||||
|
||||
unblockTime time.Duration
|
||||
|
||||
frame *StreamFrame
|
||||
frameOffset int
|
||||
|
||||
byteOffset int
|
||||
}
|
||||
|
||||
// NewFrameReader takes a channel of frames and returns a FrameReader which
|
||||
// implements io.ReadCloser
|
||||
func NewFrameReader(frames <-chan *StreamFrame, errCh <-chan error, cancelCh chan struct{}) *FrameReader {
|
||||
return &FrameReader{
|
||||
frames: frames,
|
||||
errCh: errCh,
|
||||
cancelCh: cancelCh,
|
||||
}
|
||||
}
|
||||
|
||||
// SetUnblockTime sets the time to unblock and return zero bytes read. If the
|
||||
// duration is unset or is zero or less, the read will block til data is read.
|
||||
func (f *FrameReader) SetUnblockTime(d time.Duration) {
|
||||
f.unblockTime = d
|
||||
}
|
||||
|
||||
// Offset returns the offset into the stream.
|
||||
func (f *FrameReader) Offset() int {
|
||||
return f.byteOffset
|
||||
}
|
||||
|
||||
// Read reads the data of the incoming frames into the bytes buffer. Returns EOF
|
||||
// when there are no more frames.
|
||||
func (f *FrameReader) Read(p []byte) (n int, err error) {
|
||||
f.closedLock.Lock()
|
||||
closed := f.closed
|
||||
f.closedLock.Unlock()
|
||||
if closed {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
if f.frame == nil {
|
||||
var unblock <-chan time.Time
|
||||
if f.unblockTime.Nanoseconds() > 0 {
|
||||
unblock = time.After(f.unblockTime)
|
||||
}
|
||||
|
||||
select {
|
||||
case frame, ok := <-f.frames:
|
||||
if !ok {
|
||||
return 0, io.EOF
|
||||
}
|
||||
f.frame = frame
|
||||
|
||||
// Store the total offset into the file
|
||||
f.byteOffset = int(f.frame.Offset)
|
||||
case <-unblock:
|
||||
return 0, nil
|
||||
case err := <-f.errCh:
|
||||
return 0, err
|
||||
case <-f.cancelCh:
|
||||
return 0, io.EOF
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the data out of the frame and update our offset
|
||||
n = copy(p, f.frame.Data[f.frameOffset:])
|
||||
f.frameOffset += n
|
||||
|
||||
// Clear the frame and its offset once we have read everything
|
||||
if len(f.frame.Data) == f.frameOffset {
|
||||
f.frame = nil
|
||||
f.frameOffset = 0
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Close cancels the stream of frames
|
||||
func (f *FrameReader) Close() error {
|
||||
f.closedLock.Lock()
|
||||
defer f.closedLock.Unlock()
|
||||
if f.closed {
|
||||
return nil
|
||||
}
|
||||
|
||||
close(f.cancelCh)
|
||||
f.closed = true
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,981 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/gorhill/cronexpr"
|
||||
"github.com/hashicorp/nomad/helper"
|
||||
)
|
||||
|
||||
const (
	// JobTypeService indicates a long-running processes
	JobTypeService = "service"

	// JobTypeBatch indicates a short-lived process
	JobTypeBatch = "batch"

	// PeriodicSpecCron is used for a cron spec.
	PeriodicSpecCron = "cron"

	// DefaultNamespace is the default namespace.
	DefaultNamespace = "default"
)

const (
	// RegisterEnforceIndexErrPrefix is the prefix to use in errors caused by
	// enforcing the job modify index during registers.
	RegisterEnforceIndexErrPrefix = "Enforcing job modify index"
)
|
||||
|
||||
// Jobs is used to access the job-specific endpoints.
|
||||
type Jobs struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
// Jobs returns a handle on the jobs endpoints.
|
||||
func (c *Client) Jobs() *Jobs {
|
||||
return &Jobs{client: c}
|
||||
}
|
||||
|
||||
func (j *Jobs) Validate(job *Job, q *WriteOptions) (*JobValidateResponse, *WriteMeta, error) {
|
||||
var resp JobValidateResponse
|
||||
req := &JobValidateRequest{Job: job}
|
||||
if q != nil {
|
||||
req.WriteRequest = WriteRequest{Region: q.Region}
|
||||
}
|
||||
wm, err := j.client.write("/v1/validate/job", req, &resp, q)
|
||||
return &resp, wm, err
|
||||
}
|
||||
|
||||
// RegisterOptions is used to pass through job registration parameters
|
||||
type RegisterOptions struct {
|
||||
EnforceIndex bool
|
||||
ModifyIndex uint64
|
||||
PolicyOverride bool
|
||||
}
|
||||
|
||||
// Register is used to register a new job. It returns the ID
|
||||
// of the evaluation, along with any errors encountered.
|
||||
func (j *Jobs) Register(job *Job, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
|
||||
return j.RegisterOpts(job, nil, q)
|
||||
}
|
||||
|
||||
// EnforceRegister is used to register a job enforcing its job modify index.
|
||||
func (j *Jobs) EnforceRegister(job *Job, modifyIndex uint64, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
|
||||
opts := RegisterOptions{EnforceIndex: true, ModifyIndex: modifyIndex}
|
||||
return j.RegisterOpts(job, &opts, q)
|
||||
}
|
||||
|
||||
// Register is used to register a new job. It returns the ID
|
||||
// of the evaluation, along with any errors encountered.
|
||||
func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
|
||||
// Format the request
|
||||
req := &RegisterJobRequest{
|
||||
Job: job,
|
||||
}
|
||||
if opts != nil {
|
||||
if opts.EnforceIndex {
|
||||
req.EnforceIndex = true
|
||||
req.JobModifyIndex = opts.ModifyIndex
|
||||
}
|
||||
if opts.PolicyOverride {
|
||||
req.PolicyOverride = true
|
||||
}
|
||||
}
|
||||
|
||||
var resp JobRegisterResponse
|
||||
wm, err := j.client.write("/v1/jobs", req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// List is used to list all of the existing jobs.
|
||||
func (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {
|
||||
var resp []*JobListStub
|
||||
qm, err := j.client.query("/v1/jobs", &resp, q)
|
||||
if err != nil {
|
||||
return nil, qm, err
|
||||
}
|
||||
sort.Sort(JobIDSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// PrefixList is used to list all existing jobs that match the prefix.
|
||||
func (j *Jobs) PrefixList(prefix string) ([]*JobListStub, *QueryMeta, error) {
|
||||
return j.List(&QueryOptions{Prefix: prefix})
|
||||
}
|
||||
|
||||
// Info is used to retrieve information about a particular
|
||||
// job given its unique ID.
|
||||
func (j *Jobs) Info(jobID string, q *QueryOptions) (*Job, *QueryMeta, error) {
|
||||
var resp Job
|
||||
qm, err := j.client.query("/v1/job/"+jobID, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, qm, nil
|
||||
}
|
||||
|
||||
// Versions is used to retrieve all versions of a particular job given its
|
||||
// unique ID.
|
||||
func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*JobDiff, *QueryMeta, error) {
|
||||
var resp JobVersionsResponse
|
||||
qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/versions?diffs=%v", jobID, diffs), &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return resp.Versions, resp.Diffs, qm, nil
|
||||
}
|
||||
|
||||
// Allocations is used to return the allocs for a given job ID.
|
||||
func (j *Jobs) Allocations(jobID string, allAllocs bool, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) {
|
||||
var resp []*AllocationListStub
|
||||
u, err := url.Parse("/v1/job/" + jobID + "/allocations")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
v := u.Query()
|
||||
v.Add("all", strconv.FormatBool(allAllocs))
|
||||
u.RawQuery = v.Encode()
|
||||
|
||||
qm, err := j.client.query(u.String(), &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(AllocIndexSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// Deployments is used to query the deployments associated with the given job
|
||||
// ID.
|
||||
func (j *Jobs) Deployments(jobID string, q *QueryOptions) ([]*Deployment, *QueryMeta, error) {
|
||||
var resp []*Deployment
|
||||
qm, err := j.client.query("/v1/job/"+jobID+"/deployments", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(DeploymentIndexSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// LatestDeployment is used to query for the latest deployment associated with
|
||||
// the given job ID.
|
||||
func (j *Jobs) LatestDeployment(jobID string, q *QueryOptions) (*Deployment, *QueryMeta, error) {
|
||||
var resp *Deployment
|
||||
qm, err := j.client.query("/v1/job/"+jobID+"/deployment", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// Evaluations is used to query the evaluations associated with the given job
|
||||
// ID.
|
||||
func (j *Jobs) Evaluations(jobID string, q *QueryOptions) ([]*Evaluation, *QueryMeta, error) {
|
||||
var resp []*Evaluation
|
||||
qm, err := j.client.query("/v1/job/"+jobID+"/evaluations", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(EvalIndexSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// Deregister is used to remove an existing job. If purge is set to true, the job
|
||||
// is deregistered and purged from the system versus still being queryable and
|
||||
// eventually GC'ed from the system. Most callers should not specify purge.
|
||||
func (j *Jobs) Deregister(jobID string, purge bool, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
var resp JobDeregisterResponse
|
||||
wm, err := j.client.delete(fmt.Sprintf("/v1/job/%v?purge=%t", jobID, purge), &resp, q)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return resp.EvalID, wm, nil
|
||||
}
|
||||
|
||||
// ForceEvaluate is used to force-evaluate an existing job.
|
||||
func (j *Jobs) ForceEvaluate(jobID string, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
var resp JobRegisterResponse
|
||||
wm, err := j.client.write("/v1/job/"+jobID+"/evaluate", nil, &resp, q)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return resp.EvalID, wm, nil
|
||||
}
|
||||
|
||||
// PeriodicForce spawns a new instance of the periodic job and returns the eval ID
|
||||
func (j *Jobs) PeriodicForce(jobID string, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
var resp periodicForceResponse
|
||||
wm, err := j.client.write("/v1/job/"+jobID+"/periodic/force", nil, &resp, q)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return resp.EvalID, wm, nil
|
||||
}
|
||||
|
||||
// PlanOptions is used to pass through job planning parameters
|
||||
type PlanOptions struct {
|
||||
Diff bool
|
||||
PolicyOverride bool
|
||||
}
|
||||
|
||||
func (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) {
|
||||
opts := PlanOptions{Diff: diff}
|
||||
return j.PlanOpts(job, &opts, q)
|
||||
}
|
||||
|
||||
func (j *Jobs) PlanOpts(job *Job, opts *PlanOptions, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) {
|
||||
if job == nil {
|
||||
return nil, nil, fmt.Errorf("must pass non-nil job")
|
||||
}
|
||||
|
||||
// Setup the request
|
||||
req := &JobPlanRequest{
|
||||
Job: job,
|
||||
}
|
||||
if opts != nil {
|
||||
req.Diff = opts.Diff
|
||||
req.PolicyOverride = opts.PolicyOverride
|
||||
}
|
||||
|
||||
var resp JobPlanResponse
|
||||
wm, err := j.client.write("/v1/job/"+*job.ID+"/plan", req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
func (j *Jobs) Summary(jobID string, q *QueryOptions) (*JobSummary, *QueryMeta, error) {
|
||||
var resp JobSummary
|
||||
qm, err := j.client.query("/v1/job/"+jobID+"/summary", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, qm, nil
|
||||
}
|
||||
|
||||
func (j *Jobs) Dispatch(jobID string, meta map[string]string,
|
||||
payload []byte, q *WriteOptions) (*JobDispatchResponse, *WriteMeta, error) {
|
||||
var resp JobDispatchResponse
|
||||
req := &JobDispatchRequest{
|
||||
JobID: jobID,
|
||||
Meta: meta,
|
||||
Payload: payload,
|
||||
}
|
||||
wm, err := j.client.write("/v1/job/"+jobID+"/dispatch", req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// Revert is used to revert the given job to the passed version. If
// enforcePriorVersion is set, the job is only reverted if its current
// version matches that value.
func (j *Jobs) Revert(jobID string, version uint64, enforcePriorVersion *uint64,
	q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {

	var resp JobRegisterResponse
	req := &JobRevertRequest{
		JobID:               jobID,
		JobVersion:          version,
		EnforcePriorVersion: enforcePriorVersion,
	}
	wm, err := j.client.write("/v1/job/"+jobID+"/revert", req, &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return &resp, wm, nil
}
|
||||
|
||||
// Stable is used to mark a job version's stability.
|
||||
func (j *Jobs) Stable(jobID string, version uint64, stable bool,
|
||||
q *WriteOptions) (*JobStabilityResponse, *WriteMeta, error) {
|
||||
|
||||
var resp JobStabilityResponse
|
||||
req := &JobStabilityRequest{
|
||||
JobID: jobID,
|
||||
JobVersion: version,
|
||||
Stable: stable,
|
||||
}
|
||||
wm, err := j.client.write("/v1/job/"+jobID+"/stable", req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, wm, nil
|
||||
}
|
||||
|
||||
// periodicForceResponse is used to deserialize a force response
type periodicForceResponse struct {
	// EvalID is the evaluation created by forcing the periodic job.
	EvalID string
}

// UpdateStrategy defines a task groups update strategy.
// All fields are pointers so that "unset" can be distinguished from the
// zero value when merging with defaults.
type UpdateStrategy struct {
	Stagger         *time.Duration `mapstructure:"stagger"`
	MaxParallel     *int           `mapstructure:"max_parallel"`
	HealthCheck     *string        `mapstructure:"health_check"`
	MinHealthyTime  *time.Duration `mapstructure:"min_healthy_time"`
	HealthyDeadline *time.Duration `mapstructure:"healthy_deadline"`
	AutoRevert      *bool          `mapstructure:"auto_revert"`
	Canary          *int           `mapstructure:"canary"`
}

// DefaultUpdateStrategy provides a baseline that can be used to upgrade
// jobs with the old policy or for populating field defaults.
func DefaultUpdateStrategy() *UpdateStrategy {
	return &UpdateStrategy{
		Stagger:         helper.TimeToPtr(30 * time.Second),
		MaxParallel:     helper.IntToPtr(1),
		HealthCheck:     helper.StringToPtr("checks"),
		MinHealthyTime:  helper.TimeToPtr(10 * time.Second),
		HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
		AutoRevert:      helper.BoolToPtr(false),
		Canary:          helper.IntToPtr(0),
	}
}
|
||||
|
||||
func (u *UpdateStrategy) Copy() *UpdateStrategy {
|
||||
if u == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
copy := new(UpdateStrategy)
|
||||
|
||||
if u.Stagger != nil {
|
||||
copy.Stagger = helper.TimeToPtr(*u.Stagger)
|
||||
}
|
||||
|
||||
if u.MaxParallel != nil {
|
||||
copy.MaxParallel = helper.IntToPtr(*u.MaxParallel)
|
||||
}
|
||||
|
||||
if u.HealthCheck != nil {
|
||||
copy.HealthCheck = helper.StringToPtr(*u.HealthCheck)
|
||||
}
|
||||
|
||||
if u.MinHealthyTime != nil {
|
||||
copy.MinHealthyTime = helper.TimeToPtr(*u.MinHealthyTime)
|
||||
}
|
||||
|
||||
if u.HealthyDeadline != nil {
|
||||
copy.HealthyDeadline = helper.TimeToPtr(*u.HealthyDeadline)
|
||||
}
|
||||
|
||||
if u.AutoRevert != nil {
|
||||
copy.AutoRevert = helper.BoolToPtr(*u.AutoRevert)
|
||||
}
|
||||
|
||||
if u.Canary != nil {
|
||||
copy.Canary = helper.IntToPtr(*u.Canary)
|
||||
}
|
||||
|
||||
return copy
|
||||
}
|
||||
|
||||
// Merge overlays the set fields of o onto u. Unset (nil) fields of o are
// ignored; each merged value is re-allocated so u does not alias o.
func (u *UpdateStrategy) Merge(o *UpdateStrategy) {
	if o == nil {
		return
	}

	if o.Stagger != nil {
		u.Stagger = helper.TimeToPtr(*o.Stagger)
	}

	if o.MaxParallel != nil {
		u.MaxParallel = helper.IntToPtr(*o.MaxParallel)
	}

	if o.HealthCheck != nil {
		u.HealthCheck = helper.StringToPtr(*o.HealthCheck)
	}

	if o.MinHealthyTime != nil {
		u.MinHealthyTime = helper.TimeToPtr(*o.MinHealthyTime)
	}

	if o.HealthyDeadline != nil {
		u.HealthyDeadline = helper.TimeToPtr(*o.HealthyDeadline)
	}

	if o.AutoRevert != nil {
		u.AutoRevert = helper.BoolToPtr(*o.AutoRevert)
	}

	if o.Canary != nil {
		u.Canary = helper.IntToPtr(*o.Canary)
	}
}

// Canonicalize fills any unset fields with the values from
// DefaultUpdateStrategy so downstream consumers never see nil pointers.
func (u *UpdateStrategy) Canonicalize() {
	d := DefaultUpdateStrategy()

	if u.MaxParallel == nil {
		u.MaxParallel = d.MaxParallel
	}

	if u.Stagger == nil {
		u.Stagger = d.Stagger
	}

	if u.HealthCheck == nil {
		u.HealthCheck = d.HealthCheck
	}

	if u.HealthyDeadline == nil {
		u.HealthyDeadline = d.HealthyDeadline
	}

	if u.MinHealthyTime == nil {
		u.MinHealthyTime = d.MinHealthyTime
	}

	if u.AutoRevert == nil {
		u.AutoRevert = d.AutoRevert
	}

	if u.Canary == nil {
		u.Canary = d.Canary
	}
}

// Empty returns whether the UpdateStrategy is empty or has user defined values.
// A field set to its zero value still counts as empty here.
func (u *UpdateStrategy) Empty() bool {
	if u == nil {
		return true
	}

	if u.Stagger != nil && *u.Stagger != 0 {
		return false
	}

	if u.MaxParallel != nil && *u.MaxParallel != 0 {
		return false
	}

	if u.HealthCheck != nil && *u.HealthCheck != "" {
		return false
	}

	if u.MinHealthyTime != nil && *u.MinHealthyTime != 0 {
		return false
	}

	if u.HealthyDeadline != nil && *u.HealthyDeadline != 0 {
		return false
	}

	if u.AutoRevert != nil && *u.AutoRevert {
		return false
	}

	if u.Canary != nil && *u.Canary != 0 {
		return false
	}

	return true
}

// PeriodicConfig is for serializing periodic config for a job.
type PeriodicConfig struct {
	Enabled         *bool
	Spec            *string
	SpecType        *string
	ProhibitOverlap *bool   `mapstructure:"prohibit_overlap"`
	TimeZone        *string `mapstructure:"time_zone"`
}

// Canonicalize fills in defaults for any unset periodic-config fields:
// enabled, cron spec type, no overlap prohibition, and the UTC time zone.
func (p *PeriodicConfig) Canonicalize() {
	if p.Enabled == nil {
		p.Enabled = helper.BoolToPtr(true)
	}
	if p.Spec == nil {
		p.Spec = helper.StringToPtr("")
	}
	if p.SpecType == nil {
		p.SpecType = helper.StringToPtr(PeriodicSpecCron)
	}
	if p.ProhibitOverlap == nil {
		p.ProhibitOverlap = helper.BoolToPtr(false)
	}
	if p.TimeZone == nil || *p.TimeZone == "" {
		p.TimeZone = helper.StringToPtr("UTC")
	}
}
|
||||
|
||||
// Next returns the closest time instant matching the spec that is after the
|
||||
// passed time. If no matching instance exists, the zero value of time.Time is
|
||||
// returned. The `time.Location` of the returned value matches that of the
|
||||
// passed time.
|
||||
func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
|
||||
if *p.SpecType == PeriodicSpecCron {
|
||||
if e, err := cronexpr.Parse(*p.Spec); err == nil {
|
||||
return e.Next(fromTime)
|
||||
}
|
||||
}
|
||||
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (p *PeriodicConfig) GetLocation() (*time.Location, error) {
|
||||
if p.TimeZone == nil || *p.TimeZone == "" {
|
||||
return time.UTC, nil
|
||||
}
|
||||
|
||||
return time.LoadLocation(*p.TimeZone)
|
||||
}
|
||||
|
||||
// ParameterizedJobConfig is used to configure the parameterized job.
type ParameterizedJobConfig struct {
	// Payload controls whether a dispatch payload is required/forbidden.
	Payload string
	// MetaRequired lists meta keys a dispatch must provide.
	MetaRequired []string `mapstructure:"meta_required"`
	// MetaOptional lists meta keys a dispatch may provide.
	MetaOptional []string `mapstructure:"meta_optional"`
}

// Job is used to serialize a job.
// Scalar fields are pointers so that unset values can be detected and
// filled in by Canonicalize.
type Job struct {
	Stop              *bool
	Region            *string
	Namespace         *string
	ID                *string
	ParentID          *string
	Name              *string
	Type              *string
	Priority          *int
	AllAtOnce         *bool `mapstructure:"all_at_once"`
	Datacenters       []string
	Constraints       []*Constraint
	TaskGroups        []*TaskGroup
	Update            *UpdateStrategy
	Periodic          *PeriodicConfig
	ParameterizedJob  *ParameterizedJobConfig
	Payload           []byte
	Meta              map[string]string
	VaultToken        *string `mapstructure:"vault_token"`
	Status            *string
	StatusDescription *string
	Stable            *bool
	Version           *uint64
	SubmitTime        *int64
	CreateIndex       *uint64
	ModifyIndex       *uint64
	JobModifyIndex    *uint64
}
|
||||
|
||||
// IsPeriodic returns whether a job is periodic.
func (j *Job) IsPeriodic() bool {
	return j.Periodic != nil
}

// IsParameterized returns whether a job is parameterized job.
func (j *Job) IsParameterized() bool {
	return j.ParameterizedJob != nil
}
|
||||
|
||||
func (j *Job) Canonicalize() {
|
||||
if j.ID == nil {
|
||||
j.ID = helper.StringToPtr("")
|
||||
}
|
||||
if j.Name == nil {
|
||||
j.Name = helper.StringToPtr(*j.ID)
|
||||
}
|
||||
if j.ParentID == nil {
|
||||
j.ParentID = helper.StringToPtr("")
|
||||
}
|
||||
if j.Namespace == nil {
|
||||
j.Namespace = helper.StringToPtr(DefaultNamespace)
|
||||
}
|
||||
if j.Priority == nil {
|
||||
j.Priority = helper.IntToPtr(50)
|
||||
}
|
||||
if j.Stop == nil {
|
||||
j.Stop = helper.BoolToPtr(false)
|
||||
}
|
||||
if j.Region == nil {
|
||||
j.Region = helper.StringToPtr("global")
|
||||
}
|
||||
if j.Namespace == nil {
|
||||
j.Namespace = helper.StringToPtr("default")
|
||||
}
|
||||
if j.Type == nil {
|
||||
j.Type = helper.StringToPtr("service")
|
||||
}
|
||||
if j.AllAtOnce == nil {
|
||||
j.AllAtOnce = helper.BoolToPtr(false)
|
||||
}
|
||||
if j.VaultToken == nil {
|
||||
j.VaultToken = helper.StringToPtr("")
|
||||
}
|
||||
if j.Status == nil {
|
||||
j.Status = helper.StringToPtr("")
|
||||
}
|
||||
if j.StatusDescription == nil {
|
||||
j.StatusDescription = helper.StringToPtr("")
|
||||
}
|
||||
if j.Stable == nil {
|
||||
j.Stable = helper.BoolToPtr(false)
|
||||
}
|
||||
if j.Version == nil {
|
||||
j.Version = helper.Uint64ToPtr(0)
|
||||
}
|
||||
if j.CreateIndex == nil {
|
||||
j.CreateIndex = helper.Uint64ToPtr(0)
|
||||
}
|
||||
if j.ModifyIndex == nil {
|
||||
j.ModifyIndex = helper.Uint64ToPtr(0)
|
||||
}
|
||||
if j.JobModifyIndex == nil {
|
||||
j.JobModifyIndex = helper.Uint64ToPtr(0)
|
||||
}
|
||||
if j.Periodic != nil {
|
||||
j.Periodic.Canonicalize()
|
||||
}
|
||||
if j.Update != nil {
|
||||
j.Update.Canonicalize()
|
||||
}
|
||||
|
||||
for _, tg := range j.TaskGroups {
|
||||
tg.Canonicalize(j)
|
||||
}
|
||||
}
|
||||
|
||||
// JobSummary summarizes the state of the allocations of a job
type JobSummary struct {
	JobID     string
	Namespace string
	// Summary maps task group name to its allocation state counts.
	Summary  map[string]TaskGroupSummary
	Children *JobChildrenSummary

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
|
||||
|
||||
// JobChildrenSummary contains the summary of children job status,
// bucketed by lifecycle state.
type JobChildrenSummary struct {
	Pending int64
	Running int64
	Dead    int64
}

// Sum returns the total number of child jobs across all states. A nil
// receiver is treated as an empty summary.
func (jc *JobChildrenSummary) Sum() int {
	if jc == nil {
		return 0
	}

	total := jc.Pending + jc.Running + jc.Dead
	return int(total)
}
|
||||
|
||||
// TaskGroup summarizes the state of all the allocations of a particular
// TaskGroup
type TaskGroupSummary struct {
	Queued   int
	Complete int
	Failed   int
	Running  int
	Starting int
	Lost     int
}

// JobListStub is used to return a subset of information about
// jobs during list operations.
type JobListStub struct {
	ID                string
	ParentID          string
	Name              string
	Type              string
	Priority          int
	Periodic          bool
	ParameterizedJob  bool
	Stop              bool
	Status            string
	StatusDescription string
	JobSummary        *JobSummary
	CreateIndex       uint64
	ModifyIndex       uint64
	JobModifyIndex    uint64
	SubmitTime        int64
}
|
||||
|
||||
// JobIDSort is used to sort jobs by their job ID's.
|
||||
type JobIDSort []*JobListStub
|
||||
|
||||
func (j JobIDSort) Len() int {
|
||||
return len(j)
|
||||
}
|
||||
|
||||
func (j JobIDSort) Less(a, b int) bool {
|
||||
return j[a].ID < j[b].ID
|
||||
}
|
||||
|
||||
func (j JobIDSort) Swap(a, b int) {
|
||||
j[a], j[b] = j[b], j[a]
|
||||
}
|
||||
|
||||
// NewServiceJob creates and returns a new service-style job
// for long-lived processes using the provided name, ID, and
// relative job priority.
func NewServiceJob(id, name, region string, pri int) *Job {
	return newJob(id, name, region, JobTypeService, pri)
}

// NewBatchJob creates and returns a new batch-style job for
// short-lived processes using the provided name and ID along
// with the relative job priority.
func NewBatchJob(id, name, region string, pri int) *Job {
	return newJob(id, name, region, JobTypeBatch, pri)
}

// newJob is used to create a new Job struct. Only the identifying fields
// are set; callers rely on Canonicalize to fill in the remaining defaults.
func newJob(id, name, region, typ string, pri int) *Job {
	return &Job{
		Region:   &region,
		ID:       &id,
		Name:     &name,
		Type:     &typ,
		Priority: &pri,
	}
}
|
||||
|
||||
// SetMeta is used to set arbitrary k/v pairs of metadata on a job.
|
||||
func (j *Job) SetMeta(key, val string) *Job {
|
||||
if j.Meta == nil {
|
||||
j.Meta = make(map[string]string)
|
||||
}
|
||||
j.Meta[key] = val
|
||||
return j
|
||||
}
|
||||
|
||||
// AddDatacenter is used to add a datacenter to a job.
// It returns the job to allow call chaining.
func (j *Job) AddDatacenter(dc string) *Job {
	j.Datacenters = append(j.Datacenters, dc)
	return j
}

// Constrain is used to add a constraint to a job.
// It returns the job to allow call chaining.
func (j *Job) Constrain(c *Constraint) *Job {
	j.Constraints = append(j.Constraints, c)
	return j
}

// AddTaskGroup adds a task group to an existing job.
// It returns the job to allow call chaining.
func (j *Job) AddTaskGroup(grp *TaskGroup) *Job {
	j.TaskGroups = append(j.TaskGroups, grp)
	return j
}

// AddPeriodicConfig adds a periodic config to an existing job,
// replacing any previous periodic config. It returns the job.
func (j *Job) AddPeriodicConfig(cfg *PeriodicConfig) *Job {
	j.Periodic = cfg
	return j
}
|
||||
|
||||
// WriteRequest carries the parameters common to all write requests and is
// embedded in the request types below.
type WriteRequest struct {
	// The target region for this write
	Region string

	// Namespace is the target namespace for this write
	Namespace string

	// SecretID is the secret ID of an ACL token
	SecretID string
}

// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
	Job *Job
	WriteRequest
}

// JobValidateResponse is the response from validate request
type JobValidateResponse struct {
	// DriverConfigValidated indicates whether the agent validated the driver
	// config
	DriverConfigValidated bool

	// ValidationErrors is a list of validation errors
	ValidationErrors []string

	// Error is a string version of any error that may have occurred
	Error string

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
}

// JobRevertRequest is used to revert a job to a prior version.
type JobRevertRequest struct {
	// JobID is the ID of the job being reverted
	JobID string

	// JobVersion the version to revert to.
	JobVersion uint64

	// EnforcePriorVersion if set will enforce that the job is at the given
	// version before reverting.
	EnforcePriorVersion *uint64

	WriteRequest
}

// JobRegisterRequest is used to register or update a job.
type JobRegisterRequest struct {
	Job *Job
	// If EnforceIndex is set then the job will only be registered if the passed
	// JobModifyIndex matches the current Jobs index. If the index is zero, the
	// register only occurs if the job is new.
	EnforceIndex   bool
	JobModifyIndex uint64
	PolicyOverride bool

	WriteRequest
}

// RegisterJobRequest is used to serialize a job registration
type RegisterJobRequest struct {
	Job            *Job
	EnforceIndex   bool   `json:",omitempty"`
	JobModifyIndex uint64 `json:",omitempty"`
	PolicyOverride bool   `json:",omitempty"`
}

// JobRegisterResponse is used to respond to a job registration
type JobRegisterResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	JobModifyIndex  uint64

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string

	QueryMeta
}

// JobDeregisterResponse is used to respond to a job deregistration
type JobDeregisterResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	JobModifyIndex  uint64
	QueryMeta
}

// JobPlanRequest is the request body for a job plan (dry-run) call.
type JobPlanRequest struct {
	Job            *Job
	Diff           bool
	PolicyOverride bool
	WriteRequest
}

// JobPlanResponse describes the scheduler's dry-run result for a job.
type JobPlanResponse struct {
	JobModifyIndex     uint64
	CreatedEvals       []*Evaluation
	Diff               *JobDiff
	Annotations        *PlanAnnotations
	FailedTGAllocs     map[string]*AllocationMetric
	NextPeriodicLaunch time.Time

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
}

// JobDiff describes the differences between two versions of a job.
type JobDiff struct {
	Type       string
	ID         string
	Fields     []*FieldDiff
	Objects    []*ObjectDiff
	TaskGroups []*TaskGroupDiff
}

// TaskGroupDiff describes the differences within a single task group.
type TaskGroupDiff struct {
	Type    string
	Name    string
	Fields  []*FieldDiff
	Objects []*ObjectDiff
	Tasks   []*TaskDiff
	Updates map[string]uint64
}

// TaskDiff describes the differences within a single task.
type TaskDiff struct {
	Type        string
	Name        string
	Fields      []*FieldDiff
	Objects     []*ObjectDiff
	Annotations []string
}

// FieldDiff describes a change to a single scalar field.
type FieldDiff struct {
	Type        string
	Name        string
	Old, New    string
	Annotations []string
}

// ObjectDiff describes a change to a nested object.
type ObjectDiff struct {
	Type    string
	Name    string
	Fields  []*FieldDiff
	Objects []*ObjectDiff
}

// PlanAnnotations carries scheduler annotations for a plan, keyed by
// task group name.
type PlanAnnotations struct {
	DesiredTGUpdates map[string]*DesiredUpdates
}

// DesiredUpdates counts the placement decisions a plan would make for one
// task group.
type DesiredUpdates struct {
	Ignore            uint64
	Place             uint64
	Migrate           uint64
	Stop              uint64
	InPlaceUpdate     uint64
	DestructiveUpdate uint64
	Canary            uint64
}

// JobDispatchRequest is the request body for dispatching a parameterized job.
type JobDispatchRequest struct {
	JobID   string
	Payload []byte
	Meta    map[string]string
}

// JobDispatchResponse identifies the child job and evaluation created by a
// dispatch.
type JobDispatchResponse struct {
	DispatchedJobID string
	EvalID          string
	EvalCreateIndex uint64
	JobCreateIndex  uint64
	WriteMeta
}

// JobVersionsResponse is used for a job get versions request
type JobVersionsResponse struct {
	Versions []*Job
	Diffs    []*JobDiff
	QueryMeta
}

// JobStabilityRequest is used to marked a job as stable.
type JobStabilityRequest struct {
	// Job to set the stability on
	JobID      string
	JobVersion uint64

	// Set the stability
	Stable bool
	WriteRequest
}

// JobStabilityResponse is the response when marking a job as stable.
type JobStabilityResponse struct {
	JobModifyIndex uint64
	WriteMeta
}
|
|
@ -0,0 +1,110 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/helper"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
)
|
||||
|
||||
// MockJob returns a canonicalized service job fixture for tests: a single
// "web" task group of 10 exec tasks with services, checks, and resources
// populated. Each call generates a fresh random job ID.
func MockJob() *Job {
	job := &Job{
		Region:      helper.StringToPtr("global"),
		ID:          helper.StringToPtr(uuid.Generate()),
		Name:        helper.StringToPtr("my-job"),
		Type:        helper.StringToPtr("service"),
		Priority:    helper.IntToPtr(50),
		AllAtOnce:   helper.BoolToPtr(false),
		Datacenters: []string{"dc1"},
		Constraints: []*Constraint{
			{
				LTarget: "${attr.kernel.name}",
				RTarget: "linux",
				Operand: "=",
			},
		},
		TaskGroups: []*TaskGroup{
			{
				Name:  helper.StringToPtr("web"),
				Count: helper.IntToPtr(10),
				EphemeralDisk: &EphemeralDisk{
					SizeMB: helper.IntToPtr(150),
				},
				RestartPolicy: &RestartPolicy{
					Attempts: helper.IntToPtr(3),
					Interval: helper.TimeToPtr(10 * time.Minute),
					Delay:    helper.TimeToPtr(1 * time.Minute),
					Mode:     helper.StringToPtr("delay"),
				},
				Tasks: []*Task{
					{
						Name:   "web",
						Driver: "exec",
						Config: map[string]interface{}{
							"command": "/bin/date",
						},
						Env: map[string]string{
							"FOO": "bar",
						},
						Services: []*Service{
							{
								Name:      "${TASK}-frontend",
								PortLabel: "http",
								Tags:      []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"},
								Checks: []ServiceCheck{
									{
										Name:     "check-table",
										Type:     "script",
										Command:  "/usr/local/check-table-${meta.database}",
										Args:     []string{"${meta.version}"},
										Interval: 30 * time.Second,
										Timeout:  5 * time.Second,
									},
								},
							},
							{
								Name:      "${TASK}-admin",
								PortLabel: "admin",
							},
						},
						LogConfig: DefaultLogConfig(),
						Resources: &Resources{
							CPU:      helper.IntToPtr(500),
							MemoryMB: helper.IntToPtr(256),
							Networks: []*NetworkResource{
								{
									MBits:        helper.IntToPtr(50),
									DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}},
								},
							},
						},
						Meta: map[string]string{
							"foo": "bar",
						},
					},
				},
				Meta: map[string]string{
					"elb_check_type":     "http",
					"elb_check_interval": "30s",
					"elb_check_min":      "3",
				},
			},
		},
		Meta: map[string]string{
			"owner": "armon",
		},
	}
	// Fill remaining defaults so the fixture matches what the API returns.
	job.Canonicalize()
	return job
}
|
||||
|
||||
func MockPeriodicJob() *Job {
|
||||
j := MockJob()
|
||||
j.Type = helper.StringToPtr("batch")
|
||||
j.Periodic = &PeriodicConfig{
|
||||
Enabled: helper.BoolToPtr(true),
|
||||
SpecType: helper.StringToPtr("cron"),
|
||||
Spec: helper.StringToPtr("*/30 * * * *"),
|
||||
}
|
||||
return j
|
||||
}
|
|
@ -0,0 +1,90 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Namespaces is used to query the namespace endpoints.
type Namespaces struct {
	// client is the API client used to issue requests.
	client *Client
}

// Namespaces returns a new handle on the namespaces.
func (c *Client) Namespaces() *Namespaces {
	return &Namespaces{client: c}
}
|
||||
|
||||
// List is used to dump all of the namespaces.
|
||||
func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) {
|
||||
var resp []*Namespace
|
||||
qm, err := n.client.query("/v1/namespaces", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(NamespaceIndexSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// PrefixList is used to do a PrefixList search over namespaces
|
||||
func (n *Namespaces) PrefixList(prefix string, q *QueryOptions) ([]*Namespace, *QueryMeta, error) {
|
||||
if q == nil {
|
||||
q = &QueryOptions{Prefix: prefix}
|
||||
} else {
|
||||
q.Prefix = prefix
|
||||
}
|
||||
|
||||
return n.List(q)
|
||||
}
|
||||
|
||||
// Info is used to query a single namespace by its name.
|
||||
func (n *Namespaces) Info(name string, q *QueryOptions) (*Namespace, *QueryMeta, error) {
|
||||
var resp Namespace
|
||||
qm, err := n.client.query("/v1/namespace/"+name, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, qm, nil
|
||||
}
|
||||
|
||||
// Register is used to register a namespace.
|
||||
func (n *Namespaces) Register(namespace *Namespace, q *WriteOptions) (*WriteMeta, error) {
|
||||
wm, err := n.client.write("/v1/namespace", namespace, nil, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return wm, nil
|
||||
}
|
||||
|
||||
// Delete is used to delete a namespace
|
||||
func (n *Namespaces) Delete(namespace string, q *WriteOptions) (*WriteMeta, error) {
|
||||
wm, err := n.client.delete(fmt.Sprintf("/v1/namespace/%s", namespace), nil, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return wm, nil
|
||||
}
|
||||
|
||||
// Namespace is used to serialize a namespace.
type Namespace struct {
	Name        string
	Description string
	CreateIndex uint64
	ModifyIndex uint64
}

// NamespaceIndexSort is a wrapper to sort Namespaces by CreateIndex. We
// reverse the test so that we get the highest index first.
type NamespaceIndexSort []*Namespace

// Len returns the number of namespaces.
func (n NamespaceIndexSort) Len() int { return len(n) }

// Less reports whether element i has a higher create index than j,
// yielding a descending order.
func (n NamespaceIndexSort) Less(i, j int) bool { return n[i].CreateIndex > n[j].CreateIndex }

// Swap exchanges the elements at i and j.
func (n NamespaceIndexSort) Swap(i, j int) { n[i], n[j] = n[j], n[i] }
|
|
@ -0,0 +1,199 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Nodes is used to query node-related API endpoints
type Nodes struct {
	// client is the API client used to issue requests.
	client *Client
}

// Nodes returns a handle on the node endpoints.
func (c *Client) Nodes() *Nodes {
	return &Nodes{client: c}
}
|
||||
|
||||
// List is used to list out all of the nodes
|
||||
func (n *Nodes) List(q *QueryOptions) ([]*NodeListStub, *QueryMeta, error) {
|
||||
var resp NodeIndexSort
|
||||
qm, err := n.client.query("/v1/nodes", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(resp)
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
func (n *Nodes) PrefixList(prefix string) ([]*NodeListStub, *QueryMeta, error) {
|
||||
return n.List(&QueryOptions{Prefix: prefix})
|
||||
}
|
||||
|
||||
// Info is used to query a specific node by its ID.
|
||||
func (n *Nodes) Info(nodeID string, q *QueryOptions) (*Node, *QueryMeta, error) {
|
||||
var resp Node
|
||||
qm, err := n.client.query("/v1/node/"+nodeID, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &resp, qm, nil
|
||||
}
|
||||
|
||||
// ToggleDrain is used to toggle drain mode on/off for a given node.
|
||||
func (n *Nodes) ToggleDrain(nodeID string, drain bool, q *WriteOptions) (*WriteMeta, error) {
|
||||
drainArg := strconv.FormatBool(drain)
|
||||
wm, err := n.client.write("/v1/node/"+nodeID+"/drain?enable="+drainArg, nil, nil, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return wm, nil
|
||||
}
|
||||
|
||||
// Allocations is used to return the allocations associated with a node.
|
||||
func (n *Nodes) Allocations(nodeID string, q *QueryOptions) ([]*Allocation, *QueryMeta, error) {
|
||||
var resp []*Allocation
|
||||
qm, err := n.client.query("/v1/node/"+nodeID+"/allocations", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sort.Sort(AllocationSort(resp))
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// ForceEvaluate is used to force-evaluate an existing node.
|
||||
func (n *Nodes) ForceEvaluate(nodeID string, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
var resp nodeEvalResponse
|
||||
wm, err := n.client.write("/v1/node/"+nodeID+"/evaluate", nil, &resp, q)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return resp.EvalID, wm, nil
|
||||
}
|
||||
|
||||
// Stats fetches resource-usage statistics for a node by querying the
// client node's own HTTP endpoint (requires direct connectivity to it).
func (n *Nodes) Stats(nodeID string, q *QueryOptions) (*HostStats, error) {
	nodeClient, err := n.client.GetNodeClient(nodeID, q)
	if err != nil {
		return nil, err
	}
	var resp HostStats
	if _, err := nodeClient.query("/v1/client/stats", &resp, nil); err != nil {
		return nil, err
	}
	return &resp, nil
}

// GC asks the client node to run garbage collection; the response body is
// discarded and only the error is reported.
func (n *Nodes) GC(nodeID string, q *QueryOptions) error {
	nodeClient, err := n.client.GetNodeClient(nodeID, q)
	if err != nil {
		return err
	}

	var resp struct{}
	_, err = nodeClient.query("/v1/client/gc", &resp, nil)
	return err
}
|
||||
|
||||
// Node is used to deserialize a node entry.
type Node struct {
	ID                string
	Datacenter        string
	Name              string
	HTTPAddr          string
	TLSEnabled        bool
	Attributes        map[string]string
	Resources         *Resources
	Reserved          *Resources
	Links             map[string]string
	Meta              map[string]string
	NodeClass         string
	Drain             bool
	Status            string
	StatusDescription string
	StatusUpdatedAt   int64
	CreateIndex       uint64
	ModifyIndex       uint64
}
|
||||
|
||||
// HostStats represents resource usage stats of the host running a Nomad client
type HostStats struct {
	Memory           *HostMemoryStats
	CPU              []*HostCPUStats
	DiskStats        []*HostDiskStats
	Uptime           uint64
	CPUTicksConsumed float64
}

// HostMemoryStats holds the host's memory usage, in bytes.
type HostMemoryStats struct {
	Total     uint64
	Available uint64
	Used      uint64
	Free      uint64
}

// HostCPUStats holds per-CPU time breakdowns.
type HostCPUStats struct {
	CPU    string
	User   float64
	System float64
	Idle   float64
}

// HostDiskStats holds usage information for a single mounted device.
type HostDiskStats struct {
	Device            string
	Mountpoint        string
	Size              uint64
	Used              uint64
	Available         uint64
	UsedPercent       float64
	InodesUsedPercent float64
}
|
||||
|
||||
// NodeListStub is a subset of information returned during
// node list operations.
type NodeListStub struct {
	ID                string
	Datacenter        string
	Name              string
	NodeClass         string
	Version           string
	Drain             bool
	Status            string
	StatusDescription string
	CreateIndex       uint64
	ModifyIndex       uint64
}

// NodeIndexSort reverse sorts nodes by CreateIndex
type NodeIndexSort []*NodeListStub

// Len returns the number of node stubs.
func (n NodeIndexSort) Len() int { return len(n) }

// Less places higher create indexes first (descending order).
func (n NodeIndexSort) Less(i, j int) bool { return n[i].CreateIndex > n[j].CreateIndex }

// Swap exchanges the stubs at i and j.
func (n NodeIndexSort) Swap(i, j int) { n[i], n[j] = n[j], n[i] }
|
||||
|
||||
// nodeEvalResponse is used to decode a force-eval.
type nodeEvalResponse struct {
	// EvalID is the evaluation created by the force-eval.
	EvalID string
}

// AllocationSort reverse sorts allocs by CreateIndex.
type AllocationSort []*Allocation

// Len returns the number of allocations.
func (a AllocationSort) Len() int {
	return len(a)
}

// Less places higher create indexes first (descending order).
func (a AllocationSort) Less(i, j int) bool {
	return a[i].CreateIndex > a[j].CreateIndex
}

// Swap exchanges the allocations at i and j.
func (a AllocationSort) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}
|
|
@ -0,0 +1,87 @@
|
|||
package api
|
||||
|
||||
// Operator can be used to perform low-level operator tasks for Nomad.
type Operator struct {
	// c is the API client used to issue requests.
	c *Client
}

// Operator returns a handle to the operator endpoints.
func (c *Client) Operator() *Operator {
	return &Operator{c}
}
|
||||
|
||||
// RaftServer has information about a server in the Raft configuration.
type RaftServer struct {
	// ID is the unique ID for the server. These are currently the same
	// as the address, but they will be changed to a real GUID in a future
	// release of Nomad.
	ID string

	// Node is the node name of the server, as known by Nomad, or this
	// will be set to "(unknown)" otherwise.
	Node string

	// Address is the IP:port of the server, used for Raft communications.
	Address string

	// Leader is true if this server is the current cluster leader.
	Leader bool

	// Voter is true if this server has a vote in the cluster. This might
	// be false if the server is staging and still coming online, or if
	// it's a non-voting server, which will be added in a future release of
	// Nomad.
	Voter bool
}
|
||||
|
||||
// RaftConfiguration is returned when querying for the current Raft
// configuration.
type RaftConfiguration struct {
	// Servers has the list of servers in the Raft configuration.
	Servers []*RaftServer

	// Index has the Raft index of this configuration.
	Index uint64
}
|
||||
|
||||
// RaftGetConfiguration is used to query the current Raft peer set via
// GET /v1/operator/raft/configuration.
func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
	r, err := op.c.newRequest("GET", "/v1/operator/raft/configuration")
	if err != nil {
		return nil, err
	}
	r.setQueryOptions(q)
	_, resp, err := requireOK(op.c.doRequest(r))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var out RaftConfiguration
	if err := decodeBody(resp, &out); err != nil {
		return nil, err
	}
	return &out, nil
}
|
||||
|
||||
// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft
// quorum but no longer known to Serf or the catalog) by address in the form of
// "IP:port".
func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
	r, err := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
	if err != nil {
		return err
	}
	r.setWriteOptions(q)

	// TODO (alexdadgar) Currently we made address a query parameter. Once
	// IDs are in place this will be DELETE /v1/operator/raft/peer/<id>.
	r.params.Set("address", address)

	_, resp, err := requireOK(op.c.doRequest(r))
	if err != nil {
		return err
	}

	// Response body is not inspected; close it so the connection can be
	// reused.
	resp.Body.Close()
	return nil
}
|
|
@ -0,0 +1,38 @@
|
|||
package api
|
||||
|
||||
import "io"
|
||||
|
||||
// Raw can be used to do raw queries against custom endpoints
type Raw struct {
	// c is the API client used to issue requests.
	c *Client
}

// Raw returns a handle to query endpoints
func (c *Client) Raw() *Raw {
	return &Raw{c}
}

// Query is used to do a GET request against an endpoint
// and deserialize the response into an interface using
// standard Nomad conventions.
func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
	return raw.c.query(endpoint, out, q)
}

// Response is used to make a GET request against an endpoint and returns the
// response body
func (raw *Raw) Response(endpoint string, q *QueryOptions) (io.ReadCloser, error) {
	return raw.c.rawQuery(endpoint, q)
}

// Write is used to do a PUT request against an endpoint
// and serialize/deserialized using the standard Nomad conventions.
func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
	return raw.c.write(endpoint, in, out, q)
}

// Delete is used to do a DELETE request against an endpoint
// and serialize/deserialized using the standard Nomad conventions.
func (raw *Raw) Delete(endpoint string, out interface{}, q *WriteOptions) (*WriteMeta, error) {
	return raw.c.delete(endpoint, out, q)
}
|
|
@ -0,0 +1,23 @@
|
|||
package api
|
||||
|
||||
import "sort"
|
||||
|
||||
// Regions is used to query the regions in the cluster.
type Regions struct {
	// client is the API client used to issue requests.
	client *Client
}

// Regions returns a handle on the regions endpoints.
func (c *Client) Regions() *Regions {
	return &Regions{client: c}
}

// List returns a sorted list of all of the regions in the cluster.
func (r *Regions) List() ([]string, error) {
	var resp []string
	if _, err := r.client.query("/v1/regions", &resp, nil); err != nil {
		return nil, err
	}
	// Sort for deterministic output regardless of server ordering.
	sort.Strings(resp)
	return resp, nil
}
|
|
@ -0,0 +1,81 @@
|
|||
package api
|
||||
|
||||
import "github.com/hashicorp/nomad/helper"
|
||||
|
||||
// Resources encapsulates the required resources of
// a given task or task group. Pointer fields distinguish "unset" (nil)
// from an explicit zero so Canonicalize can fill in defaults.
type Resources struct {
	CPU      *int
	MemoryMB *int `mapstructure:"memory"`
	DiskMB   *int `mapstructure:"disk"`
	IOPS     *int
	Networks []*NetworkResource
}
|
||||
|
||||
// Canonicalize fills in default values for any unset (nil) resource fields
// and canonicalizes each network resource.
// NOTE(review): DiskMB is intentionally left nil here — presumably disk
// defaults are applied elsewhere (e.g. at the task-group level); confirm.
func (r *Resources) Canonicalize() {
	if r.CPU == nil {
		r.CPU = helper.IntToPtr(100)
	}
	if r.MemoryMB == nil {
		r.MemoryMB = helper.IntToPtr(10)
	}
	if r.IOPS == nil {
		r.IOPS = helper.IntToPtr(0)
	}
	for _, n := range r.Networks {
		n.Canonicalize()
	}
}
|
||||
|
||||
func MinResources() *Resources {
|
||||
return &Resources{
|
||||
CPU: helper.IntToPtr(100),
|
||||
MemoryMB: helper.IntToPtr(10),
|
||||
IOPS: helper.IntToPtr(0),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Merge merges this resource with another resource. Any field set (non-nil,
// or non-empty for Networks) on other overwrites the corresponding field on
// the receiver; other itself is not modified.
func (r *Resources) Merge(other *Resources) {
	if other == nil {
		return
	}
	if other.CPU != nil {
		r.CPU = other.CPU
	}
	if other.MemoryMB != nil {
		r.MemoryMB = other.MemoryMB
	}
	if other.DiskMB != nil {
		r.DiskMB = other.DiskMB
	}
	if other.IOPS != nil {
		r.IOPS = other.IOPS
	}
	if len(other.Networks) != 0 {
		// Networks are replaced wholesale, not merged element-wise.
		r.Networks = other.Networks
	}
}
|
||||
|
||||
// Port is a single named port; Value is only meaningful for static
// (reserved) ports.
type Port struct {
	Label string
	Value int `mapstructure:"static"`
}

// NetworkResource is used to describe required network
// resources of a given task.
type NetworkResource struct {
	Device        string
	CIDR          string
	IP            string
	MBits         *int
	ReservedPorts []Port
	DynamicPorts  []Port
}
|
||||
|
||||
// Canonicalize fills in default values for unset network fields; bandwidth
// defaults to 10 MBits.
func (n *NetworkResource) Canonicalize() {
	if n.MBits == nil {
		n.MBits = helper.IntToPtr(10)
	}
}
|
|
@ -0,0 +1,39 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/nomad/api/contexts"
|
||||
)
|
||||
|
||||
// Search is used to query the search endpoints.
type Search struct {
	// client is the API client used to issue requests.
	client *Client
}

// Search returns a handle on the Search endpoints
func (c *Client) Search() *Search {
	return &Search{client: c}
}

// PrefixSearch returns a list of matches for a particular context and prefix.
func (s *Search) PrefixSearch(prefix string, context contexts.Context, q *QueryOptions) (*SearchResponse, *QueryMeta, error) {
	var resp SearchResponse
	req := &SearchRequest{Prefix: prefix, Context: context}

	qm, err := s.client.putQuery("/v1/search", req, &resp, q)
	if err != nil {
		return nil, nil, err
	}

	return &resp, qm, nil
}

// SearchRequest is the request body for a prefix search.
type SearchRequest struct {
	Prefix  string
	Context contexts.Context
	QueryOptions
}

// SearchResponse holds prefix matches per context; Truncations indicates
// contexts whose result list was cut off.
type SearchResponse struct {
	Matches     map[contexts.Context][]string
	Truncations map[contexts.Context]bool
	QueryMeta
}
|
|
@ -0,0 +1,79 @@
|
|||
package api
|
||||
|
||||
import "fmt"
|
||||
|
||||
// SentinelPolicies is used to query the Sentinel Policy endpoints.
type SentinelPolicies struct {
	// client is the API client used to issue requests.
	client *Client
}

// SentinelPolicies returns a new handle on the Sentinel policies.
func (c *Client) SentinelPolicies() *SentinelPolicies {
	return &SentinelPolicies{client: c}
}

// List is used to dump all of the policies.
func (a *SentinelPolicies) List(q *QueryOptions) ([]*SentinelPolicyListStub, *QueryMeta, error) {
	var resp []*SentinelPolicyListStub
	qm, err := a.client.query("/v1/sentinel/policies", &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return resp, qm, nil
}

// Upsert is used to create or update a policy. The policy must be non-nil
// and have a name.
func (a *SentinelPolicies) Upsert(policy *SentinelPolicy, q *WriteOptions) (*WriteMeta, error) {
	if policy == nil || policy.Name == "" {
		return nil, fmt.Errorf("missing policy name")
	}
	wm, err := a.client.write("/v1/sentinel/policy/"+policy.Name, policy, nil, q)
	if err != nil {
		return nil, err
	}
	return wm, nil
}

// Delete is used to delete a policy by name.
func (a *SentinelPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) {
	if policyName == "" {
		return nil, fmt.Errorf("missing policy name")
	}
	wm, err := a.client.delete("/v1/sentinel/policy/"+policyName, nil, q)
	if err != nil {
		return nil, err
	}
	return wm, nil
}

// Info is used to query a specific policy by name.
func (a *SentinelPolicies) Info(policyName string, q *QueryOptions) (*SentinelPolicy, *QueryMeta, error) {
	if policyName == "" {
		return nil, nil, fmt.Errorf("missing policy name")
	}
	var resp SentinelPolicy
	wm, err := a.client.query("/v1/sentinel/policy/"+policyName, &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return &resp, wm, nil
}

// SentinelPolicy is a full Sentinel policy, including its source.
type SentinelPolicy struct {
	Name             string
	Description      string
	Scope            string
	EnforcementLevel string
	Policy           string
	CreateIndex      uint64
	ModifyIndex      uint64
}

// SentinelPolicyListStub is the subset of policy fields returned by List
// (omits the policy source itself).
type SentinelPolicyListStub struct {
	Name             string
	Description      string
	Scope            string
	EnforcementLevel string
	CreateIndex      uint64
	ModifyIndex      uint64
}
|
|
@ -0,0 +1,43 @@
|
|||
package api
|
||||
|
||||
// Status is used to query the status-related endpoints.
type Status struct {
	// client is the API client used to issue requests.
	client *Client
}

// Status returns a handle on the status endpoints.
func (c *Client) Status() *Status {
	return &Status{client: c}
}

// Leader is used to query for the current cluster leader.
func (s *Status) Leader() (string, error) {
	var resp string
	_, err := s.client.query("/v1/status/leader", &resp, nil)
	if err != nil {
		return "", err
	}
	return resp, nil
}

// RegionLeader is used to query for the leader in the passed region.
func (s *Status) RegionLeader(region string) (string, error) {
	var resp string
	q := QueryOptions{Region: region}
	_, err := s.client.query("/v1/status/leader", &resp, &q)
	if err != nil {
		return "", err
	}
	return resp, nil
}

// Peers is used to query the addresses of the server peers
// in the cluster.
func (s *Status) Peers() ([]string, error) {
	var resp []string
	_, err := s.client.query("/v1/status/peers", &resp, nil)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
|
|
@ -0,0 +1,23 @@
|
|||
package api
|
||||
|
||||
// System is used to query the system-related endpoints.
type System struct {
	// client is the API client used to issue requests.
	client *Client
}

// System returns a handle on the system endpoints.
func (c *Client) System() *System {
	return &System{client: c}
}

// GarbageCollect triggers a server-side garbage collection run.
func (s *System) GarbageCollect() error {
	var req struct{}
	_, err := s.client.write("/v1/system/gc", &req, nil, nil)
	return err
}

// ReconcileSummaries triggers reconciliation of job summaries on the server.
func (s *System) ReconcileSummaries() error {
	var req struct{}
	_, err := s.client.write("/v1/system/reconcile/summaries", &req, nil, nil)
	return err
}
|
|
@ -0,0 +1,617 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/helper"
|
||||
)
|
||||
|
||||
// MemoryStats holds memory usage related stats
type MemoryStats struct {
	RSS            uint64
	Cache          uint64
	Swap           uint64
	MaxUsage       uint64
	KernelUsage    uint64
	KernelMaxUsage uint64

	// Measured lists which of the above fields were actually measured on
	// this platform.
	Measured []string
}
|
||||
|
||||
// CpuStats holds cpu usage related stats
type CpuStats struct {
	SystemMode       float64
	UserMode         float64
	TotalTicks       float64
	ThrottledPeriods uint64
	ThrottledTime    uint64
	Percent          float64

	// Measured lists which of the above fields were actually measured on
	// this platform.
	Measured []string
}
|
||||
|
||||
// ResourceUsage holds information related to cpu and memory stats
type ResourceUsage struct {
	MemoryStats *MemoryStats
	CpuStats    *CpuStats
}

// TaskResourceUsage holds aggregated resource usage of all processes in a Task
// and the resource usage of the individual pids
type TaskResourceUsage struct {
	ResourceUsage *ResourceUsage

	// Timestamp is when the sample was taken (integer epoch value; exact
	// unit is set by the producer — confirm before comparing).
	Timestamp int64

	// Pids maps process id to its individual resource usage.
	Pids map[string]*ResourceUsage
}

// AllocResourceUsage holds the aggregated task resource usage of the
// allocation.
type AllocResourceUsage struct {
	// ResourceUsage is the aggregate usage across all tasks.
	ResourceUsage *ResourceUsage

	// Tasks maps task name to its resource usage.
	Tasks map[string]*TaskResourceUsage

	Timestamp int64
}
|
||||
|
||||
// RestartPolicy defines how the Nomad client restarts
// tasks in a taskgroup when they fail
type RestartPolicy struct {
	Interval *time.Duration
	Attempts *int
	Delay    *time.Duration
	Mode     *string
}

// Merge copies every field that is set (non-nil) on rp onto the receiver,
// overwriting the receiver's values; rp is not modified.
func (r *RestartPolicy) Merge(rp *RestartPolicy) {
	if rp.Interval != nil {
		r.Interval = rp.Interval
	}
	if rp.Attempts != nil {
		r.Attempts = rp.Attempts
	}
	if rp.Delay != nil {
		r.Delay = rp.Delay
	}
	if rp.Mode != nil {
		r.Mode = rp.Mode
	}
}
|
||||
|
||||
// CheckRestart describes if and when a task should be restarted based on
// failing health checks.
type CheckRestart struct {
	// Limit is the number of consecutive unhealthy check results that
	// trigger a restart; 0 is the unset/default value.
	Limit int `mapstructure:"limit"`

	// Grace is how long to wait after task startup before counting
	// unhealthy checks.
	Grace *time.Duration `mapstructure:"grace"`

	// IgnoreWarnings treats warning check results as healthy.
	IgnoreWarnings bool `mapstructure:"ignore_warnings"`
}

// Canonicalize CheckRestart fields if not nil.
func (c *CheckRestart) Canonicalize() {
	if c == nil {
		return
	}

	// Default grace period is one second.
	if c.Grace == nil {
		c.Grace = helper.TimeToPtr(1 * time.Second)
	}
}
|
||||
|
||||
// Copy returns a copy of CheckRestart or nil if unset.
|
||||
func (c *CheckRestart) Copy() *CheckRestart {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
nc := new(CheckRestart)
|
||||
nc.Limit = c.Limit
|
||||
if c.Grace != nil {
|
||||
g := *c.Grace
|
||||
nc.Grace = &g
|
||||
}
|
||||
nc.IgnoreWarnings = c.IgnoreWarnings
|
||||
return nc
|
||||
}
|
||||
|
||||
// Merge values from other CheckRestart over default values on this
|
||||
// CheckRestart and return merged copy.
|
||||
func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart {
|
||||
if c == nil {
|
||||
// Just return other
|
||||
return o
|
||||
}
|
||||
|
||||
nc := c.Copy()
|
||||
|
||||
if o == nil {
|
||||
// Nothing to merge
|
||||
return nc
|
||||
}
|
||||
|
||||
if nc.Limit == 0 {
|
||||
nc.Limit = o.Limit
|
||||
}
|
||||
|
||||
if nc.Grace == nil {
|
||||
nc.Grace = o.Grace
|
||||
}
|
||||
|
||||
if nc.IgnoreWarnings {
|
||||
nc.IgnoreWarnings = o.IgnoreWarnings
|
||||
}
|
||||
|
||||
return nc
|
||||
}
|
||||
|
||||
// The ServiceCheck data model represents the consul health check that
// Nomad registers for a Task
type ServiceCheck struct {
	Id            string
	Name          string
	Type          string
	Command       string
	Args          []string
	Path          string
	Protocol      string
	PortLabel     string `mapstructure:"port"`
	Interval      time.Duration
	Timeout       time.Duration
	InitialStatus string `mapstructure:"initial_status"`
	TLSSkipVerify bool   `mapstructure:"tls_skip_verify"`
	Header        map[string][]string
	Method        string

	// CheckRestart configures restarting the task when this check fails.
	CheckRestart *CheckRestart `mapstructure:"check_restart"`
}

// The Service model represents a Consul service definition
type Service struct {
	Id          string
	Name        string
	Tags        []string
	PortLabel   string `mapstructure:"port"`
	AddressMode string `mapstructure:"address_mode"`
	Checks      []ServiceCheck

	// CheckRestart is the service-level default merged into each check's
	// CheckRestart during canonicalization.
	CheckRestart *CheckRestart `mapstructure:"check_restart"`
}
|
||||
|
||||
func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
|
||||
if s.Name == "" {
|
||||
s.Name = fmt.Sprintf("%s-%s-%s", *job.Name, *tg.Name, t.Name)
|
||||
}
|
||||
|
||||
// Default to AddressModeAuto
|
||||
if s.AddressMode == "" {
|
||||
s.AddressMode = "auto"
|
||||
}
|
||||
|
||||
s.CheckRestart.Canonicalize()
|
||||
|
||||
// Canonicallize CheckRestart on Checks and merge Service.CheckRestart
|
||||
// into each check.
|
||||
for _, c := range s.Checks {
|
||||
c.CheckRestart.Canonicalize()
|
||||
c.CheckRestart = c.CheckRestart.Merge(s.CheckRestart)
|
||||
}
|
||||
}
|
||||
|
||||
// EphemeralDisk is an ephemeral disk object
type EphemeralDisk struct {
	Sticky  *bool
	Migrate *bool
	SizeMB  *int `mapstructure:"size"`
}

// DefaultEphemeralDisk returns an EphemeralDisk with all defaults applied:
// not sticky, no migration, 300 MB.
func DefaultEphemeralDisk() *EphemeralDisk {
	return &EphemeralDisk{
		Sticky:  helper.BoolToPtr(false),
		Migrate: helper.BoolToPtr(false),
		SizeMB:  helper.IntToPtr(300),
	}
}

// Canonicalize fills in default values for any unset (nil) fields; defaults
// match DefaultEphemeralDisk.
func (e *EphemeralDisk) Canonicalize() {
	if e.Sticky == nil {
		e.Sticky = helper.BoolToPtr(false)
	}
	if e.Migrate == nil {
		e.Migrate = helper.BoolToPtr(false)
	}
	if e.SizeMB == nil {
		e.SizeMB = helper.IntToPtr(300)
	}
}
|
||||
|
||||
// TaskGroup is the unit of scheduling.
type TaskGroup struct {
	Name          *string
	Count         *int
	Constraints   []*Constraint
	Tasks         []*Task
	RestartPolicy *RestartPolicy
	EphemeralDisk *EphemeralDisk
	Update        *UpdateStrategy
	Meta          map[string]string
}

// NewTaskGroup creates a new TaskGroup with the given name and count.
func NewTaskGroup(name string, count int) *TaskGroup {
	return &TaskGroup{
		Name:  helper.StringToPtr(name),
		Count: helper.IntToPtr(count),
	}
}
|
||||
|
||||
// Canonicalize fills in defaults for the task group and everything it
// contains: name/count defaults, per-task canonicalization, the ephemeral
// disk, the update strategy (merged down from the job), and a
// type-dependent restart policy.
func (g *TaskGroup) Canonicalize(job *Job) {
	if g.Name == nil {
		g.Name = helper.StringToPtr("")
	}
	if g.Count == nil {
		g.Count = helper.IntToPtr(1)
	}
	for _, t := range g.Tasks {
		t.Canonicalize(g, job)
	}
	if g.EphemeralDisk == nil {
		g.EphemeralDisk = DefaultEphemeralDisk()
	} else {
		g.EphemeralDisk.Canonicalize()
	}

	// Merge the update policy from the job
	if ju, tu := job.Update != nil, g.Update != nil; ju && tu {
		// Merge the jobs and task groups definition of the update strategy
		jc := job.Update.Copy()
		jc.Merge(g.Update)
		g.Update = jc
	} else if ju && !job.Update.Empty() {
		// Inherit the jobs as long as it is non-empty.
		jc := job.Update.Copy()
		g.Update = jc
	}

	if g.Update != nil {
		g.Update.Canonicalize()
	}

	// Pick the default restart policy by job type: long-running job types
	// get few attempts over a short interval; batch-style jobs get many
	// attempts over a week.
	var defaultRestartPolicy *RestartPolicy
	switch *job.Type {
	case "service", "system":
		defaultRestartPolicy = &RestartPolicy{
			Delay:    helper.TimeToPtr(15 * time.Second),
			Attempts: helper.IntToPtr(2),
			Interval: helper.TimeToPtr(1 * time.Minute),
			Mode:     helper.StringToPtr("delay"),
		}
	default:
		defaultRestartPolicy = &RestartPolicy{
			Delay:    helper.TimeToPtr(15 * time.Second),
			Attempts: helper.IntToPtr(15),
			Interval: helper.TimeToPtr(7 * 24 * time.Hour),
			Mode:     helper.StringToPtr("delay"),
		}
	}

	// User-specified fields override the defaults.
	if g.RestartPolicy != nil {
		defaultRestartPolicy.Merge(g.RestartPolicy)
	}
	g.RestartPolicy = defaultRestartPolicy
}
|
||||
|
||||
// Constrain is used to add a constraint to a task group. Returns the group
// to allow chaining.
func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup {
	g.Constraints = append(g.Constraints, c)
	return g
}

// SetMeta is used to add a meta k/v pair to a task group. Returns the group
// to allow chaining.
func (g *TaskGroup) SetMeta(key, val string) *TaskGroup {
	if g.Meta == nil {
		g.Meta = make(map[string]string)
	}
	g.Meta[key] = val
	return g
}

// AddTask is used to add a new task to a task group. Returns the group to
// allow chaining.
func (g *TaskGroup) AddTask(t *Task) *TaskGroup {
	g.Tasks = append(g.Tasks, t)
	return g
}

// RequireDisk adds a ephemeral disk to the task group. Returns the group to
// allow chaining.
func (g *TaskGroup) RequireDisk(disk *EphemeralDisk) *TaskGroup {
	g.EphemeralDisk = disk
	return g
}
|
||||
|
||||
// LogConfig provides configuration for log rotation
type LogConfig struct {
	MaxFiles      *int `mapstructure:"max_files"`
	MaxFileSizeMB *int `mapstructure:"max_file_size"`
}

// DefaultLogConfig returns the default rotation settings: 10 files of 10 MB
// each.
func DefaultLogConfig() *LogConfig {
	return &LogConfig{
		MaxFiles:      helper.IntToPtr(10),
		MaxFileSizeMB: helper.IntToPtr(10),
	}
}

// Canonicalize fills in default values for any unset (nil) fields; defaults
// match DefaultLogConfig.
func (l *LogConfig) Canonicalize() {
	if l.MaxFiles == nil {
		l.MaxFiles = helper.IntToPtr(10)
	}
	if l.MaxFileSizeMB == nil {
		l.MaxFileSizeMB = helper.IntToPtr(10)
	}
}
|
||||
|
||||
// DispatchPayloadConfig configures how a task gets its input from a job dispatch
type DispatchPayloadConfig struct {
	// File is the path where the dispatch payload is written.
	File string
}
|
||||
|
||||
// Task is a single process in a task group.
type Task struct {
	Name            string
	Driver          string
	User            string
	Config          map[string]interface{}
	Constraints     []*Constraint
	Env             map[string]string
	Services        []*Service
	Resources       *Resources
	Meta            map[string]string
	KillTimeout     *time.Duration `mapstructure:"kill_timeout"`
	LogConfig       *LogConfig     `mapstructure:"logs"`
	Artifacts       []*TaskArtifact
	Vault           *Vault
	Templates       []*Template
	DispatchPayload *DispatchPayloadConfig
	Leader          bool
	ShutdownDelay   time.Duration `mapstructure:"shutdown_delay"`
}
|
||||
|
||||
// Canonicalize fills in defaults for the task: resources are merged over
// the scheduler minimums, the kill timeout and log config get defaults, and
// every artifact, vault block, template and service is canonicalized in
// turn.
func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
	// Start from the minimum resources and let user-specified values
	// override them.
	min := MinResources()
	min.Merge(t.Resources)
	min.Canonicalize()
	t.Resources = min

	if t.KillTimeout == nil {
		t.KillTimeout = helper.TimeToPtr(5 * time.Second)
	}
	if t.LogConfig == nil {
		t.LogConfig = DefaultLogConfig()
	} else {
		t.LogConfig.Canonicalize()
	}
	for _, artifact := range t.Artifacts {
		artifact.Canonicalize()
	}
	if t.Vault != nil {
		t.Vault.Canonicalize()
	}
	for _, tmpl := range t.Templates {
		tmpl.Canonicalize()
	}
	for _, s := range t.Services {
		s.Canonicalize(t, tg, job)
	}
}
|
||||
|
||||
// TaskArtifact is used to download artifacts before running a task.
type TaskArtifact struct {
	GetterSource  *string           `mapstructure:"source"`
	GetterOptions map[string]string `mapstructure:"options"`
	GetterMode    *string           `mapstructure:"mode"`
	RelativeDest  *string           `mapstructure:"destination"`
}

// Canonicalize fills in artifact defaults: mode "any", an empty source, and
// a destination derived from the mode.
func (a *TaskArtifact) Canonicalize() {
	if a.GetterMode == nil {
		a.GetterMode = helper.StringToPtr("any")
	}
	if a.GetterSource == nil {
		// Shouldn't be possible, but we don't want to panic
		a.GetterSource = helper.StringToPtr("")
	}
	if a.RelativeDest == nil {
		switch *a.GetterMode {
		case "file":
			// File mode should default to local/filename
			dest := *a.GetterSource
			dest = path.Base(dest)
			dest = filepath.Join("local", dest)
			a.RelativeDest = &dest
		default:
			// Default to a directory
			a.RelativeDest = helper.StringToPtr("local/")
		}
	}
}
|
||||
|
||||
// Template configures a consul-template rendered file for a task.
type Template struct {
	SourcePath   *string        `mapstructure:"source"`
	DestPath     *string        `mapstructure:"destination"`
	EmbeddedTmpl *string        `mapstructure:"data"`
	ChangeMode   *string        `mapstructure:"change_mode"`
	ChangeSignal *string        `mapstructure:"change_signal"`
	Splay        *time.Duration `mapstructure:"splay"`
	Perms        *string        `mapstructure:"perms"`
	LeftDelim    *string        `mapstructure:"left_delimiter"`
	RightDelim   *string        `mapstructure:"right_delimiter"`
	Envvars      *bool          `mapstructure:"env"`
	VaultGrace   *time.Duration `mapstructure:"vault_grace"`
}

// Canonicalize fills in defaults for every unset template field. Change
// mode defaults to "restart"; the change signal defaults to SIGHUP only
// when the mode is "signal", and any user-provided signal is upper-cased.
func (tmpl *Template) Canonicalize() {
	if tmpl.SourcePath == nil {
		tmpl.SourcePath = helper.StringToPtr("")
	}
	if tmpl.DestPath == nil {
		tmpl.DestPath = helper.StringToPtr("")
	}
	if tmpl.EmbeddedTmpl == nil {
		tmpl.EmbeddedTmpl = helper.StringToPtr("")
	}
	if tmpl.ChangeMode == nil {
		tmpl.ChangeMode = helper.StringToPtr("restart")
	}
	if tmpl.ChangeSignal == nil {
		if *tmpl.ChangeMode == "signal" {
			tmpl.ChangeSignal = helper.StringToPtr("SIGHUP")
		} else {
			tmpl.ChangeSignal = helper.StringToPtr("")
		}
	} else {
		// Normalize user-supplied signal names to upper case.
		sig := *tmpl.ChangeSignal
		tmpl.ChangeSignal = helper.StringToPtr(strings.ToUpper(sig))
	}
	if tmpl.Splay == nil {
		tmpl.Splay = helper.TimeToPtr(5 * time.Second)
	}
	if tmpl.Perms == nil {
		tmpl.Perms = helper.StringToPtr("0644")
	}
	if tmpl.LeftDelim == nil {
		tmpl.LeftDelim = helper.StringToPtr("{{")
	}
	if tmpl.RightDelim == nil {
		tmpl.RightDelim = helper.StringToPtr("}}")
	}
	if tmpl.Envvars == nil {
		tmpl.Envvars = helper.BoolToPtr(false)
	}
	if tmpl.VaultGrace == nil {
		tmpl.VaultGrace = helper.TimeToPtr(5 * time.Minute)
	}
}
|
||||
|
||||
// Vault configures the Vault integration for a task.
type Vault struct {
	Policies     []string
	Env          *bool
	ChangeMode   *string `mapstructure:"change_mode"`
	ChangeSignal *string `mapstructure:"change_signal"`
}

// Canonicalize fills in Vault defaults: token exposed via environment,
// change mode "restart", change signal SIGHUP.
func (v *Vault) Canonicalize() {
	if v.Env == nil {
		v.Env = helper.BoolToPtr(true)
	}
	if v.ChangeMode == nil {
		v.ChangeMode = helper.StringToPtr("restart")
	}
	if v.ChangeSignal == nil {
		v.ChangeSignal = helper.StringToPtr("SIGHUP")
	}
}
|
||||
|
||||
// NewTask creates and initializes a new Task.
func NewTask(name, driver string) *Task {
	return &Task{
		Name:   name,
		Driver: driver,
	}
}
|
||||
|
||||
// SetConfig is used to configure a single k/v pair on
// the task. Returns the task to allow chaining.
func (t *Task) SetConfig(key string, val interface{}) *Task {
	if t.Config == nil {
		t.Config = make(map[string]interface{})
	}
	t.Config[key] = val
	return t
}

// SetMeta is used to add metadata k/v pairs to the task. Returns the task
// to allow chaining.
func (t *Task) SetMeta(key, val string) *Task {
	if t.Meta == nil {
		t.Meta = make(map[string]string)
	}
	t.Meta[key] = val
	return t
}

// Require is used to add resource requirements to a task. Returns the task
// to allow chaining.
func (t *Task) Require(r *Resources) *Task {
	t.Resources = r
	return t
}

// Constrain adds a new constraint to a single task. Returns the task to
// allow chaining.
func (t *Task) Constrain(c *Constraint) *Task {
	t.Constraints = append(t.Constraints, c)
	return t
}

// SetLogConfig sets a log config to a task. Returns the task to allow
// chaining.
func (t *Task) SetLogConfig(l *LogConfig) *Task {
	t.LogConfig = l
	return t
}
|
||||
|
||||
// TaskState tracks the current state of a task and events that caused state
// transitions.
type TaskState struct {
	State       string
	Failed      bool
	Restarts    uint64
	LastRestart time.Time
	StartedAt   time.Time
	FinishedAt  time.Time
	Events      []*TaskEvent
}
|
||||
|
||||
// Task event types emitted in TaskEvent.Type.
const (
	TaskSetup                  = "Task Setup"
	TaskSetupFailure           = "Setup Failure"
	TaskDriverFailure          = "Driver Failure"
	TaskDriverMessage          = "Driver"
	TaskReceived               = "Received"
	TaskFailedValidation       = "Failed Validation"
	TaskStarted                = "Started"
	TaskTerminated             = "Terminated"
	TaskKilling                = "Killing"
	TaskKilled                 = "Killed"
	TaskRestarting             = "Restarting"
	TaskNotRestarting          = "Not Restarting"
	TaskDownloadingArtifacts   = "Downloading Artifacts"
	TaskArtifactDownloadFailed = "Failed Artifact Download"
	TaskSiblingFailed          = "Sibling Task Failed"
	TaskSignaling              = "Signaling"
	TaskRestartSignal          = "Restart Signaled"
	TaskLeaderDead             = "Leader Task Dead"
	TaskBuildingTaskDir        = "Building Task Directory"
	TaskGenericMessage         = "Generic"
)
|
||||
|
||||
// TaskEvent is an event that effects the state of a task and contains meta-data
// appropriate to the events type. Only the fields relevant to the event's
// Type are populated.
type TaskEvent struct {
	Type             string
	Time             int64
	FailsTask        bool
	RestartReason    string
	SetupError       string
	DriverError      string
	DriverMessage    string
	ExitCode         int
	Signal           int
	Message          string
	KillReason       string
	KillTimeout      time.Duration
	KillError        string
	StartDelay       int64
	DownloadError    string
	ValidationError  string
	DiskLimit        int64
	DiskSize         int64
	FailedSibling    string
	VaultError       string
	TaskSignalReason string
	TaskSignal       string
	GenericSource    string
}
|
|
@ -0,0 +1,272 @@
|
|||
package helper
|
||||
|
||||
import (
|
||||
"crypto/sha512"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
// validUUID matches the canonical 8-4-4-4-12 hex UUID layout,
// case-insensitively.
var validUUID = regexp.MustCompile(`(?i)^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$`)

// IsUUID returns true if the given string is a valid UUID.
func IsUUID(str string) bool {
	// Cheap length check first so obviously-wrong strings skip the regexp.
	const uuidLen = 36
	return len(str) == uuidLen && validUUID.MatchString(str)
}
|
||||
|
||||
// HashUUID takes an input UUID and returns a hashed version of the UUID to
// ensure it is well distributed. Returns ("", false) if the input is not a
// valid UUID. The output is formatted as a UUID built from the first 16
// bytes of the SHA-512 digest.
func HashUUID(input string) (output string, hashed bool) {
	if !IsUUID(input) {
		return "", false
	}

	// Hash the input
	buf := sha512.Sum512([]byte(input))
	output = fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
		buf[0:4],
		buf[4:6],
		buf[6:8],
		buf[8:10],
		buf[10:16])

	return output, true
}
|
||||
|
||||
// BoolToPtr returns the pointer to a boolean.
func BoolToPtr(b bool) *bool {
	return &b
}
|
||||
|
||||
// IntToPtr returns a pointer holding the given int value.
func IntToPtr(i int) *int {
	v := i
	return &v
}
|
||||
|
||||
// Int64ToPtr returns the pointer to an int64.
func Int64ToPtr(i int64) *int64 {
	return &i
}
|
||||
|
||||
// Uint64ToPtr returns the pointer to a uint64.
func Uint64ToPtr(u uint64) *uint64 {
	return &u
}
|
||||
|
||||
// StringToPtr returns a pointer holding the given string value.
func StringToPtr(str string) *string {
	s := str
	return &s
}
|
||||
|
||||
// TimeToPtr returns the pointer to a time.Duration.
func TimeToPtr(t time.Duration) *time.Duration {
	return &t
}
|
||||
|
||||
// IntMin returns the smaller of two ints.
func IntMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||
|
||||
// IntMax returns the larger of two ints.
func IntMax(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
||||
|
||||
// Uint64Max returns the larger of two uint64 values.
func Uint64Max(a, b uint64) uint64 {
	if b > a {
		return b
	}
	return a
}
|
||||
|
||||
// MapStringStringSliceValueSet returns the set of values in a
// map[string][]string, deduplicated across all keys. Order of the result
// is unspecified (map iteration order).
func MapStringStringSliceValueSet(m map[string][]string) []string {
	unique := map[string]struct{}{}
	for _, values := range m {
		for _, value := range values {
			unique[value] = struct{}{}
		}
	}

	out := make([]string, 0, len(unique))
	for value := range unique {
		out = append(out, value)
	}
	return out
}
|
||||
|
||||
// SliceStringToSet builds a set (map with empty-struct values) from the
// given slice; duplicates collapse to a single entry.
func SliceStringToSet(s []string) map[string]struct{} {
	// Sized heuristically at half the slice length, matching the
	// original allocation behavior.
	set := make(map[string]struct{}, (len(s)+1)/2)
	for _, key := range s {
		set[key] = struct{}{}
	}
	return set
}
|
||||
|
||||
// SliceStringIsSubset returns whether the smaller set of strings is a subset
// of the larger. If the smaller slice is not a subset, the offending elements
// are returned.
func SliceStringIsSubset(larger, smaller []string) (bool, []string) {
	members := make(map[string]struct{}, len(larger))
	for _, item := range larger {
		members[item] = struct{}{}
	}

	var offending []string
	for _, item := range smaller {
		if _, ok := members[item]; !ok {
			offending = append(offending, item)
		}
	}

	return len(offending) == 0, offending
}
|
||||
|
||||
// SliceSetDisjoint reports whether the two slices share no elements. When
// they overlap it returns false plus the (deduplicated) shared elements.
func SliceSetDisjoint(first, second []string) (bool, []string) {
	inFirst := make(map[string]struct{}, len(first))
	for _, item := range first {
		inFirst[item] = struct{}{}
	}

	shared := make(map[string]struct{})
	for _, item := range second {
		if _, ok := inFirst[item]; ok {
			shared[item] = struct{}{}
		}
	}

	if len(shared) == 0 {
		return true, nil
	}

	out := make([]string, 0, len(shared))
	for item := range shared {
		out = append(out, item)
	}
	return false, out
}
|
||||
|
||||
// Helpers for copying generic structures.

// CopyMapStringString returns an independent copy of m, or nil when m is
// empty.
func CopyMapStringString(m map[string]string) map[string]string {
	if len(m) == 0 {
		return nil
	}

	out := make(map[string]string, len(m))
	for key, value := range m {
		out[key] = value
	}
	return out
}
|
||||
|
||||
// CopyMapStringStruct returns an independent copy of the set m, or nil when
// m is empty.
func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} {
	if len(m) == 0 {
		return nil
	}

	out := make(map[string]struct{}, len(m))
	for key := range m {
		out[key] = struct{}{}
	}
	return out
}
|
||||
|
||||
// CopyMapStringInt returns an independent copy of m, or nil when m is empty.
func CopyMapStringInt(m map[string]int) map[string]int {
	if len(m) == 0 {
		return nil
	}

	out := make(map[string]int, len(m))
	for key, value := range m {
		out[key] = value
	}
	return out
}
|
||||
|
||||
// CopyMapStringFloat64 returns an independent copy of m, or nil when m is
// empty.
func CopyMapStringFloat64(m map[string]float64) map[string]float64 {
	if len(m) == 0 {
		return nil
	}

	out := make(map[string]float64, len(m))
	for key, value := range m {
		out[key] = value
	}
	return out
}
|
||||
|
||||
// CopyMapStringSliceString copies a map of strings to string slices such as
// http.Header, deep-copying each value slice. Returns nil when m is empty.
func CopyMapStringSliceString(m map[string][]string) map[string][]string {
	if len(m) == 0 {
		return nil
	}

	out := make(map[string][]string, len(m))
	for key, values := range m {
		out[key] = CopySliceString(values)
	}
	return out
}

// CopySliceString returns an independent copy of s, or nil when s is empty.
func CopySliceString(s []string) []string {
	if len(s) == 0 {
		return nil
	}

	out := make([]string, len(s))
	copy(out, s)
	return out
}
|
||||
|
||||
// CopySliceInt returns an independent copy of s, or nil when s is empty.
func CopySliceInt(s []int) []int {
	if len(s) == 0 {
		return nil
	}

	out := make([]int, len(s))
	copy(out, s)
	return out
}
|
||||
|
||||
// CleanEnvVar replaces all occurrences of illegal characters in an
// environment variable with the specified byte. Legal characters are
// letters, underscore, and (beyond the first position) ASCII digits.
func CleanEnvVar(s string, r byte) string {
	out := []byte(s)
	for i, c := range out {
		legal := c == '_' ||
			(c >= 'a' && c <= 'z') ||
			(c >= 'A' && c <= 'Z') ||
			(i > 0 && c >= '0' && c <= '9')
		if !legal {
			out[i] = r
		}
	}
	return string(out)
}
|
|
@ -0,0 +1,21 @@
|
|||
package uuid
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Generate is used to generate a random UUID, rendered in the canonical
// 8-4-4-4-12 layout. It panics if the system's secure random source fails.
func Generate() string {
	var buf [16]byte
	if _, err := crand.Read(buf[:]); err != nil {
		panic(fmt.Errorf("failed to read random bytes: %v", err))
	}

	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
		buf[0:4],
		buf[4:6],
		buf[6:8],
		buf[8:10],
		buf[10:16])
}
|
|
@ -960,6 +960,12 @@
|
|||
"revision": "317e0006254c44a0ac427cc52a0e083ff0b9622f",
|
||||
"revisionTime": "2017-09-15T02:47:31Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "5DBIm/bJOKLR3CbQH6wIELQDLlQ=",
|
||||
"path": "github.com/gorhill/cronexpr",
|
||||
"revision": "d520615e531a6bf3fb69406b9eba718261285ec8",
|
||||
"revisionTime": "2016-12-05T14:13:22Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "O0r0hj4YL+jSRNjnshkeH4GY+4s=",
|
||||
"path": "github.com/hailocab/go-hostpool",
|
||||
|
@ -1116,6 +1122,30 @@
|
|||
"revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8",
|
||||
"revisionTime": "2017-10-17T18:19:29Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "euodRTxiXS6udU7N9xRCQL6YDCg=",
|
||||
"path": "github.com/hashicorp/nomad/api",
|
||||
"revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1",
|
||||
"revisionTime": "2017-09-29T21:44:31Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "Is7OvHxCEEkKpdQnW8olCxL0444=",
|
||||
"path": "github.com/hashicorp/nomad/api/contexts",
|
||||
"revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1",
|
||||
"revisionTime": "2017-09-29T21:44:31Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "DE+4s/X+r987Ia93s9633mGekzg=",
|
||||
"path": "github.com/hashicorp/nomad/helper",
|
||||
"revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1",
|
||||
"revisionTime": "2017-09-29T21:44:31Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "mSCo/iZUEOSpeX5NsGZZzFMJqto=",
|
||||
"path": "github.com/hashicorp/nomad/helper/uuid",
|
||||
"revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1",
|
||||
"revisionTime": "2017-09-29T21:44:31Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "mS15CkImPzXYsgNwl3Mt9Gh3Vb0=",
|
||||
"path": "github.com/hashicorp/serf/coordinate",
|
||||
|
|
|
@ -0,0 +1,328 @@
|
|||
---
|
||||
layout: "api"
|
||||
page_title: "Nomad Secret Backend - HTTP API"
|
||||
sidebar_current: "docs-http-secret-nomad"
|
||||
description: |-
|
||||
This is the API documentation for the Vault Nomad secret backend.
|
||||
---
|
||||
|
||||
# Nomad Secret Backend HTTP API
|
||||
|
||||
This is the API documentation for the Vault Nomad secret backend. For general
|
||||
information about the usage and operation of the Nomad backend, please see the
|
||||
[Vault Nomad backend documentation](/docs/secrets/nomad/index.html).
|
||||
|
||||
This documentation assumes the Nomad backend is mounted at the `/nomad` path
|
||||
in Vault. Since it is possible to mount secret backends at any location, please
|
||||
update your API calls accordingly.
|
||||
|
||||
## Configure Access
|
||||
|
||||
This endpoint configures the access information for Nomad. This access
|
||||
information is used so that Vault can communicate with Nomad and generate
|
||||
Nomad tokens.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| :------- | :--------------------------- | :--------------------- |
|
||||
| `POST` | `/nomad/config/access` | `204 (empty body)` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `address` `(string: "")` – Specifies the address of the Nomad
|
||||
instance, provided as `"protocol://host:port"` like `"http://127.0.0.1:4646"`.
|
||||
This value can also be provided on individual calls with the NOMAD_ADDR
|
||||
environment variable.
|
||||
|
||||
- `token` `(string: "")` – Specifies the Nomad Management token to use.
|
||||
This value can also be provided on individual calls with the NOMAD_TOKEN
|
||||
environment variable.
|
||||
|
||||
### Sample Payload
|
||||
|
||||
```json
|
||||
{
|
||||
"address": "http://127.0.0.1:4646",
|
||||
"token": "adha..."
|
||||
}
|
||||
```
|
||||
|
||||
### Sample Request
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--request POST \
|
||||
--header "X-Vault-Token: ..." \
|
||||
--data @payload.json \
|
||||
https://vault.rocks/v1/nomad/config/access
|
||||
```
|
||||
|
||||
## Read Access Configuration
|
||||
|
||||
This endpoint queries for information about the Nomad connection.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| :------- | :--------------------------- | :--------------------- |
|
||||
| `GET` | `/nomad/config/access` | `200 application/json` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--header "X-Vault-Token: ..." \
|
||||
https://vault.rocks/v1/nomad/config/access
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
"data": {
|
||||
"address": "http://localhost:4646/"
|
||||
}
|
||||
```
|
||||
|
||||
## Configure Lease
|
||||
|
||||
This endpoint configures the lease settings for generated tokens.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| :------- | :--------------------------- | :--------------------- |
|
||||
| `POST` | `/nomad/config/lease` | `204 (empty body)` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `ttl` `(string: "")` – Specifies the ttl for the lease. This is provided
|
||||
as a string duration with a time suffix like `"30s"` or `"1h"` or as total
|
||||
seconds.
|
||||
|
||||
- `max_ttl` `(string: "")` – Specifies the max ttl for the lease. This is
|
||||
provided as a string duration with a time suffix like `"30s"` or `"1h"` or as
|
||||
total seconds.
|
||||
|
||||
### Sample Payload
|
||||
|
||||
```json
|
||||
{
|
||||
"ttl": 1800,
|
||||
"max_ttl": 3600
|
||||
}
|
||||
```
|
||||
|
||||
### Sample Request
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--header "X-Vault-Token: ..." \
|
||||
--request POST \
|
||||
--data @payload.json \
|
||||
https://vault.rocks/v1/nomad/config/lease
|
||||
```
|
||||
|
||||
## Read Lease Configuration
|
||||
|
||||
This endpoint queries for information about the Lease TTL for the specified mount.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| :------- | :--------------------------- | :--------------------- |
|
||||
| `GET` | `/nomad/config/lease` | `200 application/json` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--header "X-Vault-Token: ..." \
|
||||
https://vault.rocks/v1/nomad/config/lease
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
"data": {
|
||||
"max_ttl": 86400,
|
||||
"ttl": 86400
|
||||
}
|
||||
```
|
||||
|
||||
## Delete Lease Configuration
|
||||
|
||||
This endpoint deletes the lease configuration.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| :------- | :--------------------------- | :--------------------- |
|
||||
| `DELETE` | `/nomad/config/lease` | `204 (empty body)` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--header "X-Vault-Token: ..." \
|
||||
--request DELETE \
|
||||
https://vault.rocks/v1/nomad/config/lease
|
||||
```
|
||||
|
||||
## Create/Update Role
|
||||
|
||||
This endpoint creates or updates the Nomad role definition in Vault. If the role does not exist, it will be created. If the role already exists, it will receive
|
||||
updated attributes.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| :------- | :--------------------------- | :--------------------- |
|
||||
| `POST` | `/nomad/role/:name` | `204 (empty body)` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `name` `(string: <required>)` – Specifies the name of an existing role against
|
||||
which to create this Nomad token. This is part of the request URL.
|
||||
|
||||
- `lease` `(string: "")` – Specifies the lease for this role. This is provided
|
||||
as a string duration with a time suffix like `"30s"` or `"1h"` or as total
|
||||
seconds. If not provided, the default Vault lease is used.
|
||||
|
||||
- `policies` `(string: "")` – Comma separated list of Nomad policies the token is going to be created against. These need to be created beforehand in Nomad.
|
||||
|
||||
- `global` `(bool: "false")` – Specifies if the token should be global, as defined in the [Nomad Documentation](https://www.nomadproject.io/guides/acl.html#acl-tokens).
|
||||
|
||||
- `type` `(string: "client")` - Specifies the type of token to create when
|
||||
using this role. Valid values are `"client"` or `"management"`.
|
||||
|
||||
### Sample Payload
|
||||
|
||||
To create a client token with a custom policy:
|
||||
|
||||
```json
|
||||
{
|
||||
"policies": "readonly"
|
||||
}
|
||||
```
|
||||
|
||||
### Sample Request
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--request POST \
|
||||
--header "X-Vault-Token: ..." \
|
||||
--data @payload.json \
|
||||
https://vault.rocks/v1/nomad/role/monitoring
|
||||
```
|
||||
|
||||
## Read Role
|
||||
|
||||
This endpoint queries for information about a Nomad role with the given name.
|
||||
If no role exists with that name, a 404 is returned.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| :------- | :--------------------------- | :--------------------- |
|
||||
| `GET` | `/nomad/role/:name` | `200 application/json` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `name` `(string: <required>)` – Specifies the name of the role to query. This
|
||||
is part of the request URL.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--header "X-Vault-Token: ..." \
|
||||
https://vault.rocks/v1/nomad/role/monitoring
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"lease": "0s",
|
||||
"policies": [
|
||||
"example"
|
||||
],
|
||||
"token_type": "client"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## List Roles
|
||||
|
||||
This endpoint lists all existing roles in the backend.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| :------- | :--------------------------- | :--------------------- |
|
||||
| `LIST` | `/nomad/role` | `200 application/json` |
|
||||
| `GET` | `/nomad/role?list=true` | `200 application/json` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--header "X-Vault-Token: ..." \
|
||||
--request LIST \
|
||||
https://vault.rocks/v1/nomad/role
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"keys": [
|
||||
"example"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Delete Role
|
||||
|
||||
This endpoint deletes a Nomad role with the given name. Even if the role does
|
||||
not exist, this endpoint will still return a successful response.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| :------- | :--------------------------- | :--------------------- |
|
||||
| `DELETE` | `/nomad/role/:name` | `204 (empty body)` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `name` `(string: <required>)` – Specifies the name of the role to delete. This
|
||||
is part of the request URL.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--request DELETE \
|
||||
--header "X-Vault-Token: ..." \
|
||||
https://vault.rocks/v1/nomad/role/example-role
|
||||
```
|
||||
|
||||
## Generate Credential
|
||||
|
||||
This endpoint generates a dynamic Nomad token based on the given role
|
||||
definition.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| :------- | :--------------------------- | :--------------------- |
|
||||
| `GET` | `/nomad/creds/:name` | `200 application/json` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `name` `(string: <required>)` – Specifies the name of an existing role against
|
||||
which to create this Nomad token. This is part of the request URL.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--header "X-Vault-Token: ..." \
|
||||
https://vault.rocks/v1/nomad/creds/example
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"accessor_id": "c834ba40-8d84-b0c1-c084-3a31d3383c03",
|
||||
"secret_id": "65af6f07-7f57-bb24-cdae-a27f86a894ce"
|
||||
}
|
||||
}
|
||||
```
|
|
@ -0,0 +1,118 @@
|
|||
---
|
||||
layout: "docs"
|
||||
page_title: "Nomad Secret Backend"
|
||||
sidebar_current: "docs-secrets-nomad"
|
||||
description: |-
|
||||
The Nomad secret backend for Vault generates tokens for Nomad dynamically.
|
||||
---
|
||||
|
||||
# Nomad Secret Backend
|
||||
|
||||
Name: `Nomad`
|
||||
|
||||
The Nomad secret backend for Vault generates
|
||||
[Nomad](https://www.nomadproject.io)
|
||||
API tokens dynamically based on pre-existing Nomad ACL policies.
|
||||
|
||||
This page will show a quick start for this backend. For detailed documentation
|
||||
on every path, use `vault path-help` after mounting the backend.
|
||||
|
||||
~> **Version information** ACLs are only available on Nomad 0.7.0 and above.
|
||||
|
||||
## Quick Start
|
||||
|
||||
The first step to using the vault backend is to mount it.
|
||||
Unlike the `generic` backend, the `nomad` backend is not mounted by default.
|
||||
|
||||
```
|
||||
$ vault mount nomad
|
||||
Successfully mounted 'nomad' at 'nomad'!
|
||||
```
|
||||
|
||||
Optionally, we can configure the lease settings for credentials generated
|
||||
by Vault. This is done by writing to the `config/lease` key:
|
||||
|
||||
```
|
||||
$ vault write nomad/config/lease ttl=3600 max_ttl=86400
|
||||
Success! Data written to: nomad/config/lease
|
||||
```
|
||||
|
||||
For a quick start, you can use the SecretID token provided by the [Nomad ACL bootstrap
|
||||
process](https://www.nomadproject.io/guides/acl.html#generate-the-initial-token), although this
|
||||
is discouraged for production deployments.
|
||||
|
||||
```
|
||||
$ nomad acl bootstrap
|
||||
Accessor ID = 95a0ee55-eaa6-2c0a-a900-ed94c156754e
|
||||
Secret ID = c25b6ca0-ea4e-000f-807a-fd03fcab6e3c
|
||||
Name = Bootstrap Token
|
||||
Type = management
|
||||
Global = true
|
||||
Policies = n/a
|
||||
Create Time = 2017-09-20 19:40:36.527512364 +0000 UTC
|
||||
Create Index = 7
|
||||
Modify Index = 7
|
||||
```
|
||||
The suggested pattern is to generate a token specifically for Vault, following the
|
||||
[Nomad ACL guide](https://www.nomadproject.io/guides/acl.html)
|
||||
|
||||
Next, we must configure Vault to know how to contact Nomad.
|
||||
This is done by writing the access information:
|
||||
|
||||
```
|
||||
$ vault write nomad/config/access \
|
||||
address=http://127.0.0.1:4646 \
|
||||
token=adf4238a-882b-9ddc-4a9d-5b6758e4159e
|
||||
Success! Data written to: nomad/config/access
|
||||
```
|
||||
|
||||
In this case, we've configured Vault to connect to Nomad
|
||||
on the default port with the loopback address. We've also provided
|
||||
an ACL token to use with the `token` parameter. Vault must have a management
|
||||
type token so that it can create and revoke ACL tokens.
|
||||
|
||||
The next step is to configure a role. A role is a logical name that maps
|
||||
to a set of policy names used to generate those credentials. For example, let's create
a "monitoring" role that maps to a "readonly" policy:
|
||||
|
||||
```
|
||||
$ vault write nomad/role/monitoring policies=readonly
|
||||
Success! Data written to: nomad/role/monitoring
|
||||
```
|
||||
|
||||
The backend expects either a single or a comma separated list of policy names.
|
||||
|
||||
To generate a new Nomad ACL token, we simply read from that role:
|
||||
|
||||
```
|
||||
$ vault read nomad/creds/monitoring
|
||||
Key Value
|
||||
--- -----
|
||||
lease_id nomad/creds/monitoring/78ec3ef3-c806-1022-4aa8-1dbae39c760c
|
||||
lease_duration 768h0m0s
|
||||
lease_renewable true
|
||||
accessor_id a715994d-f5fd-1194-73df-ae9dad616307
|
||||
secret_id b31fb56c-0936-5428-8c5f-ed010431aba9
|
||||
```
|
||||
|
||||
Here we can see that Vault has generated a new Nomad ACL token for us.
|
||||
We can test this token out, by reading it in Nomad (by its accessor):
|
||||
|
||||
```
|
||||
$ nomad acl token info a715994d-f5fd-1194-73df-ae9dad616307
|
||||
Accessor ID = a715994d-f5fd-1194-73df-ae9dad616307
|
||||
Secret ID = b31fb56c-0936-5428-8c5f-ed010431aba9
|
||||
Name = Vault example root 1505945527022465593
|
||||
Type = client
|
||||
Global = false
|
||||
Policies = [readonly]
|
||||
Create Time = 2017-09-20 22:12:07.023455379 +0000 UTC
|
||||
Create Index = 138
|
||||
Modify Index = 138
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
The Nomad secret backend has a full HTTP API. Please see the
|
||||
[Nomad secret backend API](/api/secret/nomad/index.html) for more
|
||||
details.
|
|
@ -76,6 +76,9 @@
|
|||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li<%= sidebar_current("docs-http-secret-nomad") %>>
|
||||
<a href="/api/secret/nomad/index.html">Nomad</a>
|
||||
</li>
|
||||
<li<%= sidebar_current("docs-http-secret-pki") %>>
|
||||
<a href="/api/secret/pki/index.html">PKI</a>
|
||||
</li>
|
||||
|
|
|
@ -238,6 +238,10 @@
|
|||
<a href="/docs/secrets/identity/index.html">Identity</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-secrets-nomad") %>>
|
||||
<a href="/docs/secrets/nomad/index.html">Nomad</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-secrets-pki") %>>
|
||||
<a href="/docs/secrets/pki/index.html">PKI (Certificates)</a>
|
||||
</li>
|
||||
|
|
Loading…
Reference in New Issue