Merge pull request #12045 from hashicorp/merge-release-1.2.6-branch

Merge release 1.2.6 branch
Commit 77c718d227 by Luiz Aoqui, 2022-02-10 15:12:40 -05:00 (committed by GitHub)
23 changed files with 785 additions and 145 deletions

.changelog/12036.txt (new file, +3 lines)

@ -0,0 +1,3 @@
```release-note:security
Fix race condition in use of go-getter that could cause a client agent to download the wrong artifact into the wrong destination. [CVE-2022-24686](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24686)
```

.changelog/12037.txt (new file, +3 lines)

@ -0,0 +1,3 @@
```release-note:security
Resolve symlinks to prevent unauthorized access to files outside the allocation directory. [CVE-2022-24683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24683)
```

.changelog/12038.txt (new file, +3 lines)

@ -0,0 +1,3 @@
```release-note:security
Add ACL requirement and HCL validation to the job parse API endpoint to prevent excessive CPU usage. [CVE-2022-24685](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24685)
```

.changelog/12039.txt (new file, +3 lines)

@ -0,0 +1,3 @@
```release-note:security
Prevent panic in spread iterator during allocation stop. [CVE-2022-24684](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24684)
```

@ -1,3 +1,16 @@
## 1.2.6 (February 9, 2022)
__BACKWARDS INCOMPATIBILITIES:__
* ACL authentication is now required for the Nomad API job parse endpoint to address a potential security vulnerability
SECURITY:
* Add ACL requirement and HCL validation to the job parse API endpoint to prevent excessive CPU usage. [CVE-2022-24685](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24685) [[GH-12038](https://github.com/hashicorp/nomad/issues/12038)]
* Fix race condition in use of go-getter that could cause a client agent to download the wrong artifact into the wrong destination. [CVE-2022-24686](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24686) [[GH-12036](https://github.com/hashicorp/nomad/issues/12036)]
* Prevent panic in spread iterator during allocation stop. [CVE-2022-24684](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24684) [[GH-12039](https://github.com/hashicorp/nomad/issues/12039)]
* Resolve symlinks to prevent unauthorized access to files outside the allocation directory. [CVE-2022-24683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24683) [[GH-12037](https://github.com/hashicorp/nomad/issues/12037)]
## 1.2.5 (February 1, 2022)
BUG FIXES:
@ -160,6 +173,19 @@ BUG FIXES:
* server: Fixed a panic on arm64 platform when dispatching a job with a payload [[GH-11396](https://github.com/hashicorp/nomad/issues/11396)]
* server: Fixed a panic that may occur when preempting multiple allocations on the same node [[GH-11346](https://github.com/hashicorp/nomad/issues/11346)]
## 1.1.12 (February 9, 2022)
__BACKWARDS INCOMPATIBILITIES:__
* ACL authentication is now required for the Nomad API job parse endpoint to address a potential security vulnerability
SECURITY:
* Add ACL requirement and HCL validation to the job parse API endpoint to prevent excessive CPU usage. [CVE-2022-24685](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24685) [[GH-12038](https://github.com/hashicorp/nomad/issues/12038)]
* Fix race condition in use of go-getter that could cause a client agent to download the wrong artifact into the wrong destination. [CVE-2022-24686](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24686) [[GH-12036](https://github.com/hashicorp/nomad/issues/12036)]
* Prevent panic in spread iterator during allocation stop. [CVE-2022-24684](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24684) [[GH-12039](https://github.com/hashicorp/nomad/issues/12039)]
* Resolve symlinks to prevent unauthorized access to files outside the allocation directory. [CVE-2022-24683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24683) [[GH-12037](https://github.com/hashicorp/nomad/issues/12037)]
## 1.1.11 (February 1, 2022)
BUG FIXES:
@ -452,6 +478,19 @@ BUG FIXES:
* server: Fixed a panic that may arise on submission of jobs containing invalid service checks [[GH-10154](https://github.com/hashicorp/nomad/issues/10154)]
* ui: Fixed the rendering of interstitial components shown after processing a dynamic application sizing recommendation. [[GH-10094](https://github.com/hashicorp/nomad/pull/10094)]
## 1.0.18 (February 9, 2022)
__BACKWARDS INCOMPATIBILITIES:__
* ACL authentication is now required for the Nomad API job parse endpoint to address a potential security vulnerability
SECURITY:
* Add ACL requirement and HCL validation to the job parse API endpoint to prevent excessive CPU usage. [CVE-2022-24685](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24685) [[GH-12038](https://github.com/hashicorp/nomad/issues/12038)]
* Fix race condition in use of go-getter that could cause a client agent to download the wrong artifact into the wrong destination. [CVE-2022-24686](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24686) [[GH-12036](https://github.com/hashicorp/nomad/issues/12036)]
* Prevent panic in spread iterator during allocation stop. [CVE-2022-24684](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24684) [[GH-12039](https://github.com/hashicorp/nomad/issues/12039)]
* Resolve symlinks to prevent unauthorized access to files outside the allocation directory. [CVE-2022-24683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24683) [[GH-12037](https://github.com/hashicorp/nomad/issues/12037)]
## 1.0.17 (February 1, 2022)
BUG FIXES:

@ -39,7 +39,7 @@ PROTO_COMPARE_TAG ?= v1.0.3$(if $(findstring ent,$(GO_TAGS)),+ent,)
# LAST_RELEASE is the git sha of the latest release corresponding to this branch. main should have the latest
# published release, but backport branches should point to the parent tag (e.g. 1.0.8 in release-1.0.9 after 1.1.0 is cut).
LAST_RELEASE ?= v1.2.5
LAST_RELEASE ?= v1.2.6
default: help

@ -26,6 +26,7 @@ const (
NamespaceCapabilityDeny = "deny"
NamespaceCapabilityListJobs = "list-jobs"
NamespaceCapabilityParseJob = "parse-job"
NamespaceCapabilityReadJob = "read-job"
NamespaceCapabilitySubmitJob = "submit-job"
NamespaceCapabilityDispatchJob = "dispatch-job"
@ -146,7 +147,7 @@ func (p *PluginPolicy) isValid() bool {
// isNamespaceCapabilityValid ensures the given capability is valid for a namespace policy
func isNamespaceCapabilityValid(cap string) bool {
switch cap {
case NamespaceCapabilityDeny, NamespaceCapabilityListJobs, NamespaceCapabilityReadJob,
case NamespaceCapabilityDeny, NamespaceCapabilityParseJob, NamespaceCapabilityListJobs, NamespaceCapabilityReadJob,
NamespaceCapabilitySubmitJob, NamespaceCapabilityDispatchJob, NamespaceCapabilityReadLogs,
NamespaceCapabilityReadFS, NamespaceCapabilityAllocLifecycle,
NamespaceCapabilityAllocExec, NamespaceCapabilityAllocNodeExec,
@ -166,6 +167,7 @@ func isNamespaceCapabilityValid(cap string) bool {
func expandNamespacePolicy(policy string) []string {
read := []string{
NamespaceCapabilityListJobs,
NamespaceCapabilityParseJob,
NamespaceCapabilityReadJob,
NamespaceCapabilityCSIListVolume,
NamespaceCapabilityCSIReadVolume,
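
The two hunks above make `parse-job` a valid namespace capability and fold it into the expanded `read` policy (and therefore into `write`, which is a superset). A minimal sketch of what that implies, assuming it is dropped into the `acl` package alongside its existing `Parse` and `NewACL` helpers; the test name is hypothetical:

```go
package acl

import "testing"

// TestReadPolicyIncludesParseJob is a hypothetical test illustrating the
// capability expansion above; Parse, NewACL, and the capability constants
// come from this package.
func TestReadPolicyIncludesParseJob(t *testing.T) {
	policy, err := Parse(`namespace "default" { policy = "read" }`)
	if err != nil {
		t.Fatal(err)
	}

	aclObj, err := NewACL(false, []*Policy{policy})
	if err != nil {
		t.Fatal(err)
	}

	// After this change, a plain "read" namespace policy carries parse-job,
	// so existing read-only tokens keep working against /v1/jobs/parse.
	if !aclObj.AllowNsOp("default", NamespaceCapabilityParseJob) {
		t.Fatal("expected the expanded read policy to include parse-job")
	}
}
```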

@ -29,6 +29,7 @@ func TestParse(t *testing.T) {
Policy: PolicyRead,
Capabilities: []string{
NamespaceCapabilityListJobs,
NamespaceCapabilityParseJob,
NamespaceCapabilityReadJob,
NamespaceCapabilityCSIListVolume,
NamespaceCapabilityCSIReadVolume,
@ -78,6 +79,7 @@ func TestParse(t *testing.T) {
Policy: PolicyRead,
Capabilities: []string{
NamespaceCapabilityListJobs,
NamespaceCapabilityParseJob,
NamespaceCapabilityReadJob,
NamespaceCapabilityCSIListVolume,
NamespaceCapabilityCSIReadVolume,
@ -91,6 +93,7 @@ func TestParse(t *testing.T) {
Policy: PolicyWrite,
Capabilities: []string{
NamespaceCapabilityListJobs,
NamespaceCapabilityParseJob,
NamespaceCapabilityReadJob,
NamespaceCapabilityCSIListVolume,
NamespaceCapabilityCSIReadVolume,

@ -6,17 +6,17 @@ import (
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"net/http"
"strings"
hclog "github.com/hashicorp/go-hclog"
multierror "github.com/hashicorp/go-multierror"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper/escapingfs"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hpcloud/tail/watch"
tomb "gopkg.in/tomb.v1"
@ -350,7 +350,7 @@ func (d *AllocDir) Build() error {
// List returns the list of files at a path relative to the alloc dir
func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) {
if escapes, err := structs.PathEscapesAllocDir("", path); err != nil {
if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err)
} else if escapes {
return nil, fmt.Errorf("Path escapes the alloc directory")
@ -376,7 +376,7 @@ func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) {
// Stat returns information about the file at a path relative to the alloc dir
func (d *AllocDir) Stat(path string) (*cstructs.AllocFileInfo, error) {
if escapes, err := structs.PathEscapesAllocDir("", path); err != nil {
if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err)
} else if escapes {
return nil, fmt.Errorf("Path escapes the alloc directory")
@ -426,7 +426,7 @@ func detectContentType(fileInfo os.FileInfo, path string) string {
// ReadAt returns a reader for a file at the path relative to the alloc dir
func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) {
if escapes, err := structs.PathEscapesAllocDir("", path); err != nil {
if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err)
} else if escapes {
return nil, fmt.Errorf("Path escapes the alloc directory")
@ -457,7 +457,7 @@ func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) {
// BlockUntilExists blocks until the passed file relative the allocation
// directory exists. The block can be cancelled with the passed context.
func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan error, error) {
if escapes, err := structs.PathEscapesAllocDir("", path); err != nil {
if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err)
} else if escapes {
return nil, fmt.Errorf("Path escapes the alloc directory")
@ -483,7 +483,7 @@ func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan erro
// allocation directory. The offset should be the last read offset. The context is
// used to clean up the watch.
func (d *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int64) (*watch.FileChanges, error) {
if escapes, err := structs.PathEscapesAllocDir("", path); err != nil {
if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err)
} else if escapes {
return nil, fmt.Errorf("Path escapes the alloc directory")

@ -332,28 +332,30 @@ func TestAllocDir_EscapeChecking(t *testing.T) {
// Test that `nomad fs` can't read secrets
func TestAllocDir_ReadAt_SecretDir(t *testing.T) {
tmp, err := ioutil.TempDir("", "AllocDir")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(tmp)
tmp := t.TempDir()
d := NewAllocDir(testlog.HCLogger(t), tmp, "test")
if err := d.Build(); err != nil {
t.Fatalf("Build() failed: %v", err)
}
defer d.Destroy()
err := d.Build()
require.NoError(t, err)
defer func() {
_ = d.Destroy()
}()
td := d.NewTaskDir(t1.Name)
if err := td.Build(false, nil); err != nil {
t.Fatalf("TaskDir.Build() failed: %v", err)
}
err = td.Build(false, nil)
require.NoError(t, err)
// ReadAt of secret dir should fail
secret := filepath.Join(t1.Name, TaskSecrets, "test_file")
if _, err := d.ReadAt(secret, 0); err == nil || !strings.Contains(err.Error(), "secret file prohibited") {
t.Fatalf("ReadAt of secret file didn't error: %v", err)
}
// something to write and test reading
target := filepath.Join(t1.Name, TaskSecrets, "test_file")
// create target file in the task secrets dir
full := filepath.Join(d.AllocDir, target)
err = ioutil.WriteFile(full, []byte("hi"), 0600)
require.NoError(t, err)
// ReadAt of a file in the task secrets dir should fail
_, err = d.ReadAt(target, 0)
require.EqualError(t, err, "Reading secret file prohibited: web/secrets/test_file")
}
func TestAllocDir_SplitPath(t *testing.T) {

@ -6,22 +6,20 @@ import (
"net/http"
"net/url"
"strings"
"sync"
"github.com/hashicorp/go-cleanhttp"
gg "github.com/hashicorp/go-getter"
"github.com/hashicorp/nomad/nomad/structs"
)
var (
// getters is the map of getters suitable for Nomad. It is initialized once
// and the lock is used to guard access to it.
getters map[string]gg.Getter
lock sync.Mutex
// supported is the set of download schemes supported by Nomad
supported = []string{"http", "https", "s3", "hg", "git", "gcs"}
)
// httpClient is a shared HTTP client for use across all http/https Getter
// instantiations. The HTTP client is designed to be thread-safe, and using a pooled
// transport will help reduce excessive connections when clients are downloading lots
// of artifacts.
var httpClient = &http.Client{
Transport: cleanhttp.DefaultPooledTransport(),
}
const (
// gitSSHPrefix is the prefix for downloading via git using ssh
@ -35,53 +33,36 @@ type EnvReplacer interface {
ClientPath(string, bool) (string, bool)
}
func makeGetters(headers http.Header) map[string]gg.Getter {
getters := make(map[string]gg.Getter, len(supported))
for _, getter := range supported {
switch {
case getter == "http" && len(headers) > 0:
fallthrough
case getter == "https" && len(headers) > 0:
getters[getter] = &gg.HttpGetter{
Netrc: true,
Header: headers,
}
default:
if defaultGetter, ok := gg.Getters[getter]; ok {
getters[getter] = defaultGetter
}
}
}
return getters
}
// getClient returns a client that is suitable for Nomad downloading artifacts.
func getClient(src string, headers http.Header, mode gg.ClientMode, dst string) *gg.Client {
client := &gg.Client{
Src: src,
Dst: dst,
Mode: mode,
Umask: 060000000,
return &gg.Client{
Src: src,
Dst: dst,
Mode: mode,
Umask: 060000000,
Getters: createGetters(headers),
}
}
switch len(headers) {
case 0:
// When no headers are present use the memoized getters, creating them
// on demand if they do not exist yet.
lock.Lock()
if getters == nil {
getters = makeGetters(nil)
}
lock.Unlock()
client.Getters = getters
default:
// When there are headers present, we must create fresh gg.HttpGetter
// objects, because that is where gg stores the headers to use in its
// artifact HTTP GET requests.
client.Getters = makeGetters(headers)
func createGetters(header http.Header) map[string]gg.Getter {
httpGetter := &gg.HttpGetter{
Netrc: true,
Client: httpClient,
Header: header,
}
// Explicitly create fresh set of supported Getter for each Client, because
// go-getter is not thread-safe. Use a shared HTTP client for http/https Getter,
// with pooled transport which is thread-safe.
//
// If a getter type is not listed here, it is not supported (e.g. file).
return map[string]gg.Getter{
"git": new(gg.GitGetter),
"gcs": new(gg.GCSGetter),
"hg": new(gg.HgGetter),
"s3": new(gg.S3Getter),
"http": httpGetter,
"https": httpGetter,
}
return client
}
// getGetterUrl returns the go-getter URL to download the artifact.
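
The rewritten artifact getter above drops the shared, lazily-built `getters` map and its mutex: go-getter's `HttpGetter` stores per-request headers, and sharing getter instances across goroutines is the thread-safety issue this change addresses (CVE-2022-24686). Each `gg.Client` now gets a fresh getter set from `createGetters`, and only the pooled `httpClient` transport is shared. A hedged sketch of a caller, assuming it sits in the same package as `getClient`; the URL, header, and destination values are placeholders:

```go
package getter // assumption: the package that defines getClient above

import (
	"net/http"

	gg "github.com/hashicorp/go-getter"
)

// fetchExample is a hypothetical caller showing the per-download Client
// construction; source, header, and destination values are placeholders.
func fetchExample() error {
	headers := make(http.Header)
	headers.Set("Authorization", "Bearer example-token") // placeholder header

	// Every call builds its own Getter set via createGetters, so concurrent
	// downloads cannot observe each other's headers or destinations; only
	// the pooled httpClient transport is shared.
	client := getClient(
		"https://releases.example.com/app.tar.gz", // placeholder source
		headers,
		gg.ClientModeAny,
		"/var/nomad/alloc/example/task/local/app", // placeholder destination
	)
	return client.Get()
}
```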

@ -16,7 +16,6 @@ import (
"github.com/docker/docker/pkg/ioutils"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-msgpack/codec"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/api"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/command/agent/host"
@ -62,24 +61,7 @@ func (s *HTTPServer) AgentSelfRequest(resp http.ResponseWriter, req *http.Reques
return nil, CodedError(405, ErrInvalidMethod)
}
var secret string
s.parseToken(req, &secret)
var aclObj *acl.ACL
var err error
// Get the member as a server
var member serf.Member
if srv := s.agent.Server(); srv != nil {
member = srv.LocalMember()
aclObj, err = srv.ResolveToken(secret)
} else {
// Not a Server, so use the Client for token resolution. Note
// this gets forwarded to a server with AllowStale = true if
// the local ACL cache TTL has expired (30s by default)
aclObj, err = s.agent.Client().ResolveToken(secret)
}
aclObj, err := s.ResolveToken(req)
if err != nil {
return nil, err
}
@ -89,6 +71,12 @@ func (s *HTTPServer) AgentSelfRequest(resp http.ResponseWriter, req *http.Reques
return nil, structs.ErrPermissionDenied
}
// Get the member as a server
var member serf.Member
if srv := s.agent.Server(); srv != nil {
member = srv.LocalMember()
}
self := agentSelf{
Member: nomadMember(member),
Stats: s.agent.Stats(),
@ -671,27 +659,19 @@ func (s *HTTPServer) AgentHostRequest(resp http.ResponseWriter, req *http.Reques
return nil, CodedError(405, ErrInvalidMethod)
}
var secret string
s.parseToken(req, &secret)
// Check agent read permissions
var aclObj *acl.ACL
var enableDebug bool
var err error
if srv := s.agent.Server(); srv != nil {
aclObj, err = srv.ResolveToken(secret)
enableDebug = srv.GetConfig().EnableDebug
} else {
// Not a Server, so use the Client for token resolution. Note
// this gets forwarded to a server with AllowStale = true if
// the local ACL cache TTL has expired (30s by default)
aclObj, err = s.agent.Client().ResolveToken(secret)
enableDebug = s.agent.Client().GetConfig().EnableDebug
}
aclObj, err := s.ResolveToken(req)
if err != nil {
return nil, err
}
// Check agent read permissions
var enableDebug bool
if srv := s.agent.Server(); srv != nil {
enableDebug = srv.GetConfig().EnableDebug
} else {
enableDebug = s.agent.Client().GetConfig().EnableDebug
}
if (aclObj != nil && !aclObj.AllowAgentRead()) ||
(aclObj == nil && !enableDebug) {
return nil, structs.ErrPermissionDenied

@ -23,6 +23,7 @@ import (
multierror "github.com/hashicorp/go-multierror"
"github.com/rs/cors"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/helper/noxssrw"
"github.com/hashicorp/nomad/helper/tlsutil"
"github.com/hashicorp/nomad/nomad/structs"
@ -264,6 +265,31 @@ func (s *HTTPServer) Shutdown() {
}
}
// ResolveToken extracts the ACL token secret ID from the request and
// translates it into an ACL object. Returns nil if ACLs are disabled.
func (s *HTTPServer) ResolveToken(req *http.Request) (*acl.ACL, error) {
var secret string
s.parseToken(req, &secret)
var aclObj *acl.ACL
var err error
if srv := s.agent.Server(); srv != nil {
aclObj, err = srv.ResolveToken(secret)
} else {
// Not a Server, so use the Client for token resolution. Note
// this gets forwarded to a server with AllowStale = true if
// the local ACL cache TTL has expired (30s by default)
aclObj, err = s.agent.Client().ResolveToken(secret)
}
if err != nil {
return nil, fmt.Errorf("failed to resolve ACL token: %v", err)
}
return aclObj, nil
}
// registerHandlers is used to attach our handlers to the mux
func (s HTTPServer) registerHandlers(enableDebug bool) {
s.mux.HandleFunc("/v1/jobs", s.wrap(s.JobsRequest))
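
ResolveToken consolidates the server-or-client token lookup that was previously copy-pasted into AgentSelfRequest and AgentHostRequest (see the hunks above). A minimal sketch of the handler pattern it enables, assuming a hypothetical endpoint method inside this agent package; the nil result is the ACLs-disabled case:

```go
// exampleProtectedRequest is a hypothetical endpoint illustrating the
// ResolveToken pattern; it assumes the surrounding command/agent package.
func (s *HTTPServer) exampleProtectedRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	aclObj, err := s.ResolveToken(req)
	if err != nil {
		return nil, err
	}

	// aclObj is nil when ACLs are disabled; otherwise enforce a capability.
	if aclObj != nil && !aclObj.AllowAgentRead() {
		return nil, structs.ErrPermissionDenied
	}

	// ... endpoint-specific work would go here ...
	return nil, nil
}
```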

@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/testlog"
@ -1315,6 +1316,57 @@ func TestHTTPServer_Limits_OK(t *testing.T) {
}
}
func TestHTTPServer_ResolveToken(t *testing.T) {
t.Parallel()
// Setup two servers, one with ACL enabled and another with ACL disabled.
noACLServer := makeHTTPServer(t, func(c *Config) {
c.ACL = &ACLConfig{Enabled: false}
})
defer noACLServer.Shutdown()
ACLServer := makeHTTPServer(t, func(c *Config) {
c.ACL = &ACLConfig{Enabled: true}
})
defer ACLServer.Shutdown()
// Register sample token.
state := ACLServer.Agent.server.State()
token := mock.CreatePolicyAndToken(t, state, 1000, "node", mock.NodePolicy(acl.PolicyWrite))
// Tests cases.
t.Run("acl disabled", func(t *testing.T) {
req := &http.Request{Body: http.NoBody}
got, err := noACLServer.Server.ResolveToken(req)
require.NoError(t, err)
require.Nil(t, got)
})
t.Run("token not found", func(t *testing.T) {
req := &http.Request{
Body: http.NoBody,
Header: make(map[string][]string),
}
setToken(req, mock.ACLToken())
got, err := ACLServer.Server.ResolveToken(req)
require.Nil(t, got)
require.Error(t, err)
require.Contains(t, err.Error(), "ACL token not found")
})
t.Run("set token", func(t *testing.T) {
req := &http.Request{
Body: http.NoBody,
Header: make(map[string][]string),
}
setToken(req, token)
got, err := ACLServer.Server.ResolveToken(req)
require.NoError(t, err)
require.NotNil(t, got)
require.True(t, got.AllowNodeWrite())
})
}
func Test_IsAPIClientError(t *testing.T) {
trueCases := []int{400, 403, 404, 499}
for _, c := range trueCases {
@ -1410,6 +1462,12 @@ func setToken(req *http.Request, token *structs.ACLToken) {
req.Header.Set("X-Nomad-Token", token.SecretID)
}
func setNamespace(req *http.Request, ns string) {
q := req.URL.Query()
q.Add("namespace", ns)
req.URL.RawQuery = q.Encode()
}
func encodeReq(obj interface{}) io.ReadCloser {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)

@ -7,6 +7,7 @@ import (
"strings"
"github.com/golang/snappy"
"github.com/hashicorp/nomad/acl"
api "github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/jobspec"
@ -703,6 +704,25 @@ func (s *HTTPServer) JobsParseRequest(resp http.ResponseWriter, req *http.Reques
return nil, CodedError(405, ErrInvalidMethod)
}
var namespace string
parseNamespace(req, &namespace)
aclObj, err := s.ResolveToken(req)
if err != nil {
return nil, err
}
// Check job parse permissions
if aclObj != nil {
hasParseJob := aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityParseJob)
hasSubmitJob := aclObj.AllowNsOp(namespace, acl.NamespaceCapabilitySubmitJob)
allowed := hasParseJob || hasSubmitJob
if !allowed {
return nil, structs.ErrPermissionDenied
}
}
args := &api.JobsParseRequest{}
if err := decodeBody(req, &args); err != nil {
return nil, CodedError(400, err.Error())
@ -712,7 +732,6 @@ func (s *HTTPServer) JobsParseRequest(resp http.ResponseWriter, req *http.Reques
}
var jobStruct *api.Job
var err error
if args.HCLv1 {
jobStruct, err = jobspec.Parse(strings.NewReader(args.JobHCL))
} else {
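
Since /v1/jobs/parse now rejects requests that lack either the parse-job or submit-job capability in the target namespace (when ACLs are enabled), external callers have to send a token. A standalone, hedged sketch of calling the endpoint directly over HTTP; the agent address, token, namespace, and the JSON field names taken from api.JobsParseRequest are assumptions:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholders: adjust the agent address, namespace, and token.
	const agent = "http://127.0.0.1:4646"
	const token = "REPLACE-WITH-TOKEN-GRANTING-parse-job-OR-submit-job"

	body, err := json.Marshal(map[string]interface{}{
		"JobHCL":       `job "example" { datacenters = ["dc1"] }`,
		"Canonicalize": true,
	})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPost,
		agent+"/v1/jobs/parse?namespace=default", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	// Without a token carrying parse-job or submit-job for the namespace,
	// an ACL-enabled agent now answers with a permission denied error.
	req.Header.Set("X-Nomad-Token", token)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```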

@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/nomad/acl"
api "github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/mock"
@ -407,6 +408,128 @@ func TestHTTP_JobsParse(t *testing.T) {
}
})
}
func TestHTTP_JobsParse_ACL(t *testing.T) {
t.Parallel()
httpACLTest(t, nil, func(s *TestAgent) {
state := s.Agent.server.State()
// ACL tokens used in tests.
nodeToken := mock.CreatePolicyAndToken(
t, state, 1000, "node",
mock.NodePolicy(acl.PolicyWrite),
)
parseJobDevToken := mock.CreatePolicyAndToken(
t, state, 1002, "parse-job-dev",
mock.NamespacePolicy("dev", "", []string{"parse-job"}),
)
readNsDevToken := mock.CreatePolicyAndToken(
t, state, 1004, "read-dev",
mock.NamespacePolicy("dev", "read", nil),
)
parseJobDefaultToken := mock.CreatePolicyAndToken(
t, state, 1006, "parse-job-default",
mock.NamespacePolicy("default", "", []string{"parse-job"}),
)
submitJobDefaultToken := mock.CreatePolicyAndToken(
t, state, 1008, "submit-job-default",
mock.NamespacePolicy("default", "", []string{"submit-job"}),
)
readNsDefaultToken := mock.CreatePolicyAndToken(
t, state, 1010, "read-default",
mock.NamespacePolicy("default", "read", nil),
)
testCases := []struct {
name string
token *structs.ACLToken
namespace string
expectError bool
}{
{
name: "missing ACL token",
token: nil,
expectError: true,
},
{
name: "wrong permissions",
token: nodeToken,
expectError: true,
},
{
name: "wrong namespace",
token: readNsDevToken,
expectError: true,
},
{
name: "wrong namespace capability",
token: parseJobDevToken,
expectError: true,
},
{
name: "default namespace read",
token: readNsDefaultToken,
expectError: false,
},
{
name: "non-default namespace read",
token: readNsDevToken,
namespace: "dev",
expectError: false,
},
{
name: "default namespace parse-job capability",
token: parseJobDefaultToken,
expectError: false,
},
{
name: "default namespace submit-job capability",
token: submitJobDefaultToken,
expectError: false,
},
{
name: "non-default namespace capability",
token: parseJobDevToken,
namespace: "dev",
expectError: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
buf := encodeReq(api.JobsParseRequest{JobHCL: mock.HCL()})
req, err := http.NewRequest("POST", "/v1/jobs/parse", buf)
require.NoError(t, err)
if tc.namespace != "" {
setNamespace(req, tc.namespace)
}
if tc.token != nil {
setToken(req, tc.token)
}
respW := httptest.NewRecorder()
obj, err := s.Server.JobsParseRequest(respW, req)
if tc.expectError {
require.Error(t, err)
require.Equal(t, structs.ErrPermissionDenied.Error(), err.Error())
} else {
require.NoError(t, err)
require.NotNil(t, obj)
job := obj.(*api.Job)
expected := mock.Job()
require.Equal(t, expected.Name, *job.Name)
require.ElementsMatch(t, expected.Datacenters, job.Datacenters)
}
})
}
})
}
func TestHTTP_JobQuery(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {

@ -0,0 +1,99 @@
package escapingfs
import (
"errors"
"os"
"path/filepath"
"strings"
)
// PathEscapesAllocViaRelative returns if the given path escapes the allocation
// directory using relative paths.
//
// Only for use in server-side validation, where the real filesystem is not available.
// For client-side validation use PathEscapesAllocDir, which includes symlink validation
// as well.
//
// The prefix is joined to the path (e.g. "task/local"), and this function
// checks if path escapes the alloc dir, NOT the prefix directory within the alloc dir.
// With prefix="task/local", it will return false for "../secret", but
// true for "../../../../../../root" path; only the latter escapes the alloc dir.
func PathEscapesAllocViaRelative(prefix, path string) (bool, error) {
// Verify the destination does not escape the task's directory. The "alloc-dir"
// and "alloc-id" here are just placeholders; on a real filesystem they will
// have different names. The names are not important, but rather the number of levels
// in the path they represent.
alloc, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/"))
if err != nil {
return false, err
}
abs, err := filepath.Abs(filepath.Join(alloc, prefix, path))
if err != nil {
return false, err
}
rel, err := filepath.Rel(alloc, abs)
if err != nil {
return false, err
}
return strings.HasPrefix(rel, ".."), nil
}
// pathEscapesBaseViaSymlink returns if path escapes dir, taking into account evaluation
// of symlinks.
//
// The base directory must be an absolute path.
func pathEscapesBaseViaSymlink(base, full string) (bool, error) {
resolveSym, err := filepath.EvalSymlinks(full)
if err != nil {
return false, err
}
rel, err := filepath.Rel(resolveSym, base)
if err != nil {
return true, nil
}
// note: this is not the same as !filesystem.IsAbs; we are asking if the relative
// path is descendent of the base path, indicating it does not escape.
isRelative := strings.HasPrefix(rel, "..") || rel == "."
escapes := !isRelative
return escapes, nil
}
// PathEscapesAllocDir returns true if base/prefix/path escapes the given base directory.
//
// Escaping a directory can be done with relative paths (e.g. ../../ etc.) or by
// using symlinks. This checks both methods.
//
// The base directory must be an absolute path.
func PathEscapesAllocDir(base, prefix, path string) (bool, error) {
full := filepath.Join(base, prefix, path)
// If base is not an absolute path, the caller passed in the wrong thing.
if !filepath.IsAbs(base) {
return false, errors.New("alloc dir must be absolute")
}
// Check path does not escape the alloc dir using relative paths.
if escapes, err := PathEscapesAllocViaRelative(prefix, path); err != nil {
return false, err
} else if escapes {
return true, nil
}
// Check path does not escape the alloc dir using symlinks.
if escapes, err := pathEscapesBaseViaSymlink(base, full); err != nil {
if os.IsNotExist(err) {
// Treat non-existent files as non-errors; perhaps not ideal but we
// have existing features (log-follow) that depend on this. Still safe,
// because we do the symlink check on every ReadAt call also.
return false, nil
}
return false, err
} else if escapes {
return true, nil
}
return false, nil
}
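
The new helper/escapingfs package splits the old structs.PathEscapesAllocDir into a purely lexical check (PathEscapesAllocViaRelative, safe for server-side validation) and an on-disk check (PathEscapesAllocDir, which additionally resolves symlinks on the client). A short standalone sketch, using a temporary directory as a stand-in for a real allocation directory:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/hashicorp/nomad/helper/escapingfs"
)

func main() {
	// Lexical check only (no filesystem access); mirrors the examples in the
	// doc comment above.
	fmt.Println(escapingfs.PathEscapesAllocViaRelative("task/local", "../secret"))
	// -> false <nil>: still inside the alloc dir
	fmt.Println(escapingfs.PathEscapesAllocViaRelative("task/local", "../../../../../../root"))
	// -> true <nil>: walks out of the alloc dir

	// On-disk check used by the client, which also evaluates symlinks.
	// The temp directory is a placeholder for a real allocation directory.
	allocDir, err := os.MkdirTemp("", "alloc-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(allocDir)

	if err := os.WriteFile(filepath.Join(allocDir, "foo"), []byte("hi"), 0o600); err != nil {
		log.Fatal(err)
	}
	fmt.Println(escapingfs.PathEscapesAllocDir(allocDir, "", "foo"))
	// -> false <nil>: a regular file inside the alloc dir does not escape
}
```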

@ -0,0 +1,162 @@
package escapingfs
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
func setup(t *testing.T) string {
p, err := ioutil.TempDir("", "escapist")
require.NoError(t, err)
return p
}
func cleanup(t *testing.T, root string) {
err := os.RemoveAll(root)
require.NoError(t, err)
}
func write(t *testing.T, file, data string) {
err := ioutil.WriteFile(file, []byte(data), 0600)
require.NoError(t, err)
}
func Test_PathEscapesAllocViaRelative(t *testing.T) {
for _, test := range []struct {
prefix string
path string
exp bool
}{
// directly under alloc-dir/alloc-id/
{prefix: "", path: "", exp: false},
{prefix: "", path: "/foo", exp: false},
{prefix: "", path: "./", exp: false},
{prefix: "", path: "../", exp: true}, // at alloc-id/
// under alloc-dir/alloc-id/<foo>/
{prefix: "foo", path: "", exp: false},
{prefix: "foo", path: "/foo", exp: false},
{prefix: "foo", path: "../", exp: false}, // at foo/
{prefix: "foo", path: "../../", exp: true}, // at alloc-id/
// under alloc-dir/alloc-id/foo/bar/
{prefix: "foo/bar", path: "", exp: false},
{prefix: "foo/bar", path: "/foo", exp: false},
{prefix: "foo/bar", path: "../", exp: false}, // at bar/
{prefix: "foo/bar", path: "../../", exp: false}, // at foo/
{prefix: "foo/bar", path: "../../../", exp: true}, // at alloc-id/
} {
result, err := PathEscapesAllocViaRelative(test.prefix, test.path)
require.NoError(t, err)
require.Equal(t, test.exp, result)
}
}
func Test_pathEscapesBaseViaSymlink(t *testing.T) {
t.Run("symlink-escape", func(t *testing.T) {
dir := setup(t)
defer cleanup(t, dir)
// link from dir/link
link := filepath.Join(dir, "link")
// link to /tmp
target := filepath.Clean("/tmp")
err := os.Symlink(target, link)
require.NoError(t, err)
escape, err := pathEscapesBaseViaSymlink(dir, link)
require.NoError(t, err)
require.True(t, escape)
})
t.Run("symlink-noescape", func(t *testing.T) {
dir := setup(t)
defer cleanup(t, dir)
// create a file within dir
target := filepath.Join(dir, "foo")
write(t, target, "hi")
// link to file within dir
link := filepath.Join(dir, "link")
err := os.Symlink(target, link)
require.NoError(t, err)
// link to file within dir does not escape dir
escape, err := pathEscapesBaseViaSymlink(dir, link)
require.NoError(t, err)
require.False(t, escape)
})
}
func Test_PathEscapesAllocDir(t *testing.T) {
t.Run("no-escape-root", func(t *testing.T) {
dir := setup(t)
defer cleanup(t, dir)
escape, err := PathEscapesAllocDir(dir, "", "/")
require.NoError(t, err)
require.False(t, escape)
})
t.Run("no-escape", func(t *testing.T) {
dir := setup(t)
defer cleanup(t, dir)
write(t, filepath.Join(dir, "foo"), "hi")
escape, err := PathEscapesAllocDir(dir, "", "/foo")
require.NoError(t, err)
require.False(t, escape)
})
t.Run("no-escape-no-exist", func(t *testing.T) {
dir := setup(t)
defer cleanup(t, dir)
escape, err := PathEscapesAllocDir(dir, "", "/no-exist")
require.NoError(t, err)
require.False(t, escape)
})
t.Run("symlink-escape", func(t *testing.T) {
dir := setup(t)
defer cleanup(t, dir)
// link from dir/link
link := filepath.Join(dir, "link")
// link to /tmp
target := filepath.Clean("/tmp")
err := os.Symlink(target, link)
require.NoError(t, err)
escape, err := PathEscapesAllocDir(dir, "", "/link")
require.NoError(t, err)
require.True(t, escape)
})
t.Run("relative-escape", func(t *testing.T) {
dir := setup(t)
defer cleanup(t, dir)
escape, err := PathEscapesAllocDir(dir, "", "../../foo")
require.NoError(t, err)
require.True(t, escape)
})
t.Run("relative-escape-prefix", func(t *testing.T) {
dir := setup(t)
defer cleanup(t, dir)
escape, err := PathEscapesAllocDir(dir, "/foo/bar", "../../../foo")
require.NoError(t, err)
require.True(t, escape)
})
}

@ -96,6 +96,12 @@ func decode(c *jobConfig) error {
diags = append(diags, ds...)
}
// Return early if the input job or variable files are not valid.
// Decoding and evaluating invalid files may result in unexpected results.
if diags.HasErrors() {
return diags
}
diags = append(diags, c.decodeBody(file.Body)...)
if diags.HasErrors() {

@ -374,6 +374,49 @@ job "example" {
require.Equal(t, "3", out.TaskGroups[2].Tasks[0].Meta["VERSION"])
}
func TestParse_InvalidHCL(t *testing.T) {
t.Run("invalid body", func(t *testing.T) {
hcl := `invalid{hcl`
_, err := ParseWithConfig(&ParseConfig{
Path: "input.hcl",
Body: []byte(hcl),
ArgVars: []string{},
AllowFS: true,
})
require.Error(t, err)
})
t.Run("invalid vars file", func(t *testing.T) {
tmp, err := ioutil.TempFile("", "nomad-jobspec2-")
require.NoError(t, err)
defer os.Remove(tmp.Name())
vars := `invalid{hcl`
_, err = tmp.Write([]byte(vars))
require.NoError(t, err)
hcl := `
variables {
region_var = "default"
}
job "example" {
datacenters = [for s in ["dc1", "dc2"] : upper(s)]
region = var.region_var
}
`
_, err = ParseWithConfig(&ParseConfig{
Path: "input.hcl",
Body: []byte(hcl),
VarFiles: []string{tmp.Name()},
ArgVars: []string{},
AllowFS: true,
})
require.Error(t, err)
})
}
func TestParse_InvalidScalingSyntax(t *testing.T) {
cases := []struct {
name string

@ -17,7 +17,6 @@ import (
"math"
"net"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
@ -25,6 +24,7 @@ import (
"strings"
"time"
"github.com/hashicorp/nomad/helper/escapingfs"
"golang.org/x/crypto/blake2b"
"github.com/hashicorp/cronexpr"
@ -5316,7 +5316,7 @@ func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
func (d *DispatchPayloadConfig) Validate() error {
// Verify the destination doesn't escape
escaped, err := PathEscapesAllocDir("task/local/", d.File)
escaped, err := escapingfs.PathEscapesAllocViaRelative("task/local/", d.File)
if err != nil {
return fmt.Errorf("invalid destination path: %v", err)
} else if escaped {
@ -7535,7 +7535,7 @@ func (t *Template) Validate() error {
}
// Verify the destination doesn't escape
escaped, err := PathEscapesAllocDir("task", t.DestPath)
escaped, err := escapingfs.PathEscapesAllocViaRelative("task", t.DestPath)
if err != nil {
mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
} else if escaped {
@ -8333,31 +8333,6 @@ func (ta *TaskArtifact) Hash() string {
return base64.RawStdEncoding.EncodeToString(h.Sum(nil))
}
// PathEscapesAllocDir returns if the given path escapes the allocation
// directory.
//
// The prefix is to joined to the path (e.g. "task/local"), and this function
// checks if path escapes the alloc dir, NOT the prefix directory within the alloc dir.
// With prefix="task/local", it will return false for "../secret", but
// true for "../../../../../../root" path; only the latter escapes the alloc dir
func PathEscapesAllocDir(prefix, path string) (bool, error) {
// Verify the destination doesn't escape the tasks directory
alloc, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/"))
if err != nil {
return false, err
}
abs, err := filepath.Abs(filepath.Join(alloc, prefix, path))
if err != nil {
return false, err
}
rel, err := filepath.Rel(alloc, abs)
if err != nil {
return false, err
}
return strings.HasPrefix(rel, ".."), nil
}
func (ta *TaskArtifact) Validate() error {
// Verify the source
var mErr multierror.Error
@ -8376,7 +8351,7 @@ func (ta *TaskArtifact) Validate() error {
ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
}
escaped, err := PathEscapesAllocDir("task", ta.RelativeDest)
escaped, err := escapingfs.PathEscapesAllocViaRelative("task", ta.RelativeDest)
if err != nil {
mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
} else if escaped {

@ -71,6 +71,12 @@ func (iter *SpreadIterator) SetJob(job *structs.Job) {
if job.Spreads != nil {
iter.jobSpreads = job.Spreads
}
// reset group spread/property so that when we temporarily SetJob
// to an older version to calculate stops we don't leak old
// versions of spread/properties to the new job version
iter.tgSpreadInfo = make(map[string]spreadAttributeMap)
iter.groupPropertySets = make(map[string][]*propertySet)
}
func (iter *SpreadIterator) SetTaskGroup(tg *structs.TaskGroup) {
@ -134,6 +140,15 @@ func (iter *SpreadIterator) Next() *RankedNode {
spreadAttributeMap := iter.tgSpreadInfo[tgName]
spreadDetails := spreadAttributeMap[pset.targetAttribute]
if spreadDetails == nil {
iter.ctx.Logger().Named("spread").Error(
"error reading spread attribute map for task group",
"task_group", tgName,
"target", pset.targetAttribute,
)
continue
}
if len(spreadDetails.desiredCounts) == 0 {
// When desired counts map is empty the user didn't specify any targets
// Use even spreading scoring algorithm for this scenario

@ -9,6 +9,7 @@ import (
"fmt"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
@ -811,3 +812,97 @@ func validateEqualSpread(h *Harness) error {
}
return fmt.Errorf("expected even distributon of allocs to racks, but got:\n%+v", countSet)
}
func TestSpreadPanicDowngrade(t *testing.T) {
h := NewHarness(t)
nodes := []*structs.Node{}
for i := 0; i < 5; i++ {
node := mock.Node()
nodes = append(nodes, node)
err := h.State.UpsertNode(structs.MsgTypeTestSetup,
h.NextIndex(), node)
require.NoError(t, err)
}
// job version 1
// max_parallel = 0, canary = 1, spread != nil, 1 failed alloc
job1 := mock.Job()
job1.Spreads = []*structs.Spread{
{
Attribute: "${node.unique.name}",
Weight: 50,
SpreadTarget: []*structs.SpreadTarget{},
},
}
job1.Update = structs.UpdateStrategy{
Stagger: time.Duration(30 * time.Second),
MaxParallel: 0,
}
job1.Status = structs.JobStatusRunning
job1.TaskGroups[0].Count = 4
job1.TaskGroups[0].Update = &structs.UpdateStrategy{
Stagger: time.Duration(30 * time.Second),
MaxParallel: 1,
HealthCheck: "checks",
MinHealthyTime: time.Duration(30 * time.Second),
HealthyDeadline: time.Duration(9 * time.Minute),
ProgressDeadline: time.Duration(10 * time.Minute),
AutoRevert: true,
Canary: 1,
}
job1.Version = 1
job1.TaskGroups[0].Count = 5
err := h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job1)
require.NoError(t, err)
allocs := []*structs.Allocation{}
for i := 0; i < 4; i++ {
alloc := mock.Alloc()
alloc.Job = job1
alloc.JobID = job1.ID
alloc.NodeID = nodes[i].ID
alloc.DeploymentStatus = &structs.AllocDeploymentStatus{
Healthy: helper.BoolToPtr(true),
Timestamp: time.Now(),
Canary: false,
ModifyIndex: h.NextIndex(),
}
if i == 0 {
alloc.DeploymentStatus.Canary = true
}
if i == 1 {
alloc.ClientStatus = structs.AllocClientStatusFailed
}
allocs = append(allocs, alloc)
}
err = h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)
// job version 2
// max_parallel = 0, canary = 1, spread == nil
job2 := job1.Copy()
job2.Version = 2
job2.Spreads = nil
err = h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)
require.NoError(t, err)
eval := &structs.Evaluation{
Namespace: job2.Namespace,
ID: uuid.Generate(),
Priority: job2.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
JobID: job2.ID,
Status: structs.EvalStatusPending,
}
err = h.State.UpsertEvals(structs.MsgTypeTestSetup,
h.NextIndex(), []*structs.Evaluation{eval})
require.NoError(t, err)
processErr := h.Process(NewServiceScheduler, eval)
require.NoError(t, processErr, "failed to process eval")
require.Len(t, h.Plans, 1)
}