Rename resourceContainer{,Context} and resCon{,Ctx}.

Sean Chittenden 2016-07-11 00:02:55 -07:00
parent 1c14e01ac0
commit be272168c7
GPG Key ID: 4EBC9DC16C2E5E16
4 changed files with 30 additions and 26 deletions

View File

@@ -191,7 +191,7 @@ type UniversalExecutor struct {
syslogServer *logging.SyslogServer
syslogChan chan *logging.SyslogMessage
- resCon resourceContainer
+ resConCtx resourceContainerContext
consulSyncer *consul.Syncer
consulCtx *ConsulContext
@@ -299,7 +299,7 @@ func (e *UniversalExecutor) LaunchCmd(command *ExecCommand, ctx *ExecutorContext
}
go e.collectPids()
go e.wait()
- ic := e.resCon.getIsolationConfig()
+ ic := e.resConCtx.getIsolationConfig()
return &ProcessState{Pid: e.cmd.Process.Pid, ExitCode: -1, IsolationConfig: ic, Time: time.Now()}, nil
}
@@ -387,7 +387,7 @@ func generateServiceKeys(allocID string, services []*structs.Service) map[consul
func (e *UniversalExecutor) wait() {
defer close(e.processExited)
err := e.cmd.Wait()
- ic := e.resCon.getIsolationConfig()
+ ic := e.resConCtx.getIsolationConfig()
if err == nil {
e.exitState = &ProcessState{Pid: 0, ExitCode: 0, IsolationConfig: ic, Time: time.Now()}
return
@@ -461,7 +461,7 @@ func (e *UniversalExecutor) Exit() error {
}
if e.command.ResourceLimits {
- if err := e.resCon.executorCleanup(); err != nil {
+ if err := e.resConCtx.executorCleanup(); err != nil {
merr.Errors = append(merr.Errors, err)
}
}

View File

@@ -68,7 +68,7 @@ func (e *UniversalExecutor) applyLimits(pid int) error {
}
// Entering the process in the cgroup
- manager := getCgroupManager(e.resCon.groups, nil)
+ manager := getCgroupManager(e.resConCtx.groups, nil)
if err := manager.Apply(pid); err != nil {
e.logger.Printf("[ERR] executor: error applying pid to cgroup: %v", err)
if er := e.removeChrootMounts(); er != nil {
@@ -76,11 +76,11 @@ func (e *UniversalExecutor) applyLimits(pid int) error {
}
return err
}
- e.resCon.cgPaths = manager.GetPaths()
- cgConfig := cgroupConfig.Config{Cgroups: e.resCon.groups}
+ e.resConCtx.cgPaths = manager.GetPaths()
+ cgConfig := cgroupConfig.Config{Cgroups: e.resConCtx.groups}
if err := manager.Set(&cgConfig); err != nil {
e.logger.Printf("[ERR] executor: error setting cgroup config: %v", err)
- if er := DestroyCgroup(e.resCon.groups, e.resCon.cgPaths, os.Getpid()); er != nil {
+ if er := DestroyCgroup(e.resConCtx.groups, e.resConCtx.cgPaths, os.Getpid()); er != nil {
e.logger.Printf("[ERR] executor: error destroying cgroup: %v", er)
}
if er := e.removeChrootMounts(); er != nil {
@@ -94,19 +94,19 @@ func (e *UniversalExecutor) applyLimits(pid int) error {
// configureCgroups converts a Nomad Resources specification into the equivalent
// cgroup configuration. It returns an error if the resources are invalid.
func (e *UniversalExecutor) configureCgroups(resources *structs.Resources) error {
- e.resCon.groups = &cgroupConfig.Cgroup{}
- e.resCon.groups.Resources = &cgroupConfig.Resources{}
+ e.resConCtx.groups = &cgroupConfig.Cgroup{}
+ e.resConCtx.groups.Resources = &cgroupConfig.Resources{}
cgroupName := structs.GenerateUUID()
- e.resCon.groups.Path = filepath.Join("/nomad", cgroupName)
+ e.resConCtx.groups.Path = filepath.Join("/nomad", cgroupName)
// TODO: verify this is needed for things like network access
- e.resCon.groups.Resources.AllowAllDevices = true
+ e.resConCtx.groups.Resources.AllowAllDevices = true
if resources.MemoryMB > 0 {
// Total amount of memory allowed to consume
- e.resCon.groups.Resources.Memory = int64(resources.MemoryMB * 1024 * 1024)
+ e.resConCtx.groups.Resources.Memory = int64(resources.MemoryMB * 1024 * 1024)
// Disable swap to avoid issues on the machine
- e.resCon.groups.Resources.MemorySwap = int64(-1)
+ e.resConCtx.groups.Resources.MemorySwap = int64(-1)
}
if resources.CPU < 2 {
@@ -114,7 +114,7 @@ func (e *UniversalExecutor) configureCgroups(resources *structs.Resources) error
}
// Set the relative CPU shares for this cgroup.
- e.resCon.groups.Resources.CpuShares = int64(resources.CPU)
+ e.resConCtx.groups.Resources.CpuShares = int64(resources.CPU)
if resources.IOPS != 0 {
// Validate it is in an acceptable range.
@@ -122,7 +122,7 @@ func (e *UniversalExecutor) configureCgroups(resources *structs.Resources) error
return fmt.Errorf("resources.IOPS must be between 10 and 1000: %d", resources.IOPS)
}
- e.resCon.groups.Resources.BlkioWeight = uint16(resources.IOPS)
+ e.resConCtx.groups.Resources.BlkioWeight = uint16(resources.IOPS)
}
return nil
@@ -140,7 +140,7 @@ func (e *UniversalExecutor) Stats() (*cstructs.TaskResourceUsage, error) {
return e.aggregatedResourceUsage(pidStats), nil
}
ts := time.Now()
- manager := getCgroupManager(e.resCon.groups, e.resCon.cgPaths)
+ manager := getCgroupManager(e.resConCtx.groups, e.resConCtx.cgPaths)
stats, err := manager.GetStats()
if err != nil {
return nil, err
@@ -255,8 +255,8 @@ func (e *UniversalExecutor) configureChroot() error {
// should be called when tearing down the task.
func (e *UniversalExecutor) removeChrootMounts() error {
// Prevent a race between Wait/ForceStop
- e.resCon.cgLock.Lock()
- defer e.resCon.cgLock.Unlock()
+ e.resConCtx.cgLock.Lock()
+ defer e.resConCtx.cgLock.Unlock()
return e.ctx.AllocDir.UnmountAll()
}
@@ -266,7 +266,7 @@ func (e *UniversalExecutor) removeChrootMounts() error {
// isolation
func (e *UniversalExecutor) getAllPids() (map[int]*nomadPid, error) {
if e.command.ResourceLimits {
- manager := getCgroupManager(e.resCon.groups, e.resCon.cgPaths)
+ manager := getCgroupManager(e.resConCtx.groups, e.resConCtx.cgPaths)
pids, err := manager.GetAllPids()
if err != nil {
return nil, err

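Aside from the rename, the hunks above summarize how configureCgroups translates a Nomad Resources block into cgroup settings: MemoryMB is converted to bytes, swap is disabled, CPU becomes relative shares, and IOPS maps onto a bounds-checked blkio weight. As a reference, here is a self-contained sketch of that translation. It is not part of this commit: Resources and CgroupResources are local stand-ins for structs.Resources and the runc cgroupConfig types, and the error text for the CPU check (not visible in the hunk) is invented.

package main

import "fmt"

// Stand-in for the fields of structs.Resources that configureCgroups reads.
type Resources struct {
	MemoryMB, CPU, IOPS int
}

// Stand-in for the cgroupConfig.Resources fields set in the diff above.
type CgroupResources struct {
	Memory, MemorySwap, CpuShares int64
	BlkioWeight                   uint16
	AllowAllDevices               bool
}

// toCgroupResources mirrors the conversion shown in configureCgroups.
func toCgroupResources(r *Resources) (*CgroupResources, error) {
	out := &CgroupResources{AllowAllDevices: true}
	if r.MemoryMB > 0 {
		out.Memory = int64(r.MemoryMB) * 1024 * 1024 // MB -> bytes
		out.MemorySwap = -1                          // disable swap
	}
	if r.CPU < 2 {
		// Illustrative message; the real one is elided by the hunk.
		return nil, fmt.Errorf("resources.CPU is too low: %d", r.CPU)
	}
	out.CpuShares = int64(r.CPU)
	if r.IOPS != 0 {
		if r.IOPS < 10 || r.IOPS > 1000 {
			return nil, fmt.Errorf("resources.IOPS must be between 10 and 1000: %d", r.IOPS)
		}
		out.BlkioWeight = uint16(r.IOPS)
	}
	return out, nil
}

func main() {
	cg, err := toCgroupResources(&Resources{MemoryMB: 256, CPU: 500})
	if err != nil {
		fmt.Println("invalid resources:", err)
		return
	}
	fmt.Printf("memory=%d bytes swap=%d shares=%d\n", cg.Memory, cg.MemorySwap, cg.CpuShares)
}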
View File

@@ -6,17 +6,19 @@ import (
dstructs "github.com/hashicorp/nomad/client/driver/structs"
)
- type resourceContainer struct {
+ // resourceContainerContext is a platform-specific struct for managing a
+ // resource container.
+ type resourceContainerContext struct {
}
func clientCleanup(ic *dstructs.IsolationConfig, pid int) error {
return nil
}
- func (rc *resourceContainer) executorCleanup() error {
+ func (rc *resourceContainerContext) executorCleanup() error {
return nil
}
- func (rc *resourceContainer) getIsolationConfig() *dstructs.IsolationConfig {
+ func (rc *resourceContainerContext) getIsolationConfig() *dstructs.IsolationConfig {
return nil
}

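The stub above and the Linux file that follows declare the same type name, which is the usual Go build-constraint split: each platform compiles exactly one definition, so executor.go can keep a single resConCtx field with no runtime platform checks. A minimal sketch of that arrangement is below; the file names and the `+build` lines are assumptions for illustration only, since the commit view does not show them.

// Hypothetical file: resource_container_default.go (non-Linux stub)

// +build !linux

package executor

// resourceContainerContext has nothing to manage off Linux.
type resourceContainerContext struct{}

func (rc *resourceContainerContext) executorCleanup() error { return nil }

// Hypothetical file: resource_container_linux.go (Linux variant, trimmed)

// +build linux

package executor

import "sync"

// resourceContainerContext tracks the task's cgroup state on Linux.
type resourceContainerContext struct {
	cgPaths map[string]string
	cgLock  sync.Mutex
}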
View File

@@ -8,7 +8,9 @@ import (
cgroupConfig "github.com/opencontainers/runc/libcontainer/configs"
)
- type resourceContainer struct {
+ // resourceContainerContext is a platform-specific struct for managing a
+ // resource container. In the case of Linux, this is used to control Cgroups.
+ type resourceContainerContext struct {
groups *cgroupConfig.Cgroup
cgPaths map[string]string
cgLock sync.Mutex
@@ -23,7 +25,7 @@ func clientCleanup(ic *dstructs.IsolationConfig, pid int) error {
}
// cleanup removes this host's Cgroup from within an Executor's context
- func (rc *resourceContainer) executorCleanup() error {
+ func (rc *resourceContainerContext) executorCleanup() error {
rc.cgLock.Lock()
defer rc.cgLock.Unlock()
if err := DestroyCgroup(rc.groups, rc.cgPaths, os.Getpid()); err != nil {
@@ -32,7 +34,7 @@ func (rc *resourceContainer) executorCleanup() error {
return nil
}
- func (rc *resourceContainer) getIsolationConfig() *dstructs.IsolationConfig {
+ func (rc *resourceContainerContext) getIsolationConfig() *dstructs.IsolationConfig {
return &dstructs.IsolationConfig{
Cgroup: rc.groups,
CgroupPaths: rc.cgPaths,
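To tie the rename back to its callers: executor.go reads resConCtx through getIsolationConfig when it builds a ProcessState, and through executorCleanup when Exit tears the task down, with cgLock guarding against the Wait/ForceStop race noted above. The sketch below shows that call pattern with simplified stand-in types; it is illustrative only, not the actual Nomad executor code.

package main

import (
	"fmt"
	"sync"
)

// Stand-in for dstructs.IsolationConfig, trimmed to one field.
type IsolationConfig struct {
	CgroupPaths map[string]string
}

// Simplified stand-in for the renamed resourceContainerContext.
type resourceContainerContext struct {
	cgPaths map[string]string
	cgLock  sync.Mutex
}

func (rc *resourceContainerContext) getIsolationConfig() *IsolationConfig {
	return &IsolationConfig{CgroupPaths: rc.cgPaths}
}

func (rc *resourceContainerContext) executorCleanup() error {
	// The lock mirrors the Wait/ForceStop race protection in the real code.
	rc.cgLock.Lock()
	defer rc.cgLock.Unlock()
	fmt.Println("destroying cgroups:", rc.cgPaths)
	return nil
}

func main() {
	resConCtx := resourceContainerContext{
		cgPaths: map[string]string{"memory": "/sys/fs/cgroup/memory/nomad/example"},
	}

	// At launch/wait time the executor embeds the isolation config in ProcessState.
	ic := resConCtx.getIsolationConfig()
	fmt.Println("isolation config:", ic.CgroupPaths)

	// At Exit time it asks the context to clean up after itself.
	if err := resConCtx.executorCleanup(); err != nil {
		fmt.Println("cleanup failed:", err)
	}
}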