Don't kill proxies on agent shutdown; backport manager close fix
parent 877390cd28
commit 3df45ac7f1
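In short: agent shutdown now calls the proxy manager's Close() instead of Kill(), so managed proxy processes are left running across an agent restart, and the backported manager fix makes Close() and Kill() share one stop routine that takes a per-proxy cleanup callback. The sketch below only illustrates that shared-cleaner pattern; it is not Consul's actual code, and the Proxy interface, Manager fields, and logging here are simplified stand-ins.

package main

import (
	"fmt"
	"log"
)

// Proxy is a simplified stand-in for the managed-proxy interface assumed in
// this sketch: Close detaches without stopping the process, Stop terminates it.
type Proxy interface {
	Close() error
	Stop() error
}

// Manager owns a set of proxies keyed by ID (minimal sketch, not Consul's type).
type Manager struct {
	proxies map[string]Proxy
}

// Close releases the manager but leaves the proxy processes running.
func (m *Manager) Close() error {
	return m.stop(func(p Proxy) error { return p.Close() })
}

// Kill releases the manager and also terminates the proxy processes.
func (m *Manager) Kill() error {
	return m.stop(func(p Proxy) error { return p.Stop() })
}

// stop is the shared teardown: it applies the given cleaner to every proxy and
// keeps any proxy whose cleaner failed so a later retry can still see it.
func (m *Manager) stop(cleaner func(Proxy) error) error {
	var firstErr error
	for id, p := range m.proxies {
		if err := cleaner(p); err != nil {
			if firstErr == nil {
				firstErr = fmt.Errorf("failed to stop proxy %q: %s", id, err)
			}
			continue
		}
		delete(m.proxies, id)
	}
	return firstErr
}

func main() {
	m := &Manager{proxies: map[string]Proxy{}}
	// A normal agent shutdown would call Close() so daemons keep running;
	// Kill() is reserved for paths that must reap the processes as well.
	if err := m.Close(); err != nil {
		log.Printf("[WARN] agent: error shutting down proxy manager: %s", err)
	}
}

Collecting per-proxy failures instead of aborting on the first one mirrors the multierror accumulation in the real stop() shown in the diff below.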
@@ -1319,11 +1319,8 @@ func (a *Agent) ShutdownAgent() error {
 	}
 
 	// Stop the proxy manager
-	// NOTE(mitchellh): we use Kill for now to kill the processes since
-	// the local state isn't snapshotting meaning the proxy tokens are
-	// regenerated each time forcing the processes to restart anyways.
 	if a.proxyManager != nil {
-		if err := a.proxyManager.Kill(); err != nil {
+		if err := a.proxyManager.Close(); err != nil {
 			a.logger.Printf("[WARN] agent: error shutting down proxy manager: %s", err)
 		}
 	}
@@ -210,6 +210,8 @@ func (p *Daemon) keepAlive(stopCh <-chan struct{}, exitedCh chan<- struct{}) {
 		process = nil
 		if err != nil {
 			p.Logger.Printf("[INFO] agent/proxy: daemon exited with error: %s", err)
+		} else if ps != nil && !ps.Exited() {
+			p.Logger.Printf("[INFO] agent/proxy: daemon left running")
 		} else if status, ok := exitStatus(ps); ok {
 			p.Logger.Printf("[INFO] agent/proxy: daemon exited with exit code: %d", status)
 		}
@@ -148,7 +148,35 @@ func (m *Manager) Close() error {
 	m.lock.Lock()
 	defer m.lock.Unlock()
 
+	return m.stop(func(p Proxy) error {
+		return p.Close()
+	})
+}
+
+// Kill will Close the manager and Kill all proxies that were being managed.
+// Only ONE of Kill or Close must be called. If Close has been called already
+// then this will have no effect.
+func (m *Manager) Kill() error {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	return m.stop(func(p Proxy) error {
+		return p.Stop()
+	})
+}
+
+// stop stops the run loop and cleans up all the proxies by calling
+// the given cleaner. If the cleaner returns an error the proxy won't be
+// removed from the map.
+//
+// The lock must be held while this is called.
+func (m *Manager) stop(cleaner func(Proxy) error) error {
 	for {
 		// Special case state that exits the for loop
 		if m.runState == managerStateStopped {
 			break
 		}
 
 		switch m.runState {
 		case managerStateIdle:
 			// Idle so just set it to stopped and return. We notify
@@ -167,29 +195,13 @@ func (m *Manager) Close() error {
 		case managerStateStopping:
 			// Still stopping, wait...
 			m.cond.Wait()
 
 		case managerStateStopped:
 			// Stopped, target state reached
 			return nil
 		}
 	}
-}
-
-// Kill will Close the manager and Kill all proxies that were being managed.
-//
-// This is safe to call with Close already called since Close is idempotent.
-func (m *Manager) Kill() error {
-	// Close first so that we aren't getting changes in proxies
-	if err := m.Close(); err != nil {
-		return err
-	}
-
-	m.lock.Lock()
-	defer m.lock.Unlock()
-
 	// Clean up all the proxies
 	var err error
 	for id, proxy := range m.proxies {
-		if err := proxy.Stop(); err != nil {
+		if err := cleaner(proxy); err != nil {
 			err = multierror.Append(
 				err, fmt.Errorf("failed to stop proxy %q: %s", id, err))
 			continue