Fix link meta panics (#18774)
* return error for meta auth and mount listing if sealed
* some logging changes
* some more logging changes
* add panic recovery
* use ErrInternalError
parent b3dc380c82
commit 0eedcd979b
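The handler changes below all follow the same pattern: each HCP Link gRPC handler gains named return values plus a deferred recover(), so a panic inside the handler is logged with a stack trace and converted into an internal error instead of crashing the node. A minimal, self-contained sketch of that pattern (hypothetical handler and error names, not the actual Vault code):

package main

import (
	"errors"
	"fmt"
	"log"
	"runtime/debug"
)

// errInternal stands in for vault.ErrInternalError in this sketch.
var errInternal = errors.New("internal error")

// listThings is a stand-in for a gRPC handler. The named return values
// (resp, err) let the deferred recover() rewrite the result after a panic.
func listThings(entries []string) (resp []string, err error) {
	defer func() {
		if r := recover(); r != nil {
			// Log the panic with a stack trace, then surface a generic
			// internal error to the caller instead of crashing the process.
			log.Printf("panic serving list request: %v\n%s", r, debug.Stack())
			err = errInternal
		}
	}()

	// Simulate the kind of failure the commit guards against:
	// dereferencing state that is not available.
	if entries == nil {
		panic("mounts table is nil")
	}
	return entries, nil
}

func main() {
	if _, err := listThings(nil); err != nil {
		fmt.Println("handler returned:", err) // handler returned: internal error
	}
}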
@@ -3816,6 +3816,10 @@ func (c *Core) aliasNameFromLoginRequest(ctx context.Context, req *logical.Reque
 // ListMounts will provide a slice containing a deep copy each mount entry
 func (c *Core) ListMounts() ([]*MountEntry, error) {
+	if c.Sealed() {
+		return nil, fmt.Errorf("vault is sealed")
+	}
+
 	c.mountsLock.RLock()
 	defer c.mountsLock.RUnlock()
 
@@ -3835,6 +3839,10 @@ func (c *Core) ListMounts() ([]*MountEntry, error) {
 // ListAuths will provide a slice containing a deep copy each auth entry
 func (c *Core) ListAuths() ([]*MountEntry, error) {
+	if c.Sealed() {
+		return nil, fmt.Errorf("vault is sealed")
+	}
+
 	c.mountsLock.RLock()
 	defer c.mountsLock.RUnlock()
 
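The two hunks above add the same guard: return an explicit "vault is sealed" error before touching the in-memory tables, which are presumably not populated while the node is sealed, rather than letting a listing call panic. A rough sketch of that guard using a hypothetical catalog type (not Vault's real Core):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// catalog is a hypothetical stand-in for the mount/auth tables.
type catalog struct {
	lock    sync.RWMutex
	sealed  bool
	entries []string // nil while sealed
}

// List mirrors the guard added to ListMounts/ListAuths: fail fast with a
// clear error when sealed instead of walking a nil table and panicking.
func (c *catalog) List() ([]string, error) {
	if c.sealed {
		return nil, errors.New("vault is sealed")
	}

	c.lock.RLock()
	defer c.lock.RUnlock()

	out := make([]string, len(c.entries))
	copy(out, c.entries) // return a copy, as the real methods do
	return out, nil
}

func main() {
	c := &catalog{sealed: true}
	if _, err := c.List(); err != nil {
		fmt.Println("list failed:", err) // list failed: vault is sealed
	}
}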
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"math"
+	"runtime/debug"
 	"sync"
 	"time"
@@ -110,7 +111,14 @@ func (h *hcpLinkControlHandler) Stop() error {
 	return nil
 }
 
-func (h *hcpLinkControlHandler) PurgePolicy(ctx context.Context, req *link_control.PurgePolicyRequest) (*link_control.PurgePolicyResponse, error) {
+func (h *hcpLinkControlHandler) PurgePolicy(ctx context.Context, req *link_control.PurgePolicyRequest) (retResp *link_control.PurgePolicyResponse, retErr error) {
+	defer func() {
+		if r := recover(); r != nil {
+			h.logger.Error("panic serving purge policy request", "error", r, "stacktrace", string(debug.Stack()))
+			retErr = vault.ErrInternalError
+		}
+	}()
+
 	standby, perfStandby := h.wrappedCore.StandbyStates()
 	// only purging an active node, perf/standby nodes should purge
 	// automatically
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"math"
 	"os"
+	"runtime/debug"
 	"sync"
 	"time"
@@ -37,7 +38,6 @@ type hcpLinkMetaHandler struct {
 
 func NewHCPLinkMetaService(scadaProvider scada.SCADAProvider, c *vault.Core, baseLogger hclog.Logger) *hcpLinkMetaHandler {
 	logger := baseLogger.Named(capabilities.MetaCapability)
-	logger.Info("Setting up HCP Link Meta Service")
 
 	grpcServer := grpc.NewServer(
 		grpc.KeepaliveParams(keepalive.ServerParameters{
@@ -78,7 +78,7 @@ func (h *hcpLinkMetaHandler) Start() error {
 		return fmt.Errorf("no listener found for meta capability")
 	}
 
-	h.logger.Info("starting HCP Link Meta Service")
+	h.logger.Info("starting HCP meta capability")
 	// Start the gRPC server
 	go func() {
 		err = h.grpcServer.Serve(metaListener)
@@ -101,7 +101,7 @@ func (h *hcpLinkMetaHandler) Stop() error {
 	// Give some time for existing RPCs to drain.
 	time.Sleep(cluster.ListenerAcceptDeadline)
 
-	h.logger.Info("Tearing down HCP Link Meta Service")
+	h.logger.Info("tearing down HCP meta capability")
 
 	if h.stopCh != nil {
 		close(h.stopCh)
@@ -115,7 +115,14 @@ func (h *hcpLinkMetaHandler) Stop() error {
 	return nil
 }
 
-func (h *hcpLinkMetaHandler) ListNamespaces(ctx context.Context, req *meta.ListNamespacesRequest) (*meta.ListNamespacesResponse, error) {
+func (h *hcpLinkMetaHandler) ListNamespaces(ctx context.Context, req *meta.ListNamespacesRequest) (retResp *meta.ListNamespacesResponse, retErr error) {
+	defer func() {
+		if r := recover(); r != nil {
+			h.logger.Error("panic serving list namespaces request", "error", r, "stacktrace", string(debug.Stack()))
+			retErr = vault.ErrInternalError
+		}
+	}()
+
 	children := h.wrappedCore.ListNamespaces(true)
 
 	var namespaces []string
@@ -128,7 +135,14 @@ func (h *hcpLinkMetaHandler) ListNamespaces(ctx context.Context, req *meta.ListN
 	}, nil
 }
 
-func (h *hcpLinkMetaHandler) ListMounts(ctx context.Context, req *meta.ListMountsRequest) (*meta.ListMountsResponse, error) {
+func (h *hcpLinkMetaHandler) ListMounts(ctx context.Context, req *meta.ListMountsRequest) (retResp *meta.ListMountsResponse, retErr error) {
+	defer func() {
+		if r := recover(); r != nil {
+			h.logger.Error("panic serving list mounts request", "error", r, "stacktrace", string(debug.Stack()))
+			retErr = vault.ErrInternalError
+		}
+	}()
+
 	mountEntries, err := h.wrappedCore.ListMounts()
 	if err != nil {
 		return nil, fmt.Errorf("unable to list secret mounts: %w", err)
@@ -160,7 +174,14 @@ func (h *hcpLinkMetaHandler) ListMounts(ctx context.Context, req *meta.ListMount
 	}, nil
 }
 
-func (h *hcpLinkMetaHandler) ListAuths(ctx context.Context, req *meta.ListAuthsRequest) (*meta.ListAuthResponse, error) {
+func (h *hcpLinkMetaHandler) ListAuths(ctx context.Context, req *meta.ListAuthsRequest) (retResp *meta.ListAuthResponse, retErr error) {
+	defer func() {
+		if r := recover(); r != nil {
+			h.logger.Error("panic serving list auths request", "error", r, "stacktrace", string(debug.Stack()))
+			retErr = vault.ErrInternalError
+		}
+	}()
+
 	authEntries, err := h.wrappedCore.ListAuths()
 	if err != nil {
 		return nil, fmt.Errorf("unable to list auth mounts: %w", err)
@@ -192,7 +213,14 @@ func (h *hcpLinkMetaHandler) ListAuths(ctx context.Context, req *meta.ListAuthsR
 	}, nil
 }
 
-func (h *hcpLinkMetaHandler) GetClusterStatus(ctx context.Context, req *meta.GetClusterStatusRequest) (*meta.GetClusterStatusResponse, error) {
+func (h *hcpLinkMetaHandler) GetClusterStatus(ctx context.Context, req *meta.GetClusterStatusRequest) (retResp *meta.GetClusterStatusResponse, retErr error) {
+	defer func() {
+		if r := recover(); r != nil {
+			h.logger.Error("panic serving cluster status request", "error", r, "stacktrace", string(debug.Stack()))
+			retErr = vault.ErrInternalError
+		}
+	}()
+
 	if h.wrappedCore.HAStateWithLock() != consts.Active {
 		return nil, fmt.Errorf("node not active")
 	}
@@ -2,6 +2,7 @@ package node_status
 
 import (
 	"context"
+	"fmt"
 
 	"github.com/hashicorp/hcp-link/pkg/nodestatus"
 	"github.com/hashicorp/vault/helper/logging"
@@ -20,7 +21,13 @@ type NodeStatusReporter struct {
 	NodeStatusGetter internal.WrappedCoreNodeStatus
 }
 
-func (c *NodeStatusReporter) GetNodeStatus(ctx context.Context) (nodestatus.NodeStatus, error) {
+func (c *NodeStatusReporter) GetNodeStatus(ctx context.Context) (retStatus nodestatus.NodeStatus, retErr error) {
+	defer func() {
+		if r := recover(); r != nil {
+			retErr = fmt.Errorf("internal server error")
+		}
+	}()
+
 	var status nodestatus.NodeStatus
 
 	sealStatus, err := c.NodeStatusGetter.GetSealStatus(ctx)
@@ -212,7 +212,7 @@ func (h *HCPLinkVault) start() error {
 
 	h.running = true
 
-	h.logger.Info("started HCP Link")
+	h.logger.Info("established connection to HCP")
 
 	return nil
 }
@@ -333,7 +333,7 @@ func (h *HCPLinkVault) Shutdown() error {
 		h.stopCh = nil
 	}
 
-	h.logger.Info("tearing down HCP Link")
+	h.logger.Info("tearing down connection to HCP")
 
 	var retErr *multierror.Error
 