Support go-sockaddr templates in top-level cluster_addr config (#13678)
While testing I found that the listener-level cluster address setting isn't really used, or at least isn't as important as the top-level `cluster_addr` setting. As such, go-sockaddr templating needs to be implemented for the top-level `cluster_addr` setting, otherwise templating is unusable for HA configurations. Also fix a nil pointer panic discovered at the same time.
parent d96298461f · commit 21be98ee7a
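As a quick illustration of the feature being wired up, here is a minimal, hypothetical example of rendering a go-sockaddr template such as a `cluster_addr` value might contain. The `https://{{ GetPrivateIP }}:8201` value is illustrative, and the example assumes the `github.com/hashicorp/go-sockaddr/template` package's `Parse` helper:

```go
package main

import (
	"fmt"
	"log"

	sockaddrtmpl "github.com/hashicorp/go-sockaddr/template"
)

func main() {
	// Illustrative cluster_addr value containing a go-sockaddr template.
	clusterAddr := "https://{{ GetPrivateIP }}:8201"

	// Parse renders the template against the host's interfaces, leaving
	// any literal text (scheme, port) untouched.
	rendered, err := sockaddrtmpl.Parse(clusterAddr)
	if err != nil {
		log.Fatalf("error parsing cluster address template: %v", err)
	}

	fmt.Println(rendered) // e.g. https://10.0.0.5:8201
}
```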
@@ -0,0 +1,3 @@
+```release-note:bug
+core: add support for go-sockaddr templates in the top-level cluster_addr field
+```

@@ -724,7 +724,6 @@ func (c *ServerCommand) runRecoveryMode() int {
 			c.logger.Info("goroutine trace", "stack", string(buf[:n]))
 		}
 	}
 
 }
 
 func logProxyEnvironmentVariables(logger hclog.Logger) {

@@ -2410,6 +2409,11 @@ CLUSTER_SYNTHESIS_COMPLETE:
 	}
 
 	if coreConfig.ClusterAddr != "" {
+		rendered, err := configutil.ParseSingleIPTemplate(coreConfig.ClusterAddr)
+		if err != nil {
+			return fmt.Errorf("Error parsing cluster address %s: %v", coreConfig.ClusterAddr, err)
+		}
+		coreConfig.ClusterAddr = rendered
 		// Force https as we'll always be TLS-secured
 		u, err := url.ParseRequestURI(coreConfig.ClusterAddr)
 		if err != nil {

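The hunk above delegates rendering to `configutil.ParseSingleIPTemplate` and then re-checks the result with `url.ParseRequestURI`. As a rough sketch only (not the actual configutil source), a helper with that shape might look like the following, assuming go-sockaddr's template `Parse` and requiring exactly one rendered address:

```go
package main

import (
	"fmt"
	"strings"

	sockaddrtmpl "github.com/hashicorp/go-sockaddr/template"
)

// parseSingleIPTemplate is a sketch of a ParseSingleIPTemplate-style helper:
// render the go-sockaddr template, then insist on a single resulting address,
// since cluster_addr must resolve to one advertise address.
func parseSingleIPTemplate(ipTmpl string) (string, error) {
	out, err := sockaddrtmpl.Parse(ipTmpl)
	if err != nil {
		return "", fmt.Errorf("unable to parse address template %q: %v", ipTmpl, err)
	}

	ips := strings.Fields(out)
	switch len(ips) {
	case 0:
		return "", fmt.Errorf("no addresses found, please configure one")
	case 1:
		return ips[0], nil
	default:
		return "", fmt.Errorf("multiple addresses found (%q), please configure one", out)
	}
}

func main() {
	addr, err := parseSingleIPTemplate("https://{{ GetPrivateIP }}:8201")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(addr)
}
```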
@@ -1390,6 +1390,9 @@ func (c *Core) getUnsealKey(ctx context.Context, seal Seal) ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
+	if config == nil {
+		return nil, fmt.Errorf("failed to obtain seal/recovery configuration")
+	}
 
 	// Check if we don't have enough keys to unlock, proceed through the rest of
 	// the call only if we have met the threshold

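This is the nil pointer fix mentioned in the commit message: the seal/recovery configuration lookup can succeed while returning a nil config, and dereferencing it later in `getUnsealKey` would panic. A minimal standalone sketch of the guard pattern (the `SealConfig` type and threshold field here are illustrative stand-ins):

```go
package main

import (
	"errors"
	"fmt"
)

// SealConfig is an illustrative stand-in for the seal/recovery configuration
// consulted by getUnsealKey.
type SealConfig struct {
	SecretThreshold int
}

// requiredKeyThreshold mirrors the guard added above: return an error when the
// config is nil instead of dereferencing it and panicking.
func requiredKeyThreshold(config *SealConfig) (int, error) {
	if config == nil {
		return 0, errors.New("failed to obtain seal/recovery configuration")
	}
	return config.SecretThreshold, nil
}

func main() {
	if _, err := requiredKeyThreshold(nil); err != nil {
		fmt.Println("error:", err) // an error, not a nil pointer panic
	}
}
```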
@@ -2045,7 +2048,7 @@ func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c
 	}
 	if err := c.setupManagedKeyRegistry(); err != nil {
 		return err
 	}
 	if err := c.loadCORSConfig(ctx); err != nil {
 		return err
 	}

@@ -3041,7 +3044,7 @@ func (c *Core) LogCompletedRequests(reqID string, statusCode int) {
 
 	// there is only one writer to this map, so skip checking for errors
 	reqData := v.(InFlightReqData)
-	c.logger.Log(logLevel, "completed_request","client_id", reqData.ClientID, "client_address", reqData.ClientRemoteAddr, "status_code", statusCode, "request_path", reqData.ReqPath, "request_method", reqData.Method)
+	c.logger.Log(logLevel, "completed_request", "client_id", reqData.ClientID, "client_address", reqData.ClientRemoteAddr, "status_code", statusCode, "request_path", reqData.ReqPath, "request_method", reqData.Method)
 }
 
 func (c *Core) ReloadLogRequestsLevel() {

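The only change in this hunk is spacing in the argument list; the call itself follows hclog's message-plus-alternating-key/value convention. A minimal standalone example (logger name and field values are illustrative):

```go
package main

import hclog "github.com/hashicorp/go-hclog"

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "core"})

	// Message first, then alternating key/value pairs, as in the
	// completed_request log line above.
	logger.Log(hclog.Info, "completed_request",
		"client_id", "example-client",
		"status_code", 200,
		"request_path", "sys/health",
		"request_method", "GET",
	)
}
```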
@@ -3079,4 +3082,4 @@ func (c *Core) GetHAPeerNodesCached() []PeerNode {
 		})
 	}
 	return nodes
 }