// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package consul

import (
	"context"
	"crypto/x509"
	"fmt"
	"net"
	"os"
	"reflect"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/armon/go-metrics"
	"github.com/google/tcpproxy"
	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/memberlist"
	"github.com/hashicorp/raft"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"golang.org/x/time/rate"
	"google.golang.org/grpc"

	"github.com/hashicorp/consul-net-rpc/net/rpc"

	"github.com/hashicorp/consul/agent/connect"
	"github.com/hashicorp/consul/agent/consul/multilimiter"
	rpcRate "github.com/hashicorp/consul/agent/consul/rate"
	external "github.com/hashicorp/consul/agent/grpc-external"
	grpcmiddleware "github.com/hashicorp/consul/agent/grpc-middleware"
	hcpclient "github.com/hashicorp/consul/agent/hcp/client"
	"github.com/hashicorp/consul/agent/metadata"
	"github.com/hashicorp/consul/agent/rpc/middleware"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/token"
	"github.com/hashicorp/consul/ipaddr"
	"github.com/hashicorp/consul/sdk/freeport"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/consul/testrpc"
	"github.com/hashicorp/consul/tlsutil"
	"github.com/hashicorp/consul/types"
)

const (
	TestDefaultInitialManagementToken = "d9f05e83-a7ae-47ce-839e-c0d53a68c00a"
)

// testTLSCertificates generates a TLS CA and server key/cert and returns them
// in PEM encoded form.
func testTLSCertificates(serverName string) (cert string, key string, cacert string, err error) {
	signer, _, err := tlsutil.GeneratePrivateKey()
	if err != nil {
		return "", "", "", err
	}

	ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
	if err != nil {
		return "", "", "", err
	}

	cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	if err != nil {
		return "", "", "", err
	}

	return cert, privateKey, ca, nil
}
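
// testServerACLConfig enables ACLs with a default-deny policy and sets the
// standard initial management token used by these tests.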
func testServerACLConfig(c *Config) {
	c.PrimaryDatacenter = "dc1"
	c.ACLsEnabled = true
	c.ACLInitialManagementToken = TestDefaultInitialManagementToken
	c.ACLResolverSettings.ACLDefaultPolicy = "deny"
}
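
// configureTLS points the config at the test CA and server certificate/key
// files so internal RPC can run over TLS.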
func configureTLS(config *Config) {
	config.TLSConfig.InternalRPC.CAFile = "../../test/ca/root.cer"
	config.TLSConfig.InternalRPC.CertFile = "../../test/key/ourdomain.cer"
	config.TLSConfig.InternalRPC.KeyFile = "../../test/key/ourdomain.key"
}
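
// id is a package-level counter used by uniqueNodeName to hand out a distinct
// node name per test server.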
var id int64

func uniqueNodeName(name string) string {
	name = strings.ReplaceAll(name, "/", "_")
	return fmt.Sprintf("%s-node-%d", name, atomic.AddInt64(&id, 1))
}

// waitForLeaderEstablishment finds the leader of a list of servers and
// verifies that leader establishment has completed.
func waitForLeaderEstablishment(t *testing.T, servers ...*Server) {
	t.Helper()
	retry.Run(t, func(r *retry.R) {
		hasLeader := false
		for _, srv := range servers {
			if srv.IsLeader() {
				hasLeader = true
				require.True(r, srv.isReadyForConsistentReads(), "Leader %s hasn't finished establishing leadership yet", srv.config.NodeName)
			}
		}
		require.True(r, hasLeader, "Cluster has not elected a leader yet")
	})
}
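
// testServerConfig returns a temporary data directory and a server config
// bound to free localhost ports, with timeouts tightened so tests converge
// quickly.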
func testServerConfig(t *testing.T) (string, *Config) {
	dir := testutil.TempDir(t, "consul")
	config := DefaultConfig()

	ports := freeport.GetN(t, 4) // {server, serf_lan, serf_wan, grpc}
	config.NodeName = uniqueNodeName(t.Name())
	config.Bootstrap = true
	config.Datacenter = "dc1"
	config.PrimaryDatacenter = "dc1"
	config.DataDir = dir

	// bind the rpc server to a random port. config.RPCAdvertise will be
	// set to the listen address unless it was set in the configuration.
	// In that case get the address from srv.Listener.Addr().
	config.RPCAddr = &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: ports[0]}

	nodeID, err := uuid.GenerateUUID()
	if err != nil {
		t.Fatal(err)
	}
	config.NodeID = types.NodeID(nodeID)

	// set the memberlist bind port to 0 to bind to a random port.
	// memberlist will update the value of BindPort after bind
	// to the actual value.
	config.SerfLANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfLANConfig.MemberlistConfig.BindPort = ports[1]
	config.SerfLANConfig.MemberlistConfig.AdvertisePort = ports[1]
	config.SerfLANConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfLANConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfLANConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfLANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
	config.SerfLANConfig.MemberlistConfig.DeadNodeReclaimTime = 100 * time.Millisecond

	config.SerfWANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfWANConfig.MemberlistConfig.BindPort = ports[2]
	config.SerfWANConfig.MemberlistConfig.AdvertisePort = ports[2]
	config.SerfWANConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfWANConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfWANConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfWANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
	config.SerfWANConfig.MemberlistConfig.DeadNodeReclaimTime = 100 * time.Millisecond

	config.RaftConfig.LeaderLeaseTimeout = 100 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 200 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 200 * time.Millisecond

	config.ReconcileInterval = 300 * time.Millisecond

	config.AutopilotConfig.ServerStabilizationTime = 100 * time.Millisecond
	config.ServerHealthInterval = 50 * time.Millisecond
	config.AutopilotInterval = 100 * time.Millisecond

	config.CoordinateUpdatePeriod = 100 * time.Millisecond
	config.LeaveDrainTime = 1 * time.Millisecond

	// TODO (slackpad) - We should be able to run all tests w/o this, but it
	// looks like several depend on it.
	config.RPCHoldTimeout = 10 * time.Second

	config.GRPCPort = ports[3]

	config.ConnectEnabled = true
	config.CAConfig = &structs.CAConfiguration{
		ClusterID: connect.TestClusterID,
		Provider:  structs.ConsulCAProvider,
		Config: map[string]interface{}{
			"PrivateKey":          "",
			"RootCert":            "",
			"LeafCertTTL":         "72h",
			"IntermediateCertTTL": "288h",
		},
	}
	config.PeeringEnabled = true
	return dir, config
}

// Deprecated: use testServerWithConfig instead. It does the same thing and more.
func testServer(t *testing.T) (string, *Server) {
	return testServerWithConfig(t)
}

// Deprecated: use testServerWithConfig
func testServerDC(t *testing.T, dc string) (string, *Server) {
	return testServerWithConfig(t, func(c *Config) {
		c.Datacenter = dc
		c.Bootstrap = true
	})
}

// Deprecated: use testServerWithConfig
func testServerDCBootstrap(t *testing.T, dc string, bootstrap bool) (string, *Server) {
	return testServerWithConfig(t, func(c *Config) {
		c.Datacenter = dc
		c.PrimaryDatacenter = dc
		c.Bootstrap = bootstrap
	})
}

// Deprecated: use testServerWithConfig
func testServerDCExpect(t *testing.T, dc string, expect int) (string, *Server) {
	return testServerWithConfig(t, func(c *Config) {
		c.Datacenter = dc
		c.Bootstrap = false
		c.BootstrapExpect = expect
	})
}
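
// testServerWithConfig starts a server from testServerConfig defaults, applies
// the given config overrides, serves the external gRPC listener(s), and
// registers shutdown with t.Cleanup. Startup is retried in case a chosen bind
// address is already in use.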
func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *Server) {
	var dir string
	var srv *Server

	var config *Config
	var deps Deps
	// Retry added to avoid cases where bind addr is already in use
	retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
		dir, config = testServerConfig(t)
		for _, fn := range configOpts {
			fn(config)
		}

		// Apply config to copied fields because many tests only set the old
		// values.
		config.ACLResolverSettings.ACLsEnabled = config.ACLsEnabled
		config.ACLResolverSettings.NodeName = config.NodeName
		config.ACLResolverSettings.Datacenter = config.Datacenter
		config.ACLResolverSettings.EnterpriseMeta = *config.AgentEnterpriseMeta()

		var err error
		deps = newDefaultDeps(t, config)
		srv, err = newServerWithDeps(t, config, deps)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
	})
	t.Cleanup(func() { srv.Shutdown() })

	for _, grpcPort := range []int{srv.config.GRPCPort, srv.config.GRPCTLSPort} {
		if grpcPort == 0 {
			continue
		}

		// Normally the gRPC server listener is created at the agent level and
		// passed down into the Server creation.
		ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", grpcPort))
		require.NoError(t, err)

		protocol := grpcmiddleware.ProtocolPlaintext
		if grpcPort == srv.config.GRPCTLSPort || deps.TLSConfigurator.GRPCServerUseTLS() {
			protocol = grpcmiddleware.ProtocolTLS
			// Set the internally managed server certificate. The cert manager is hooked to the Agent, so we need to bypass that here.
			if srv.config.PeeringEnabled && srv.config.ConnectEnabled {
				key, _ := srv.config.CAConfig.Config["PrivateKey"].(string)
				cert, _ := srv.config.CAConfig.Config["RootCert"].(string)
				if key != "" && cert != "" {
					ca := &structs.CARoot{
						SigningKey: key,
						RootCert:   cert,
					}
					require.NoError(t, deps.TLSConfigurator.UpdateAutoTLSCert(connect.TestServerLeaf(t, srv.config.Datacenter, ca)))
					deps.TLSConfigurator.UpdateAutoTLSPeeringServerName(connect.PeeringServerSAN("dc1", connect.TestTrustDomain))
				}
			}
		}
		ln = grpcmiddleware.LabelledListener{Listener: ln, Protocol: protocol}

		go func() {
			_ = srv.externalGRPCServer.Serve(ln)
		}()
		t.Cleanup(srv.externalGRPCServer.Stop)
	}

	return dir, srv
}

// testACLServerWithConfig starts a server with ACLs enabled. cb is a function
// that can alter the test server's configuration prior to the server starting.
func testACLServerWithConfig(t *testing.T, cb func(*Config), initReplicationToken bool) (string, *Server, rpc.ClientCodec) {
	opts := []func(*Config){testServerACLConfig}
	if cb != nil {
		opts = append(opts, cb)
	}
	dir, srv := testServerWithConfig(t, opts...)

	if initReplicationToken {
		// setup some tokens here so we get less warnings in the logs
		srv.tokens.UpdateReplicationToken(TestDefaultInitialManagementToken, token.TokenSourceConfig)
	}

	codec := rpcClient(t, srv)
	return dir, srv, codec
}
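
// testGRPCIntegrationServer builds on testACLServerWithConfig and also dials
// the server's external gRPC port, returning the server, a client connection,
// and an RPC codec.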
func testGRPCIntegrationServer(t *testing.T, cb func(*Config)) (*Server, *grpc.ClientConn, rpc.ClientCodec) {
	_, srv, codec := testACLServerWithConfig(t, cb, false)

	grpcAddr := fmt.Sprintf("127.0.0.1:%d", srv.config.GRPCPort)
	//nolint:staticcheck
	conn, err := grpc.Dial(grpcAddr, grpc.WithInsecure())
	require.NoError(t, err)

	t.Cleanup(func() { _ = conn.Close() })

	return srv, conn, codec
}
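
// newServer constructs a Server using the default test dependencies.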
func newServer(t *testing.T, c *Config) (*Server, error) {
	return newServerWithDeps(t, c, newDefaultDeps(t, c))
}
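
// newServerWithDeps creates the external gRPC server and a consul Server from
// the given dependencies, waits for the RPC listener to come up, and records
// the real RPC address back on the config.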
func newServerWithDeps(t *testing.T, c *Config, deps Deps) (*Server, error) {
	// chain server up notification
	oldNotify := c.NotifyListen
	up := make(chan struct{})
	c.NotifyListen = func() {
		close(up)
		if oldNotify != nil {
			oldNotify()
		}
	}
	grpcServer := external.NewServer(deps.Logger.Named("grpc.external"), nil, deps.TLSConfigurator, rpcRate.NullRequestLimitsHandler())
	srv, err := NewServer(c, deps, grpcServer, nil, deps.Logger)
	if err != nil {
		return nil, err
	}
	t.Cleanup(func() { srv.Shutdown() })

	// wait until after listen
	<-up

	// get the real address
	//
	// the server already sets the RPCAdvertise address
	// if it wasn't configured since it needs it for
	// some initialization
	//
	// todo(fs): setting RPCAddr should probably be guarded
	// todo(fs): but for now it is a shortcut to avoid fixing
	// todo(fs): tests which depend on that value. They should
	// todo(fs): just get the listener address instead.
	c.RPCAddr = srv.Listener.Addr().(*net.TCPAddr)
	return srv, nil
}

func TestServer_StartStop(t *testing.T) {
	t.Parallel()
	// Start up a server and then stop it.
	_, s1 := testServer(t)
	if err := s1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Shut down again, which should be idempotent.
	if err := s1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestServer_fixupACLDatacenter(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	_, s1 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "aye"
		c.PrimaryDatacenter = "aye"
		c.ACLsEnabled = true
	})
	defer s1.Shutdown()

	_, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "bee"
		c.PrimaryDatacenter = "aye"
		c.ACLsEnabled = true
	})
	defer s2.Shutdown()

	// Try to join
	joinWAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 2; got != want {
			r.Fatalf("got %d s1 WAN members want %d", got, want)
		}
		if got, want := len(s2.WANMembers()), 2; got != want {
			r.Fatalf("got %d s2 WAN members want %d", got, want)
		}
	})

	testrpc.WaitForLeader(t, s1.RPC, "aye")
	testrpc.WaitForLeader(t, s2.RPC, "bee")

	require.Equal(t, "aye", s1.config.Datacenter)
	require.Equal(t, "aye", s1.config.PrimaryDatacenter)

	require.Equal(t, "bee", s2.config.Datacenter)
	require.Equal(t, "aye", s2.config.PrimaryDatacenter)
}

func TestServer_JoinLAN(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServer(t)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d s1 LAN members want %d", got, want)
		}
		if got, want := len(s2.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d s2 LAN members want %d", got, want)
		}
	})
}

// TestServer_JoinLAN_SerfAllowedCIDRs tests that IPs can be blocked with Serf.
//
// To run properly, this test requires being able to bind to and reach
// 127.0.1.1, which is the case for most Linux machines and Windows, so the
// unit test will run in CI.
//
// To run it on Mac OS, please run this command first, otherwise the test will
// be skipped: `sudo ifconfig lo0 alias 127.0.1.1 up`
func TestServer_JoinLAN_SerfAllowedCIDRs(t *testing.T) {
	t.Parallel()

	const targetAddr = "127.0.1.1"

	skipIfCannotBindToIP(t, targetAddr)

	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.BootstrapExpect = 1
		lan, err := memberlist.ParseCIDRs([]string{"127.0.0.1/32"})
		require.NoError(t, err)
		c.SerfLANConfig.MemberlistConfig.CIDRsAllowed = lan
		wan, err := memberlist.ParseCIDRs([]string{"127.0.0.0/24", "::1/128"})
		require.NoError(t, err)
		c.SerfWANConfig.MemberlistConfig.CIDRsAllowed = wan
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, a2 := testClientWithConfig(t, func(c *Config) {
		c.SerfLANConfig.MemberlistConfig.BindAddr = targetAddr
	})
	defer os.RemoveAll(dir2)
	defer a2.Shutdown()

	dir3, rs3 := testServerWithConfig(t, func(c *Config) {
		c.BootstrapExpect = 1
		c.Datacenter = "dc2"
	})
	defer os.RemoveAll(dir3)
	defer rs3.Shutdown()

	leaderAddr := joinAddrLAN(s1)
	if _, err := a2.JoinLAN([]string{leaderAddr}, nil); err != nil {
		t.Fatalf("Expected no error, had: %#v", err)
	}
	// Try to join
	joinWAN(t, rs3, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembersInAgentPartition()), 1; got != want {
			// LAN is blocked, should be 1 only
			r.Fatalf("got %d s1 LAN members want %d", got, want)
		}
		if got, want := len(a2.LANMembersInAgentPartition()), 2; got != want {
			// LAN is blocked: a2 can see s1, but s1 cannot see a2
			r.Fatalf("got %d a2 LAN members want %d", got, want)
		}
		if got, want := len(s1.WANMembers()), 2; got != want {
			r.Fatalf("got %d s1 WAN members want %d", got, want)
		}
		if got, want := len(rs3.WANMembers()), 2; got != want {
			r.Fatalf("got %d rs3 WAN members want %d", got, want)
		}
	})
}

// TestServer_JoinWAN_SerfAllowedCIDRs tests that IPs can be blocked with Serf.
//
// To run properly, this test requires being able to bind to and reach
// 127.0.1.1, which is the case for most Linux machines and Windows, so the
// unit test will run in CI.
//
// To run it on Mac OS, please run this command first, otherwise the test will
// be skipped: `sudo ifconfig lo0 alias 127.0.1.1 up`
func TestServer_JoinWAN_SerfAllowedCIDRs(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	const targetAddr = "127.0.1.1"

	skipIfCannotBindToIP(t, targetAddr)

	wanCIDRs, err := memberlist.ParseCIDRs([]string{"127.0.0.1/32"})
	require.NoError(t, err)

	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = true
		c.BootstrapExpect = 1
		c.Datacenter = "dc1"
		c.SerfWANConfig.MemberlistConfig.CIDRsAllowed = wanCIDRs
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	waitForLeaderEstablishment(t, s1)
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = true
		c.BootstrapExpect = 1
		c.PrimaryDatacenter = "dc1"
		c.Datacenter = "dc2"
		c.SerfWANConfig.MemberlistConfig.BindAddr = targetAddr
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	waitForLeaderEstablishment(t, s2)
	testrpc.WaitForLeader(t, s2.RPC, "dc2")

	// Joining should be fine
	joinWANWithNoMembershipChecks(t, s2, s1)

	// But membership is blocked if you go and take a peek on the server.
	t.Run("LAN membership should only show each other", func(t *testing.T) {
		require.Len(t, s1.LANMembersInAgentPartition(), 1)
		require.Len(t, s2.LANMembersInAgentPartition(), 1)
	})
	t.Run("WAN membership in the primary should not show the secondary", func(t *testing.T) {
		require.Len(t, s1.WANMembers(), 1)
	})
	t.Run("WAN membership in the secondary can show the primary", func(t *testing.T) {
		require.Len(t, s2.WANMembers(), 2)
	})
}
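
// skipIfCannotBindToIP skips the test if the given IP cannot be bound on this
// machine (e.g. 127.0.1.1 on macOS without an extra loopback alias).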
func skipIfCannotBindToIP(t *testing.T, ip string) {
	l, err := net.Listen("tcp", net.JoinHostPort(ip, "0"))
	if err != nil {
		t.Skipf("Cannot bind on %s, to run on Mac OS: `sudo ifconfig lo0 alias %s up`", ip, ip)
	}
	l.Close()
}

func TestServer_LANReap(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	configureServer := func(c *Config) {
		c.SerfFloodInterval = 100 * time.Millisecond
		c.SerfLANConfig.ReconnectTimeout = 250 * time.Millisecond
		c.SerfLANConfig.TombstoneTimeout = 250 * time.Millisecond
		c.SerfLANConfig.ReapInterval = 300 * time.Millisecond
	}

	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.Bootstrap = true
		configureServer(c)
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.Bootstrap = false
		configureServer(c)
	})
	defer os.RemoveAll(dir2)

	dir3, s3 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.Bootstrap = false
		configureServer(c)
	})
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)
	joinLAN(t, s3, s1)

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s2.RPC, "dc1")
	testrpc.WaitForLeader(t, s3.RPC, "dc1")

	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.LANMembersInAgentPartition(), 3)
		require.Len(r, s2.LANMembersInAgentPartition(), 3)
		require.Len(r, s3.LANMembersInAgentPartition(), 3)
	})

	// Check the router has all three servers
	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.serverLookup.Servers(), 3)
		require.Len(r, s2.serverLookup.Servers(), 3)
		require.Len(r, s3.serverLookup.Servers(), 3)
	})

	// shut down the second server
	s2.Shutdown()

	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.LANMembersInAgentPartition(), 2)
		servers := s1.serverLookup.Servers()
		require.Len(r, servers, 2)
		// require.Equal(r, s1.config.NodeName, servers[0].Name)
	})
}

func TestServer_JoinWAN(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDC(t, "dc2")
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinWAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 2; got != want {
			r.Fatalf("got %d s1 WAN members want %d", got, want)
		}
		if got, want := len(s2.WANMembers()), 2; got != want {
			r.Fatalf("got %d s2 WAN members want %d", got, want)
		}
	})

	// Check the router has both
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.router.GetDatacenters()), 2; got != want {
			r.Fatalf("got %d datacenters want %d", got, want)
		}
		if got, want := len(s2.router.GetDatacenters()), 2; got != want {
			r.Fatalf("got %d datacenters want %d", got, want)
		}
	})
}

func TestServer_WANReap(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.Bootstrap = true
		c.SerfFloodInterval = 100 * time.Millisecond
		c.SerfWANConfig.ReconnectTimeout = 250 * time.Millisecond
		c.SerfWANConfig.TombstoneTimeout = 250 * time.Millisecond
		c.SerfWANConfig.ReapInterval = 500 * time.Millisecond
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDC(t, "dc2")
	defer os.RemoveAll(dir2)

	// Try to join
	joinWAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.WANMembers(), 2)
		require.Len(r, s2.WANMembers(), 2)
	})

	// Check the router has both
	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.router.GetDatacenters(), 2)
		require.Len(r, s2.router.GetDatacenters(), 2)
	})

	// shutdown the second dc
	s2.Shutdown()

	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.WANMembers(), 1)
		datacenters := s1.router.GetDatacenters()
		require.Len(r, datacenters, 1)
		require.Equal(r, "dc1", datacenters[0])
	})
}

func TestServer_JoinWAN_Flood(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	// Set up two servers in a WAN.
	dir1, s1 := testServerDCBootstrap(t, "dc1", true)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDCBootstrap(t, "dc2", true)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	joinWAN(t, s2, s1)

	for _, s := range []*Server{s1, s2} {
		retry.Run(t, func(r *retry.R) {
			if got, want := len(s.WANMembers()), 2; got != want {
				r.Fatalf("got %d WAN members want %d", got, want)
			}
		})
	}

	dir3, s3 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	// Do just a LAN join for the new server and make sure it
	// shows up in the WAN.
	joinLAN(t, s3, s1)

	for _, s := range []*Server{s1, s2, s3} {
		retry.Run(t, func(r *retry.R) {
			if got, want := len(s.WANMembers()), 3; got != want {
				r.Fatalf("got %d WAN members for %s want %d", got, s.config.NodeName, want)
			}
		})
	}
}
|
|
|
|
|
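// waitForWANMembers is a hypothetical convenience wrapper, sketched here for
// illustration only: it captures the retry-until-WAN-membership-converges
// pattern that the join tests above repeat inline, using the same retry and
// Server helpers this file already relies on.
func waitForWANMembers(t *testing.T, want int, servers ...*Server) {
	t.Helper()
	for _, s := range servers {
		s := s
		retry.Run(t, func(r *retry.R) {
			if got := len(s.WANMembers()); got != want {
				r.Fatalf("got %d WAN members for %s want %d", got, s.config.NodeName, want)
			}
		})
	}
}
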
// This is a mirror of a similar test in agent/agent_test.go
func TestServer_JoinWAN_viaMeshGateway(t *testing.T) {
	// if this test is failing because of expired certificates
	// use the procedure in test/CA-GENERATION.md
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	port := freeport.GetOne(t)
	gwAddr := ipaddr.FormatAddressPort("127.0.0.1", port)

	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.TLSConfig.Domain = "consul"
		c.NodeName = "bob"
		c.Datacenter = "dc1"
		c.PrimaryDatacenter = "dc1"
		c.Bootstrap = true
		// tls
		c.TLSConfig.InternalRPC.CAFile = "../../test/hostname/CertAuth.crt"
		c.TLSConfig.InternalRPC.CertFile = "../../test/hostname/Bob.crt"
		c.TLSConfig.InternalRPC.KeyFile = "../../test/hostname/Bob.key"
		c.TLSConfig.InternalRPC.VerifyIncoming = true
		c.TLSConfig.InternalRPC.VerifyOutgoing = true
		c.TLSConfig.InternalRPC.VerifyServerHostname = true
		// wanfed
		c.ConnectMeshGatewayWANFederationEnabled = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.TLSConfig.Domain = "consul"
		c.NodeName = "betty"
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc1"
		c.Bootstrap = true
		// tls
		c.TLSConfig.InternalRPC.CAFile = "../../test/hostname/CertAuth.crt"
		c.TLSConfig.InternalRPC.CertFile = "../../test/hostname/Betty.crt"
		c.TLSConfig.InternalRPC.KeyFile = "../../test/hostname/Betty.key"
		c.TLSConfig.InternalRPC.VerifyIncoming = true
		c.TLSConfig.InternalRPC.VerifyOutgoing = true
		c.TLSConfig.InternalRPC.VerifyServerHostname = true
		// wanfed
		c.ConnectMeshGatewayWANFederationEnabled = true
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerWithConfig(t, func(c *Config) {
		c.TLSConfig.Domain = "consul"
		c.NodeName = "bonnie"
		c.Datacenter = "dc3"
		c.PrimaryDatacenter = "dc1"
		c.Bootstrap = true
		// tls
		c.TLSConfig.InternalRPC.CAFile = "../../test/hostname/CertAuth.crt"
		c.TLSConfig.InternalRPC.CertFile = "../../test/hostname/Bonnie.crt"
		c.TLSConfig.InternalRPC.KeyFile = "../../test/hostname/Bonnie.key"
		c.TLSConfig.InternalRPC.VerifyIncoming = true
		c.TLSConfig.InternalRPC.VerifyOutgoing = true
		c.TLSConfig.InternalRPC.VerifyServerHostname = true
		// wanfed
		c.ConnectMeshGatewayWANFederationEnabled = true
	})
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	// We'll use the same gateway for all datacenters since it doesn't care.
	var p tcpproxy.Proxy
	p.AddSNIRoute(gwAddr, "bob.server.dc1.consul", tcpproxy.To(s1.config.RPCAddr.String()))
	p.AddSNIRoute(gwAddr, "betty.server.dc2.consul", tcpproxy.To(s2.config.RPCAddr.String()))
	p.AddSNIRoute(gwAddr, "bonnie.server.dc3.consul", tcpproxy.To(s3.config.RPCAddr.String()))
	p.AddStopACMESearch(gwAddr)
	require.NoError(t, p.Start())
	defer func() {
		p.Close()
		p.Wait()
	}()

	t.Logf("routing %s => %s", "bob.server.dc1.consul", s1.config.RPCAddr.String())
	t.Logf("routing %s => %s", "betty.server.dc2.consul", s2.config.RPCAddr.String())
	t.Logf("routing %s => %s", "bonnie.server.dc3.consul", s3.config.RPCAddr.String())

	// Register this into the catalog in dc1.
	{
		arg := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "bob",
			Address:    "127.0.0.1",
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindMeshGateway,
				ID:      "mesh-gateway",
				Service: "mesh-gateway",
				Meta:    map[string]string{structs.MetaWANFederationKey: "1"},
				Port:    port,
			},
		}

		var out struct{}
		require.NoError(t, s1.RPC(context.Background(), "Catalog.Register", &arg, &out))
	}

	// Wait for it to make it into the gateway locator.
	retry.Run(t, func(r *retry.R) {
		require.NotEmpty(r, s1.gatewayLocator.PickGateway("dc1"))
	})

	// Seed the secondaries with the address of the primary and wait for that to
	// be in their locators.
	s2.RefreshPrimaryGatewayFallbackAddresses([]string{gwAddr})
	retry.Run(t, func(r *retry.R) {
		require.NotEmpty(r, s2.gatewayLocator.PickGateway("dc1"))
	})
	s3.RefreshPrimaryGatewayFallbackAddresses([]string{gwAddr})
	retry.Run(t, func(r *retry.R) {
		require.NotEmpty(r, s3.gatewayLocator.PickGateway("dc1"))
	})

	// Try to join from secondary to primary. We can't use joinWAN() because we
	// are simulating proper bootstrapping and if ACLs were on we would have to
	// delay gateway registration in the secondary until after one directional
	// join. So this way we explicitly join secondary-to-primary as a standalone
	// operation and follow it up later with a full join.
	_, err := s2.JoinWAN([]string{joinAddrWAN(s1)})
	require.NoError(t, err)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s2.WANMembers()), 2; got != want {
			r.Fatalf("got %d s2 WAN members want %d", got, want)
		}
	})
	_, err = s3.JoinWAN([]string{joinAddrWAN(s1)})
	require.NoError(t, err)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s3.WANMembers()), 3; got != want {
			r.Fatalf("got %d s3 WAN members want %d", got, want)
		}
	})

	// Now we can register this into the catalog in dc2 and dc3.
	{
		arg := structs.RegisterRequest{
			Datacenter: "dc2",
			Node:       "betty",
			Address:    "127.0.0.1",
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindMeshGateway,
				ID:      "mesh-gateway",
				Service: "mesh-gateway",
				Meta:    map[string]string{structs.MetaWANFederationKey: "1"},
				Port:    port,
			},
		}

		var out struct{}
		require.NoError(t, s2.RPC(context.Background(), "Catalog.Register", &arg, &out))
	}
	{
		arg := structs.RegisterRequest{
			Datacenter: "dc3",
			Node:       "bonnie",
			Address:    "127.0.0.1",
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindMeshGateway,
				ID:      "mesh-gateway",
				Service: "mesh-gateway",
				Meta:    map[string]string{structs.MetaWANFederationKey: "1"},
				Port:    port,
			},
		}

		var out struct{}
		require.NoError(t, s3.RPC(context.Background(), "Catalog.Register", &arg, &out))
	}

	// Wait for it to make it into the gateway locator in dc2 and then for
	// AE to carry it back to the primary
	retry.Run(t, func(r *retry.R) {
		require.NotEmpty(r, s3.gatewayLocator.PickGateway("dc2"))
		require.NotEmpty(r, s2.gatewayLocator.PickGateway("dc2"))
		require.NotEmpty(r, s1.gatewayLocator.PickGateway("dc2"))

		require.NotEmpty(r, s3.gatewayLocator.PickGateway("dc3"))
		require.NotEmpty(r, s2.gatewayLocator.PickGateway("dc3"))
		require.NotEmpty(r, s1.gatewayLocator.PickGateway("dc3"))
	})

	// Try to join again using the standard verification method now that
	// all of the plumbing is in place.
	joinWAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 3; got != want {
			r.Fatalf("got %d s1 WAN members want %d", got, want)
		}
		if got, want := len(s2.WANMembers()), 3; got != want {
			r.Fatalf("got %d s2 WAN members want %d", got, want)
		}
	})

	// Check the router has all of them
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.router.GetDatacenters()), 3; got != want {
			r.Fatalf("got %d routes want %d", got, want)
		}
		if got, want := len(s2.router.GetDatacenters()), 3; got != want {
			r.Fatalf("got %d datacenters want %d", got, want)
		}
		if got, want := len(s3.router.GetDatacenters()), 3; got != want {
			r.Fatalf("got %d datacenters want %d", got, want)
		}
	})

	// Ensure we can do some trivial RPC in all directions.
	servers := map[string]*Server{"dc1": s1, "dc2": s2, "dc3": s3}
	names := map[string]string{"dc1": "bob", "dc2": "betty", "dc3": "bonnie"}
	for _, srcDC := range []string{"dc1", "dc2", "dc3"} {
		srv := servers[srcDC]
		for _, dstDC := range []string{"dc1", "dc2", "dc3"} {
			if srcDC == dstDC {
				continue
			}
			t.Run(srcDC+" to "+dstDC, func(t *testing.T) {
				arg := structs.DCSpecificRequest{
					Datacenter: dstDC,
				}
				var out structs.IndexedNodes
				require.NoError(t, srv.RPC(context.Background(), "Catalog.ListNodes", &arg, &out))
				require.Len(t, out.Nodes, 1)
				node := out.Nodes[0]
				require.Equal(t, dstDC, node.Datacenter)
				require.Equal(t, names[dstDC], node.Node)
			})
		}
	}
}

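// registerMeshGateway is a hypothetical helper, sketched only to illustrate
// the catalog registration that TestServer_JoinWAN_viaMeshGateway performs
// three times inline: a mesh-gateway NodeService tagged with the WAN
// federation meta key. It is not referenced by any test in this file.
func registerMeshGateway(t *testing.T, s *Server, dc, node string, port int) {
	t.Helper()
	arg := structs.RegisterRequest{
		Datacenter: dc,
		Node:       node,
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			Kind:    structs.ServiceKindMeshGateway,
			ID:      "mesh-gateway",
			Service: "mesh-gateway",
			Meta:    map[string]string{structs.MetaWANFederationKey: "1"},
			Port:    port,
		},
	}
	var out struct{}
	require.NoError(t, s.RPC(context.Background(), "Catalog.Register", &arg, &out))
}
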
func TestServer_JoinSeparateLanAndWanAddresses(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = t.Name() + "-s1"
		c.Datacenter = "dc1"
		c.Bootstrap = true
		c.SerfFloodInterval = 100 * time.Millisecond
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	s2Name := t.Name() + "-s2"
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = s2Name
		c.Datacenter = "dc2"
		c.Bootstrap = false
		// This wan address will be expected to be seen on s1
		c.SerfWANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.2"
		// This lan address will be expected to be seen on s3
		c.SerfLANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.3"
		c.SerfFloodInterval = 100 * time.Millisecond
	})

	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = t.Name() + "-s3"
		c.Datacenter = "dc2"
		c.Bootstrap = true
		c.SerfFloodInterval = 100 * time.Millisecond
	})
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	// Join s2 to s1 on wan
	joinWAN(t, s2, s1)

	// Join s3 to s2 on lan
	joinLAN(t, s3, s2)

	// We rely on flood joining to fill across the LAN, so we expect s3 to
	// show up on the WAN as well, even though it's not explicitly joined.
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 3; got != want {
			r.Fatalf("got %d s1 WAN members want %d", got, want)
		}
		if got, want := len(s2.WANMembers()), 3; got != want {
			r.Fatalf("got %d s2 WAN members want %d", got, want)
		}
		if got, want := len(s2.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d s2 LAN members want %d", got, want)
		}
		if got, want := len(s3.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d s3 LAN members want %d", got, want)
		}
	})

	// Check the router has both
	retry.Run(t, func(r *retry.R) {
		if len(s1.router.GetDatacenters()) != 2 {
			r.Fatalf("remote consul missing")
		}
		if len(s2.router.GetDatacenters()) != 2 {
			r.Fatalf("remote consul missing")
		}
		if len(s2.serverLookup.Servers()) != 2 {
			r.Fatalf("local consul fellow s3 for s2 missing")
		}
	})

	// Get and check the wan address of s2 from s1
	var s2WanAddr string
	for _, member := range s1.WANMembers() {
		if member.Name == s2Name+".dc2" {
			s2WanAddr = member.Addr.String()
		}
	}
	if s2WanAddr != "127.0.0.2" {
		t.Fatalf("s1 sees s2 on a wrong address: %s, expecting: %s", s2WanAddr, "127.0.0.2")
	}

	// Get and check the lan address of s2 from s3
	var s2LanAddr string
	for _, lanmember := range s3.LANMembersInAgentPartition() {
		if lanmember.Name == s2Name {
			s2LanAddr = lanmember.Addr.String()
		}
	}
	if s2LanAddr != "127.0.0.3" {
		t.Fatalf("s3 sees s2 on a wrong address: %s, expecting: %s", s2LanAddr, "127.0.0.3")
	}
}

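// wanAddrOf is an illustrative sketch (a hypothetical helper, unused by the
// tests): it performs the same lookup TestServer_JoinSeparateLanAndWanAddresses
// does inline, returning the address a given member name advertises in a
// server's WAN pool, or "" if the member is not present.
func wanAddrOf(s *Server, name string) string {
	for _, member := range s.WANMembers() {
		if member.Name == name {
			return member.Addr.String()
		}
	}
	return ""
}
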
func TestServer_LeaveLeader(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	joinLAN(t, s2, s1)
	joinLAN(t, s3, s1)
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 3))
		r.Check(wantPeers(s2, 3))
		r.Check(wantPeers(s3, 3))
	})
	// Issue a leave to the leader
	var leader *Server
	switch {
	case s1.IsLeader():
		leader = s1
	case s2.IsLeader():
		leader = s2
	case s3.IsLeader():
		leader = s3
	default:
		t.Fatal("no leader")
	}
	if err := leader.Leave(); err != nil {
		t.Fatal("leave failed: ", err)
	}

	// Should lose a peer
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 2))
		r.Check(wantPeers(s2, 2))
		r.Check(wantPeers(s3, 2))
	})
}

func TestServer_Leave(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	// Second server not in bootstrap mode
	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s2.RPC, "dc1")

	// Issue a leave to the non-leader
	var nonleader *Server
	switch {
	case s1.IsLeader():
		nonleader = s2
	case s2.IsLeader():
		nonleader = s1
	default:
		t.Fatal("no leader")
	}
	if err := nonleader.Leave(); err != nil {
		t.Fatal("leave failed: ", err)
	}

	// Should lose a peer
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 1))
		r.Check(wantPeers(s2, 1))
	})
}

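// pickLeader is a hypothetical sketch of the leader-selection switch used in
// the leave tests above: it returns the first server reporting leadership, or
// nil when none does, which callers should treat as a fatal condition.
func pickLeader(servers ...*Server) *Server {
	for _, s := range servers {
		if s.IsLeader() {
			return s
		}
	}
	return nil
}
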
func TestServer_RPC(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	var out struct{}
	if err := s1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}

// TestServer_RPC_MetricsIntercept_Off proves that we can turn off net/rpc interceptors altogether.
func TestServer_RPC_MetricsIntercept_Off(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	storage := &sync.Map{} // string -> float32
	keyMakingFunc := func(key []string, labels []metrics.Label) string {
		allKey := strings.Join(key, "+")

		for _, label := range labels {
			if label.Name == "method" {
				allKey = allKey + "+" + label.Value
			}
		}

		return allKey
	}

	simpleRecorderFunc := func(key []string, val float32, labels []metrics.Label) {
		storage.Store(keyMakingFunc(key, labels), val)
	}

	t.Run("test no net/rpc interceptor metric with nil func", func(t *testing.T) {
		_, conf := testServerConfig(t)
		deps := newDefaultDeps(t, conf)

		// "disable" metrics net/rpc interceptor
		deps.GetNetRPCInterceptorFunc = nil
		// "hijack" the rpc recorder for asserts;
		// note that there will be "internal" net/rpc calls made
		// that will still show up; those don't go through the net/rpc interceptor;
		// see consul.agent.rpc.middleware.RPCTypeInternal for context
		deps.NewRequestRecorderFunc = func(logger hclog.Logger, isLeader func() bool, localDC string) *middleware.RequestRecorder {
			// for the purposes of this test, we don't need isLeader or localDC
			return &middleware.RequestRecorder{
				Logger:       hclog.NewInterceptLogger(&hclog.LoggerOptions{}),
				RecorderFunc: simpleRecorderFunc,
			}
		}

		s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		t.Cleanup(func() { s1.Shutdown() })

		var out struct{}
		if err := s1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != nil {
			t.Fatalf("err: %v", err)
		}

		key := keyMakingFunc(middleware.OneTwelveRPCSummary[0].Name, []metrics.Label{{Name: "method", Value: "Status.Ping"}})

		if _, ok := storage.Load(key); ok {
			t.Fatalf("Did not expect to find key %s in the metrics log", key)
		}
	})

	t.Run("test no net/rpc interceptor metric with func that gives nil", func(t *testing.T) {
		_, conf := testServerConfig(t)
		deps := newDefaultDeps(t, conf)

		// "hijack" the rpc recorder for asserts;
		// note that there will be "internal" net/rpc calls made
		// that will still show up; those don't go through the net/rpc interceptor;
		// see consul.agent.rpc.middleware.RPCTypeInternal for context
		deps.NewRequestRecorderFunc = func(logger hclog.Logger, isLeader func() bool, localDC string) *middleware.RequestRecorder {
			// for the purposes of this test, we don't need isLeader or localDC
			return &middleware.RequestRecorder{
				Logger:       hclog.NewInterceptLogger(&hclog.LoggerOptions{}),
				RecorderFunc: simpleRecorderFunc,
			}
		}

		deps.GetNetRPCInterceptorFunc = func(recorder *middleware.RequestRecorder) rpc.ServerServiceCallInterceptor {
			return nil
		}

		s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		t.Cleanup(func() { s2.Shutdown() })
		if err != nil {
			t.Fatalf("err: %v", err)
		}

		var out struct{}
		if err := s2.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != nil {
			t.Fatalf("err: %v", err)
		}

		key := keyMakingFunc(middleware.OneTwelveRPCSummary[0].Name, []metrics.Label{{Name: "method", Value: "Status.Ping"}})

		if _, ok := storage.Load(key); ok {
			t.Fatalf("Did not expect to find key %s in the metrics log", key)
		}
	})
}

// TestServer_RPC_RequestRecorder proves that we cannot make a server without a valid RequestRecorder provider func
// or a non-nil RequestRecorder.
func TestServer_RPC_RequestRecorder(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("test nil func provider", func(t *testing.T) {
		_, conf := testServerConfig(t)
		deps := newDefaultDeps(t, conf)
		deps.NewRequestRecorderFunc = nil

		s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger)

		require.Error(t, err, "need err when provider func is nil")
		require.Equal(t, err.Error(), "cannot initialize server without an RPC request recorder provider")

		t.Cleanup(func() {
			if s1 != nil {
				s1.Shutdown()
			}
		})
	})

	t.Run("test nil RequestRecorder", func(t *testing.T) {
		_, conf := testServerConfig(t)
		deps := newDefaultDeps(t, conf)
		deps.NewRequestRecorderFunc = func(logger hclog.Logger, isLeader func() bool, localDC string) *middleware.RequestRecorder {
			return nil
		}

		s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger)

		require.Error(t, err, "need err when RequestRecorder is nil")
		require.Equal(t, err.Error(), "cannot initialize server with a nil RPC request recorder")

		t.Cleanup(func() {
			if s2 != nil {
				s2.Shutdown()
			}
		})
	})
}

// TestServer_RPC_MetricsIntercept mocks a request recorder and asserts that RPC calls are observed.
func TestServer_RPC_MetricsIntercept(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	_, conf := testServerConfig(t)
	deps := newDefaultDeps(t, conf)

	// The method used to record metric observations here is similar to that used in
	// interceptors_test.go.
	storage := &sync.Map{} // string -> float32
	keyMakingFunc := func(key []string, labels []metrics.Label) string {
		allKey := strings.Join(key, "+")

		for _, label := range labels {
			allKey = allKey + "+" + label.Value
		}

		return allKey
	}

	simpleRecorderFunc := func(key []string, val float32, labels []metrics.Label) {
		storage.Store(keyMakingFunc(key, labels), val)
	}
	deps.NewRequestRecorderFunc = func(logger hclog.Logger, isLeader func() bool, localDC string) *middleware.RequestRecorder {
		// for the purposes of this test, we don't need isLeader or localDC
		return &middleware.RequestRecorder{
			Logger:       hclog.NewInterceptLogger(&hclog.LoggerOptions{}),
			RecorderFunc: simpleRecorderFunc,
		}
	}

	deps.GetNetRPCInterceptorFunc = func(recorder *middleware.RequestRecorder) rpc.ServerServiceCallInterceptor {
		return func(reqServiceMethod string, argv, replyv reflect.Value, handler func() error) {
			reqStart := time.Now()

			err := handler()

			recorder.Record(reqServiceMethod, "test", reqStart, argv.Interface(), err != nil)
		}
	}

	s, err := newServerWithDeps(t, conf, deps)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s.Shutdown()
	testrpc.WaitForTestAgent(t, s.RPC, "dc1")

	// asserts
	t.Run("test happy path for metrics interceptor", func(t *testing.T) {
		var out struct{}
		if err := s.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != nil {
			t.Fatalf("err: %v", err)
		}

		expectedLabels := []metrics.Label{
			{Name: "method", Value: "Status.Ping"},
			{Name: "errored", Value: "false"},
			{Name: "request_type", Value: "read"},
			{Name: "rpc_type", Value: "test"},
			{Name: "server_role", Value: "unreported"},
		}

		key := keyMakingFunc(middleware.OneTwelveRPCSummary[0].Name, expectedLabels)

		if _, ok := storage.Load(key); !ok {
			// the compound key will look like: "rpc+server+call+Status.Ping+false+read+test+unreported"
			t.Fatalf("Did not find key %s in the metrics log", key)
		}
	})
}

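// timingOnlyInterceptor is an illustrative, hypothetical sketch of the shape a
// GetNetRPCInterceptorFunc must return: it wraps the handler, times it, and
// forwards the observation to the supplied recorder, mirroring the interceptor
// wired up inline in TestServer_RPC_MetricsIntercept.
func timingOnlyInterceptor(recorder *middleware.RequestRecorder) rpc.ServerServiceCallInterceptor {
	return func(reqServiceMethod string, argv, replyv reflect.Value, handler func() error) {
		reqStart := time.Now()
		err := handler()
		recorder.Record(reqServiceMethod, "test", reqStart, argv.Interface(), err != nil)
	}
}
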
func TestServer_JoinLAN_TLS(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	_, conf1 := testServerConfig(t)
	conf1.TLSConfig.InternalRPC.VerifyIncoming = true
	conf1.TLSConfig.InternalRPC.VerifyOutgoing = true
	configureTLS(conf1)
	s1, err := newServer(t, conf1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s1.Shutdown()
	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")

	_, conf2 := testServerConfig(t)
	conf2.Bootstrap = false
	conf2.TLSConfig.InternalRPC.VerifyIncoming = true
	conf2.TLSConfig.InternalRPC.VerifyOutgoing = true
	configureTLS(conf2)
	s2, err := newServer(t, conf2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s2.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)
	testrpc.WaitForTestAgent(t, s2.RPC, "dc1")

	// Verify Raft has established a peer
	retry.Run(t, func(r *retry.R) {
		r.Check(wantRaft([]*Server{s1, s2}))
	})
}

func TestServer_Expect(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// All test servers should be in expect=3 mode, except for the 3rd one,
	// but one with expect=0 can cause a bootstrap to occur from the other
	// servers as currently implemented.
	dir1, s1 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerDCExpect(t, "dc1", 0)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	dir4, s4 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir4)
	defer s4.Shutdown()

	// Join the first two servers.
	joinLAN(t, s2, s1)

	// Should have no peers yet since the bootstrap didn't occur.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 0))
		r.Check(wantPeers(s2, 0))
	})

	// Join the third node.
	joinLAN(t, s3, s1)

	// Now we have three servers so we should bootstrap.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 3))
		r.Check(wantPeers(s2, 3))
		r.Check(wantPeers(s3, 3))
	})

	// Join the fourth node.
	joinLAN(t, s4, s1)

	// Wait for the new server to see itself added to the cluster.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantRaft([]*Server{s1, s2, s3, s4}))
	})
}

// Should not trigger bootstrap and new election when s3 joins, since cluster exists
func TestServer_AvoidReBootstrap(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	dir1, s1 := testServerDCExpect(t, "dc1", 2)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDCExpect(t, "dc1", 0)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerDCExpect(t, "dc1", 2)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	// Join the first two servers
	joinLAN(t, s2, s1)

	// Make sure a leader is elected, grab the current term and then add in
	// the third server.
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	termBefore := s1.raft.Stats()["last_log_term"]
	joinLAN(t, s3, s1)

	// Wait for the new server to see itself added to the cluster.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantRaft([]*Server{s1, s2, s3}))
	})

	// Make sure there's still a leader and that the term didn't change,
	// so we know an election didn't occur.
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	termAfter := s1.raft.Stats()["last_log_term"]
	if termAfter != termBefore {
		t.Fatalf("looks like an election took place")
	}
}

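// lastLogTerm is a hypothetical one-line helper, sketched for illustration:
// it names the raft stat that TestServer_AvoidReBootstrap compares before and
// after the third server joins to prove that no new election took place.
func lastLogTerm(s *Server) string {
	return s.raft.Stats()["last_log_term"]
}
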
func TestServer_Expect_NonVoters(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.BootstrapExpect = 2
		c.ReadReplica = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDCExpect(t, "dc1", 2)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerDCExpect(t, "dc1", 2)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	// Join the first two servers.
	joinLAN(t, s2, s1)

	// Should have no peers yet since the bootstrap didn't occur.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 0))
		r.Check(wantPeers(s2, 0))
	})

	// Join the third node.
	joinLAN(t, s3, s1)

	// Now we have three servers so we should bootstrap.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 2))
		r.Check(wantPeers(s2, 2))
		r.Check(wantPeers(s3, 2))
	})

	// Make sure a leader is elected
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	retry.Run(t, func(r *retry.R) {
		r.Check(wantRaft([]*Server{s1, s2, s3}))
	})
}

func TestServer_BadExpect(t *testing.T) {
	t.Parallel()
	// this one is in expect=3 mode
	dir1, s1 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	// this one is in expect=2 mode
	dir2, s2 := testServerDCExpect(t, "dc1", 2)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// and this one is in expect=3 mode
	dir3, s3 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)

	// should have no peers yet
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 0))
		r.Check(wantPeers(s2, 0))
	})

	// join the third node
	joinLAN(t, s3, s1)

	// should still have no peers (because s2 is in expect=2 mode)
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 0))
		r.Check(wantPeers(s2, 0))
		r.Check(wantPeers(s3, 0))
	})
}

type fakeGlobalResp struct{}

func (r *fakeGlobalResp) Add(interface{}) {
}

func (r *fakeGlobalResp) New() interface{} {
	return struct{}{}
}

func TestServer_keyringRPCs(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerDC(t, "dc1")
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	retry.Run(t, func(r *retry.R) {
		if len(s1.router.GetDatacenters()) != 1 {
			r.Fatal(nil)
		}
	})

	// Check that an error from a remote DC is returned
	_, err := s1.keyringRPCs("Bad.Method", nil, []string{s1.config.Datacenter})
	if err == nil {
		t.Fatalf("should have errored")
	}
	if !strings.Contains(err.Error(), "Bad.Method") {
		t.Fatalf("unexpected error: %s", err)
	}
}

func testVerifyRPC(s1, s2 *Server, t *testing.T) (bool, error) {
	joinLAN(t, s1, s2)
	retry.Run(t, func(r *retry.R) {
		r.Check(wantRaft([]*Server{s1, s2}))
	})

	// Have s2 make an RPC call to s1
	var leader *metadata.Server
	for _, server := range s2.serverLookup.Servers() {
		if server.Name == s1.config.NodeName {
			leader = server
		}
	}
	if leader == nil {
		t.Fatal("no leader")
	}
	return s2.connPool.Ping(leader.Datacenter, leader.ShortName, leader.Addr)
}

func TestServer_TLSToNoTLS(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	// Set up a server with no TLS configured
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Add a second server with TLS configured
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.TLSConfig.InternalRPC.CAFile = "../../test/client_certs/rootca.crt"
		c.TLSConfig.InternalRPC.CertFile = "../../test/client_certs/server.crt"
		c.TLSConfig.InternalRPC.KeyFile = "../../test/client_certs/server.key"
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	success, err := testVerifyRPC(s1, s2, t)
	if err != nil {
		t.Fatal(err)
	}
	if !success {
		t.Fatalf("bad: %v", success)
	}
}

func TestServer_TLSForceOutgoingToNoTLS(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	// Set up a server with no TLS configured
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Add a second server with TLS and VerifyOutgoing set
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.TLSConfig.InternalRPC.CAFile = "../../test/client_certs/rootca.crt"
		c.TLSConfig.InternalRPC.CertFile = "../../test/client_certs/server.crt"
		c.TLSConfig.InternalRPC.KeyFile = "../../test/client_certs/server.key"
		c.TLSConfig.InternalRPC.VerifyOutgoing = true
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	_, err := testVerifyRPC(s1, s2, t)
	if err == nil || !strings.Contains(err.Error(), "remote error: tls") {
		t.Fatalf("should fail")
	}
}

func TestServer_TLSToFullVerify(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	// Set up a server with TLS and VerifyOutgoing set
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.TLSConfig.InternalRPC.CAFile = "../../test/client_certs/rootca.crt"
		c.TLSConfig.InternalRPC.CertFile = "../../test/client_certs/server.crt"
		c.TLSConfig.InternalRPC.KeyFile = "../../test/client_certs/server.key"
		c.TLSConfig.InternalRPC.VerifyOutgoing = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Add a second server with TLS configured
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.TLSConfig.InternalRPC.CAFile = "../../test/client_certs/rootca.crt"
		c.TLSConfig.InternalRPC.CertFile = "../../test/client_certs/server.crt"
		c.TLSConfig.InternalRPC.KeyFile = "../../test/client_certs/server.key"
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	success, err := testVerifyRPC(s1, s2, t)
	if err != nil {
		t.Fatal(err)
	}
	if !success {
		t.Fatalf("bad: %v", success)
	}
}

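// withClientCertTLS is a hypothetical config helper sketched from the TLS
// tests above: it applies the shared client_certs material to a server's
// internal RPC TLS settings so the test variations only differ in their
// Verify* flags. The paths are the same ones the tests already use.
func withClientCertTLS(c *Config) {
	c.TLSConfig.InternalRPC.CAFile = "../../test/client_certs/rootca.crt"
	c.TLSConfig.InternalRPC.CertFile = "../../test/client_certs/server.crt"
	c.TLSConfig.InternalRPC.KeyFile = "../../test/client_certs/server.key"
}
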
func TestServer_RevokeLeadershipIdempotent(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	s1.revokeLeadership()
	s1.revokeLeadership()
}

func TestServer_ReloadConfig(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	t.Parallel()

	entryInit := &structs.ProxyConfigEntry{
		Kind: structs.ProxyDefaults,
		Name: structs.ProxyConfigGlobal,
		Config: map[string]interface{}{
			// these are made a []uint8 and an int64 to allow the Equals test to pass
			// otherwise it will fail complaining about data types
			"foo": "bar",
			"bar": int64(1),
		},
	}

	dir1, s := testServerWithConfig(t, func(c *Config) {
		c.Build = "1.5.0"
		c.RPCRateLimit = 500
		c.RPCMaxBurst = 5000
		c.RequestLimitsMode = "permissive"
		c.RequestLimitsReadRate = 500
		c.RequestLimitsWriteRate = 500
		c.RPCClientTimeout = 60 * time.Second
		// Set one raft param to be non-default in the initial config, others are
		// default.
		c.RaftConfig.TrailingLogs = 1234
	})
	defer os.RemoveAll(dir1)
	defer s.Shutdown()

	testrpc.WaitForTestAgent(t, s.RPC, "dc1")

	limiter := s.rpcLimiter.Load().(*rate.Limiter)
	require.Equal(t, rate.Limit(500), limiter.Limit())
	require.Equal(t, 5000, limiter.Burst())

	require.Equal(t, 60*time.Second, s.connPool.RPCClientTimeout())

	rc := ReloadableConfig{
		RequestLimits: &RequestLimits{
			Mode:      rpcRate.ModeEnforcing,
			ReadRate:  1000,
			WriteRate: 1100,
		},
		RPCClientTimeout:     2 * time.Minute,
		RPCRateLimit:         1000,
		RPCMaxBurst:          10000,
		ConfigEntryBootstrap: []structs.ConfigEntry{entryInit},
		// Reset the custom one to default by removing it from the config file (it
		// will be a zero value here).
		RaftTrailingLogs: 0,

		// Set a different Raft param to something custom now
		RaftSnapshotThreshold: 4321,

		// Leave other raft fields default
	}

	mockHandler := rpcRate.NewMockRequestLimitsHandler(t)
	mockHandler.On("UpdateConfig", mock.Anything).Return(func(cfg rpcRate.HandlerConfig) {})

	s.incomingRPCLimiter = mockHandler
	require.NoError(t, s.ReloadConfig(rc))

	_, entry, err := s.fsm.State().ConfigEntry(nil, structs.ProxyDefaults, structs.ProxyConfigGlobal, structs.DefaultEnterpriseMetaInDefaultPartition())
	require.NoError(t, err)
	require.NotNil(t, entry)
	global, ok := entry.(*structs.ProxyConfigEntry)
	require.True(t, ok)
	require.Equal(t, entryInit.Kind, global.Kind)
	require.Equal(t, entryInit.Name, global.Name)
	require.Equal(t, entryInit.Config, global.Config)

	// Check rate limiter got updated
	limiter = s.rpcLimiter.Load().(*rate.Limiter)
	require.Equal(t, rate.Limit(1000), limiter.Limit())
	require.Equal(t, 10000, limiter.Burst())

	// Check the incoming RPC rate limiter got updated
	mockHandler.AssertCalled(t, "UpdateConfig", rpcRate.HandlerConfig{
		GlobalLimitConfig: rpcRate.GlobalLimitConfig{
			Mode: rc.RequestLimits.Mode,
			ReadWriteConfig: rpcRate.ReadWriteConfig{
				ReadConfig: multilimiter.LimiterConfig{
					Rate:  rc.RequestLimits.ReadRate,
					Burst: int(rc.RequestLimits.ReadRate) * requestLimitsBurstMultiplier,
				},
				WriteConfig: multilimiter.LimiterConfig{
					Rate:  rc.RequestLimits.WriteRate,
					Burst: int(rc.RequestLimits.WriteRate) * requestLimitsBurstMultiplier,
				},
			},
		},
	})

	// Check RPC client timeout got updated
	require.Equal(t, 2*time.Minute, s.connPool.RPCClientTimeout())

	// Check raft config
	defaults := DefaultConfig()
	got := s.raft.ReloadableConfig()
	require.Equal(t, uint64(4321), got.SnapshotThreshold,
		"should have been reloaded to new value")
	require.Equal(t, defaults.RaftConfig.SnapshotInterval, got.SnapshotInterval,
		"should have remained the default interval")
	require.Equal(t, defaults.RaftConfig.TrailingLogs, got.TrailingLogs,
		"should have reloaded to default trailing_logs")

	// Now check that updating each of those raft fields separately works
	// correctly too.
}

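// reloadRPCRateOnly is an illustrative sketch (a hypothetical helper): it
// shows the smallest ReloadableConfig needed to change just the legacy RPC
// rate limit at runtime. Every raft field is left at its zero value, which, as
// TestServer_ReloadConfig verifies, means the raft defaults are preserved.
func reloadRPCRateOnly(s *Server) error {
	return s.ReloadConfig(ReloadableConfig{
		RPCRateLimit: 1000,
		RPCMaxBurst:  10000,
	})
}
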
func TestServer_computeRaftReloadableConfig(t *testing.T) {
	defaults := DefaultConfig().RaftConfig

	cases := []struct {
		name string
		rc   ReloadableConfig
		want raft.ReloadableConfig
	}{
		{
			// This case is the common path - reload is called with a ReloadableConfig
			// populated from the RuntimeConfig which has zero values for the fields.
			// On startup we selectively pick non-zero runtime config fields to
			// override defaults so we need to do the same.
			name: "Still defaults",
			rc:   ReloadableConfig{},
			want: raft.ReloadableConfig{
				SnapshotThreshold: defaults.SnapshotThreshold,
				SnapshotInterval:  defaults.SnapshotInterval,
				TrailingLogs:      defaults.TrailingLogs,
				ElectionTimeout:   defaults.ElectionTimeout,
				HeartbeatTimeout:  defaults.HeartbeatTimeout,
			},
		},
		{
			name: "Threshold set",
			rc: ReloadableConfig{
				RaftSnapshotThreshold: 123456,
			},
			want: raft.ReloadableConfig{
				SnapshotThreshold: 123456,
				SnapshotInterval:  defaults.SnapshotInterval,
				TrailingLogs:      defaults.TrailingLogs,
				ElectionTimeout:   defaults.ElectionTimeout,
				HeartbeatTimeout:  defaults.HeartbeatTimeout,
			},
		},
		{
			name: "interval set",
			rc: ReloadableConfig{
				RaftSnapshotInterval: 13 * time.Minute,
			},
			want: raft.ReloadableConfig{
				SnapshotThreshold: defaults.SnapshotThreshold,
				SnapshotInterval:  13 * time.Minute,
				TrailingLogs:      defaults.TrailingLogs,
				ElectionTimeout:   defaults.ElectionTimeout,
				HeartbeatTimeout:  defaults.HeartbeatTimeout,
			},
		},
		{
			name: "trailing logs set",
			rc: ReloadableConfig{
				RaftTrailingLogs: 78910,
			},
			want: raft.ReloadableConfig{
				SnapshotThreshold: defaults.SnapshotThreshold,
				SnapshotInterval:  defaults.SnapshotInterval,
				TrailingLogs:      78910,
				ElectionTimeout:   defaults.ElectionTimeout,
				HeartbeatTimeout:  defaults.HeartbeatTimeout,
			},
		},
		{
			name: "all set",
			rc: ReloadableConfig{
				RaftSnapshotThreshold: 123456,
				RaftSnapshotInterval:  13 * time.Minute,
				RaftTrailingLogs:      78910,
				ElectionTimeout:       300 * time.Millisecond,
				HeartbeatTimeout:      400 * time.Millisecond,
			},
			want: raft.ReloadableConfig{
				SnapshotThreshold: 123456,
				SnapshotInterval:  13 * time.Minute,
				TrailingLogs:      78910,
				ElectionTimeout:   300 * time.Millisecond,
				HeartbeatTimeout:  400 * time.Millisecond,
			},
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got := computeRaftReloadableConfig(tc.rc)
			require.Equal(t, tc.want, got)
		})
	}
}
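
// The first case in the table above documents the behavior under test: zero
// values in ReloadableConfig mean "keep the raft default", and only non-zero
// fields override it. A minimal, hypothetical sketch of that override pattern
// (illustrative only, not Consul's actual computeRaftReloadableConfig) would be:
//
//	func overrideRaftDefaults(rc ReloadableConfig, defaults raft.ReloadableConfig) raft.ReloadableConfig {
//		out := defaults
//		if rc.RaftSnapshotThreshold != 0 {
//			out.SnapshotThreshold = rc.RaftSnapshotThreshold
//		}
//		if rc.RaftSnapshotInterval != 0 {
//			out.SnapshotInterval = rc.RaftSnapshotInterval
//		}
//		if rc.RaftTrailingLogs != 0 {
//			out.TrailingLogs = rc.RaftTrailingLogs
//		}
//		if rc.ElectionTimeout != 0 {
//			out.ElectionTimeout = rc.ElectionTimeout
//		}
//		if rc.HeartbeatTimeout != 0 {
//			out.HeartbeatTimeout = rc.HeartbeatTimeout
//		}
//		return out
//	}
//
// overrideRaftDefaults is a made-up name; the table cases above remain the
// authoritative description of the expected input/output pairs.
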
func TestServer_RPC_RateLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	_, conf1 := testServerConfig(t)
	conf1.RPCRateLimit = 2
	conf1.RPCMaxBurst = 2
	s1, err := newServer(t, conf1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	retry.Run(t, func(r *retry.R) {
		var out struct{}
		if err := s1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != structs.ErrRPCRateExceeded {
			r.Fatalf("err: %v", err)
		}
	})
}
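
// The server above is configured with RPCRateLimit = 2 and RPCMaxBurst = 2, so
// the RPC layer behaves like a token bucket that refills at 2 requests/second
// with room for a burst of 2. A minimal, hypothetical sketch of that kind of
// gate, using the golang.org/x/time/rate package already imported by this file
// (not Consul's actual handler wiring):
//
//	limiter := rate.NewLimiter(rate.Limit(2), 2) // 2 req/s, burst of 2
//	handle := func() error {
//		if !limiter.Allow() {
//			return structs.ErrRPCRateExceeded
//		}
//		return nil // dispatch the RPC as usual
//	}
//
// With a full bucket, two back-to-back calls succeed and the third is rejected,
// which is why the retry loop above keeps issuing Status.Ping until it observes
// ErrRPCRateExceeded.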

// TestServer_Peering_LeadershipCheck tests that a peering service can receive the leader address
// through the peering backend once leadership has been established.
func TestServer_Peering_LeadershipCheck(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	// given two servers: s1 (leader), s2 (follower)
	_, conf1 := testServerConfig(t)
	s1, err := newServer(t, conf1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s1.Shutdown()

	_, conf2 := testServerConfig(t)
	conf2.Bootstrap = false
	s2, err := newServer(t, conf2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s2.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)

	// Verify Raft has established a peer
	retry.Run(t, func(r *retry.R) {
		r.Check(wantRaft([]*Server{s1, s2}))
	})

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s2.RPC, "dc1")
	waitForLeaderEstablishment(t, s1)

	// the actual tests
	// when leadership has been established s2 should have the address of s1
	// in the peering service
	peeringLeaderAddr := s2.peeringBackend.GetLeaderAddress()

	require.Equal(t, s1.config.RPCAddr.String(), peeringLeaderAddr)
	// test corollary by transitivity to future-proof against any setup bugs
	require.NotEqual(t, s2.config.RPCAddr.String(), peeringLeaderAddr)
}
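
// The leader-address assertions above run once, after waitForLeaderEstablishment
// has already guaranteed a stable leader. If that ordering were ever in doubt,
// the same check could be written with the retry helper used earlier in this
// file (a sketch reusing the identifiers from the test above, not an additional
// test case):
//
//	retry.Run(t, func(r *retry.R) {
//		if got := s2.peeringBackend.GetLeaderAddress(); got != s1.config.RPCAddr.String() {
//			r.Fatalf("leader address not yet propagated, got %q", got)
//		}
//	})
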
func TestServer_hcpManager(t *testing.T) {
	_, conf1 := testServerConfig(t)
	conf1.BootstrapExpect = 1
	conf1.RPCAdvertise = &net.TCPAddr{IP: []byte{127, 0, 0, 2}, Port: conf1.RPCAddr.Port}
	hcp1 := hcpclient.NewMockClient(t)
	hcp1.EXPECT().PushServerStatus(mock.Anything, mock.MatchedBy(func(status *hcpclient.ServerStatus) bool {
		return status.ID == string(conf1.NodeID)
	})).Run(func(ctx context.Context, status *hcpclient.ServerStatus) {
		require.Equal(t, status.LanAddress, "127.0.0.2")
	}).Call.Return(nil)

	deps1 := newDefaultDeps(t, conf1)
	deps1.HCP.Client = hcp1
	s1, err := newServerWithDeps(t, conf1, deps1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s1.Shutdown()
	require.NotNil(t, s1.hcpManager)
	waitForLeaderEstablishment(t, s1)
	hcp1.AssertExpectations(t)
}
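
// TestServer_hcpManager leans on a mockery-style generated client: EXPECT()
// registers a typed expectation, mock.MatchedBy constrains the argument (the
// pushed status must carry this server's NodeID), Run inspects the call as it
// happens (asserting the advertised 127.0.0.2 LAN address), and Return(nil)
// makes the push succeed. The same pattern on a plain testify mock looks
// roughly like the following sketch (FooClientMock and DoThing are hypothetical
// names, not part of this package):
//
//	m := &FooClientMock{}
//	m.On("DoThing", mock.MatchedBy(func(id string) bool { return id != "" })).
//		Run(func(args mock.Arguments) { /* inspect args.String(0) here */ }).
//		Return(nil)
//	// ... exercise the code under test ...
//	m.AssertExpectations(t)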