test: refactor testcontainers and add peering integ tests (#15084)

This commit is contained in:
Dan Stough 2022-11-01 15:03:23 -04:00 committed by GitHub
parent e74bd41a38
commit 19ec59c930
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 3046 additions and 733 deletions

View File

@ -869,7 +869,7 @@ jobs:
# tput complains if this isn't set to something. # tput complains if this isn't set to something.
TERM: ansi TERM: ansi
- store_artifacts: - store_artifacts:
path: ./test/integration/consul-container/upgrade/workdir/logs path: ./test/integration/consul-container/test/upgrade/workdir/logs
destination: container-logs destination: container-logs
- store_test_results: - store_test_results:
path: *TEST_RESULTS_DIR path: *TEST_RESULTS_DIR

View File

@ -133,168 +133,168 @@ type Cache struct {
// configuration it should be treated as an external API which cannot be // configuration it should be treated as an external API which cannot be
// changed and refactored at will since this will break existing setups. // changed and refactored at will since this will break existing setups.
type Config struct { type Config struct {
ACL ACL `mapstructure:"acl"` ACL ACL `mapstructure:"acl" json:"-"`
Addresses Addresses `mapstructure:"addresses"` Addresses Addresses `mapstructure:"addresses" json:"-"`
AdvertiseAddrLAN *string `mapstructure:"advertise_addr"` AdvertiseAddrLAN *string `mapstructure:"advertise_addr" json:"advertise_addr,omitempty"`
AdvertiseAddrLANIPv4 *string `mapstructure:"advertise_addr_ipv4"` AdvertiseAddrLANIPv4 *string `mapstructure:"advertise_addr_ipv4" json:"advertise_addr_ipv4,omitempty"`
AdvertiseAddrLANIPv6 *string `mapstructure:"advertise_addr_ipv6"` AdvertiseAddrLANIPv6 *string `mapstructure:"advertise_addr_ipv6" json:"advertise_addr_ipv6,omitempty"`
AdvertiseAddrWAN *string `mapstructure:"advertise_addr_wan"` AdvertiseAddrWAN *string `mapstructure:"advertise_addr_wan" json:"advertise_addr_wan,omitempty"`
AdvertiseAddrWANIPv4 *string `mapstructure:"advertise_addr_wan_ipv4"` AdvertiseAddrWANIPv4 *string `mapstructure:"advertise_addr_wan_ipv4" json:"advertise_addr_wan_ipv4,omitempty"`
AdvertiseAddrWANIPv6 *string `mapstructure:"advertise_addr_wan_ipv6"` AdvertiseAddrWANIPv6 *string `mapstructure:"advertise_addr_wan_ipv6" json:"advertise_addr_wan_ipv6,omitempty"`
AdvertiseReconnectTimeout *string `mapstructure:"advertise_reconnect_timeout"` AdvertiseReconnectTimeout *string `mapstructure:"advertise_reconnect_timeout" json:"-"`
AutoConfig AutoConfigRaw `mapstructure:"auto_config"` AutoConfig AutoConfigRaw `mapstructure:"auto_config" json:"-"`
Autopilot Autopilot `mapstructure:"autopilot"` Autopilot Autopilot `mapstructure:"autopilot" json:"-"`
BindAddr *string `mapstructure:"bind_addr"` BindAddr *string `mapstructure:"bind_addr" json:"bind_addr,omitempty"`
Bootstrap *bool `mapstructure:"bootstrap"` Bootstrap *bool `mapstructure:"bootstrap" json:"bootstrap,omitempty"`
BootstrapExpect *int `mapstructure:"bootstrap_expect"` BootstrapExpect *int `mapstructure:"bootstrap_expect" json:"bootstrap_expect,omitempty"`
Cache Cache `mapstructure:"cache"` Cache Cache `mapstructure:"cache" json:"-"`
Check *CheckDefinition `mapstructure:"check"` // needs to be a pointer to avoid partial merges Check *CheckDefinition `mapstructure:"check" json:"-"` // needs to be a pointer to avoid partial merges
CheckOutputMaxSize *int `mapstructure:"check_output_max_size"` CheckOutputMaxSize *int `mapstructure:"check_output_max_size" json:"check_output_max_size,omitempty"`
CheckUpdateInterval *string `mapstructure:"check_update_interval"` CheckUpdateInterval *string `mapstructure:"check_update_interval" json:"check_update_interval,omitempty"`
Checks []CheckDefinition `mapstructure:"checks"` Checks []CheckDefinition `mapstructure:"checks" json:"-"`
ClientAddr *string `mapstructure:"client_addr"` ClientAddr *string `mapstructure:"client_addr" json:"client_addr,omitempty"`
Cloud *CloudConfigRaw `mapstructure:"cloud"` Cloud *CloudConfigRaw `mapstructure:"cloud" json:"-"`
ConfigEntries ConfigEntries `mapstructure:"config_entries"` ConfigEntries ConfigEntries `mapstructure:"config_entries" json:"-"`
AutoEncrypt AutoEncrypt `mapstructure:"auto_encrypt"` AutoEncrypt AutoEncrypt `mapstructure:"auto_encrypt" json:"auto_encrypt,omitempty"`
Connect Connect `mapstructure:"connect"` Connect Connect `mapstructure:"connect" json:"connect,omitempty"`
DNS DNS `mapstructure:"dns_config"` DNS DNS `mapstructure:"dns_config" json:"-"`
DNSDomain *string `mapstructure:"domain"` DNSDomain *string `mapstructure:"domain" json:"domain,omitempty"`
DNSAltDomain *string `mapstructure:"alt_domain"` DNSAltDomain *string `mapstructure:"alt_domain" json:"alt_domain,omitempty"`
DNSRecursors []string `mapstructure:"recursors"` DNSRecursors []string `mapstructure:"recursors" json:"recursors,omitempty"`
DataDir *string `mapstructure:"data_dir"` DataDir *string `mapstructure:"data_dir" json:"data_dir,omitempty"`
Datacenter *string `mapstructure:"datacenter"` Datacenter *string `mapstructure:"datacenter" json:"datacenter,omitempty"`
DefaultQueryTime *string `mapstructure:"default_query_time"` DefaultQueryTime *string `mapstructure:"default_query_time" json:"default_query_time,omitempty"`
DisableAnonymousSignature *bool `mapstructure:"disable_anonymous_signature"` DisableAnonymousSignature *bool `mapstructure:"disable_anonymous_signature" json:"disable_anonymous_signature,omitempty"`
DisableCoordinates *bool `mapstructure:"disable_coordinates"` DisableCoordinates *bool `mapstructure:"disable_coordinates" json:"disable_coordinates,omitempty"`
DisableHostNodeID *bool `mapstructure:"disable_host_node_id"` DisableHostNodeID *bool `mapstructure:"disable_host_node_id" json:"disable_host_node_id,omitempty"`
DisableHTTPUnprintableCharFilter *bool `mapstructure:"disable_http_unprintable_char_filter"` DisableHTTPUnprintableCharFilter *bool `mapstructure:"disable_http_unprintable_char_filter" json:"disable_http_unprintable_char_filter,omitempty"`
DisableKeyringFile *bool `mapstructure:"disable_keyring_file"` DisableKeyringFile *bool `mapstructure:"disable_keyring_file" json:"disable_keyring_file,omitempty"`
DisableRemoteExec *bool `mapstructure:"disable_remote_exec"` DisableRemoteExec *bool `mapstructure:"disable_remote_exec" json:"disable_remote_exec,omitempty"`
DisableUpdateCheck *bool `mapstructure:"disable_update_check"` DisableUpdateCheck *bool `mapstructure:"disable_update_check" json:"disable_update_check,omitempty"`
DiscardCheckOutput *bool `mapstructure:"discard_check_output"` DiscardCheckOutput *bool `mapstructure:"discard_check_output" json:"discard_check_output,omitempty"`
DiscoveryMaxStale *string `mapstructure:"discovery_max_stale"` DiscoveryMaxStale *string `mapstructure:"discovery_max_stale" json:"discovery_max_stale,omitempty"`
EnableAgentTLSForChecks *bool `mapstructure:"enable_agent_tls_for_checks"` EnableAgentTLSForChecks *bool `mapstructure:"enable_agent_tls_for_checks" json:"enable_agent_tls_for_checks,omitempty"`
EnableCentralServiceConfig *bool `mapstructure:"enable_central_service_config"` EnableCentralServiceConfig *bool `mapstructure:"enable_central_service_config" json:"enable_central_service_config,omitempty"`
EnableDebug *bool `mapstructure:"enable_debug"` EnableDebug *bool `mapstructure:"enable_debug" json:"enable_debug,omitempty"`
EnableScriptChecks *bool `mapstructure:"enable_script_checks"` EnableScriptChecks *bool `mapstructure:"enable_script_checks" json:"enable_script_checks,omitempty"`
EnableLocalScriptChecks *bool `mapstructure:"enable_local_script_checks"` EnableLocalScriptChecks *bool `mapstructure:"enable_local_script_checks" json:"enable_local_script_checks,omitempty"`
EnableSyslog *bool `mapstructure:"enable_syslog"` EnableSyslog *bool `mapstructure:"enable_syslog" json:"enable_syslog,omitempty"`
EncryptKey *string `mapstructure:"encrypt"` EncryptKey *string `mapstructure:"encrypt" json:"encrypt,omitempty"`
EncryptVerifyIncoming *bool `mapstructure:"encrypt_verify_incoming"` EncryptVerifyIncoming *bool `mapstructure:"encrypt_verify_incoming" json:"encrypt_verify_incoming,omitempty"`
EncryptVerifyOutgoing *bool `mapstructure:"encrypt_verify_outgoing"` EncryptVerifyOutgoing *bool `mapstructure:"encrypt_verify_outgoing" json:"encrypt_verify_outgoing,omitempty"`
GossipLAN GossipLANConfig `mapstructure:"gossip_lan"` GossipLAN GossipLANConfig `mapstructure:"gossip_lan" json:"-"`
GossipWAN GossipWANConfig `mapstructure:"gossip_wan"` GossipWAN GossipWANConfig `mapstructure:"gossip_wan" json:"-"`
HTTPConfig HTTPConfig `mapstructure:"http_config"` HTTPConfig HTTPConfig `mapstructure:"http_config" json:"-"`
LeaveOnTerm *bool `mapstructure:"leave_on_terminate"` LeaveOnTerm *bool `mapstructure:"leave_on_terminate" json:"leave_on_terminate,omitempty"`
LicensePath *string `mapstructure:"license_path"` LicensePath *string `mapstructure:"license_path" json:"license_path,omitempty"`
Limits Limits `mapstructure:"limits"` Limits Limits `mapstructure:"limits" json:"-"`
LogLevel *string `mapstructure:"log_level"` LogLevel *string `mapstructure:"log_level" json:"log_level,omitempty"`
LogJSON *bool `mapstructure:"log_json"` LogJSON *bool `mapstructure:"log_json" json:"log_json,omitempty"`
LogFile *string `mapstructure:"log_file"` LogFile *string `mapstructure:"log_file" json:"log_file,omitempty"`
LogRotateDuration *string `mapstructure:"log_rotate_duration"` LogRotateDuration *string `mapstructure:"log_rotate_duration" json:"log_rotate_duration,omitempty"`
LogRotateBytes *int `mapstructure:"log_rotate_bytes"` LogRotateBytes *int `mapstructure:"log_rotate_bytes" json:"log_rotate_bytes,omitempty"`
LogRotateMaxFiles *int `mapstructure:"log_rotate_max_files"` LogRotateMaxFiles *int `mapstructure:"log_rotate_max_files" json:"log_rotate_max_files,omitempty"`
MaxQueryTime *string `mapstructure:"max_query_time"` MaxQueryTime *string `mapstructure:"max_query_time" json:"max_query_time,omitempty"`
NodeID *string `mapstructure:"node_id"` NodeID *string `mapstructure:"node_id" json:"node_id,omitempty"`
NodeMeta map[string]string `mapstructure:"node_meta"` NodeMeta map[string]string `mapstructure:"node_meta" json:"node_meta,omitempty"`
NodeName *string `mapstructure:"node_name"` NodeName *string `mapstructure:"node_name" json:"node_name,omitempty"`
Peering Peering `mapstructure:"peering"` Peering Peering `mapstructure:"peering" json:"peering,omitempty"`
Performance Performance `mapstructure:"performance"` Performance Performance `mapstructure:"performance" json:"-"`
PidFile *string `mapstructure:"pid_file"` PidFile *string `mapstructure:"pid_file" json:"pid_file,omitempty"`
Ports Ports `mapstructure:"ports"` Ports Ports `mapstructure:"ports" json:"ports,omitempty"`
PrimaryDatacenter *string `mapstructure:"primary_datacenter"` PrimaryDatacenter *string `mapstructure:"primary_datacenter" json:"primary_datacenter,omitempty"`
PrimaryGateways []string `mapstructure:"primary_gateways"` PrimaryGateways []string `mapstructure:"primary_gateways" json:"primary_gateways,omitempty"`
PrimaryGatewaysInterval *string `mapstructure:"primary_gateways_interval"` PrimaryGatewaysInterval *string `mapstructure:"primary_gateways_interval" json:"primary_gateways_interval,omitempty"`
RPCProtocol *int `mapstructure:"protocol"` RPCProtocol *int `mapstructure:"protocol" json:"protocol,omitempty"`
RaftProtocol *int `mapstructure:"raft_protocol"` RaftProtocol *int `mapstructure:"raft_protocol" json:"raft_protocol,omitempty"`
RaftSnapshotThreshold *int `mapstructure:"raft_snapshot_threshold"` RaftSnapshotThreshold *int `mapstructure:"raft_snapshot_threshold" json:"raft_snapshot_threshold,omitempty"`
RaftSnapshotInterval *string `mapstructure:"raft_snapshot_interval"` RaftSnapshotInterval *string `mapstructure:"raft_snapshot_interval" json:"raft_snapshot_interval,omitempty"`
RaftTrailingLogs *int `mapstructure:"raft_trailing_logs"` RaftTrailingLogs *int `mapstructure:"raft_trailing_logs" json:"raft_trailing_logs,omitempty"`
ReconnectTimeoutLAN *string `mapstructure:"reconnect_timeout"` ReconnectTimeoutLAN *string `mapstructure:"reconnect_timeout" json:"reconnect_timeout,omitempty"`
ReconnectTimeoutWAN *string `mapstructure:"reconnect_timeout_wan"` ReconnectTimeoutWAN *string `mapstructure:"reconnect_timeout_wan" json:"reconnect_timeout_wan,omitempty"`
RejoinAfterLeave *bool `mapstructure:"rejoin_after_leave"` RejoinAfterLeave *bool `mapstructure:"rejoin_after_leave" json:"rejoin_after_leave,omitempty"`
AutoReloadConfig *bool `mapstructure:"auto_reload_config"` AutoReloadConfig *bool `mapstructure:"auto_reload_config" json:"auto_reload_config,omitempty"`
RetryJoinIntervalLAN *string `mapstructure:"retry_interval"` RetryJoinIntervalLAN *string `mapstructure:"retry_interval" json:"retry_interval,omitempty"`
RetryJoinIntervalWAN *string `mapstructure:"retry_interval_wan"` RetryJoinIntervalWAN *string `mapstructure:"retry_interval_wan" json:"retry_interval_wan,omitempty"`
RetryJoinLAN []string `mapstructure:"retry_join"` RetryJoinLAN []string `mapstructure:"retry_join" json:"retry_join,omitempty"`
RetryJoinMaxAttemptsLAN *int `mapstructure:"retry_max"` RetryJoinMaxAttemptsLAN *int `mapstructure:"retry_max" json:"retry_max,omitempty"`
RetryJoinMaxAttemptsWAN *int `mapstructure:"retry_max_wan"` RetryJoinMaxAttemptsWAN *int `mapstructure:"retry_max_wan" json:"retry_max_wan,omitempty"`
RetryJoinWAN []string `mapstructure:"retry_join_wan"` RetryJoinWAN []string `mapstructure:"retry_join_wan" json:"retry_join_wan,omitempty"`
SerfAllowedCIDRsLAN []string `mapstructure:"serf_lan_allowed_cidrs"` SerfAllowedCIDRsLAN []string `mapstructure:"serf_lan_allowed_cidrs" json:"serf_lan_allowed_cidrs,omitempty"`
SerfAllowedCIDRsWAN []string `mapstructure:"serf_wan_allowed_cidrs"` SerfAllowedCIDRsWAN []string `mapstructure:"serf_wan_allowed_cidrs" json:"serf_wan_allowed_cidrs,omitempty"`
SerfBindAddrLAN *string `mapstructure:"serf_lan"` SerfBindAddrLAN *string `mapstructure:"serf_lan" json:"serf_lan,omitempty"`
SerfBindAddrWAN *string `mapstructure:"serf_wan"` SerfBindAddrWAN *string `mapstructure:"serf_wan" json:"serf_wan,omitempty"`
ServerMode *bool `mapstructure:"server"` ServerMode *bool `mapstructure:"server" json:"server,omitempty"`
ServerName *string `mapstructure:"server_name"` ServerName *string `mapstructure:"server_name" json:"server_name,omitempty"`
Service *ServiceDefinition `mapstructure:"service"` Service *ServiceDefinition `mapstructure:"service" json:"-"`
Services []ServiceDefinition `mapstructure:"services"` Services []ServiceDefinition `mapstructure:"services" json:"-"`
SessionTTLMin *string `mapstructure:"session_ttl_min"` SessionTTLMin *string `mapstructure:"session_ttl_min" json:"session_ttl_min,omitempty"`
SkipLeaveOnInt *bool `mapstructure:"skip_leave_on_interrupt"` SkipLeaveOnInt *bool `mapstructure:"skip_leave_on_interrupt" json:"skip_leave_on_interrupt,omitempty"`
StartJoinAddrsLAN []string `mapstructure:"start_join"` StartJoinAddrsLAN []string `mapstructure:"start_join" json:"start_join,omitempty"`
StartJoinAddrsWAN []string `mapstructure:"start_join_wan"` StartJoinAddrsWAN []string `mapstructure:"start_join_wan" json:"start_join_wan,omitempty"`
SyslogFacility *string `mapstructure:"syslog_facility"` SyslogFacility *string `mapstructure:"syslog_facility" json:"syslog_facility,omitempty"`
TLS TLS `mapstructure:"tls"` TLS TLS `mapstructure:"tls" json:"tls,omitempty"`
TaggedAddresses map[string]string `mapstructure:"tagged_addresses"` TaggedAddresses map[string]string `mapstructure:"tagged_addresses" json:"tagged_addresses,omitempty"`
Telemetry Telemetry `mapstructure:"telemetry"` Telemetry Telemetry `mapstructure:"telemetry" json:"telemetry,omitempty"`
TranslateWANAddrs *bool `mapstructure:"translate_wan_addrs"` TranslateWANAddrs *bool `mapstructure:"translate_wan_addrs" json:"translate_wan_addrs,omitempty"`
XDS XDS `mapstructure:"xds"` XDS XDS `mapstructure:"xds" json:"-"`
// DEPRECATED (ui-config) - moved to the ui_config stanza // DEPRECATED (ui-config) - moved to the ui_config stanza
UI *bool `mapstructure:"ui"` UI *bool `mapstructure:"ui" json:"-"`
// DEPRECATED (ui-config) - moved to the ui_config stanza // DEPRECATED (ui-config) - moved to the ui_config stanza
UIContentPath *string `mapstructure:"ui_content_path"` UIContentPath *string `mapstructure:"ui_content_path" json:"-"`
// DEPRECATED (ui-config) - moved to the ui_config stanza // DEPRECATED (ui-config) - moved to the ui_config stanza
UIDir *string `mapstructure:"ui_dir"` UIDir *string `mapstructure:"ui_dir" json:"-"`
UIConfig RawUIConfig `mapstructure:"ui_config"` UIConfig RawUIConfig `mapstructure:"ui_config" json:"-"`
UnixSocket UnixSocket `mapstructure:"unix_sockets"` UnixSocket UnixSocket `mapstructure:"unix_sockets" json:"-"`
Watches []map[string]interface{} `mapstructure:"watches"` Watches []map[string]interface{} `mapstructure:"watches" json:"-"`
RPC RPC `mapstructure:"rpc"` RPC RPC `mapstructure:"rpc" json:"-"`
RaftBoltDBConfig *consul.RaftBoltDBConfig `mapstructure:"raft_boltdb"` RaftBoltDBConfig *consul.RaftBoltDBConfig `mapstructure:"raft_boltdb" json:"-"`
// UseStreamingBackend instead of blocking queries for service health and // UseStreamingBackend instead of blocking queries for service health and
// any other endpoints which support streaming. // any other endpoints which support streaming.
UseStreamingBackend *bool `mapstructure:"use_streaming_backend"` UseStreamingBackend *bool `mapstructure:"use_streaming_backend" json:"-"`
// This isn't used by Consul but we've documented a feature where users // This isn't used by Consul but we've documented a feature where users
// can deploy their snapshot agent configs alongside their Consul configs // can deploy their snapshot agent configs alongside their Consul configs
// so we have a placeholder here so it can be parsed but this doesn't // so we have a placeholder here so it can be parsed but this doesn't
// manifest itself in any way inside the runtime config. // manifest itself in any way inside the runtime config.
SnapshotAgent map[string]interface{} `mapstructure:"snapshot_agent"` SnapshotAgent map[string]interface{} `mapstructure:"snapshot_agent" json:"-"`
// non-user configurable values // non-user configurable values
AEInterval *string `mapstructure:"ae_interval"` AEInterval *string `mapstructure:"ae_interval" json:"-"`
CheckDeregisterIntervalMin *string `mapstructure:"check_deregister_interval_min"` CheckDeregisterIntervalMin *string `mapstructure:"check_deregister_interval_min" json:"-"`
CheckReapInterval *string `mapstructure:"check_reap_interval"` CheckReapInterval *string `mapstructure:"check_reap_interval" json:"-"`
Consul Consul `mapstructure:"consul"` Consul Consul `mapstructure:"consul" json:"-"`
Revision *string `mapstructure:"revision"` Revision *string `mapstructure:"revision" json:"-"`
SegmentLimit *int `mapstructure:"segment_limit"` SegmentLimit *int `mapstructure:"segment_limit" json:"-"`
SegmentNameLimit *int `mapstructure:"segment_name_limit"` SegmentNameLimit *int `mapstructure:"segment_name_limit" json:"-"`
SyncCoordinateIntervalMin *string `mapstructure:"sync_coordinate_interval_min"` SyncCoordinateIntervalMin *string `mapstructure:"sync_coordinate_interval_min" json:"-"`
SyncCoordinateRateTarget *float64 `mapstructure:"sync_coordinate_rate_target"` SyncCoordinateRateTarget *float64 `mapstructure:"sync_coordinate_rate_target" json:"-"`
Version *string `mapstructure:"version"` Version *string `mapstructure:"version" json:"-"`
VersionPrerelease *string `mapstructure:"version_prerelease"` VersionPrerelease *string `mapstructure:"version_prerelease" json:"-"`
VersionMetadata *string `mapstructure:"version_metadata"` VersionMetadata *string `mapstructure:"version_metadata" json:"-"`
BuildDate *time.Time `mapstructure:"build_date"` BuildDate *time.Time `mapstructure:"build_date" json:"-"`
// Enterprise Only // Enterprise Only
Audit Audit `mapstructure:"audit"` Audit Audit `mapstructure:"audit" json:"-"`
// Enterprise Only // Enterprise Only
ReadReplica *bool `mapstructure:"read_replica" alias:"non_voting_server"` ReadReplica *bool `mapstructure:"read_replica" alias:"non_voting_server" json:"-"`
// Enterprise Only // Enterprise Only
SegmentName *string `mapstructure:"segment"` SegmentName *string `mapstructure:"segment" json:"-"`
// Enterprise Only // Enterprise Only
Segments []Segment `mapstructure:"segments"` Segments []Segment `mapstructure:"segments" json:"-"`
// Enterprise Only // Enterprise Only
Partition *string `mapstructure:"partition"` Partition *string `mapstructure:"partition" json:"-"`
// Enterprise Only - not user configurable // Enterprise Only - not user configurable
LicensePollBaseTime *string `mapstructure:"license_poll_base_time"` LicensePollBaseTime *string `mapstructure:"license_poll_base_time" json:"-"`
LicensePollMaxTime *string `mapstructure:"license_poll_max_time"` LicensePollMaxTime *string `mapstructure:"license_poll_max_time" json:"-"`
LicenseUpdateBaseTime *string `mapstructure:"license_update_base_time"` LicenseUpdateBaseTime *string `mapstructure:"license_update_base_time" json:"-"`
LicenseUpdateMaxTime *string `mapstructure:"license_update_max_time"` LicenseUpdateMaxTime *string `mapstructure:"license_update_max_time" json:"-"`
} }
type GossipLANConfig struct { type GossipLANConfig struct {
@ -592,33 +592,33 @@ type ExposePath struct {
// AutoEncrypt is the agent-global auto_encrypt configuration. // AutoEncrypt is the agent-global auto_encrypt configuration.
type AutoEncrypt struct { type AutoEncrypt struct {
// TLS enables receiving certificates for clients from servers // TLS enables receiving certificates for clients from servers
TLS *bool `mapstructure:"tls"` TLS *bool `mapstructure:"tls" json:"tls,omitempty"`
// Additional DNS SAN entries that clients request for their certificates. // Additional DNS SAN entries that clients request for their certificates.
DNSSAN []string `mapstructure:"dns_san"` DNSSAN []string `mapstructure:"dns_san" json:"dns_san,omitempty"`
// Additional IP SAN entries that clients request for their certificates. // Additional IP SAN entries that clients request for their certificates.
IPSAN []string `mapstructure:"ip_san"` IPSAN []string `mapstructure:"ip_san" json:"ip_san,omitempty"`
// AllowTLS enables the RPC endpoint on the server to answer // AllowTLS enables the RPC endpoint on the server to answer
// AutoEncrypt.Sign requests. // AutoEncrypt.Sign requests.
AllowTLS *bool `mapstructure:"allow_tls"` AllowTLS *bool `mapstructure:"allow_tls" json:"allow_tls,omitempty"`
} }
// Connect is the agent-global connect configuration. // Connect is the agent-global connect configuration.
type Connect struct { type Connect struct {
// Enabled opts the agent into connect. It should be set on all clients and // Enabled opts the agent into connect. It should be set on all clients and
// servers in a cluster for correct connect operation. // servers in a cluster for correct connect operation.
Enabled *bool `mapstructure:"enabled"` Enabled *bool `mapstructure:"enabled" json:"enabled,omitempty"`
CAProvider *string `mapstructure:"ca_provider"` CAProvider *string `mapstructure:"ca_provider" json:"ca_provider,omitempty"`
CAConfig map[string]interface{} `mapstructure:"ca_config"` CAConfig map[string]interface{} `mapstructure:"ca_config" json:"ca_config,omitempty"`
MeshGatewayWANFederationEnabled *bool `mapstructure:"enable_mesh_gateway_wan_federation"` MeshGatewayWANFederationEnabled *bool `mapstructure:"enable_mesh_gateway_wan_federation" json:"enable_mesh_gateway_wan_federation,omitempty"`
EnableServerlessPlugin *bool `mapstructure:"enable_serverless_plugin"` EnableServerlessPlugin *bool `mapstructure:"enable_serverless_plugin" json:"enable_serverless_plugin,omitempty"`
// TestCALeafRootChangeSpread controls how long after a CA roots change before new leaf certs will be generated. // TestCALeafRootChangeSpread controls how long after a CA roots change before new leaf certs will be generated.
// This is only tuned in tests, generally set to 1ns to make tests deterministic with when to expect updated leaf // This is only tuned in tests, generally set to 1ns to make tests deterministic with when to expect updated leaf
// certs by. This configuration is not exposed to users (not documented, and agent/config/default.go will override it) // certs by. This configuration is not exposed to users (not documented, and agent/config/default.go will override it)
TestCALeafRootChangeSpread *string `mapstructure:"test_ca_leaf_root_change_spread"` TestCALeafRootChangeSpread *string `mapstructure:"test_ca_leaf_root_change_spread" json:"test_ca_leaf_root_change_spread,omitempty"`
} }
// SOA is the configuration of SOA for DNS // SOA is the configuration of SOA for DNS
@ -665,46 +665,46 @@ type Performance struct {
} }
type Telemetry struct { type Telemetry struct {
CirconusAPIApp *string `mapstructure:"circonus_api_app"` CirconusAPIApp *string `mapstructure:"circonus_api_app" json:"circonus_api_app,omitempty"`
CirconusAPIToken *string `mapstructure:"circonus_api_token"` CirconusAPIToken *string `mapstructure:"circonus_api_token" json:"circonus_api_token,omitempty"`
CirconusAPIURL *string `mapstructure:"circonus_api_url"` CirconusAPIURL *string `mapstructure:"circonus_api_url" json:"circonus_api_url,omitempty"`
CirconusBrokerID *string `mapstructure:"circonus_broker_id"` CirconusBrokerID *string `mapstructure:"circonus_broker_id" json:"circonus_broker_id,omitempty"`
CirconusBrokerSelectTag *string `mapstructure:"circonus_broker_select_tag"` CirconusBrokerSelectTag *string `mapstructure:"circonus_broker_select_tag" json:"circonus_broker_select_tag,omitempty"`
CirconusCheckDisplayName *string `mapstructure:"circonus_check_display_name"` CirconusCheckDisplayName *string `mapstructure:"circonus_check_display_name" json:"circonus_check_display_name,omitempty"`
CirconusCheckForceMetricActivation *string `mapstructure:"circonus_check_force_metric_activation"` CirconusCheckForceMetricActivation *string `mapstructure:"circonus_check_force_metric_activation" json:"circonus_check_force_metric_activation,omitempty"`
CirconusCheckID *string `mapstructure:"circonus_check_id"` CirconusCheckID *string `mapstructure:"circonus_check_id" json:"circonus_check_id,omitempty"`
CirconusCheckInstanceID *string `mapstructure:"circonus_check_instance_id"` CirconusCheckInstanceID *string `mapstructure:"circonus_check_instance_id" json:"circonus_check_instance_id,omitempty"`
CirconusCheckSearchTag *string `mapstructure:"circonus_check_search_tag"` CirconusCheckSearchTag *string `mapstructure:"circonus_check_search_tag" json:"circonus_check_search_tag,omitempty"`
CirconusCheckTags *string `mapstructure:"circonus_check_tags"` CirconusCheckTags *string `mapstructure:"circonus_check_tags" json:"circonus_check_tags,omitempty"`
CirconusSubmissionInterval *string `mapstructure:"circonus_submission_interval"` CirconusSubmissionInterval *string `mapstructure:"circonus_submission_interval" json:"circonus_submission_interval,omitempty"`
CirconusSubmissionURL *string `mapstructure:"circonus_submission_url"` CirconusSubmissionURL *string `mapstructure:"circonus_submission_url" json:"circonus_submission_url,omitempty"`
DisableHostname *bool `mapstructure:"disable_hostname"` DisableHostname *bool `mapstructure:"disable_hostname" json:"disable_hostname,omitempty"`
DogstatsdAddr *string `mapstructure:"dogstatsd_addr"` DogstatsdAddr *string `mapstructure:"dogstatsd_addr" json:"dogstatsd_addr,omitempty"`
DogstatsdTags []string `mapstructure:"dogstatsd_tags"` DogstatsdTags []string `mapstructure:"dogstatsd_tags" json:"dogstatsd_tags,omitempty"`
RetryFailedConfiguration *bool `mapstructure:"retry_failed_connection"` RetryFailedConfiguration *bool `mapstructure:"retry_failed_connection" json:"retry_failed_connection,omitempty"`
FilterDefault *bool `mapstructure:"filter_default"` FilterDefault *bool `mapstructure:"filter_default" json:"filter_default,omitempty"`
PrefixFilter []string `mapstructure:"prefix_filter"` PrefixFilter []string `mapstructure:"prefix_filter" json:"prefix_filter,omitempty"`
MetricsPrefix *string `mapstructure:"metrics_prefix"` MetricsPrefix *string `mapstructure:"metrics_prefix" json:"metrics_prefix,omitempty"`
PrometheusRetentionTime *string `mapstructure:"prometheus_retention_time"` PrometheusRetentionTime *string `mapstructure:"prometheus_retention_time" json:"prometheus_retention_time,omitempty"`
StatsdAddr *string `mapstructure:"statsd_address"` StatsdAddr *string `mapstructure:"statsd_address" json:"statsd_address,omitempty"`
StatsiteAddr *string `mapstructure:"statsite_address"` StatsiteAddr *string `mapstructure:"statsite_address" json:"statsite_address,omitempty"`
} }
type Ports struct { type Ports struct {
DNS *int `mapstructure:"dns"` DNS *int `mapstructure:"dns" json:"dns,omitempty"`
HTTP *int `mapstructure:"http"` HTTP *int `mapstructure:"http" json:"http,omitempty"`
HTTPS *int `mapstructure:"https"` HTTPS *int `mapstructure:"https" json:"https,omitempty"`
SerfLAN *int `mapstructure:"serf_lan"` SerfLAN *int `mapstructure:"serf_lan" json:"serf_lan,omitempty"`
SerfWAN *int `mapstructure:"serf_wan"` SerfWAN *int `mapstructure:"serf_wan" json:"serf_wan,omitempty"`
Server *int `mapstructure:"server"` Server *int `mapstructure:"server" json:"server,omitempty"`
GRPC *int `mapstructure:"grpc"` GRPC *int `mapstructure:"grpc" json:"grpc,omitempty"`
GRPCTLS *int `mapstructure:"grpc_tls"` GRPCTLS *int `mapstructure:"grpc_tls" json:"grpc_tls,omitempty"`
ProxyMinPort *int `mapstructure:"proxy_min_port"` ProxyMinPort *int `mapstructure:"proxy_min_port" json:"proxy_min_port,omitempty"`
ProxyMaxPort *int `mapstructure:"proxy_max_port"` ProxyMaxPort *int `mapstructure:"proxy_max_port" json:"proxy_max_port,omitempty"`
SidecarMinPort *int `mapstructure:"sidecar_min_port"` SidecarMinPort *int `mapstructure:"sidecar_min_port" json:"sidecar_min_port,omitempty"`
SidecarMaxPort *int `mapstructure:"sidecar_max_port"` SidecarMaxPort *int `mapstructure:"sidecar_max_port" json:"sidecar_max_port,omitempty"`
ExposeMinPort *int `mapstructure:"expose_min_port"` ExposeMinPort *int `mapstructure:"expose_min_port" json:"expose_min_port,omitempty"`
ExposeMaxPort *int `mapstructure:"expose_max_port"` ExposeMaxPort *int `mapstructure:"expose_max_port" json:"expose_max_port,omitempty"`
} }
type UnixSocket struct { type UnixSocket struct {
@ -873,23 +873,23 @@ type CloudConfigRaw struct {
} }
type TLSProtocolConfig struct { type TLSProtocolConfig struct {
CAFile *string `mapstructure:"ca_file"` CAFile *string `mapstructure:"ca_file" json:"ca_file,omitempty"`
CAPath *string `mapstructure:"ca_path"` CAPath *string `mapstructure:"ca_path" json:"ca_path,omitempty"`
CertFile *string `mapstructure:"cert_file"` CertFile *string `mapstructure:"cert_file" json:"cert_file,omitempty"`
KeyFile *string `mapstructure:"key_file"` KeyFile *string `mapstructure:"key_file" json:"key_file,omitempty"`
TLSMinVersion *string `mapstructure:"tls_min_version"` TLSMinVersion *string `mapstructure:"tls_min_version" json:"tls_min_version,omitempty"`
TLSCipherSuites *string `mapstructure:"tls_cipher_suites"` TLSCipherSuites *string `mapstructure:"tls_cipher_suites" json:"tls_cipher_suites,omitempty"`
VerifyIncoming *bool `mapstructure:"verify_incoming"` VerifyIncoming *bool `mapstructure:"verify_incoming" json:"verify_incoming,omitempty"`
VerifyOutgoing *bool `mapstructure:"verify_outgoing"` VerifyOutgoing *bool `mapstructure:"verify_outgoing" json:"verify_outgoing,omitempty"`
VerifyServerHostname *bool `mapstructure:"verify_server_hostname"` VerifyServerHostname *bool `mapstructure:"verify_server_hostname" json:"verify_server_hostname,omitempty"`
UseAutoCert *bool `mapstructure:"use_auto_cert"` UseAutoCert *bool `mapstructure:"use_auto_cert" json:"use_auto_cert,omitempty"`
} }
type TLS struct { type TLS struct {
Defaults TLSProtocolConfig `mapstructure:"defaults"` Defaults TLSProtocolConfig `mapstructure:"defaults" json:"defaults,omitempty"`
InternalRPC TLSProtocolConfig `mapstructure:"internal_rpc"` InternalRPC TLSProtocolConfig `mapstructure:"internal_rpc" json:"internal_rpc,omitempty"`
HTTPS TLSProtocolConfig `mapstructure:"https"` HTTPS TLSProtocolConfig `mapstructure:"https" json:"https,omitempty"`
GRPC TLSProtocolConfig `mapstructure:"grpc"` GRPC TLSProtocolConfig `mapstructure:"grpc" json:"grpc,omitempty"`
// GRPCModifiedByDeprecatedConfig is a flag used to indicate that GRPC was // GRPCModifiedByDeprecatedConfig is a flag used to indicate that GRPC was
// modified by the deprecated field mapping (as apposed to a user-provided // modified by the deprecated field mapping (as apposed to a user-provided
@ -902,15 +902,15 @@ type TLS struct {
// //
// Note: we use a *struct{} here because a simple bool isn't supported by our // Note: we use a *struct{} here because a simple bool isn't supported by our
// config merging logic. // config merging logic.
GRPCModifiedByDeprecatedConfig *struct{} `mapstructure:"-"` GRPCModifiedByDeprecatedConfig *struct{} `mapstructure:"-" json:"-"`
} }
type Peering struct { type Peering struct {
Enabled *bool `mapstructure:"enabled"` Enabled *bool `mapstructure:"enabled" json:"enabled,omitempty"`
// TestAllowPeerRegistrations controls whether CatalogRegister endpoints allow registrations for objects with `PeerName` // TestAllowPeerRegistrations controls whether CatalogRegister endpoints allow registrations for objects with `PeerName`
// This always gets overridden in NonUserSource() // This always gets overridden in NonUserSource()
TestAllowPeerRegistrations *bool `mapstructure:"test_allow_peer_registrations"` TestAllowPeerRegistrations *bool `mapstructure:"test_allow_peer_registrations" json:"test_allow_peer_registrations,omitempty"`
} }
type XDS struct { type XDS struct {

View File

@ -4,62 +4,174 @@ go 1.19
require ( require (
github.com/docker/docker v20.10.11+incompatible github.com/docker/docker v20.10.11+incompatible
github.com/hashicorp/consul/api v1.15.2 github.com/docker/go-connections v0.4.0
github.com/hashicorp/consul v1.13.3
github.com/hashicorp/consul/api v1.15.3
github.com/hashicorp/consul/sdk v0.11.0 github.com/hashicorp/consul/sdk v0.11.0
github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/serf v0.10.1
github.com/hashicorp/hcl v1.0.0 github.com/itchyny/gojq v0.12.9
github.com/stretchr/testify v1.7.0 github.com/pkg/errors v0.9.1
github.com/stretchr/testify v1.8.0
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569
github.com/testcontainers/testcontainers-go v0.13.0 github.com/testcontainers/testcontainers-go v0.13.0
) )
require ( require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/DataDog/datadog-go v3.2.0+incompatible // indirect
github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/Microsoft/hcsshim v0.8.24 // indirect github.com/Microsoft/hcsshim v0.8.24 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e // indirect
github.com/armon/go-metrics v0.3.10 // indirect github.com/armon/go-metrics v0.3.10 // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
github.com/aws/aws-sdk-go v1.42.34 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cenkalti/backoff/v4 v4.1.2 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect
github.com/circonus-labs/circonusllhist v0.1.3 // indirect
github.com/containerd/cgroups v1.0.3 // indirect github.com/containerd/cgroups v1.0.3 // indirect
github.com/containerd/containerd v1.5.13 // indirect github.com/containerd/containerd v1.5.13 // indirect
github.com/coreos/go-oidc v2.1.0+incompatible // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect
github.com/fatih/color v1.10.0 // indirect github.com/fatih/color v1.13.0 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/go-logr/logr v0.2.0 // indirect
github.com/go-openapi/analysis v0.21.2 // indirect
github.com/go-openapi/errors v0.20.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/loads v0.21.1 // indirect
github.com/go-openapi/runtime v0.24.1 // indirect
github.com/go-openapi/spec v0.20.4 // indirect
github.com/go-openapi/strfmt v0.21.3 // indirect
github.com/go-openapi/swag v0.21.1 // indirect
github.com/go-openapi/validate v0.21.0 // indirect
github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect
github.com/go-test/deep v1.0.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/protobuf v1.5.2 // indirect github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gnostic v0.4.1 // indirect
github.com/gorilla/mux v1.7.3 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706 // indirect
github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4 // indirect
github.com/hashicorp/consul/proto-public v0.1.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.2 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v0.16.2 // indirect github.com/hashicorp/go-connlimit v0.3.0 // indirect
github.com/hashicorp/go-hclog v1.2.1 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-memdb v1.3.4 // indirect
github.com/hashicorp/go-msgpack v1.1.5 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-raftchunking v0.6.2 // indirect
github.com/hashicorp/go-retryablehttp v0.6.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/go-syslog v1.0.0 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/hashicorp/go-version v1.2.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/serf v0.10.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/hcp-scada-provider v0.1.0 // indirect
github.com/hashicorp/hcp-sdk-go v0.23.1-0.20220921131124-49168300a7dc // indirect
github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 // indirect
github.com/hashicorp/memberlist v0.5.0 // indirect
github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 // indirect
github.com/hashicorp/raft v1.3.11 // indirect
github.com/hashicorp/raft-autopilot v0.1.6 // indirect
github.com/hashicorp/raft-boltdb/v2 v2.2.2 // indirect
github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 // indirect
github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 // indirect
github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/itchyny/timefmt-go v0.1.4 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.10 // indirect
github.com/magiconair/properties v1.8.5 // indirect github.com/magiconair/properties v1.8.5 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/miekg/dns v1.1.41 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/copystructure v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.4.2 // indirect github.com/mitchellh/go-testing-interface v1.14.0 // indirect
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/mitchellh/pointerstructure v1.2.1 // indirect
github.com/mitchellh/reflectwalk v1.0.1 // indirect
github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/sys/mount v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.5.0 // indirect github.com/moby/sys/mountinfo v0.5.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/runc v1.1.4 // indirect github.com/opencontainers/runc v1.1.4 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pierrec/lz4 v2.5.2+incompatible // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
github.com/prometheus/client_golang v1.7.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.10.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect github.com/sirupsen/logrus v1.8.1 // indirect
go.opencensus.io v0.22.3 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect github.com/stretchr/objx v0.4.0 // indirect
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect go.etcd.io/bbolt v1.3.5 // indirect
google.golang.org/grpc v1.41.0 // indirect go.mongodb.org/mongo-driver v1.10.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect go.opencensus.io v0.22.4 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7 // indirect
google.golang.org/grpc v1.45.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.20.6 // indirect
k8s.io/apimachinery v0.20.6 // indirect
k8s.io/client-go v0.20.6 // indirect
k8s.io/klog/v2 v2.4.0 // indirect
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.0.3 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
) )
replace github.com/hashicorp/consul/api => ../../../api replace github.com/hashicorp/consul/api => ../../../api
replace github.com/hashicorp/consul/sdk => ../../../sdk replace github.com/hashicorp/consul/sdk => ../../../sdk
replace github.com/hashicorp/consul => ../../..

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,29 @@
package agent
import (
"context"
"github.com/hashicorp/consul/api"
)
// Agent represent a Consul agent abstraction
type Agent interface {
	// GetAddr returns the host address and the mapped HTTP API port of the agent.
	GetAddr() (string, int)
	// GetClient returns an API client configured to talk to this agent.
	GetClient() *api.Client
	// GetName returns the node/container name of the agent.
	GetName() string
	// GetConfig returns the Config the agent was created with.
	GetConfig() Config
	// GetDatacenter returns the datacenter the agent belongs to.
	GetDatacenter() string
	// IsServer reports whether the agent runs in server mode.
	IsServer() bool
	// RegisterTermination registers a cleanup function to run when the agent is terminated.
	RegisterTermination(func() error)
	// Terminate stops the agent and runs any registered termination functions.
	Terminate() error
	// Upgrade replaces the running agent with one built from the given config.
	Upgrade(ctx context.Context, config Config, index int) error
}
// Config is a set of configurations required to create a Agent
type Config struct {
	// JSON is the rendered agent configuration file contents.
	JSON string
	// Certs maps in-container certificate file paths to their PEM contents.
	Certs map[string]string
	// Image is the container image name to run.
	Image string
	// Version is the image tag (consul version) to run.
	Version string
	// Cmd is the command line passed to the container entrypoint.
	Cmd []string
}

View File

@ -0,0 +1,266 @@
package agent
import (
"encoding/json"
"path/filepath"
"github.com/pkg/errors"
agentconfig "github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/tlsutil"
)
const (
remoteCertDirectory = "/consul/config/certs"
)
// BuildContext provides a reusable object meant to share common configuration settings
// between agent configuration builders.
type BuildContext struct {
	datacenter string // datacenter override applied to every agent built with this context
	encryptKey string // base64 serf gossip encryption key, shared by all agents
	caCert     string // PEM of the built-in CA certificate, when TLS is requested
	caKey      string // PEM of the built-in CA private key, when TLS is requested

	index int // keeps track of the certificates issued for naming purposes

	injectAutoEncryption   bool // initialize the built-in CA and set up agents to use auto-encrpt
	injectCerts            bool // initializes the built-in CA and distributes client certificates to agents
	injectGossipEncryption bool // setup the agents to use a gossip encryption key
}
// BuildOptions define the desired automated test setup overrides that are
// applied across agents in the cluster. They are consumed once by
// NewBuildContext to produce the shared BuildContext.
type BuildOptions struct {
	Datacenter             string // Override datacenter for agents
	InjectCerts            bool   // Provides a CA for all agents and (future) agent certs.
	InjectAutoEncryption   bool   // Configures auto-encrypt for TLS and sets up certs. Overrides InjectCerts.
	InjectGossipEncryption bool   // Provides a gossip encryption key for all agents.
}
// NewBuildContext constructs a BuildContext from the given options, eagerly
// generating any shared security material (serf gossip key, built-in CA)
// that the requested features need.
func NewBuildContext(opts BuildOptions) (*BuildContext, error) {
	ctx := &BuildContext{
		datacenter:             opts.Datacenter,
		injectAutoEncryption:   opts.InjectAutoEncryption,
		injectCerts:            opts.InjectCerts,
		injectGossipEncryption: opts.InjectGossipEncryption,
	}

	// Gossip encryption requires one serf key shared across all agents.
	if opts.InjectGossipEncryption {
		key, err := newSerfEncryptionKey()
		if err != nil {
			return nil, errors.Wrap(err, "could not generate serf encryption key")
		}
		ctx.encryptKey = key
	}

	// Either TLS mode needs a cluster-wide CA root pair.
	if opts.InjectAutoEncryption || opts.InjectCerts {
		// This is the same call that 'consul tls ca create` will run
		cert, key, err := tlsutil.GenerateCA(tlsutil.CAOpts{Domain: "consul", PermittedDNSDomains: []string{"consul", "localhost"}})
		if err != nil {
			return nil, errors.Wrap(err, "could not generate built-in CA root pair")
		}
		ctx.caCert = cert
		ctx.caKey = key
	}
	return ctx, nil
}
// GetCerts returns the built-in CA certificate and private key PEM held by
// the context (empty strings when no TLS option was requested).
func (c *BuildContext) GetCerts() (cert string, key string) {
	return c.caCert, c.caKey
}
// Builder incrementally assembles an agent configuration via chained setter
// methods, rendering the final agent Config with ToAgentConfig.
type Builder struct {
	// conf is the agent configuration being assembled.
	conf *agentconfig.Config
	// certs maps in-container cert file paths to PEM contents to be mounted.
	certs map[string]string
	// context holds optional cluster-wide settings (CA, gossip key, datacenter).
	context *BuildContext
}
// NewConfigBuilder instantiates a builder object with sensible defaults for a single consul instance
// This includes the following:
// * default ports with no plaintext options
// * debug logging
// * single server with bootstrap
// * bind to all interfaces, advertise on 'eth0'
// * connect enabled
func NewConfigBuilder(ctx *BuildContext) *Builder {
	b := &Builder{
		certs: map[string]string{},
		conf: &agentconfig.Config{
			AdvertiseAddrLAN: utils.StringToPointer(`{{ GetInterfaceIP "eth0" }}`),
			BindAddr:         utils.StringToPointer("0.0.0.0"),
			Bootstrap:        utils.BoolToPointer(true),
			ClientAddr:       utils.StringToPointer("0.0.0.0"),
			Connect: agentconfig.Connect{
				Enabled: utils.BoolToPointer(true),
			},
			LogLevel:   utils.StringToPointer("DEBUG"),
			ServerMode: utils.BoolToPointer(true),
		},
		context: ctx,
	}

	// These are the default ports, disabling plaintext transport
	// (HTTP is nil so only HTTPS is served).
	b.conf.Ports = agentconfig.Ports{
		DNS:     utils.IntToPointer(8600),
		HTTP:    nil,
		HTTPS:   utils.IntToPointer(8501),
		GRPC:    utils.IntToPointer(8502),
		GRPCTLS: utils.IntToPointer(8503),
		SerfLAN: utils.IntToPointer(8301),
		SerfWAN: utils.IntToPointer(8302),
		Server:  utils.IntToPointer(8300),
	}

	return b
}
// Bootstrap configures server bootstrapping for the expected cluster size:
// fewer than one server clears both settings, exactly one uses -bootstrap,
// and more than one uses -bootstrap-expect.
func (b *Builder) Bootstrap(servers int) *Builder {
	switch {
	case servers < 1:
		b.conf.Bootstrap = nil
		b.conf.BootstrapExpect = nil
	case servers == 1:
		b.conf.Bootstrap = utils.BoolToPointer(true)
		b.conf.BootstrapExpect = nil
	default:
		b.conf.Bootstrap = nil
		b.conf.BootstrapExpect = utils.IntToPointer(servers)
	}
	return b
}
// Client reconfigures the builder to produce a client (non-server) agent:
// server mode, bootstrap settings, and the server RPC port are all cleared.
func (b *Builder) Client() *Builder {
	b.conf.ServerMode = nil
	b.conf.Bootstrap = nil
	b.conf.BootstrapExpect = nil
	b.conf.Ports.Server = nil
	return b
}
// Datacenter sets the agent's datacenter name.
func (b *Builder) Datacenter(name string) *Builder {
	b.conf.Datacenter = utils.StringToPointer(name)
	return b
}
// Peering enables or disables cluster peering on the agent.
func (b *Builder) Peering(enable bool) *Builder {
	b.conf.Peering = agentconfig.Peering{
		Enabled: utils.BoolToPointer(enable),
	}
	return b
}
// RetryJoin sets the LAN retry-join targets (node names or addresses) for the agent.
func (b *Builder) RetryJoin(names ...string) *Builder {
	b.conf.RetryJoinLAN = names
	return b
}
// Telemetry configures the agent to ship metrics to the given statsite address.
func (b *Builder) Telemetry(statSite string) *Builder {
	b.conf.Telemetry = agentconfig.Telemetry{
		StatsiteAddr: utils.StringToPointer(statSite),
	}
	return b
}
// ToAgentConfig renders the builders configuration into a string
// representation of the json config file for agents.
// DANGER! Some fields may not have json tags in the Agent Config.
// You may need to add these yourself.
func (b *Builder) ToAgentConfig() (*Config, error) {
	// Apply context-wide settings (datacenter, gossip key, TLS material)
	// before rendering.
	b.injectContextOptions()

	out, err := json.MarshalIndent(b.conf, "", "  ")
	if err != nil {
		return nil, errors.Wrap(err, "could not marshall builder")
	}

	conf := &Config{
		Certs:   b.certs,
		Cmd:     []string{"agent"},
		Image:   *utils.TargetImage,
		JSON:    string(out),
		Version: *utils.TargetVersion,
	}

	return conf, nil
}
// injectContextOptions applies the shared BuildContext settings to the
// builder's config: datacenter override, gossip encryption key, CA trust,
// server key pairs, and auto-encrypt wiring. It is a no-op without a context.
func (b *Builder) injectContextOptions() {
	if b.context == nil {
		return
	}

	// Resolve the effective datacenter used for server certificate naming:
	// the context override wins, then any datacenter already set on the
	// config, then the default "dc1".
	// BUGFIX: previously a datacenter set only via Datacenter() was ignored
	// here, leaving dc empty and producing certs named "server..consul".
	var dc string
	if b.context.datacenter != "" {
		b.conf.Datacenter = utils.StringToPointer(b.context.datacenter)
		dc = b.context.datacenter
	} else if b.conf.Datacenter != nil && *b.conf.Datacenter != "" {
		dc = *b.conf.Datacenter
	}
	if dc == "" {
		dc = "dc1"
	}

	server := b.conf.ServerMode != nil && *b.conf.ServerMode

	if b.context.encryptKey != "" {
		b.conf.EncryptKey = utils.StringToPointer(b.context.encryptKey)
	}

	// For any TLS setup, we add the CA to agent conf
	if b.context.caCert != "" {
		// Add the ca file to the list of certs that will be mounted to consul
		filename := filepath.Join(remoteCertDirectory, "consul-agent-ca.pem")
		b.certs[filename] = b.context.caCert

		b.conf.TLS = agentconfig.TLS{
			Defaults: agentconfig.TLSProtocolConfig{
				CAFile:         utils.StringToPointer(filename),
				VerifyOutgoing: utils.BoolToPointer(true), // Secure settings
			},
			InternalRPC: agentconfig.TLSProtocolConfig{
				VerifyServerHostname: utils.BoolToPointer(true),
			},
		}
	}

	// Also for any TLS setup, generate server key pairs from the CA
	if b.context.caCert != "" && server {
		keyFileName, priv, certFileName, pub := newServerTLSKeyPair(dc, b.context)

		// Add the key pair to the list that will be mounted to consul
		certFileName = filepath.Join(remoteCertDirectory, certFileName)
		keyFileName = filepath.Join(remoteCertDirectory, keyFileName)
		b.certs[certFileName] = pub
		b.certs[keyFileName] = priv

		b.conf.TLS.Defaults.CertFile = utils.StringToPointer(certFileName)
		b.conf.TLS.Defaults.KeyFile = utils.StringToPointer(keyFileName)
		b.conf.TLS.Defaults.VerifyIncoming = utils.BoolToPointer(true) // Only applies to servers for auto-encrypt
	}

	// This assumes we've already gone through the CA/Cert setup in the previous conditional
	if b.context.injectAutoEncryption && server {
		b.conf.AutoEncrypt = agentconfig.AutoEncrypt{
			AllowTLS: utils.BoolToPointer(true), // This setting is different between client and servers
		}
		b.conf.TLS.GRPC = agentconfig.TLSProtocolConfig{
			UseAutoCert: utils.BoolToPointer(true), // This is required for peering to work over the non-GRPC_TLS port
		}
		// VerifyIncoming does not apply to client agents for auto-encrypt
	}

	if b.context.injectAutoEncryption && !server {
		b.conf.AutoEncrypt = agentconfig.AutoEncrypt{
			TLS: utils.BoolToPointer(true), // This setting is different between client and servers
		}
		b.conf.TLS.GRPC = agentconfig.TLSProtocolConfig{
			UseAutoCert: utils.BoolToPointer(true), // This is required for peering to work over the non-GRPC_TLS port
		}
	}

	if b.context.injectCerts && !b.context.injectAutoEncryption {
		panic("client certificate distribution not implemented")
	}
	b.context.index++
}

View File

@ -0,0 +1,421 @@
package agent
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"time"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/ioutils"
"github.com/pkg/errors"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
// bootLogLine is the log line we wait for to consider the agent started.
const bootLogLine = "Consul agent running"

// disableRYUKEnv is the env var that disables the testcontainers reaper (RYUK).
const disableRYUKEnv = "TESTCONTAINERS_RYUK_DISABLED"
// consulContainerNode implements the Agent interface by running a Consul agent
// in a container.
type consulContainerNode struct {
	ctx        context.Context
	client     *api.Client              // API client pointed at the pod's mapped HTTP port
	pod        testcontainers.Container // pause container that owns the network identity
	container  testcontainers.Container // the consul container sharing the pod's netns
	serverMode bool                     // parsed from the agent config JSON
	ip         string                   // container IP of the pod
	port       int                      // host-mapped HTTP API port
	datacenter string
	config     Config
	podReq     testcontainers.ContainerRequest
	consulReq  testcontainers.ContainerRequest
	certDir    string // host temp dir mounted at /consul/config/certs
	dataDir    string // host temp dir mounted at /consul/data
	network    string
	id         int
	// terminateFuncs are extra cleanup callbacks run by Terminate.
	terminateFuncs []func() error
}
// NewConsulContainer starts a Consul agent in a container with the given config.
// It creates a "pod" (pause) container to hold the network identity, then runs
// the consul container in that pod's network namespace, and returns an Agent
// whose API client is wired to the pod's host-mapped HTTP port.
func NewConsulContainer(ctx context.Context, config Config, network string, index int) (Agent, error) {
	// License comes from the environment (CONSUL_LICENSE / CONSUL_LICENSE_PATH).
	license, err := readLicense()
	if err != nil {
		return nil, err
	}

	// Only the datacenter and server flag are needed here for naming.
	pc, err := readSomeConfigFileFields(config.JSON)
	if err != nil {
		return nil, err
	}

	consulType := "client"
	if pc.Server {
		consulType = "server"
	}
	name := utils.RandName(fmt.Sprintf("%s-consul-%s-%d", pc.Datacenter, consulType, index))

	// Inject new Agent name
	config.Cmd = append(config.Cmd, "-node", name)

	// Data dir mounted into the container; 0777 so the in-container user can write.
	tmpDirData, err := ioutils.TempDir("", name)
	if err != nil {
		return nil, err
	}
	err = os.Chmod(tmpDirData, 0777)
	if err != nil {
		return nil, err
	}

	configFile, err := createConfigFile(config.JSON)
	if err != nil {
		return nil, err
	}

	// Cert dir mounted at /consul/config/certs.
	tmpCertData, err := ioutils.TempDir("", fmt.Sprintf("%s-certs", name))
	if err != nil {
		return nil, err
	}
	err = os.Chmod(tmpCertData, 0777)
	if err != nil {
		return nil, err
	}

	for filename, cert := range config.Certs {
		err := createCertFile(tmpCertData, filename, cert)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to write file %s", filename)
		}
	}

	opts := containerOpts{
		name:              name,
		certDir:           tmpCertData,
		configFile:        configFile,
		dataDir:           tmpDirData,
		license:           license,
		addtionalNetworks: []string{"bridge", network},
		hostname:          fmt.Sprintf("agent-%d", index),
	}
	podReq, consulReq := newContainerRequest(config, opts)

	// Start the pod first: it exposes port 8500 and owns the network namespace.
	podContainer, err := startContainer(ctx, podReq)
	if err != nil {
		return nil, err
	}

	localIP, err := podContainer.Host(ctx)
	if err != nil {
		return nil, err
	}

	mappedPort, err := podContainer.MappedPort(ctx, "8500")
	if err != nil {
		return nil, err
	}

	ip, err := podContainer.ContainerIP(ctx)
	if err != nil {
		return nil, err
	}

	consulContainer, err := startContainer(ctx, consulReq)
	if err != nil {
		return nil, err
	}

	// Stream container logs, prefixed with the node name.
	if err := consulContainer.StartLogProducer(ctx); err != nil {
		return nil, err
	}
	consulContainer.FollowOutput(&LogConsumer{
		Prefix: name,
	})

	uri := fmt.Sprintf("http://%s:%s", localIP, mappedPort.Port())
	apiConfig := api.DefaultConfig()
	apiConfig.Address = uri
	apiClient, err := api.NewClient(apiConfig)
	if err != nil {
		return nil, err
	}

	return &consulContainerNode{
		config:     config,
		pod:        podContainer,
		container:  consulContainer,
		serverMode: pc.Server,
		ip:         ip,
		port:       mappedPort.Int(),
		datacenter: pc.Datacenter,
		client:     apiClient,
		ctx:        ctx,
		podReq:     podReq,
		consulReq:  consulReq,
		dataDir:    tmpDirData,
		certDir:    tmpCertData,
		network:    network,
		id:         index,
	}, nil
}
// GetName returns the container name of the agent, or the empty string if the
// name cannot be fetched from the container runtime.
func (c *consulContainerNode) GetName() string {
	name, err := c.container.Name(c.ctx)
	if err != nil {
		return ""
	}
	return name
}
// GetConfig returns the Config this agent was created with.
func (c *consulContainerNode) GetConfig() Config {
	return c.config
}
// GetDatacenter returns the datacenter parsed from the agent's config JSON.
func (c *consulContainerNode) GetDatacenter() string {
	return c.datacenter
}
// IsServer reports whether the agent was configured in server mode.
func (c *consulContainerNode) IsServer() bool {
	return c.serverMode
}
// GetClient returns an API client that can be used to communicate with the Agent.
func (c *consulContainerNode) GetClient() *api.Client {
	return c.client
}
// GetAddr return the network address associated with the Agent: the pod's
// container IP and the host-mapped HTTP API port.
func (c *consulContainerNode) GetAddr() (string, int) {
	return c.ip, c.port
}
// RegisterTermination appends a cleanup function to run when Terminate is called.
func (c *consulContainerNode) RegisterTermination(f func() error) {
	c.terminateFuncs = append(c.terminateFuncs, f)
}
// Upgrade replaces the running consul container with a new one built from the
// given config, while keeping the same pod (network identity), certificate
// directory, and data directory so agent state survives the swap.
func (c *consulContainerNode) Upgrade(ctx context.Context, config Config, index int) error {
	pc, err := readSomeConfigFileFields(config.JSON)
	if err != nil {
		return err
	}

	consulType := "client"
	if pc.Server {
		consulType = "server"
	}
	name := utils.RandName(fmt.Sprintf("%s-consul-%s-%d", pc.Datacenter, consulType, index))

	// Inject new Agent name
	config.Cmd = append(config.Cmd, "-node", name)

	file, err := createConfigFile(config.JSON)
	if err != nil {
		return err
	}

	// Write any new certs into the existing mounted cert dir.
	for filename, cert := range config.Certs {
		err := createCertFile(c.certDir, filename, cert)
		if err != nil {
			return errors.Wrapf(err, "failed to write file %s", filename)
		}
	}

	// We'll keep the same pod.
	opts := containerOpts{
		name:              c.consulReq.Name,
		certDir:           c.certDir,
		configFile:        file,
		dataDir:           c.dataDir,
		license:           "",
		addtionalNetworks: []string{"bridge", c.network},
		hostname:          fmt.Sprintf("agent-%d", c.id),
	}
	_, consulReq2 := newContainerRequest(config, opts)
	consulReq2.Env = c.consulReq.Env // copy license

	// Stop log streaming (best effort) and tear down the old container
	// before starting its replacement.
	_ = c.container.StopLogProducer()
	if err := c.container.Terminate(ctx); err != nil {
		return err
	}

	c.consulReq = consulReq2

	container, err := startContainer(ctx, c.consulReq)
	if err != nil {
		return err
	}

	if err := container.StartLogProducer(ctx); err != nil {
		return err
	}
	container.FollowOutput(&LogConsumer{
		Prefix: name,
	})

	c.container = container
	return nil
}
// Terminate attempts to terminate the agent container.
// This might also include running termination functions for containers associated with the agent.
// On failure, an error will be returned and the reaper process (RYUK) will handle cleanup.
func (c *consulContainerNode) Terminate() error {
	// Services might register a termination function that should also fire
	// when the "agent" is cleaned up. Run them all and remember the first
	// failure rather than silently dropping it (the original code ignored
	// these errors in an empty if-body).
	var err error
	for _, f := range c.terminateFuncs {
		if ferr := f(); ferr != nil && err == nil {
			err = ferr
		}
	}

	if c.container == nil {
		return err
	}

	// Stop the log stream, then tear the container down; keep the first
	// error encountered overall.
	if serr := c.container.StopLogProducer(); serr != nil && err == nil {
		err = serr
	}
	if terr := c.container.Terminate(c.ctx); terr != nil && err == nil {
		err = terr
	}

	c.container = nil

	return err
}
// startContainer creates and starts a container from the given request.
func startContainer(ctx context.Context, req testcontainers.ContainerRequest) (testcontainers.Container, error) {
	return testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
}
// pauseImage is the image used for the "pod" container that owns the agent's
// network identity (the consul container joins its network namespace).
const pauseImage = "k8s.gcr.io/pause:3.3"

// containerOpts carries the per-agent values needed to build the pod and
// consul container requests.
type containerOpts struct {
	certDir           string   // host dir mounted at /consul/config/certs
	configFile        string   // host path of the rendered config file
	dataDir           string   // host dir mounted at /consul/data
	hostname          string
	index             int
	license           string   // enterprise license passed via CONSUL_LICENSE
	name              string
	addtionalNetworks []string
}
// newContainerRequest builds the pair of container requests for one agent:
// a pause "pod" container that exposes port 8500 and owns the network
// namespace, and the consul container that joins that namespace.
func newContainerRequest(config Config, opts containerOpts) (podRequest, consulRequest testcontainers.ContainerRequest) {
	skipReaper := isRYUKDisabled()

	pod := testcontainers.ContainerRequest{
		Image:        pauseImage,
		AutoRemove:   false,
		Name:         opts.name + "-pod",
		SkipReaper:   skipReaper,
		ExposedPorts: []string{"8500/tcp"},
		Hostname:     opts.hostname,
		Networks:     opts.addtionalNetworks,
	}

	// For handshakes like auto-encrypt, it can take 10's of seconds for the agent to become "ready".
	// If we only wait until the log stream starts, subsequent commands to agents will fail.
	// TODO: optimize the wait strategy
	app := testcontainers.ContainerRequest{
		NetworkMode: dockercontainer.NetworkMode("container:" + opts.name + "-pod"),
		Image:       config.Image + ":" + config.Version,
		WaitingFor:  wait.ForLog(bootLogLine).WithStartupTimeout(60 * time.Second), // See note above
		AutoRemove:  false,
		Name:        opts.name,
		Mounts: []testcontainers.ContainerMount{
			{Source: testcontainers.DockerBindMountSource{HostPath: opts.certDir}, Target: "/consul/config/certs"},
			{Source: testcontainers.DockerBindMountSource{HostPath: opts.configFile}, Target: "/consul/config/config.json"},
			{Source: testcontainers.DockerBindMountSource{HostPath: opts.dataDir}, Target: "/consul/data"},
		},
		Cmd:        config.Cmd,
		SkipReaper: skipReaper,
		Env:        map[string]string{"CONSUL_LICENSE": opts.license},
	}

	return pod, app
}
// isRYUKDisabled returns whether the reaper process (RYUK) has been disabled
// by an environment variable.
//
// https://github.com/testcontainers/moby-ryuk
func isRYUKDisabled() bool {
	// Any unparseable or unset value counts as "not disabled".
	disabled, err := strconv.ParseBool(os.Getenv(disableRYUKEnv))
	if err != nil {
		return false
	}
	return disabled
}
// readLicense loads an enterprise license from the environment: CONSUL_LICENSE
// takes priority, then the file named by CONSUL_LICENSE_PATH. An empty string
// with a nil error means no license was configured.
func readLicense() (string, error) {
	if license := os.Getenv("CONSUL_LICENSE"); license != "" {
		return license, nil
	}

	licensePath := os.Getenv("CONSUL_LICENSE_PATH")
	if licensePath == "" {
		return "", nil
	}

	licenseBytes, err := os.ReadFile(licensePath)
	if err != nil {
		return "", err
	}
	return string(licenseBytes), nil
}
// createConfigFile writes the agent JSON configuration into a fresh temp
// directory and returns the host path of the rendered config file
// (<tmp>/config/config.hcl), which is bind-mounted into the container.
func createConfigFile(JSON string) (string, error) {
	// os.MkdirTemp replaces the third-party ioutils.TempDir — same behavior,
	// standard library only.
	tmpDir, err := os.MkdirTemp("", "consul-container-test-config")
	if err != nil {
		return "", err
	}
	// 0777 so the in-container consul user can read the mounted files.
	if err := os.Chmod(tmpDir, 0777); err != nil {
		return "", err
	}

	configDir := filepath.Join(tmpDir, "config")
	if err := os.Mkdir(configDir, 0777); err != nil {
		return "", err
	}

	configFile := filepath.Join(configDir, "config.hcl")
	if err := os.WriteFile(configFile, []byte(JSON), 0644); err != nil {
		return "", err
	}
	return configFile, nil
}
// createCertFile writes a PEM blob into dir under the base name of filename.
// Any directory component of filename is stripped so the file always lands
// directly inside dir.
func createCertFile(dir, filename, cert string) error {
	filename = filepath.Base(filename)
	path := filepath.Join(dir, filename)
	// Wrap with %w (stdlib) instead of pkg/errors; the message is unchanged.
	if err := os.WriteFile(path, []byte(cert), 0644); err != nil {
		return fmt.Errorf("could not write cert file: %w", err)
	}
	return nil
}
// parsedConfig holds the few agent config fields this package needs to
// inspect before starting a container.
type parsedConfig struct {
	Datacenter string `json:"datacenter"`
	Server     bool   `json:"server"`
}

// readSomeConfigFileFields parses the datacenter and server-mode fields out
// of an agent config JSON document, defaulting the datacenter to "dc1" when
// it is unset.
func readSomeConfigFileFields(JSON string) (parsedConfig, error) {
	var pc parsedConfig
	// Wrap with %w (stdlib) instead of pkg/errors; the message is unchanged.
	if err := json.Unmarshal([]byte(JSON), &pc); err != nil {
		return pc, fmt.Errorf("failed to parse config file: %w", err)
	}
	if pc.Datacenter == "" {
		pc.Datacenter = "dc1"
	}
	return pc, nil
}

View File

@ -0,0 +1,58 @@
package agent
import (
"crypto/rand"
"crypto/x509"
"encoding/base64"
"fmt"
"net"
"github.com/pkg/errors"
"github.com/hashicorp/consul/tlsutil"
)
// newSerfEncryptionKey generates a random 32-byte gossip encryption key and
// returns it base64-encoded, as expected by the serf "encrypt" option.
func newSerfEncryptionKey() (string, error) {
	key := make([]byte, 32)
	n, err := rand.Reader.Read(key)
	if err != nil {
		return "", fmt.Errorf("error reading random data: %w", err)
	}
	if n != 32 {
		// BUG FIX: this branch previously returned errors.Wrap(err, ...) with
		// a nil err, which yields a nil error — the short read was silently
		// accepted. Return a real error instead.
		return "", fmt.Errorf("couldn't read enough entropy. Generate more entropy!")
	}
	return base64.StdEncoding.EncodeToString(key), nil
}
// newServerTLSKeyPair generates an agent-specific server TLS key pair signed
// by the cluster CA, mirroring 'consul tls cert create -server -dc <dc_name>'.
// It returns (keyFileName, privateKeyPEM, certFileName, certPEM) and panics on
// any generation or verification failure, matching this file's other helpers.
func newServerTLSKeyPair(dc string, ctx *BuildContext) (string, string, string, string) {
	// Generate agent-specific key pair. Borrowed from 'consul tls cert create -server -dc <dc_name>'
	name := fmt.Sprintf("server.%s.%s", dc, "consul")
	dnsNames := []string{
		name,
		"localhost",
	}
	ipAddresses := []net.IP{net.ParseIP("127.0.0.1")}
	extKeyUsage := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}

	signer, err := tlsutil.ParseSigner(ctx.caKey)
	if err != nil {
		panic("could not parse signer from CA key")
	}

	pub, priv, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer: signer, CA: ctx.caCert, Name: name, Days: 365,
		DNSNames: dnsNames, IPAddresses: ipAddresses, ExtKeyUsage: extKeyUsage,
	})
	// BUG FIX: the error from GenerateCert was previously ignored, so a failed
	// generation could flow empty cert material into Verify and the caller.
	if err != nil {
		panic(fmt.Sprintf("could not generate cert for %s: %s", name, err))
	}

	prefix := fmt.Sprintf("%s-server-%s", dc, "consul")
	certFileName := fmt.Sprintf("%s-%d.pem", prefix, ctx.index)
	keyFileName := fmt.Sprintf("%s-%d-key.pem", prefix, ctx.index)

	if err = tlsutil.Verify(ctx.caCert, pub, name); err != nil {
		panic(fmt.Sprintf("could not verify keypair for %s and %s", certFileName, keyFileName))
	}

	return keyFileName, priv, certFileName, pub
}

View File

@ -1,4 +1,4 @@
package node package agent
import ( import (
"fmt" "fmt"
@ -7,13 +7,13 @@ import (
"github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go"
) )
type NodeLogConsumer struct { type LogConsumer struct {
Prefix string Prefix string
} }
var _ testcontainers.LogConsumer = (*NodeLogConsumer)(nil) var _ testcontainers.LogConsumer = (*LogConsumer)(nil)
func (c *NodeLogConsumer) Accept(log testcontainers.Log) { func (c *LogConsumer) Accept(log testcontainers.Log) {
switch log.LogType { switch log.LogType {
case "STDOUT": case "STDOUT":
fmt.Fprint(os.Stdout, c.Prefix+" ~~ "+string(log.Content)) fmt.Fprint(os.Stdout, c.Prefix+" ~~ "+string(log.Content))

View File

@ -0,0 +1,10 @@
package assert
import (
"time"
)
const (
	// defaultTimeout bounds how long retry-based assertions keep polling.
	defaultTimeout = 5 * time.Second
	// defaultWait is the pause between successive retry attempts.
	defaultWait = 500 * time.Millisecond
)

View File

@ -0,0 +1,44 @@
package assert
import (
"context"
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
)
// PeeringStatus verifies within a retry window that the named peering is in
// the expected state, failing the test otherwise.
func PeeringStatus(t *testing.T, client *api.Client, peerName string, status api.PeeringState) {
	failer := func() *retry.Timer {
		return &retry.Timer{Timeout: 20 * time.Second, Wait: defaultWait}
	}

	retry.RunWith(failer(), t, func(r *retry.R) {
		peering, _, err := client.Peerings().Read(context.Background(), peerName, &api.QueryOptions{})
		if err != nil {
			// Include the underlying error instead of swallowing it.
			r.Fatal("error reading peering data: ", err)
		}
		if peering == nil {
			// Guard against a nil-pointer dereference when the peering is absent.
			r.Fatal("peering not found: ", peerName)
		}
		if status != peering.State {
			r.Fatal("peering state did not match: got ", peering.State, " want ", status)
		}
	})
}
// PeeringExports verifies within a retry window that the named peering reports
// the expected number of exported services.
func PeeringExports(t *testing.T, client *api.Client, peerName string, exports int) {
	failer := func() *retry.Timer {
		return &retry.Timer{Timeout: defaultTimeout, Wait: defaultWait}
	}

	retry.RunWith(failer(), t, func(r *retry.R) {
		peering, _, err := client.Peerings().Read(context.Background(), peerName, &api.QueryOptions{})
		if err != nil {
			// Include the underlying error instead of swallowing it.
			r.Fatal("error reading peering data: ", err)
		}
		if peering == nil {
			// Guard against a nil-pointer dereference when the peering is absent.
			r.Fatal("peering not found: ", peerName)
		}
		if got := len(peering.StreamStatus.ExportedServices); got != exports {
			r.Fatal("peering exported services did not match: got ", got, " want ", exports)
		}
	})
}

View File

@ -0,0 +1,62 @@
package assert
import (
"fmt"
"io"
"net/http"
"strings"
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
)
const (
	// defaultHTTPTimeout bounds how long HTTP-based assertions keep retrying.
	defaultHTTPTimeout = 30 * time.Second
	// defaultHTTPWait is the pause between successive HTTP retry attempts.
	defaultHTTPWait = defaultWait
)
// HTTPServiceEchoes verifies that a POST to the given ip/port combination
// returns the request body in the response body, retrying until the service
// answers or the timeout elapses.
func HTTPServiceEchoes(t *testing.T, ip string, port int) {
	const phrase = "hello"

	failer := func() *retry.Timer {
		return &retry.Timer{Timeout: defaultHTTPTimeout, Wait: defaultHTTPWait}
	}

	client := http.DefaultClient
	url := fmt.Sprintf("http://%s:%d", ip, port)

	retry.RunWith(failer(), t, func(r *retry.R) {
		t.Logf("making call to %s", url)

		reader := strings.NewReader(phrase)
		res, err := client.Post(url, "text/plain", reader)
		if err != nil {
			r.Fatal("could not make call to service ", url, ": ", err)
		}
		defer res.Body.Close()

		body, err := io.ReadAll(res.Body)
		if err != nil {
			r.Fatal("could not read response body from ", url, ": ", err)
		}
		// BUG FIX: convert to string before reporting; a raw []byte prints as
		// a slice of numbers, making the failure message unreadable.
		if !strings.Contains(string(body), phrase) {
			r.Fatal("received an incorrect response: ", string(body))
		}
	})
}
// CatalogServiceExists verifies that the given service name has at least one
// entry in the Consul catalog, retrying with the default policy.
func CatalogServiceExists(t *testing.T, c *api.Client, svc string) {
	retry.Run(t, func(r *retry.R) {
		services, _, err := c.Catalog().Service(svc, "", nil)
		if err != nil {
			// BUG FIX: message previously said "error reading peering data",
			// copy-pasted from the peering helpers; also surface the error.
			r.Fatal("error reading catalog data: ", err)
		}
		if len(services) == 0 {
			r.Fatal("did not find catalog entry for ", svc)
		}
	})
}

View File

@ -2,99 +2,157 @@ package cluster
import ( import (
"context" "context"
"crypto/rand"
"encoding/base64"
"fmt" "fmt"
"strings" "strings"
"testing" "testing"
"time" "time"
"github.com/hashicorp/serf/serf"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/teris-io/shortid"
"github.com/testcontainers/testcontainers-go"
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/test/integration/consul-container/libs/node" libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
"github.com/stretchr/testify/require"
) )
// Cluster provides an interface for creating and controlling a Consul cluster // Cluster provides an interface for creating and controlling a Consul cluster
// in integration tests, with nodes running in containers. // in integration tests, with agents running in containers.
// These fields are public in the event someone might want to surgically
// craft a test case.
type Cluster struct { type Cluster struct {
Nodes []node.Node Agents []libagent.Agent
EncryptKey string CACert string
CAKey string
ID string
Index int
Network testcontainers.Network
NetworkName string
} }
// New creates a Consul cluster. A node will be started for each of the given // New creates a Consul cluster. An agent will be started for each of the given
// configs and joined to the cluster. // configs and joined to the cluster.
func New(configs []node.Config) (*Cluster, error) { //
serfKey, err := newSerfEncryptionKey() // A cluster has its own docker network for DNS connectivity, but is also joined
func New(configs []libagent.Config) (*Cluster, error) {
id, err := shortid.Generate()
if err != nil { if err != nil {
return nil, err return nil, errors.Wrap(err, "could not cluster id")
}
name := fmt.Sprintf("consul-int-cluster-%s", id)
network, err := createNetwork(name)
if err != nil {
return nil, errors.Wrap(err, "could not create cluster container network")
} }
cluster := Cluster{ cluster := Cluster{
EncryptKey: serfKey, ID: id,
Network: network,
NetworkName: name,
} }
nodes := make([]node.Node, len(configs)) if err := cluster.Add(configs); err != nil {
for idx, c := range configs { return nil, errors.Wrap(err, "could not start or join all agents")
c.HCL += fmt.Sprintf(" encrypt=%q", serfKey)
n, err := node.NewConsulContainer(context.Background(), c)
if err != nil {
return nil, err
}
nodes[idx] = n
}
if err := cluster.AddNodes(nodes); err != nil {
return nil, err
} }
return &cluster, nil return &cluster, nil
} }
// AddNodes joins the given nodes to the cluster. // Add starts an agent with the given configuration and joins it with the existing cluster
func (c *Cluster) AddNodes(nodes []node.Node) error { func (c *Cluster) Add(configs []libagent.Config) error {
var joinAddr string
if len(c.Nodes) >= 1 {
joinAddr, _ = c.Nodes[0].GetAddr()
} else if len(nodes) >= 1 {
joinAddr, _ = nodes[0].GetAddr()
}
for _, node := range nodes { agents := make([]libagent.Agent, len(configs))
err := node.GetClient().Agent().Join(joinAddr, false) for idx, conf := range configs {
n, err := libagent.NewConsulContainer(context.Background(), conf, c.NetworkName, c.Index)
if err != nil { if err != nil {
return err return errors.Wrapf(err, "could not add container index %d", idx)
} }
c.Nodes = append(c.Nodes, node) agents[idx] = n
c.Index++
}
if err := c.Join(agents); err != nil {
return errors.Wrapf(err, "could not join agent")
} }
return nil return nil
} }
// Terminate will attempt to terminate all nodes in the cluster. If any node // Join joins the given agent to the cluster.
func (c *Cluster) Join(agents []libagent.Agent) error {
var joinAddr string
if len(c.Agents) >= 1 {
joinAddr, _ = c.Agents[0].GetAddr()
} else if len(agents) >= 1 {
joinAddr, _ = agents[0].GetAddr()
}
for _, n := range agents {
err := n.GetClient().Agent().Join(joinAddr, false)
if err != nil {
return errors.Wrapf(err, "could not join agent %s to %s", n.GetName(), joinAddr)
}
c.Agents = append(c.Agents, n)
}
return nil
}
// Remove instructs the agent to leave the cluster then removes it
// from the cluster Agent list.
func (c *Cluster) Remove(n libagent.Agent) error {
err := n.GetClient().Agent().Leave()
if err != nil {
return errors.Wrapf(err, "could not remove agent %s", n.GetName())
}
foundIdx := -1
for idx, this := range c.Agents {
if this == n {
foundIdx = idx
break
}
}
if foundIdx == -1 {
return errors.New("could not find agent in cluster")
}
c.Agents = append(c.Agents[:foundIdx], c.Agents[foundIdx+1:]...)
return nil
}
// Terminate will attempt to terminate all agents in the cluster and its network. If any agent
// termination fails, Terminate will abort and return an error. // termination fails, Terminate will abort and return an error.
func (c *Cluster) Terminate() error { func (c *Cluster) Terminate() error {
for _, n := range c.Nodes { for _, n := range c.Agents {
err := n.Terminate() err := n.Terminate()
if err != nil { if err != nil {
return err return err
} }
} }
// Testcontainers seems to clean this the network.
// Trigger it now will throw an error while the containers are still shutting down
//if err := c.Network.Remove(context.Background()); err != nil {
// return errors.Wrapf(err, "could not terminate cluster network %s", c.ID)
//}
return nil return nil
} }
// Leader returns the cluster leader node, or an error if no leader is // Leader returns the cluster leader agent, or an error if no leader is
// available. // available.
func (c *Cluster) Leader() (node.Node, error) { func (c *Cluster) Leader() (libagent.Agent, error) {
if len(c.Nodes) < 1 { if len(c.Agents) < 1 {
return nil, fmt.Errorf("no node available") return nil, fmt.Errorf("no agent available")
} }
n0 := c.Nodes[0] n0 := c.Agents[0]
leaderAdd, err := GetLeader(n0.GetClient()) leaderAdd, err := getLeader(n0.GetClient())
if err != nil { if err != nil {
return nil, err return nil, err
} }
for _, n := range c.Nodes { for _, n := range c.Agents {
addr, _ := n.GetAddr() addr, _ := n.GetAddr()
if strings.Contains(leaderAdd, addr) { if strings.Contains(leaderAdd, addr) {
return n, nil return n, nil
@ -103,30 +161,58 @@ func (c *Cluster) Leader() (node.Node, error) {
return nil, fmt.Errorf("leader not found") return nil, fmt.Errorf("leader not found")
} }
func newSerfEncryptionKey() (string, error) { func getLeader(client *api.Client) (string, error) {
key := make([]byte, 32)
n, err := rand.Reader.Read(key)
if err != nil {
return "", fmt.Errorf("Error reading random data: %w", err)
}
if n != 32 {
return "", fmt.Errorf("Couldn't read enough entropy. Generate more entropy!")
}
return base64.StdEncoding.EncodeToString(key), nil
}
func GetLeader(client *api.Client) (string, error) {
leaderAdd, err := client.Status().Leader() leaderAdd, err := client.Status().Leader()
if err != nil { if err != nil {
return "", err return "", errors.Wrap(err, "could not query leader")
} }
if leaderAdd == "" { if leaderAdd == "" {
return "", fmt.Errorf("no leader available") return "", errors.New("no leader available")
} }
return leaderAdd, nil return leaderAdd, nil
} }
// Followers returns the cluster following servers.
func (c *Cluster) Followers() ([]libagent.Agent, error) {
var followers []libagent.Agent
leader, err := c.Leader()
if err != nil {
return nil, fmt.Errorf("could not determine leader: %w", err)
}
for _, n := range c.Agents {
if n != leader && n.IsServer() {
followers = append(followers, n)
}
}
return followers, nil
}
// Servers returns the handle to server agents
func (c *Cluster) Servers() ([]libagent.Agent, error) {
var servers []libagent.Agent
for _, n := range c.Agents {
if n.IsServer() {
servers = append(servers, n)
}
}
return servers, nil
}
// Clients returns the handle to client agents
func (c *Cluster) Clients() ([]libagent.Agent, error) {
var clients []libagent.Agent
for _, n := range c.Agents {
if !n.IsServer() {
clients = append(clients, n)
}
}
return clients, nil
}
const retryTimeout = 20 * time.Second const retryTimeout = 20 * time.Second
const retryFrequency = 500 * time.Millisecond const retryFrequency = 500 * time.Millisecond
@ -148,7 +234,7 @@ func WaitForLeader(t *testing.T, cluster *Cluster, client *api.Client) {
func waitForLeaderFromClient(t *testing.T, client *api.Client) { func waitForLeaderFromClient(t *testing.T, client *api.Client) {
retry.RunWith(LongFailer(), t, func(r *retry.R) { retry.RunWith(LongFailer(), t, func(r *retry.R) {
leader, err := GetLeader(client) leader, err := getLeader(client)
require.NoError(r, err) require.NoError(r, err)
require.NotEmpty(r, leader) require.NotEmpty(r, leader)
}) })
@ -157,7 +243,13 @@ func waitForLeaderFromClient(t *testing.T, client *api.Client) {
func WaitForMembers(t *testing.T, client *api.Client, expectN int) { func WaitForMembers(t *testing.T, client *api.Client, expectN int) {
retry.RunWith(LongFailer(), t, func(r *retry.R) { retry.RunWith(LongFailer(), t, func(r *retry.R) {
members, err := client.Agent().Members(false) members, err := client.Agent().Members(false)
var activeMembers int
for _, member := range members {
if serf.MemberStatus(member.Status) == serf.StatusAlive {
activeMembers++
}
}
require.NoError(r, err) require.NoError(r, err)
require.Len(r, members, expectN) require.Equal(r, activeMembers, expectN)
}) })
} }

View File

@ -0,0 +1,23 @@
package cluster
import (
"context"
"github.com/pkg/errors"
"github.com/testcontainers/testcontainers-go"
)
// createNetwork provisions an attachable docker network with the given name
// for a test cluster, erroring if a network of that name already exists.
func createNetwork(name string) (testcontainers.Network, error) {
	network, err := testcontainers.GenericNetwork(context.Background(), testcontainers.GenericNetworkRequest{
		NetworkRequest: testcontainers.NetworkRequest{
			Name:           name,
			Attachable:     true,
			CheckDuplicate: true,
		},
	})
	if err != nil {
		return nil, errors.Wrap(err, "could not create network")
	}
	return network, nil
}

View File

@ -1,295 +0,0 @@
package node
import (
"context"
"fmt"
"os"
"strconv"
"time"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/ioutils"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/hcl"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
// bootLogLine is the agent log message that signals the container finished booting.
const bootLogLine = "Consul agent running"

// disableRYUKEnv is the testcontainers env var that disables its reaper (RYUK).
const disableRYUKEnv = "TESTCONTAINERS_RYUK_DISABLED"
// consulContainerNode implements the Node interface by running a Consul node
// in a container.
type consulContainerNode struct {
	// ctx is used for all container lifecycle operations.
	ctx context.Context
	// client is an API client pointed at the host-mapped HTTP port.
	client *api.Client
	// pod is the pause container that owns the network namespace and ports.
	pod testcontainers.Container
	// container is the Consul agent container running in the pod's network.
	container testcontainers.Container
	// ip is the pod container's IP inside the docker network.
	ip string
	// port is the host-mapped HTTP API port (8500 inside the pod).
	port int
	// config is the Config the node was created with.
	config Config
	// podReq and consulReq are the original container requests; consulReq is
	// reused by Upgrade to rebuild the agent container in the same pod.
	podReq    testcontainers.ContainerRequest
	consulReq testcontainers.ContainerRequest
	// dataDir is the host temp dir bind-mounted at /consul/data.
	dataDir string
}
// GetConfig returns the Config this node was created with.
func (c *consulContainerNode) GetConfig() Config {
	return c.config
}
// startContainer creates the requested container and starts it immediately.
func startContainer(ctx context.Context, req testcontainers.ContainerRequest) (testcontainers.Container, error) {
	return testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
}
// NewConsulContainer starts a Consul node in a container with the given config.
//
// It first creates a pause ("pod") container that owns the network namespace
// and the exposed HTTP port, then starts the Consul agent container inside
// that pod's network, attaches a log consumer, and returns a Node whose API
// client talks to the host-mapped port.
func NewConsulContainer(ctx context.Context, config Config) (Node, error) {
	license, err := readLicense()
	if err != nil {
		return nil, err
	}

	name := utils.RandName("consul-")

	// Host temp dir that will be bind-mounted as the agent's data directory.
	tmpDirData, err := ioutils.TempDir("", name)
	if err != nil {
		return nil, err
	}
	err = os.Chmod(tmpDirData, 0777)
	if err != nil {
		return nil, err
	}

	// Pull node_name out of the HCL so it can prefix the container's logs.
	pc, err := readSomeConfigFileFields(config.HCL)
	if err != nil {
		return nil, err
	}

	configFile, err := createConfigFile(config.HCL)
	if err != nil {
		return nil, err
	}

	podReq, consulReq := newContainerRequest(config, name, configFile, tmpDirData, license)

	// The pod container must exist before the agent can join its network.
	podContainer, err := startContainer(ctx, podReq)
	if err != nil {
		return nil, err
	}

	// Host address and mapped port come from the pod, which owns the ports.
	localIP, err := podContainer.Host(ctx)
	if err != nil {
		return nil, err
	}

	mappedPort, err := podContainer.MappedPort(ctx, "8500")
	if err != nil {
		return nil, err
	}

	ip, err := podContainer.ContainerIP(ctx)
	if err != nil {
		return nil, err
	}

	consulContainer, err := startContainer(ctx, consulReq)
	if err != nil {
		return nil, err
	}

	if err := consulContainer.StartLogProducer(ctx); err != nil {
		return nil, err
	}
	consulContainer.FollowOutput(&NodeLogConsumer{
		Prefix: pc.NodeName,
	})

	uri := fmt.Sprintf("http://%s:%s", localIP, mappedPort.Port())
	apiConfig := api.DefaultConfig()
	apiConfig.Address = uri
	apiClient, err := api.NewClient(apiConfig)
	if err != nil {
		return nil, err
	}

	return &consulContainerNode{
		config:    config,
		pod:       podContainer,
		container: consulContainer,
		ip:        ip,
		port:      mappedPort.Int(),
		client:    apiClient,
		ctx:       ctx,
		podReq:    podReq,
		consulReq: consulReq,
		dataDir:   tmpDirData,
	}, nil
}
// pauseImage is the no-op container image that holds the pod's network namespace.
const pauseImage = "k8s.gcr.io/pause:3.3"

// newContainerRequest builds the two container requests that make up a node:
// a pause "pod" container that owns the network namespace and the exposed
// HTTP port, and the Consul agent container that joins that pod's network.
func newContainerRequest(config Config, name, configFile, dataDir, license string) (podRequest, consulRequest testcontainers.ContainerRequest) {
	skipReaper := isRYUKDisabled()

	pod := testcontainers.ContainerRequest{
		Image:        pauseImage,
		AutoRemove:   false,
		Name:         name + "-pod",
		SkipReaper:   skipReaper,
		ExposedPorts: []string{"8500/tcp"},
	}

	// The agent shares the pod's network namespace ("container:" mode) and is
	// considered started once the boot log line appears.
	app := testcontainers.ContainerRequest{
		NetworkMode: dockercontainer.NetworkMode("container:" + name + "-pod"),
		Image:       config.Image + ":" + config.Version,
		WaitingFor:  wait.ForLog(bootLogLine).WithStartupTimeout(10 * time.Second),
		AutoRemove:  false,
		Name:        name,
		Mounts: []testcontainers.ContainerMount{
			{Source: testcontainers.DockerBindMountSource{HostPath: configFile}, Target: "/consul/config/config.hcl"},
			{Source: testcontainers.DockerBindMountSource{HostPath: dataDir}, Target: "/consul/data"},
		},
		Cmd:        config.Cmd,
		SkipReaper: skipReaper,
		Env:        map[string]string{"CONSUL_LICENSE": license},
	}

	return pod, app
}
// GetClient returns an API client that can be used to communicate with the Node.
func (c *consulContainerNode) GetClient() *api.Client {
	return c.client
}
// GetAddr returns the network address associated with the Node: the pod's
// container IP and the host-mapped HTTP port.
func (c *consulContainerNode) GetAddr() (string, int) {
	return c.ip, c.port
}
// Upgrade replaces the node's Consul container with one built from the given
// config, keeping the same pod (network identity) and data directory so the
// replacement agent resumes with the previous agent's state.
func (c *consulContainerNode) Upgrade(ctx context.Context, config Config) error {
	pc, err := readSomeConfigFileFields(config.HCL)
	if err != nil {
		return err
	}

	file, err := createConfigFile(config.HCL)
	if err != nil {
		return err
	}

	// We'll keep the same pod.
	_, consulReq2 := newContainerRequest(
		config,
		c.consulReq.Name,
		file,
		c.dataDir,
		"",
	)
	consulReq2.Env = c.consulReq.Env // copy license

	// Best-effort stop of log streaming before tearing down the old container.
	_ = c.container.StopLogProducer()
	if err := c.container.Terminate(ctx); err != nil {
		return err
	}

	c.consulReq = consulReq2

	container, err := startContainer(ctx, c.consulReq)
	if err != nil {
		return err
	}

	if err := container.StartLogProducer(ctx); err != nil {
		return err
	}
	container.FollowOutput(&NodeLogConsumer{
		Prefix: pc.NodeName,
	})

	c.container = container
	return nil
}
// Terminate attempts to terminate the container. On failure, an error will be
// returned and the reaper process (RYUK) will handle cleanup.
func (c *consulContainerNode) Terminate() error {
	// Already terminated (or never created): nothing to do.
	if c.container == nil {
		return nil
	}

	// Stop log streaming, then terminate; the first error encountered wins.
	err := c.container.StopLogProducer()
	if err1 := c.container.Terminate(c.ctx); err == nil {
		err = err1
	}

	c.container = nil
	return err
}
// isRYUKDisabled returns whether the reaper process (RYUK) has been disabled
// by an environment variable.
//
// https://github.com/testcontainers/moby-ryuk
func isRYUKDisabled() bool {
	disabled, err := strconv.ParseBool(os.Getenv(disableRYUKEnv))
	if err != nil {
		// Unset or malformed value: leave the reaper enabled.
		return false
	}
	return disabled
}
// readLicense loads a Consul Enterprise license from the CONSUL_LICENSE
// environment variable, falling back to the contents of the file named by
// CONSUL_LICENSE_PATH. An empty string (and nil error) means no license.
func readLicense() (string, error) {
	if license := os.Getenv("CONSUL_LICENSE"); license != "" {
		return license, nil
	}
	licensePath := os.Getenv("CONSUL_LICENSE_PATH")
	if licensePath == "" {
		return "", nil
	}
	licenseBytes, err := os.ReadFile(licensePath)
	if err != nil {
		return "", err
	}
	return string(licenseBytes), nil
}
// createConfigFile writes the given HCL agent configuration to
// <tmpdir>/config/config.hcl and returns the file's path.
//
// The directories are world-writable (0777) so the container user can read
// and traverse them. Uses stdlib os.MkdirTemp instead of the deprecated
// docker ioutils.TempDir wrapper, and filepath.Join over string concatenation.
func createConfigFile(HCL string) (string, error) {
	tmpDir, err := os.MkdirTemp("", "consul-container-test-config")
	if err != nil {
		return "", err
	}
	if err := os.Chmod(tmpDir, 0777); err != nil {
		return "", err
	}
	configDir := filepath.Join(tmpDir, "config")
	if err := os.Mkdir(configDir, 0777); err != nil {
		return "", err
	}
	configFile := filepath.Join(configDir, "config.hcl")
	if err := os.WriteFile(configFile, []byte(HCL), 0644); err != nil {
		return "", err
	}
	return configFile, nil
}
// parsedConfig is the subset of the agent's HCL configuration the harness
// needs; node_name is used to prefix container log output.
type parsedConfig struct {
	NodeName string `hcl:"node_name"`
}
// readSomeConfigFileFields decodes the agent HCL configuration to recover the
// handful of fields the harness needs (currently node_name).
func readSomeConfigFileFields(HCL string) (parsedConfig, error) {
	parsed := parsedConfig{}
	if err := hcl.Decode(&parsed, HCL); err != nil {
		return parsed, fmt.Errorf("Failed to parse config file: %w", err)
	}
	return parsed, nil
}

View File

@ -1,4 +0,0 @@
//go:build !consulent
// +build !consulent
package node

View File

@ -1,24 +0,0 @@
package node
import (
"context"
"github.com/hashicorp/consul/api"
)
// Node represent a Consul node abstraction
type Node interface {
Terminate() error
GetClient() *api.Client
GetAddr() (string, int)
GetConfig() Config
Upgrade(ctx context.Context, config Config) error
}
// Config is a set of configurations required to create a Node
type Config struct {
HCL string
Image string
Version string
Cmd []string
}

View File

@ -0,0 +1,7 @@
# Note this arg has to be before the first FROM
ARG ENVOY_VERSION
FROM consul:local as consul
FROM docker.mirror.hashicorp.services/envoyproxy/envoy:v${ENVOY_VERSION}
COPY --from=consul /bin/consul /bin/consul

View File

@ -0,0 +1,57 @@
package service
import (
"archive/tar"
"bytes"
_ "embed"
"os"
"github.com/testcontainers/testcontainers-go"
)
// latestEnvoyVersion is the fallback Envoy version used when the ENVOY_VERSION
// environment variable is unset or empty.
const latestEnvoyVersion = "1.23.1"

// envoyEnvKey names the environment variable that overrides the Envoy version.
const envoyEnvKey = "ENVOY_VERSION"

// hashicorpDockerProxy is HashiCorp's docker image mirror, used as the image
// registry prefix.
const hashicorpDockerProxy = "docker.mirror.hashicorp.services"

// consulEnvoyDockerfile is the embedded Dockerfile that combines the local
// consul binary with an Envoy base image.
//go:embed assets/Dockerfile-consul-envoy
var consulEnvoyDockerfile string
// getDevContainerDockerfile returns the necessary context to build a combined consul and
// envoy image for running "consul connect envoy ..."
func getDevContainerDockerfile() (testcontainers.FromDockerfile, error) {
	content := []byte(consulEnvoyDockerfile)

	// Package the embedded Dockerfile into an in-memory tar archive, which is
	// the build-context format testcontainers expects.
	var archive bytes.Buffer
	tw := tar.NewWriter(&archive)

	header := &tar.Header{
		Name: "Dockerfile",
		Mode: 0600,
		Size: int64(len(content)),
	}
	if err := tw.WriteHeader(header); err != nil {
		return testcontainers.FromDockerfile{}, err
	}
	if _, err := tw.Write(content); err != nil {
		return testcontainers.FromDockerfile{}, err
	}
	if err := tw.Close(); err != nil {
		return testcontainers.FromDockerfile{}, err
	}

	return testcontainers.FromDockerfile{
		ContextArchive: bytes.NewReader(archive.Bytes()),
	}, nil
}
// getEnvoyVersion returns the Envoy version under test, preferring the
// ENVOY_VERSION environment variable over the built-in default.
func getEnvoyVersion() string {
	version, ok := os.LookupEnv(envoyEnvKey)
	if !ok || version == "" {
		return latestEnvoyVersion
	}
	return version
}

View File

@ -0,0 +1,144 @@
package service
import (
"context"
"fmt"
"time"
"github.com/docker/go-connections/nat"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
libnode "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
// ConnectContainer is a "consul connect envoy" sidecar proxy running in its
// own container.
type ConnectContainer struct {
	// ctx is used for container lifecycle operations.
	ctx context.Context
	// container is the envoy sidecar container.
	container testcontainers.Container
	// ip is the container's IP inside the docker network.
	ip string
	// appPort is the host-mapped envoy listener port.
	appPort int
	// adminPort is the host-mapped envoy admin port (19000 in-container).
	adminPort int
	// req holds a container request. NOTE(review): not populated by
	// NewConnectService as written — confirm whether it is still needed.
	req testcontainers.ContainerRequest
}
// GetName returns the container's name, or "" if it cannot be determined.
func (g ConnectContainer) GetName() string {
	if name, err := g.container.Name(g.ctx); err == nil {
		return name
	}
	return ""
}
// GetAddr returns the container IP and the host-mapped application port.
func (g ConnectContainer) GetAddr() (string, int) {
	return g.ip, g.appPort
}
// Start launches the underlying container; it errors if the container was
// never created or has already been terminated.
func (g ConnectContainer) Start() error {
	if g.container == nil {
		return fmt.Errorf("container has not been initialized")
	}
	return g.container.Start(context.Background())
}
// GetAdminAddr returns localhost plus the host-mapped envoy admin port.
func (g ConnectContainer) GetAdminAddr() (string, int) {
	return "localhost", g.adminPort
}
// Terminate attempts to terminate the container. On failure, an error will be
// returned and the reaper process (RYUK) will handle cleanup.
//
// FIX: the receiver is now a pointer so that clearing c.container sticks.
// With the previous value receiver, the nil assignment only mutated a copy,
// so a second Terminate call would try to stop the container again. Callers
// already hold a *ConnectContainer (NewConnectService returns a pointer).
func (c *ConnectContainer) Terminate() error {
	if c.container == nil {
		return nil
	}

	// Stop log streaming, then terminate; the first error encountered wins.
	err := c.container.StopLogProducer()
	if err1 := c.container.Terminate(c.ctx); err == nil {
		err = err1
	}

	c.container = nil
	return err
}
// NewConnectService runs a "consul connect envoy" sidecar container for the
// given service and returns it as a Service.
//
// The container is built from a combined consul+envoy image, exposes the
// sidecar listener and the envoy admin port on the host, and registers its
// termination with the agent so both stop together.
func NewConnectService(ctx context.Context, name string, serviceName string, serviceBindPort int, node libnode.Agent) (Service, error) {
	namePrefix := fmt.Sprintf("%s-service-connect-%s", node.GetDatacenter(), name)
	containerName := utils.RandName(namePrefix)

	envoyVersion := getEnvoyVersion()
	buildargs := map[string]*string{
		"ENVOY_VERSION": utils.StringToPointer(envoyVersion),
	}

	dockerfileCtx, err := getDevContainerDockerfile()
	if err != nil {
		return nil, err
	}
	dockerfileCtx.BuildArgs = buildargs

	nodeIP, _ := node.GetAddr()

	req := testcontainers.ContainerRequest{
		FromDockerfile: dockerfileCtx,
		WaitingFor:     wait.ForLog("").WithStartupTimeout(10 * time.Second),
		AutoRemove:     false,
		Name:           containerName,
		Cmd: []string{
			"consul", "connect", "envoy",
			"-sidecar-for", serviceName,
			"-service", name,
			"-admin-bind", "0.0.0.0:19000",
			"-grpc-addr", fmt.Sprintf("%s:8502", nodeIP),
			"-http-addr", fmt.Sprintf("%s:8500", nodeIP),
			"--",
			"--log-level", "trace"},
		ExposedPorts: []string{
			fmt.Sprintf("%d/tcp", serviceBindPort), // Envoy Listener
			"19000/tcp",                            // Envoy Admin Port
		},
	}

	container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
	if err != nil {
		return nil, err
	}

	ip, err := container.ContainerIP(ctx)
	if err != nil {
		return nil, err
	}
	mappedAppPort, err := container.MappedPort(ctx, nat.Port(fmt.Sprintf("%d", serviceBindPort)))
	if err != nil {
		return nil, err
	}
	mappedAdminPort, err := container.MappedPort(ctx, nat.Port(fmt.Sprintf("%d", 19000)))
	if err != nil {
		return nil, err
	}

	if err := container.StartLogProducer(ctx); err != nil {
		return nil, err
	}
	container.FollowOutput(&LogConsumer{
		Prefix: containerName,
	})

	// Register the termination function with the agent so the containers can
	// stop together.
	node.RegisterTermination(func() error {
		return container.Terminate(context.Background())
	})

	return &ConnectContainer{
		ctx:       ctx, // FIX: previously unset, so Terminate ran with a nil context
		container: container,
		ip:        ip,
		appPort:   mappedAppPort.Int(),
		adminPort: mappedAdminPort.Int(),
	}, nil
}

View File

@ -0,0 +1,113 @@
package service
import (
"context"
"fmt"
"time"
"github.com/docker/go-connections/nat"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
libnode "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
// exampleContainer is a fortio test application running in a container,
// exposing HTTP and gRPC listeners.
type exampleContainer struct {
	// ctx is used for container lifecycle operations.
	ctx context.Context
	// container is the fortio server container.
	container testcontainers.Container
	// ip is the container's IP inside the docker network.
	ip string
	// httpPort is the host-mapped HTTP listener port.
	httpPort int
	// grpcPort is the host-mapped gRPC listener port.
	grpcPort int
	// req holds a container request. NOTE(review): not populated by
	// NewExampleService as written — confirm whether it is still needed.
	req testcontainers.ContainerRequest
}
// GetName returns the container's name, or "" if it cannot be determined.
func (g exampleContainer) GetName() string {
	if name, err := g.container.Name(g.ctx); err == nil {
		return name
	}
	return ""
}
// GetAddr returns the container IP and the host-mapped HTTP port.
func (g exampleContainer) GetAddr() (string, int) {
	return g.ip, g.httpPort
}
// Start launches the underlying container; it errors if the container was
// never created or has already been terminated.
func (g exampleContainer) Start() error {
	if g.container == nil {
		return fmt.Errorf("container has not been initialized")
	}
	return g.container.Start(context.Background())
}
// Terminate attempts to terminate the container. On failure, an error will be
// returned and the reaper process (RYUK) will handle cleanup.
//
// FIX: the receiver is now a pointer so that clearing c.container sticks.
// With the previous value receiver, the nil assignment only mutated a copy,
// so a second Terminate call would try to stop the container again. Callers
// already hold a *exampleContainer (NewExampleService returns a pointer).
func (c *exampleContainer) Terminate() error {
	if c.container == nil {
		return nil
	}

	// Stop log streaming, then terminate; the first error encountered wins.
	err := c.container.StopLogProducer()
	if err1 := c.container.Terminate(c.ctx); err == nil {
		err = err1
	}

	c.container = nil
	return err
}
// NewExampleService runs a fortio server container exposing HTTP and gRPC
// listeners, registers its termination with the agent so both stop together,
// and returns it as a Service.
func NewExampleService(ctx context.Context, name string, httpPort int, grpcPort int, node libnode.Agent) (Service, error) {
	namePrefix := fmt.Sprintf("%s-service-example-%s", node.GetDatacenter(), name)
	containerName := utils.RandName(namePrefix)

	req := testcontainers.ContainerRequest{
		Image:      hashicorpDockerProxy + "/fortio/fortio",
		WaitingFor: wait.ForLog("").WithStartupTimeout(10 * time.Second),
		AutoRemove: false,
		Name:       containerName,
		Cmd:        []string{"server", "-http-port", fmt.Sprintf("%d", httpPort), "-grpc-port", fmt.Sprintf("%d", grpcPort), "-redirect-port", "-disabled"},
		Env:        map[string]string{"FORTIO_NAME": name},
		ExposedPorts: []string{
			fmt.Sprintf("%d/tcp", httpPort), // HTTP Listener
			fmt.Sprintf("%d/tcp", grpcPort), // GRPC Listener
		},
	}

	container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
	if err != nil {
		return nil, err
	}

	ip, err := container.ContainerIP(ctx)
	if err != nil {
		return nil, err
	}
	// Local rename: was "mappedHTPPPort" (typo).
	mappedHTTPPort, err := container.MappedPort(ctx, nat.Port(fmt.Sprintf("%d", httpPort)))
	if err != nil {
		return nil, err
	}
	mappedGRPCPort, err := container.MappedPort(ctx, nat.Port(fmt.Sprintf("%d", grpcPort)))
	if err != nil {
		return nil, err
	}

	if err := container.StartLogProducer(ctx); err != nil {
		return nil, err
	}
	container.FollowOutput(&LogConsumer{
		Prefix: containerName,
	})

	// Stop this container together with its agent.
	node.RegisterTermination(func() error {
		return container.Terminate(context.Background())
	})

	return &exampleContainer{
		ctx:       ctx, // FIX: previously unset, so Terminate ran with a nil context
		container: container,
		ip:        ip,
		httpPort:  mappedHTTPPort.Int(),
		grpcPort:  mappedGRPCPort.Int(),
	}, nil
}

View File

@ -0,0 +1,128 @@
package service
import (
"context"
"fmt"
"time"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
libnode "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
// gatewayContainer is an envoy gateway ("consul connect envoy -gateway=<kind>")
// running in its own container.
type gatewayContainer struct {
	// ctx is used for container lifecycle operations.
	ctx context.Context
	// container is the envoy gateway container.
	container testcontainers.Container
	// ip is the container's IP inside the docker network.
	ip string
	// port is the host-mapped gateway listener port (8443 in-container).
	port int
	// req holds a container request. NOTE(review): not populated by
	// NewGatewayService as written — confirm whether it is still needed.
	req testcontainers.ContainerRequest
}
// GetName returns the container's name, or "" if it cannot be determined.
func (g gatewayContainer) GetName() string {
	if name, err := g.container.Name(g.ctx); err == nil {
		return name
	}
	return ""
}
// GetAddr returns the container IP and the host-mapped gateway listener port.
func (g gatewayContainer) GetAddr() (string, int) {
	return g.ip, g.port
}
// Start launches the underlying container; it errors if the container was
// never created or has already been terminated.
func (g gatewayContainer) Start() error {
	if g.container == nil {
		return fmt.Errorf("container has not been initialized")
	}
	return g.container.Start(context.Background())
}
// Terminate attempts to terminate the container. On failure, an error will be
// returned and the reaper process (RYUK) will handle cleanup.
//
// FIX: the receiver is now a pointer so that clearing c.container sticks.
// With the previous value receiver, the nil assignment only mutated a copy,
// so a second Terminate call would try to stop the container again. Callers
// already hold a *gatewayContainer (NewGatewayService returns a pointer).
func (c *gatewayContainer) Terminate() error {
	if c.container == nil {
		return nil
	}

	// Stop log streaming, then terminate; the first error encountered wins.
	err := c.container.StopLogProducer()
	if err1 := c.container.Terminate(c.ctx); err == nil {
		err = err1
	}

	c.container = nil
	return err
}
// NewGatewayService builds and starts a containerized Consul Envoy gateway of
// the given kind (e.g. "mesh"), registered against the provided agent, and
// returns it wrapped as a Service. The Envoy image is built from the dev
// Dockerfile pinned to the Envoy version under test; the gateway listener
// (8443) is exposed on a host-mapped port.
func NewGatewayService(ctx context.Context, name string, kind string, node libnode.Agent) (Service, error) {
	// Container names embed the datacenter plus a random suffix so parallel
	// tests never collide.
	namePrefix := fmt.Sprintf("%s-service-gateway-%s", node.GetDatacenter(), name)
	containerName := utils.RandName(namePrefix)
	// Build the dev image with the Envoy version under test baked in.
	envoyVersion := getEnvoyVersion()
	buildargs := map[string]*string{
		"ENVOY_VERSION": utils.StringToPointer(envoyVersion),
	}
	dockerfileCtx, err := getDevContainerDockerfile()
	if err != nil {
		return nil, err
	}
	dockerfileCtx.BuildArgs = buildargs
	nodeIP, _ := node.GetAddr()
	req := testcontainers.ContainerRequest{
		FromDockerfile: dockerfileCtx,
		// Wait only for any log output; startup is bounded at 10s.
		WaitingFor: wait.ForLog("").WithStartupTimeout(10 * time.Second),
		AutoRemove: false,
		Name:       containerName,
		// Self-register the gateway with the agent and run Envoy in the
		// foreground. 8502 is the agent's gRPC (xDS) port, 8500 its HTTP API.
		Cmd: []string{
			"consul", "connect", "envoy",
			fmt.Sprintf("-gateway=%s", kind),
			"-register",
			"-service", name,
			"-address", "{{ GetInterfaceIP \"eth0\" }}:8443",
			fmt.Sprintf("-grpc-addr=%s:%d", nodeIP, 8502),
			"-admin-bind", "0.0.0.0:19000",
			"--",
			"--log-level", "info"},
		Env: map[string]string{"CONSUL_HTTP_ADDR": fmt.Sprintf("%s:%d", nodeIP, 8500)},
		ExposedPorts: []string{
			"8443/tcp",  // Envoy Gateway Listener
			"19000/tcp", // Envoy Admin Port
		},
	}
	container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
	if err != nil {
		return nil, err
	}
	ip, err := container.ContainerIP(ctx)
	if err != nil {
		return nil, err
	}
	mappedPort, err := container.MappedPort(ctx, "8443")
	if err != nil {
		return nil, err
	}
	// Mirror container logs into the test output, prefixed by container name.
	if err := container.StartLogProducer(ctx); err != nil {
		return nil, err
	}
	container.FollowOutput(&LogConsumer{
		Prefix: containerName,
	})
	// Ensure the node tears this container down when the cluster terminates.
	terminate := func() error {
		return container.Terminate(context.Background())
	}
	node.RegisterTermination(terminate)
	return &gatewayContainer{container: container, ip: ip, port: mappedPort.Int()}, nil
}

View File

@ -0,0 +1,144 @@
package service
import (
"context"
"fmt"
"io"
"net/http"
"github.com/hashicorp/consul/api"
libnode "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
)
// CreateAndRegisterStaticServerAndSidecar launches the example "static-server"
// container plus its Connect sidecar proxy container, then registers both with
// the given agent's catalog. Both containers are returned even when
// registration fails so the caller can tear them down.
func CreateAndRegisterStaticServerAndSidecar(node libnode.Agent) (Service, Service, error) {
	// Create a service and proxy instance
	serverService, err := NewExampleService(context.Background(), "static-server", 8080, 8079, node)
	if err != nil {
		return nil, nil, err
	}
	serverConnectProxy, err := NewConnectService(context.Background(), "static-server-sidecar", "static-server", 8080, node) // bindPort not used
	if err != nil {
		return nil, nil, err
	}
	serverServiceIP, _ := serverService.GetAddr()
	serverConnectProxyIP, _ := serverConnectProxy.GetAddr()
	// Register the static-server service and sidecar.
	// 20000 is the sidecar's public (mTLS) listener port.
	req := &api.AgentServiceRegistration{
		Name:    "static-server",
		Port:    8080,
		Address: serverServiceIP,
		Connect: &api.AgentServiceConnect{
			SidecarService: &api.AgentServiceRegistration{
				Name:    "static-server-sidecar-proxy",
				Port:    20000,
				Address: serverConnectProxyIP,
				Kind:    api.ServiceKindConnectProxy,
				Checks: api.AgentServiceChecks{
					&api.AgentServiceCheck{
						Name:     "Connect Sidecar Listening",
						TCP:      fmt.Sprintf("%s:%d", serverConnectProxyIP, 20000),
						Interval: "10s",
					},
					&api.AgentServiceCheck{
						Name:         "Connect Sidecar Aliasing Static Server",
						AliasService: "static-server",
					},
				},
				Proxy: &api.AgentServiceConnectProxyConfig{
					DestinationServiceName: "static-server",
					LocalServiceAddress:    serverServiceIP,
					LocalServicePort:       8080,
				},
			},
		},
		// NOTE(review): this check targets the server's own port (8080) yet is
		// named "Connect Sidecar Listening" — looks like a copy-paste of the
		// sidecar check name; confirm intent.
		Check: &api.AgentServiceCheck{
			Name:     "Connect Sidecar Listening",
			TCP:      fmt.Sprintf("%s:%d", serverServiceIP, 8080),
			Interval: "10s",
		},
	}
	err = node.GetClient().Agent().ServiceRegister(req)
	if err != nil {
		return serverService, serverConnectProxy, err
	}
	return serverService, serverConnectProxy, nil
}
// CreateAndRegisterStaticClientSidecar launches a Connect sidecar container
// for the "static-client" service and registers both with the agent's
// catalog. The sidecar's upstream targets "static-server" (in the peer named
// by peerName when non-empty) on local bind port 5000; localMeshGateway
// selects local vs. remote mesh-gateway mode for that upstream.
func CreateAndRegisterStaticClientSidecar(node libnode.Agent, peerName string, localMeshGateway bool) (Service, error) {
	// Create a service and proxy instance
	clientConnectProxy, err := NewConnectService(context.Background(), "static-client-sidecar", "static-client", 5000, node)
	if err != nil {
		return nil, err
	}
	clientConnectProxyIP, _ := clientConnectProxy.GetAddr()
	mgwMode := api.MeshGatewayModeRemote
	if localMeshGateway {
		mgwMode = api.MeshGatewayModeLocal
	}
	// Register the static-client service and sidecar. No client workload
	// container is started; the test drives traffic straight at the sidecar's
	// local bind port.
	req := &api.AgentServiceRegistration{
		Name: "static-client",
		Port: 8080,
		Connect: &api.AgentServiceConnect{
			SidecarService: &api.AgentServiceRegistration{
				Name: "static-client-sidecar-proxy",
				Port: 20000, // sidecar public (mTLS) listener port
				Kind: api.ServiceKindConnectProxy,
				Checks: api.AgentServiceChecks{
					&api.AgentServiceCheck{
						Name:     "Connect Sidecar Listening",
						TCP:      fmt.Sprintf("%s:%d", clientConnectProxyIP, 20000),
						Interval: "10s",
					},
				},
				Proxy: &api.AgentServiceConnectProxyConfig{
					Upstreams: []api.Upstream{
						{
							DestinationName:  "static-server",
							DestinationPeer:  peerName,
							LocalBindAddress: "0.0.0.0",
							LocalBindPort:    5000,
							MeshGateway: api.MeshGatewayConfig{
								Mode: mgwMode,
							},
						},
					},
				},
			},
		},
		Checks: api.AgentServiceChecks{},
	}
	err = node.GetClient().Agent().ServiceRegister(req)
	if err != nil {
		return clientConnectProxy, err
	}
	return clientConnectProxy, nil
}
// GetEnvoyConfigDump fetches the Envoy configuration dump (including EDS
// state) from the Envoy admin API on the given local port and returns the raw
// JSON body.
//
// Fix: the original returned whatever body the admin API produced regardless
// of HTTP status, so an error page would be handed to callers as "config";
// non-200 responses are now reported as errors.
func GetEnvoyConfigDump(port int) (string, error) {
	client := http.DefaultClient
	url := fmt.Sprintf("http://localhost:%d/config_dump?include_eds", port)
	res, err := client.Get(url)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status fetching envoy config dump: %s", res.Status)
	}
	body, err := io.ReadAll(res.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}

View File

@ -0,0 +1,23 @@
package service
import (
"fmt"
"os"
"github.com/testcontainers/testcontainers-go"
)
// LogConsumer is a testcontainers log consumer that mirrors container output
// to the test process's stdout/stderr, prefixing each line for attribution.
type LogConsumer struct {
	Prefix string // prepended to every log line, typically the container name
}

// Compile-time check that *LogConsumer satisfies testcontainers.LogConsumer.
var _ testcontainers.LogConsumer = (*LogConsumer)(nil)
// Accept forwards one container log entry to the matching local stream
// (stdout or stderr), prefixed for attribution. Entries of any other log
// type are dropped.
func (c *LogConsumer) Accept(log testcontainers.Log) {
	var dst *os.File
	switch log.LogType {
	case "STDOUT":
		dst = os.Stdout
	case "STDERR":
		dst = os.Stderr
	default:
		return
	}
	fmt.Fprint(dst, c.Prefix+" ~~ "+string(log.Content))
}

View File

@ -0,0 +1,10 @@
package service
// Service represents a process that will be registered with the
// Consul catalog, including Consul components such as sidecars and gateways
type Service interface {
	// Terminate stops the service process and releases its resources.
	Terminate() error
	// GetName returns the service's (container) name.
	GetName() string
	// GetAddr returns the address and port the service is reachable on.
	GetAddr() (string, int)
	// Start launches the service process.
	Start() (err error)
}

View File

@ -0,0 +1,17 @@
package utils
import (
"github.com/hashicorp/consul/api"
)
// ApplyDefaultProxySettings writes the global proxy-defaults config entry,
// forcing the mesh-wide default protocol to tcp. It reports whether the entry
// was written, along with any API error.
func ApplyDefaultProxySettings(c *api.Client) (bool, error) {
	entry := &api.ProxyConfigEntry{
		Kind: "proxy-defaults",
		Name: "global",
		Config: map[string]any{
			"protocol": "tcp",
		},
	}
	written, _, err := c.ConfigEntries().Set(entry, &api.WriteOptions{})
	return written, err
}

View File

@ -1,13 +1,60 @@
package utils package utils
import ( import (
"github.com/hashicorp/go-uuid" "encoding/json"
"fmt"
"github.com/itchyny/gojq"
"github.com/teris-io/shortid"
) )
func RandName(name string) string { func RandName(name string) string {
generateUUID, err := uuid.GenerateUUID() shortID, err := shortid.New(1, shortid.DefaultABC, 6666)
id, err := shortID.Generate()
if err != nil { if err != nil {
return "" return ""
} }
return name + generateUUID return name + "-" + id
}
// JQFilter uses the provided "jq" filter to parse json.
// Matching results are returned as a slice of strings.
func JQFilter(config, filter string) ([]string, error) {
result := []string{}
query, err := gojq.Parse(filter)
if err != nil {
return nil, err
}
var m interface{}
err = json.Unmarshal([]byte(config), &m)
if err != nil {
return nil, err
}
iter := query.Run(m)
for {
v, ok := iter.Next()
if !ok {
break
}
if err, ok := v.(error); ok {
return nil, err
}
s := fmt.Sprintf("%v", v)
result = append(result, s)
}
return result, nil
}
func IntToPointer(i int) *int {
return &i
}
func BoolToPointer(b bool) *bool {
return &b
}
func StringToPointer(s string) *string {
return &s
} }

View File

@ -0,0 +1,88 @@
package basic
import (
"testing"
"github.com/stretchr/testify/require"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
// TestBasicConnectService verifies service-to-service connectivity within a
// single datacenter: a direct HTTP call to the static-client sidecar's local
// upstream bind port must be answered by the upstream static-server.
//
// Steps:
//   - Create a single agent cluster.
//   - Create the example static-server and sidecar containers, then register them both with Consul
//   - Create an example static-client sidecar, then register both the service and sidecar with Consul
//   - Make sure a call to the client sidecar local bind port returns a response from the upstream, static-server
func TestBasicConnectService(t *testing.T) {
	cluster := createCluster(t)
	defer terminate(t, cluster)

	clientSvc := createServices(t, cluster)
	_, svcPort := clientSvc.GetAddr()

	libassert.HTTPServiceEchoes(t, "localhost", svcPort)
}
// terminate shuts the whole cluster down, failing the test on error.
func terminate(t *testing.T, cluster *libcluster.Cluster) {
	require.NoError(t, cluster.Terminate())
}
// createCluster boots a single-agent cluster with gossip encryption and
// auto-TLS enabled, waits for leadership and membership, and applies the
// default proxy-defaults config entry (protocol=tcp).
func createCluster(t *testing.T) *libcluster.Cluster {
	opts := libagent.BuildOptions{
		InjectAutoEncryption:   true,
		InjectGossipEncryption: true,
	}
	ctx, err := libagent.NewBuildContext(opts)
	require.NoError(t, err)

	conf, err := libagent.NewConfigBuilder(ctx).ToAgentConfig()
	require.NoError(t, err)
	t.Logf("Cluster config:\n%s", conf.JSON)

	configs := []libagent.Config{*conf}

	cluster, err := libcluster.New(configs)
	require.NoError(t, err)

	node := cluster.Agents[0]
	client := node.GetClient()
	libcluster.WaitForLeader(t, cluster, client)
	libcluster.WaitForMembers(t, client, 1)

	// Default Proxy Settings
	ok, err := utils.ApplyDefaultProxySettings(client)
	require.NoError(t, err)
	require.True(t, ok)

	return cluster
}
// createServices registers the static-server (with its sidecar) plus a
// static-client sidecar on the cluster's single agent, asserting each appears
// in the catalog. The client sidecar is returned so the caller can drive
// traffic through its upstream bind port.
func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Service {
	node := cluster.Agents[0]
	client := node.GetClient()
	// Create a service and proxy instance
	_, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(node)
	require.NoError(t, err)

	libassert.CatalogServiceExists(t, client, "static-server-sidecar-proxy")
	libassert.CatalogServiceExists(t, client, "static-server")

	// Create a client proxy instance with the server as an upstream
	clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
	require.NoError(t, err)

	libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy")

	return clientConnectProxy
}

View File

@ -12,55 +12,40 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster" libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
"github.com/hashicorp/consul/test/integration/consul-container/libs/node"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils" "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
) )
// Given a 3-server cluster, when the leader is elected, then leader's isLeader is 1 and non-leader's 0 // Given a 3-server cluster, when the leader is elected, then leader's isLeader is 1 and non-leader's 0
func TestLeadershipMetrics(t *testing.T) { func TestLeadershipMetrics(t *testing.T) {
var configs []node.Config var configs []agent.Config
configs = append(configs,
node.Config{
HCL: `node_name="` + utils.RandName("consul-server") + `"
log_level="DEBUG"
server=true
telemetry {
statsite_address = "127.0.0.1:2180"
}`,
Cmd: []string{"agent", "-client=0.0.0.0"},
Version: *utils.TargetVersion,
Image: *utils.TargetImage,
})
statsConf, err := libagent.NewConfigBuilder(nil).Telemetry("127.0.0.0:2180").ToAgentConfig()
require.NoError(t, err)
configs = append(configs, *statsConf)
conf, err := libagent.NewConfigBuilder(nil).Bootstrap(3).ToAgentConfig()
require.NoError(t, err)
numServer := 3 numServer := 3
for i := 1; i < numServer; i++ { for i := 1; i < numServer; i++ {
configs = append(configs, configs = append(configs, *conf)
node.Config{
HCL: `node_name="` + utils.RandName("consul-server") + `"
log_level="DEBUG"
bootstrap_expect=3
server=true`,
Cmd: []string{"agent", "-client=0.0.0.0"},
Version: *utils.TargetVersion,
Image: *utils.TargetImage,
})
} }
cluster, err := libcluster.New(configs) cluster, err := libcluster.New(configs)
require.NoError(t, err) require.NoError(t, err)
defer terminate(t, cluster) defer terminate(t, cluster)
svrCli := cluster.Nodes[0].GetClient() svrCli := cluster.Agents[0].GetClient()
libcluster.WaitForLeader(t, cluster, svrCli) libcluster.WaitForLeader(t, cluster, svrCli)
libcluster.WaitForMembers(t, svrCli, 3) libcluster.WaitForMembers(t, svrCli, 3)
retryWithBackoff := func(agentNode node.Node, expectedStr string) error { retryWithBackoff := func(agent agent.Agent, expectedStr string) error {
waiter := &utils.Waiter{ waiter := &utils.Waiter{
MaxWait: 5 * time.Minute, MaxWait: 5 * time.Minute,
} }
_, port := agentNode.GetAddr() _, port := agent.GetAddr()
ctx := context.Background() ctx := context.Background()
for { for {
if waiter.Failures() > 5 { if waiter.Failures() > 5 {
@ -78,14 +63,14 @@ func TestLeadershipMetrics(t *testing.T) {
} }
} }
leaderNode, err := cluster.Leader() leader, err := cluster.Leader()
require.NoError(t, err) require.NoError(t, err)
leadAddr, leaderPort := leaderNode.GetAddr() leadAddr, leaderPort := leader.GetAddr()
for i, n := range cluster.Nodes { for i, n := range cluster.Agents {
addr, port := n.GetAddr() addr, port := n.GetAddr()
if addr == leadAddr && port == leaderPort { if addr == leadAddr && port == leaderPort {
err = retryWithBackoff(leaderNode, ".server.isLeader\",\"Value\":1,") err = retryWithBackoff(leader, ".server.isLeader\",\"Value\":1,")
require.NoError(t, err, "%dth node(leader): could not find the metric %q in the /v1/agent/metrics response", i, ".server.isLeader\",\"Value\":1,") require.NoError(t, err, "%dth node(leader): could not find the metric %q in the /v1/agent/metrics response", i, ".server.isLeader\",\"Value\":1,")
} else { } else {
err = retryWithBackoff(n, ".server.isLeader\",\"Value\":0,") err = retryWithBackoff(n, ".server.isLeader\",\"Value\":0,")

View File

@ -0,0 +1,372 @@
package peering
import (
"context"
"encoding/pem"
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
const (
	// acceptingPeerName is the peering's name from the accepting (exporting) cluster's perspective.
	acceptingPeerName = "accepting-to-dialer"
	// dialingPeerName is the peering's name from the dialing (importing) cluster's perspective.
	dialingPeerName = "dialing-to-acceptor"
)
// TestPeering_RotateServerAndCAThenFail_
// This test runs a few scenarios back to back
//  1. It makes sure that the peering stream sends server address updates between peers.
//     It also verifies that dialing clusters will use this stored information to supersede the addresses
//     encoded in the peering token.
//  2. Rotate the CA in the exporting cluster and ensure services don't break
//  3. Terminate the server nodes in the exporting cluster and make sure the importing cluster can still dial its
//     upstream.
//
// ## Steps
// ### Part 1
//   - Create an accepting cluster with 3 servers. 1 client should be used to host a service for export
//   - Create a single agent dialing cluster.
//   - Create the peering and export the service. Verify it is working
//   - Incrementally replace the follower nodes.
//   - Replace the leader agent
//   - Verify the dialer can reach the new server nodes and the service becomes available.
//
// ### Part 2
//   - Push an update to the CA Configuration in the exporting cluster and wait for the new root to be generated
//   - Verify envoy client sidecar has two certificates for the upstream server
//   - Make sure there is still service connectivity from the importing cluster
//
// ### Part 3
//   - Terminate the server nodes in the exporting cluster
//   - Make sure there is still service connectivity from the importing cluster
func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
	var acceptingCluster, dialingCluster *libcluster.Cluster
	var acceptingClient, dialingClient *api.Client
	var acceptingCtx *libagent.BuildContext
	var clientSidecarService libservice.Service

	// Build both clusters concurrently to cut setup time.
	// NOTE(review): the setup helpers call require.* from these goroutines;
	// require's FailNow is only reliable from the test goroutine — confirm
	// this is acceptable here.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		acceptingCluster, acceptingClient, acceptingCtx = creatingAcceptingClusterAndSetup(t)
		wg.Done()
	}()
	defer func() {
		terminate(t, acceptingCluster)
	}()

	wg.Add(1)
	go func() {
		dialingCluster, dialingClient, clientSidecarService = createDialingClusterAndSetup(t)
		wg.Done()
	}()
	defer func() {
		terminate(t, dialingCluster)
	}()
	wg.Wait()

	// Establish the peering: the accepting side mints a token, the dialing
	// side uses it to dial back.
	generateReq := api.PeeringGenerateTokenRequest{
		PeerName: acceptingPeerName,
	}
	generateRes, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{})
	require.NoError(t, err)

	establishReq := api.PeeringEstablishRequest{
		PeerName:     dialingPeerName,
		PeeringToken: generateRes.PeeringToken,
	}
	_, _, err = dialingClient.Peerings().Establish(context.Background(), establishReq, &api.WriteOptions{})
	require.NoError(t, err)

	libassert.PeeringStatus(t, acceptingClient, acceptingPeerName, api.PeeringStateActive)
	libassert.PeeringExports(t, acceptingClient, acceptingPeerName, 1)

	// Baseline: traffic flows through the imported upstream before any rotation.
	_, port := clientSidecarService.GetAddr()
	libassert.HTTPServiceEchoes(t, "localhost", port)

	t.Run("test rotating servers", func(t *testing.T) {
		// Start by replacing the Followers
		leader, err := acceptingCluster.Leader()
		require.NoError(t, err)

		followers, err := acceptingCluster.Followers()
		require.NoError(t, err)
		require.Len(t, followers, 2)

		for idx, follower := range followers {
			t.Log("Removing follower", idx)
			rotateServer(t, acceptingCluster, acceptingClient, acceptingCtx, follower)
		}

		t.Log("Removing leader")
		rotateServer(t, acceptingCluster, acceptingClient, acceptingCtx, leader)

		libassert.PeeringStatus(t, acceptingClient, acceptingPeerName, api.PeeringStateActive)
		libassert.PeeringExports(t, acceptingClient, acceptingPeerName, 1)
		libassert.HTTPServiceEchoes(t, "localhost", port)
	})

	t.Run("rotate exporting cluster's root CA", func(t *testing.T) {
		config, meta, err := acceptingClient.Connect().CAGetConfig(&api.QueryOptions{})
		require.NoError(t, err)
		t.Logf("%+v", config)

		// Changing the key type/bits forces generation of a new root cert.
		req := &api.CAConfig{
			Provider: "consul",
			Config: map[string]interface{}{
				"PrivateKeyType": "ec",
				"PrivateKeyBits": 384,
			},
		}
		_, err = acceptingClient.Connect().CASetConfig(req, &api.WriteOptions{})
		require.NoError(t, err)

		// wait up to 30 seconds for the update
		_, _, err = acceptingClient.Connect().CAGetConfig(&api.QueryOptions{
			WaitIndex: meta.LastIndex,
			WaitTime:  30 * time.Second,
		})
		require.NoError(t, err)

		// There should be two root certs now
		rootList, _, err := acceptingClient.Connect().CARoots(&api.QueryOptions{})
		require.NoError(t, err)
		require.Len(t, rootList.Roots, 2)

		// Connectivity should still be maintained
		_, port := clientSidecarService.GetAddr()
		libassert.HTTPServiceEchoes(t, "localhost", port)

		verifySidecarHasTwoRootCAs(t, clientSidecarService)
	})

	t.Run("terminate exporting clusters servers and ensure imported services are still reachable", func(t *testing.T) {
		// Keep this list for later
		newNodes, err := acceptingCluster.Clients()
		require.NoError(t, err)

		serverNodes, err := acceptingCluster.Servers()
		require.NoError(t, err)
		for _, node := range serverNodes {
			require.NoError(t, node.Terminate())
		}

		// Remove the nodes from the cluster to prevent double-termination
		acceptingCluster.Agents = newNodes

		// ensure any transitory actions like replication cleanup would not affect the next verifications
		time.Sleep(30 * time.Second)

		_, port := clientSidecarService.GetAddr()
		libassert.HTTPServiceEchoes(t, "localhost", port)
	})
}
// terminate shuts the whole cluster down, failing the test on error.
func terminate(t *testing.T, cluster *libcluster.Cluster) {
	require.NoError(t, cluster.Terminate())
}
// creatingAcceptingClusterAndSetup creates a cluster with 3 servers and 1 client.
// It also creates and registers a service+sidecar (plus a mesh gateway) and
// exports the service to the peer. The API client returned is pointed at the
// client agent, which is never rotated; the BuildContext is returned so later
// server configs can reuse the same encryption material.
func creatingAcceptingClusterAndSetup(t *testing.T) (*libcluster.Cluster, *api.Client, *libagent.BuildContext) {
	var configs []libagent.Config
	opts := libagent.BuildOptions{
		InjectAutoEncryption:   true,
		InjectGossipEncryption: true,
	}
	ctx, err := libagent.NewBuildContext(opts)
	require.NoError(t, err)

	numServer := 3
	for i := 0; i < numServer; i++ {
		serverConf, err := libagent.NewConfigBuilder(ctx).
			Bootstrap(3).
			Peering(true).
			RetryJoin(fmt.Sprintf("agent-%d", (i+1)%3)). // Round-robin join the servers
			ToAgentConfig()
		require.NoError(t, err)
		t.Logf("dc1 server config %d: \n%s", i, serverConf.JSON)
		configs = append(configs, *serverConf)
	}

	// Add a stable client to register the service
	clientConf, err := libagent.NewConfigBuilder(ctx).
		Client().
		Peering(true).
		RetryJoin("agent-0", "agent-1", "agent-2").
		ToAgentConfig()
	require.NoError(t, err)
	t.Logf("dc1 client config: \n%s", clientConf.JSON)
	configs = append(configs, *clientConf)

	cluster, err := libcluster.New(configs)
	require.NoError(t, err)

	// Use the client agent as the HTTP endpoint since we will not rotate it
	clientNode := cluster.Agents[3]
	client := clientNode.GetClient()
	libcluster.WaitForLeader(t, cluster, client)
	libcluster.WaitForMembers(t, client, 4)

	// Default Proxy Settings
	ok, err := utils.ApplyDefaultProxySettings(client)
	require.NoError(t, err)
	require.True(t, ok)

	// Create the mesh gateway for dataplane traffic
	_, err = libservice.NewGatewayService(context.Background(), "mesh", "mesh", clientNode)
	require.NoError(t, err)

	// Create a service and proxy instance
	_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(clientNode)
	require.NoError(t, err)

	libassert.CatalogServiceExists(t, client, "static-server")
	libassert.CatalogServiceExists(t, client, "static-server-sidecar-proxy")

	// Export the service so the peer (dialing cluster) can import it.
	config := &api.ExportedServicesConfigEntry{
		Name: "default",
		Services: []api.ExportedService{
			{
				Name: "static-server",
				Consumers: []api.ServiceConsumer{
					// Consumer is named by this cluster's view of the peering.
					{Peer: acceptingPeerName},
				},
			},
		},
	}
	ok, _, err = client.ConfigEntries().Set(config, &api.WriteOptions{})
	require.NoError(t, err)
	require.True(t, ok)

	return cluster, client, ctx
}
// createDialingClusterAndSetup creates a cluster for peering with a single dev agent
// in dc2, with a mesh gateway and a static-client sidecar whose upstream
// targets the peered static-server (local mesh-gateway mode). It returns the
// cluster, an API client for its agent, and the client sidecar service.
func createDialingClusterAndSetup(t *testing.T) (*libcluster.Cluster, *api.Client, libservice.Service) {
	opts := libagent.BuildOptions{
		Datacenter:             "dc2",
		InjectAutoEncryption:   true,
		InjectGossipEncryption: true,
	}
	ctx, err := libagent.NewBuildContext(opts)
	require.NoError(t, err)

	conf, err := libagent.NewConfigBuilder(ctx).
		Peering(true).
		ToAgentConfig()
	require.NoError(t, err)
	t.Logf("dc2 server config: \n%s", conf.JSON)

	configs := []libagent.Config{*conf}

	cluster, err := libcluster.New(configs)
	require.NoError(t, err)

	node := cluster.Agents[0]
	client := node.GetClient()
	libcluster.WaitForLeader(t, cluster, client)
	libcluster.WaitForMembers(t, client, 1)

	// Default Proxy Settings
	ok, err := utils.ApplyDefaultProxySettings(client)
	require.NoError(t, err)
	require.True(t, ok)

	// Create the mesh gateway for dataplane traffic
	_, err = libservice.NewGatewayService(context.Background(), "mesh", "mesh", node)
	require.NoError(t, err)

	// Create a service and proxy instance
	clientProxyService, err := libservice.CreateAndRegisterStaticClientSidecar(node, dialingPeerName, true)
	require.NoError(t, err)

	libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy")

	return cluster, client, clientProxyService
}
// rotateServer adds a new server agent to the cluster, then forces the prior
// agent to leave, verifying membership (5, then back to 4) through the given
// client between the two steps.
func rotateServer(t *testing.T, cluster *libcluster.Cluster, client *api.Client, ctx *libagent.BuildContext, node libagent.Agent) {
	conf, err := libagent.NewConfigBuilder(ctx).
		Bootstrap(0).
		Peering(true).
		RetryJoin("agent-3"). // Always use the client agent since it never leaves the cluster
		ToAgentConfig()
	require.NoError(t, err)

	err = cluster.Add([]libagent.Config{*conf})
	require.NoError(t, err, "could not start new node")

	libcluster.WaitForMembers(t, client, 5)

	require.NoError(t, cluster.Remove(node))

	libcluster.WaitForMembers(t, client, 4)
}
// verifySidecarHasTwoRootCAs polls the sidecar's Envoy admin API until the
// upstream cluster's TLS validation context contains exactly two root
// certificates (the old and the newly rotated CA), failing after the retry
// window elapses.
//
// Fix: the original r.Fatal calls discarded the underlying errors, hiding the
// actual failure cause; the final message was also garbled ("and %d present").
func verifySidecarHasTwoRootCAs(t *testing.T, sidecar libservice.Service) {
	connectContainer, ok := sidecar.(*libservice.ConnectContainer)
	require.True(t, ok)
	_, adminPort := connectContainer.GetAdminAddr()

	// Retry for up to 30s at 1s intervals to let Envoy pick up the new root.
	failer := func() *retry.Timer {
		return &retry.Timer{Timeout: 30 * time.Second, Wait: 1 * time.Second}
	}

	retry.RunWith(failer(), t, func(r *retry.R) {
		dump, err := libservice.GetEnvoyConfigDump(adminPort)
		if err != nil {
			r.Fatalf("could not curl envoy configuration: %v", err)
		}

		// Pull the trusted CA bundle for the peered upstream's cluster.
		filter := `.configs[] | select(.["@type"] | contains("type.googleapis.com/envoy.admin.v3.ClustersConfigDump")).dynamic_active_clusters[] | select(.cluster.name | contains("static-server.default.dialing-to-acceptor.external")).cluster.transport_socket.typed_config.common_tls_context.validation_context.trusted_ca.inline_string`
		results, err := utils.JQFilter(dump, filter)
		if err != nil {
			r.Fatalf("could not parse envoy configuration: %v", err)
		}
		if len(results) != 1 {
			r.Fatal("could not find certificates in cluster TLS context")
		}

		// Count the PEM blocks in the bundle; rotation should leave two roots.
		rest := []byte(results[0])
		var count int
		for len(rest) > 0 {
			var p *pem.Block
			p, rest = pem.Decode(rest)
			if p == nil {
				break
			}
			count++
		}

		if count != 2 {
			r.Fatalf("expected 2 TLS certificates but %d are present", count)
		}
	})
}

View File

@ -10,8 +10,8 @@ import (
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster" libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
"github.com/hashicorp/consul/test/integration/consul-container/libs/node"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils" "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
) )
@ -25,11 +25,11 @@ func TestTargetServersWithLatestGAClients(t *testing.T) {
cluster := serversCluster(t, numServers, *utils.TargetVersion, *utils.TargetImage) cluster := serversCluster(t, numServers, *utils.TargetVersion, *utils.TargetImage)
defer terminate(t, cluster) defer terminate(t, cluster)
clients := clientsCreate(t, numClients, *utils.LatestImage, *utils.LatestVersion, cluster.EncryptKey) clients := clientsCreate(t, numClients, *utils.LatestImage, *utils.LatestVersion, cluster)
require.NoError(t, cluster.AddNodes(clients)) require.NoError(t, cluster.Join(clients))
client := cluster.Nodes[0].GetClient() client := cluster.Agents[0].GetClient()
libcluster.WaitForLeader(t, cluster, client) libcluster.WaitForLeader(t, cluster, client)
libcluster.WaitForMembers(t, client, 4) libcluster.WaitForMembers(t, client, 4)
@ -71,29 +71,32 @@ func TestTargetServersWithLatestGAClients(t *testing.T) {
// Test health check GRPC call using Mixed (majority latest) Servers and Latest GA Clients // Test health check GRPC call using Mixed (majority latest) Servers and Latest GA Clients
func TestMixedServersMajorityLatestGAClient(t *testing.T) { func TestMixedServersMajorityLatestGAClient(t *testing.T) {
var configs []node.Config var configs []libagent.Config
configs = append(configs,
node.Config{ leaderConf, err := libagent.NewConfigBuilder(nil).ToAgentConfig()
HCL: `node_name="` + utils.RandName("consul-server") + `" require.NoError(t, err)
log_level="DEBUG"
server=true`, configs = append(configs, *leaderConf)
Cmd: []string{"agent", "-client=0.0.0.0"},
Version: *utils.TargetVersion, // This needs a specialized config since it is using an older version of the agent.
Image: *utils.TargetImage, // That is missing fields like GRPC_TLS and PEERING, which are passed as defaults
}) serverConf := `{
"advertise_addr": "{{ GetInterfaceIP \"eth0\" }}",
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"log_level": "DEBUG",
"server": true,
"bootstrap_expect": 3
}`
for i := 1; i < 3; i++ { for i := 1; i < 3; i++ {
configs = append(configs, configs = append(configs,
node.Config{ libagent.Config{
HCL: `node_name="` + utils.RandName("consul-server") + `" JSON: serverConf,
log_level="DEBUG" Cmd: []string{"agent"},
bootstrap_expect=3
server=true`,
Cmd: []string{"agent", "-client=0.0.0.0"},
Version: *utils.LatestVersion, Version: *utils.LatestVersion,
Image: *utils.LatestImage, Image: *utils.LatestImage,
}) })
} }
cluster, err := libcluster.New(configs) cluster, err := libcluster.New(configs)
@ -104,9 +107,9 @@ func TestMixedServersMajorityLatestGAClient(t *testing.T) {
numClients = 1 numClients = 1
) )
clients := clientsCreate(t, numClients, *utils.LatestImage, *utils.LatestVersion, cluster.EncryptKey) clients := clientsCreate(t, numClients, *utils.LatestImage, *utils.LatestVersion, cluster)
require.NoError(t, cluster.AddNodes(clients)) require.NoError(t, cluster.Join(clients))
client := clients[0].GetClient() client := clients[0].GetClient()
@ -149,26 +152,26 @@ func TestMixedServersMajorityLatestGAClient(t *testing.T) {
// Test health check GRPC call using Mixed (majority target) Servers and Latest GA Clients // Test health check GRPC call using Mixed (majority target) Servers and Latest GA Clients
func TestMixedServersMajorityTargetGAClient(t *testing.T) { func TestMixedServersMajorityTargetGAClient(t *testing.T) {
var configs []node.Config var configs []libagent.Config
for i := 0; i < 2; i++ {
configs = append(configs,
node.Config{
HCL: `node_name="` + utils.RandName("consul-server") + `"
log_level="DEBUG"
bootstrap_expect=3
server=true`,
Cmd: []string{"agent", "-client=0.0.0.0"},
Version: *utils.TargetVersion,
Image: *utils.TargetImage,
})
for i := 0; i < 2; i++ {
serverConf, err := libagent.NewConfigBuilder(nil).Bootstrap(3).ToAgentConfig()
require.NoError(t, err)
configs = append(configs, *serverConf)
} }
leaderConf := `{
"advertise_addr": "{{ GetInterfaceIP \"eth0\" }}",
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"log_level": "DEBUG",
"server": true
}`
configs = append(configs, configs = append(configs,
node.Config{ libagent.Config{
HCL: `node_name="` + utils.RandName("consul-server") + `" JSON: leaderConf,
log_level="DEBUG" Cmd: []string{"agent"},
server=true`,
Cmd: []string{"agent", "-client=0.0.0.0"},
Version: *utils.LatestVersion, Version: *utils.LatestVersion,
Image: *utils.LatestImage, Image: *utils.LatestImage,
}) })
@ -181,9 +184,9 @@ func TestMixedServersMajorityTargetGAClient(t *testing.T) {
numClients = 1 numClients = 1
) )
clients := clientsCreate(t, numClients, *utils.LatestImage, *utils.LatestVersion, cluster.EncryptKey) clients := clientsCreate(t, numClients, *utils.LatestImage, *utils.LatestVersion, cluster)
require.NoError(t, cluster.AddNodes(clients)) require.NoError(t, cluster.Join(clients))
client := clients[0].GetClient() client := clients[0].GetClient()
@ -211,7 +214,7 @@ func TestMixedServersMajorityTargetGAClient(t *testing.T) {
&api.AgentServiceRegistration{Name: serviceName, Port: 9998}, &api.AgentServiceRegistration{Name: serviceName, Port: 9998},
)) ))
timer := time.NewTimer(1 * time.Second) timer := time.NewTimer(3 * time.Second)
select { select {
case err := <-errCh: case err := <-errCh:
require.NoError(t, err) require.NoError(t, err)
@ -224,20 +227,29 @@ func TestMixedServersMajorityTargetGAClient(t *testing.T) {
} }
} }
func clientsCreate(t *testing.T, numClients int, image string, version string, serfKey string) []node.Node { func clientsCreate(t *testing.T, numClients int, image string, version string, cluster *libcluster.Cluster) []libagent.Agent {
clients := make([]node.Node, numClients) clients := make([]libagent.Agent, numClients)
// This needs a specialized config since it is using an older version of the agent.
// That is missing fields like GRPC_TLS and PEERING, which are passed as defaults
conf := `{
"advertise_addr": "{{ GetInterfaceIP \"eth0\" }}",
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"log_level": "DEBUG"
}`
for i := 0; i < numClients; i++ { for i := 0; i < numClients; i++ {
var err error var err error
clients[i], err = node.NewConsulContainer(context.Background(), clients[i], err = libagent.NewConsulContainer(context.Background(),
node.Config{ libagent.Config{
HCL: fmt.Sprintf(` JSON: conf,
node_name = %q Cmd: []string{"agent"},
log_level = "DEBUG"
encrypt = %q`, utils.RandName("consul-client"), serfKey),
Cmd: []string{"agent", "-client=0.0.0.0"},
Version: version, Version: version,
Image: image, Image: image,
}) },
cluster.NetworkName,
cluster.Index)
require.NoError(t, err) require.NoError(t, err)
} }
return clients return clients
@ -257,23 +269,21 @@ func serviceCreate(t *testing.T, client *api.Client, serviceName string) uint64
} }
func serversCluster(t *testing.T, numServers int, version string, image string) *libcluster.Cluster { func serversCluster(t *testing.T, numServers int, version string, image string) *libcluster.Cluster {
var configs []node.Config var configs []libagent.Config
conf, err := libagent.NewConfigBuilder(nil).
Bootstrap(3).
ToAgentConfig()
require.NoError(t, err)
for i := 0; i < numServers; i++ { for i := 0; i < numServers; i++ {
configs = append(configs, node.Config{ configs = append(configs, *conf)
HCL: `node_name="` + utils.RandName("consul-server") + `"
log_level="DEBUG"
bootstrap_expect=3
server=true`,
Cmd: []string{"agent", "-client=0.0.0.0"},
Version: version,
Image: image,
})
} }
cluster, err := libcluster.New(configs) cluster, err := libcluster.New(configs)
require.NoError(t, err) require.NoError(t, err)
libcluster.WaitForLeader(t, cluster, nil) libcluster.WaitForLeader(t, cluster, nil)
libcluster.WaitForMembers(t, cluster.Nodes[0].GetClient(), numServers) libcluster.WaitForMembers(t, cluster.Agents[0].GetClient(), numServers)
return cluster return cluster
} }