consul-container: mitigate the drift from ent repo (#17323)

commit b5b0a34ca5 (parent f99593a054)
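The drift mitigation below takes four forms: DockerImage matches enterprise images by substring instead of exact name; the remaining uses of the utils.LatestImageName variable move to the utils.GetLatestImageName() accessor; the basic and standard-upgrade tests build their clusters through the shared topology.NewCluster helper instead of hand-rolled config; and the peering tests' two-upstream static-client helper is promoted to an exported function in the shared test/upgrade package.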
```diff
@@ -57,7 +57,7 @@ func GetLatestImageName() string {
 func DockerImage(image, version string) string {
 	v := image + ":" + version
-	if image == DefaultImageNameENT && isSemVer(version) {
+	if strings.Contains(image, DefaultImageNameENT) && isSemVer(version) {
 		// Enterprise versions get a suffix.
 		v += ImageVersionSuffixENT
 	}
 	return v
```
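The replaced equality test only fired when the image name was exactly DefaultImageNameENT, so any name that merely contains it (for example a registry-prefixed enterprise image) skipped the enterprise version suffix. A runnable sketch of the new behavior; the constant values and the isSemVer stub are assumptions for illustration, only the Contains check itself comes from this diff:

```go
package main

import (
	"fmt"
	"strings"
)

const (
	defaultImageNameENT   = "hashicorp/consul-enterprise" // assumed value
	imageVersionSuffixENT = "-ent"                        // assumed value
)

// isSemVer stands in for the package's real helper.
func isSemVer(version string) bool {
	return strings.Count(version, ".") == 2
}

func dockerImage(image, version string) string {
	v := image + ":" + version
	// Contains (new) instead of == (old): enterprise-derived image names,
	// not just the exact default, now get the enterprise suffix.
	if strings.Contains(image, defaultImageNameENT) && isSemVer(version) {
		v += imageVersionSuffixENT
	}
	return v
}

func main() {
	fmt.Println(dockerImage("hashicorp/consul-enterprise", "1.15.3"))
	// hashicorp/consul-enterprise:1.15.3-ent (the old check matched this too)
	fmt.Println(dockerImage("myregistry.example/hashicorp/consul-enterprise", "1.15.3"))
	// myregistry.example/hashicorp/consul-enterprise:1.15.3-ent (the old == check missed this)
}
```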
```diff
@@ -14,6 +14,7 @@ import (
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
 )
```
```diff
@@ -21,22 +22,20 @@
 func TestBasic(t *testing.T) {
 	t.Parallel()
 
-	configCtx := libcluster.NewBuildContext(t, libcluster.BuildOptions{
+	const numServers = 1
+	buildOpts := &libcluster.BuildOptions{
 		ConsulImageName:      utils.GetLatestImageName(),
 		ConsulVersion:        utils.LatestVersion,
 		Datacenter:           "dc1",
 		InjectAutoEncryption: true,
-	})
-
-	const numServers = 1
-
-	serverConf := libcluster.NewConfigBuilder(configCtx).
-		Bootstrap(numServers).
-		ToAgentConfig(t)
-	t.Logf("Cluster config:\n%s", serverConf.JSON)
-	require.Equal(t, utils.LatestVersion, serverConf.Version) // TODO: remove
-
-	cluster, err := libcluster.NewN(t, *serverConf, numServers)
-	require.NoError(t, err)
+	}
+
+	cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
+		NumServers:                1,
+		BuildOpts:                 buildOpts,
+		ApplyDefaultProxySettings: true,
+	})
 
 	client := cluster.APIClient(0)
 
 	libcluster.WaitForLeader(t, cluster, client)
```
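Both rewritten tests follow the same recipe: the explicit NewBuildContext, NewConfigBuilder, NewN sequence is dropped in favor of the shared topology.NewCluster helper, so the OSS and enterprise trees construct test clusters through one code path. Reconstructed from the deleted lines, the helper is assumed to wrap roughly the following; the real one also creates clients and applies default proxy settings, and returns three values, hence `cluster, _, _ :=`:

```go
// Sketch assembled from the code this commit removes; not the actual
// libs/topology implementation. Assumes the same imports these tests use
// (libcluster, topology) plus testify's require.
func newClusterSketch(t *testing.T, cfg *topology.ClusterConfig) *libcluster.Cluster {
	configCtx := libcluster.NewBuildContext(t, *cfg.BuildOpts)

	// Build one agent config and stamp it out NumServers times.
	serverConf := libcluster.NewConfigBuilder(configCtx).
		Bootstrap(cfg.NumServers).
		ToAgentConfig(t)
	t.Logf("Cluster config:\n%s", serverConf.JSON)

	cluster, err := libcluster.NewN(t, *serverConf, cfg.NumServers)
	require.NoError(t, err)
	return cluster
}
```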
```diff
@@ -53,7 +52,7 @@ func TestBasic(t *testing.T) {
 
 	// upgrade the cluster to the Target version
 	t.Logf("initiating standard upgrade to version=%q", utils.TargetVersion)
-	err = cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), utils.TargetVersion)
+	err := cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), utils.TargetVersion)
 
 	require.NoError(t, err)
 	libcluster.WaitForLeader(t, cluster, client)
```
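With the `cluster, err := libcluster.NewN(...)` line gone, err is no longer declared at this point, hence the switch from `err =` to `err :=` here and at the goretry.Do call further down. StandardUpgrade itself only appears as a call site; as a rough, entirely stubbed illustration of the shape such an all-servers upgrade takes (not the real libcluster implementation):

```go
package main

import (
	"context"
	"fmt"
)

type agent struct{ name, image, version string }

// upgrade stands in for stopping a container and restarting it from the
// target image.
func (a *agent) upgrade(ctx context.Context, image, version string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	a.image, a.version = image, version
	return nil
}

// standardUpgradeSketch mirrors the call shape in the test: move every
// server to the target image and version, then let the caller re-verify
// leadership (the test calls WaitForLeader right after).
func standardUpgradeSketch(ctx context.Context, agents []*agent, image, version string) error {
	for _, a := range agents {
		if err := a.upgrade(ctx, image, version); err != nil {
			return fmt.Errorf("upgrading %s: %w", a.name, err)
		}
	}
	return nil
}

func main() {
	servers := []*agent{{name: "server-0"}, {name: "server-1"}}
	err := standardUpgradeSketch(context.Background(), servers, "hashicorp/consul", "1.16.0")
	fmt.Println(err, servers[0].version) // <nil> 1.16.0
}
```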
```diff
@@ -16,6 +16,7 @@ import (
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
 )
```
```diff
@@ -50,24 +51,19 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
 	)
 
 	run := func(t *testing.T, tc testcase) {
-		configCtx := libcluster.NewBuildContext(t, libcluster.BuildOptions{
+		const numServers = 1
+		buildOpts := &libcluster.BuildOptions{
 			ConsulImageName:      utils.GetLatestImageName(),
-			ConsulVersion:        tc.oldVersion,
+			ConsulVersion:        utils.LatestVersion,
 			Datacenter:           "dc1",
 			InjectAutoEncryption: true,
-		})
-
-		const (
-			numServers = 1
-		)
-
-		serverConf := libcluster.NewConfigBuilder(configCtx).
-			Bootstrap(numServers).
-			ToAgentConfig(t)
-		t.Logf("Cluster config:\n%s", serverConf.JSON)
-		require.Equal(t, tc.oldVersion, serverConf.Version) // TODO: remove
-
-		cluster, err := libcluster.NewN(t, *serverConf, numServers)
-		require.NoError(t, err)
+		}
+
+		cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
+			NumServers:                numServers,
+			BuildOpts:                 buildOpts,
+			ApplyDefaultProxySettings: true,
+		})
 
 		client := cluster.APIClient(0)
 
 		libcluster.WaitForLeader(t, cluster, client)
```
```diff
@@ -80,7 +76,7 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
 		require.NoError(t, client.Agent().ServiceRegister(
 			&api.AgentServiceRegistration{Name: serviceName, Port: 9998},
 		))
-		err = goretry.Do(
+		err := goretry.Do(
 			func() error {
 				ch, errCh := libservice.ServiceHealthBlockingQuery(client, serviceName, index)
 				select {
```
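The retried function wraps libservice.ServiceHealthBlockingQuery, which, judging from this call site, reports results on one channel and failures on another. A self-contained sketch of that retry-around-blocking-query pattern, assuming goretry is the avast/retry-go package (a stub query is included so the sketch runs):

```go
package main

import (
	"fmt"
	"time"

	goretry "github.com/avast/retry-go" // assumed to be the alias used by the test
)

// Stub for the blocking health query: the real
// libservice.ServiceHealthBlockingQuery returns a result channel and an
// error channel, as its use in the hunk above suggests.
func serviceHealthBlockingQuery(attempt *int) (chan int, chan error) {
	ch, errCh := make(chan int, 1), make(chan error, 1)
	*attempt++
	if *attempt < 3 {
		errCh <- fmt.Errorf("not ready (attempt %d)", *attempt)
	} else {
		ch <- 1 // one healthy instance
	}
	return ch, errCh
}

func main() {
	attempt := 0
	err := goretry.Do(
		func() error {
			ch, errCh := serviceHealthBlockingQuery(&attempt)
			select {
			case err := <-errCh:
				return err // goretry.Do re-runs the function on error
			case n := <-ch:
				if n == 0 {
					return fmt.Errorf("service not healthy yet")
				}
				return nil
			}
		},
		goretry.Attempts(5),
		goretry.Delay(100*time.Millisecond),
	)
	fmt.Println("result:", err) // result: <nil>, after two failed attempts
}
```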
```diff
@@ -85,7 +85,7 @@ func TestMixedServersMajorityTargetGAClient(t *testing.T) {
 func testMixedServersGAClient(t *testing.T, majorityIsTarget bool) {
 	var (
 		latestOpts = libcluster.BuildOptions{
-			ConsulImageName: utils.LatestImageName,
+			ConsulImageName: utils.GetLatestImageName(),
 			ConsulVersion:   utils.LatestVersion,
 		}
 		targetOpts = libcluster.BuildOptions{
```
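Here and in the next hunk, the utils.LatestImageName package variable gives way to the utils.GetLatestImageName() accessor. Putting a function in front of the value is presumably the drift mitigation itself: the enterprise repo can change what the accessor returns without editing every call site. A sketch of a plausible shape; everything except the DefaultImageNameENT identifier is an assumption:

```go
package utils

// Sketch only. DefaultImageNameENT appears in this commit; the OSS
// constant, the switch, and what drives it are assumptions.
const (
	DefaultImageNameOSS = "hashicorp/consul"            // assumed
	DefaultImageNameENT = "hashicorp/consul-enterprise" // assumed value
)

// isEnterprise would be set differently in the enterprise repo (assumption).
var isEnterprise = false

// GetLatestImageName returns the default image for "latest"-version tests.
// Being a function, unlike the old LatestImageName variable, it gives the
// enterprise fork a single place to redirect the lookup.
func GetLatestImageName() string {
	if isEnterprise {
		return DefaultImageNameENT
	}
	return DefaultImageNameOSS
}
```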
```diff
@@ -137,7 +137,7 @@ func testMixedServersGAClient(t *testing.T, majorityIsTarget bool) {
 	cluster, err := libcluster.New(t, configs)
 	require.NoError(t, err)
 
-	libservice.ClientsCreate(t, numClients, utils.LatestImageName, utils.LatestVersion, cluster)
+	libservice.ClientsCreate(t, numClients, utils.GetLatestImageName(), utils.LatestVersion, cluster)
 
 	client := cluster.APIClient(0)
 
```
```diff
@@ -0,0 +1,81 @@
+package upgrade
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
+	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+)
+
+// CreateAndRegisterStaticClientSidecarWith2Upstreams creates a static-client that
+// has two upstreams connecting to destinationNames: local bind addresses are 5000
+// and 5001.
+// - crossCluster: true if upstream is in another cluster
+func CreateAndRegisterStaticClientSidecarWith2Upstreams(c *cluster.Cluster, destinationNames []string, crossCluster bool) (*libservice.ConnectContainer, error) {
+	// Do some trickery to ensure that partial completion is correctly torn
+	// down, but successful execution is not.
+	var deferClean utils.ResettableDefer
+	defer deferClean.Execute()
+
+	node := c.Servers()[0]
+	mgwMode := api.MeshGatewayModeLocal
+
+	// Register the static-client service and sidecar first to prevent race with sidecar
+	// trying to get xDS before it's ready
+	req := &api.AgentServiceRegistration{
+		Name: libservice.StaticClientServiceName,
+		Port: 8080,
+		Connect: &api.AgentServiceConnect{
+			SidecarService: &api.AgentServiceRegistration{
+				Proxy: &api.AgentServiceConnectProxyConfig{
+					Upstreams: []api.Upstream{
+						{
+							DestinationName:  destinationNames[0],
+							LocalBindAddress: "0.0.0.0",
+							LocalBindPort:    cluster.ServiceUpstreamLocalBindPort,
+						},
+						{
+							DestinationName:  destinationNames[1],
+							LocalBindAddress: "0.0.0.0",
+							LocalBindPort:    cluster.ServiceUpstreamLocalBindPort2,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	if crossCluster {
+		for _, upstream := range req.Connect.SidecarService.Proxy.Upstreams {
+			upstream.MeshGateway = api.MeshGatewayConfig{
+				Mode: mgwMode,
+			}
+		}
+	}
+
+	if err := node.GetClient().Agent().ServiceRegister(req); err != nil {
+		return nil, err
+	}
+
+	// Create a service and proxy instance
+	sidecarCfg := libservice.SidecarConfig{
+		Name:      fmt.Sprintf("%s-sidecar", libservice.StaticClientServiceName),
+		ServiceID: libservice.StaticClientServiceName,
+	}
+
+	clientConnectProxy, err := libservice.NewConnectService(context.Background(), sidecarCfg, []int{cluster.ServiceUpstreamLocalBindPort, cluster.ServiceUpstreamLocalBindPort2}, node)
+	if err != nil {
+		return nil, err
+	}
+	deferClean.Add(func() {
+		_ = clientConnectProxy.Terminate()
+	})
+
+	// disable cleanup functions now that we have an object with a Terminate() function
+	deferClean.Reset()
+
+	return clientConnectProxy, nil
+}
```
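This is the former peering-test helper, moved verbatim into the shared test/upgrade package and exported; its call sites are updated in the peering hunks below. One Go subtlety travels with it: api.Upstream is a struct and Upstreams holds values, so the crossCluster loop ranges over copies, and the MeshGateway assignment never reaches the slice stored in req. If persisting the mode is the intent, an index-based loop does it; a self-contained demonstration with stub types (not the api package):

```go
package main

import "fmt"

// Minimal stand-ins for the api types involved (assumed shapes).
type MeshGatewayConfig struct{ Mode string }
type Upstream struct {
	DestinationName string
	MeshGateway     MeshGatewayConfig
}

func main() {
	upstreams := []Upstream{{DestinationName: "a"}, {DestinationName: "b"}}

	// Range-by-value: each iteration works on a copy; the slice is unchanged.
	for _, u := range upstreams {
		u.MeshGateway = MeshGatewayConfig{Mode: "local"}
	}
	fmt.Println(upstreams[0].MeshGateway.Mode) // "" - the assignment was lost

	// Index-based: writes land in the slice element itself.
	for i := range upstreams {
		upstreams[i].MeshGateway = MeshGatewayConfig{Mode: "local"}
	}
	fmt.Println(upstreams[0].MeshGateway.Mode) // "local"
}
```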
```diff
@@ -12,10 +12,10 @@ import (
 
 	"github.com/hashicorp/consul/api"
 	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
-	"github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
 	libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+	"github.com/hashicorp/consul/test/integration/consul-container/test/upgrade"
 )
 
 func TestPeering_Basic(t *testing.T) {
```
```diff
@@ -101,7 +101,7 @@ func TestPeering_HTTPResolverAndFailover(t *testing.T) {
 		},
 	}))
 
-	clientConnectProxy, err := createAndRegisterStaticClientSidecarWith2Upstreams(dialingCluster,
+	clientConnectProxy, err := upgrade.CreateAndRegisterStaticClientSidecarWith2Upstreams(dialingCluster,
 		[]string{libservice.StaticServerServiceName, "peer-static-server"}, true,
 	)
 	require.NoErrorf(t, err, "error creating client connect proxy in cluster %s", dialingCluster.NetworkName)
```
```diff
@@ -194,7 +194,7 @@ func TestPeering_HTTPResolverAndSplitter(t *testing.T) {
 		},
 	}))
 
-	clientConnectProxy, err := createAndRegisterStaticClientSidecarWith2Upstreams(dialingCluster,
+	clientConnectProxy, err := upgrade.CreateAndRegisterStaticClientSidecarWith2Upstreams(dialingCluster,
 		[]string{"split-static-server", "peer-static-server"}, true,
 	)
 	require.NoErrorf(t, err, "creating client connect proxy in cluster %s", dialingCluster.NetworkName)
```
```diff
@@ -332,73 +332,3 @@ func peeringPostUpgradeValidation(t *testing.T, dialing *libtopology.BuiltCluster) {
 	libassert.HTTPServiceEchoes(t, "localhost", port, "")
 	libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName, "")
 }
-
-// createAndRegisterStaticClientSidecarWith2Upstreams creates a static-client that
-// has two upstreams connecting to destinationNames: local bind addresses are 5000
-// and 5001.
-// - crossCluster: true if upstream is in another cluster
-func createAndRegisterStaticClientSidecarWith2Upstreams(c *cluster.Cluster, destinationNames []string, crossCluster bool) (*libservice.ConnectContainer, error) {
-	// Do some trickery to ensure that partial completion is correctly torn
-	// down, but successful execution is not.
-	var deferClean utils.ResettableDefer
-	defer deferClean.Execute()
-
-	node := c.Servers()[0]
-	mgwMode := api.MeshGatewayModeLocal
-
-	// Register the static-client service and sidecar first to prevent race with sidecar
-	// trying to get xDS before it's ready
-	req := &api.AgentServiceRegistration{
-		Name: libservice.StaticClientServiceName,
-		Port: 8080,
-		Connect: &api.AgentServiceConnect{
-			SidecarService: &api.AgentServiceRegistration{
-				Proxy: &api.AgentServiceConnectProxyConfig{
-					Upstreams: []api.Upstream{
-						{
-							DestinationName:  destinationNames[0],
-							LocalBindAddress: "0.0.0.0",
-							LocalBindPort:    cluster.ServiceUpstreamLocalBindPort,
-						},
-						{
-							DestinationName:  destinationNames[1],
-							LocalBindAddress: "0.0.0.0",
-							LocalBindPort:    cluster.ServiceUpstreamLocalBindPort2,
-						},
-					},
-				},
-			},
-		},
-	}
-
-	if crossCluster {
-		for _, upstream := range req.Connect.SidecarService.Proxy.Upstreams {
-			upstream.MeshGateway = api.MeshGatewayConfig{
-				Mode: mgwMode,
-			}
-		}
-	}
-
-	if err := node.GetClient().Agent().ServiceRegister(req); err != nil {
-		return nil, err
-	}
-
-	// Create a service and proxy instance
-	sidecarCfg := libservice.SidecarConfig{
-		Name:      fmt.Sprintf("%s-sidecar", libservice.StaticClientServiceName),
-		ServiceID: libservice.StaticClientServiceName,
-	}
-
-	clientConnectProxy, err := libservice.NewConnectService(context.Background(), sidecarCfg, []int{cluster.ServiceUpstreamLocalBindPort, cluster.ServiceUpstreamLocalBindPort2}, node)
-	if err != nil {
-		return nil, err
-	}
-	deferClean.Add(func() {
-		_ = clientConnectProxy.Terminate()
-	})
-
-	// disable cleanup functions now that we have an object with a Terminate() function
-	deferClean.Reset()
-
-	return clientConnectProxy, nil
-}
```