Merge pull request #4573 from hashicorp/NET-2841 (#16544)

* Merge pull request #4573 from hashicorp/NET-2841

NET-2841: PART 2 refactor upgrade tests to include version 1.15

* update upgrade versions
This commit is contained in:
Anita Akaeze 2023-03-06 11:40:33 -05:00 committed by GitHub
parent 39dc305143
commit 210ea1da42
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 89 additions and 150 deletions

View File

@ -163,24 +163,3 @@ func testMixedServersGAClient(t *testing.T, majorityIsTarget bool) {
t.Fatalf("test timeout")
}
}
// serversCluster provisions a server-only Consul cluster of numServers
// agents running the given image/version, then blocks until a leader is
// elected and all members have joined before returning it.
func serversCluster(t *testing.T, numServers int, image, version string) *libcluster.Cluster {
	buildCtx := libcluster.NewBuildContext(t, libcluster.BuildOptions{
		ConsulImageName: image,
		ConsulVersion:   version,
	})

	serverConf := libcluster.NewConfigBuilder(buildCtx).
		Bootstrap(numServers).
		ToAgentConfig(t)
	t.Logf("Cluster server config:\n%s", serverConf.JSON)

	c, err := libcluster.NewN(t, *serverConf, numServers)
	require.NoError(t, err)

	// Readiness gates: leader election first, then full membership.
	libcluster.WaitForLeader(t, c, nil)
	libcluster.WaitForMembers(t, c.APIClient(0), numServers)

	return c
}

View File

@ -19,29 +19,14 @@ import (
func TestACL_Upgrade_Node_Token(t *testing.T) {
t.Parallel()
type testcase struct {
oldversion string
targetVersion string
}
tcs := []testcase{
{
oldversion: "1.13",
targetVersion: utils.TargetVersion,
},
{
oldversion: "1.14",
targetVersion: utils.TargetVersion,
},
}
run := func(t *testing.T, tc testcase) {
run := func(t *testing.T, oldVersion, targetVersion string) {
// NOTE: Disable auto.encrypt due to its conflict with ACL token during bootstrap
cluster, _, _ := libtopology.NewCluster(t, &libtopology.ClusterConfig{
NumServers: 1,
NumClients: 1,
BuildOpts: &libcluster.BuildOptions{
Datacenter: "dc1",
ConsulVersion: tc.oldversion,
ConsulVersion: oldVersion,
InjectAutoEncryption: false,
ACLEnabled: true,
},
@ -52,7 +37,7 @@ func TestACL_Upgrade_Node_Token(t *testing.T) {
cluster.Agents[1].GetAgentName())
require.NoError(t, err)
err = cluster.StandardUpgrade(t, context.Background(), tc.targetVersion)
err = cluster.StandardUpgrade(t, context.Background(), targetVersion)
require.NoError(t, err)
// Post upgrade validation: agent token can be used to query the node
@ -61,11 +46,10 @@ func TestACL_Upgrade_Node_Token(t *testing.T) {
require.NoError(t, err)
libassert.CatalogNodeExists(t, client, cluster.Agents[1].GetAgentName())
}
for _, tc := range tcs {
t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldversion, tc.targetVersion),
for _, oldVersion := range UpgradeFromVersions {
t.Run(fmt.Sprintf("Upgrade from %s to %s", oldVersion, utils.TargetVersion),
func(t *testing.T) {
run(t, tc)
run(t, oldVersion, utils.TargetVersion)
})
}
}

View File

@ -15,37 +15,42 @@ import (
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
type testcase struct {
oldVersion string
targetVersion string
expectErr bool
}
var (
tcs []testcase
)
// Test upgrade a cluster of latest version to the target version
func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
t.Parallel()
type testcase struct {
oldversion string
targetVersion string
expectErr bool
}
tcs := []testcase{
// Use the case of "1.12.3" ==> "1.13.0" to verify the test can
// catch the upgrade bug found in snapshot of 1.13.0
{
oldversion: "1.12.3",
tcs = append(tcs,
testcase{
// Use the case of "1.12.3" ==> "1.13.0" to verify the test can
// catch the upgrade bug found in snapshot of 1.13.0
oldVersion: "1.12.3",
targetVersion: "1.13.0",
expectErr: true,
},
{
oldversion: "1.13",
targetVersion: utils.TargetVersion,
},
{
oldversion: "1.14",
)
for _, oldVersion := range UpgradeFromVersions {
tcs = append(tcs, testcase{
oldVersion: oldVersion,
targetVersion: utils.TargetVersion,
},
)
}
run := func(t *testing.T, tc testcase) {
configCtx := libcluster.NewBuildContext(t, libcluster.BuildOptions{
ConsulImageName: utils.TargetImageName,
ConsulVersion: tc.oldversion,
ConsulVersion: tc.oldVersion,
})
const (
@ -56,7 +61,7 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
Bootstrap(numServers).
ToAgentConfig(t)
t.Logf("Cluster config:\n%s", serverConf.JSON)
require.Equal(t, tc.oldversion, serverConf.Version) // TODO: remove
require.Equal(t, tc.oldVersion, serverConf.Version) // TODO: remove
cluster, err := libcluster.NewN(t, *serverConf, numServers)
require.NoError(t, err)
@ -90,6 +95,7 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
// upgrade the cluster to the Target version
t.Logf("initiating standard upgrade to version=%q", tc.targetVersion)
err = cluster.StandardUpgrade(t, context.Background(), tc.targetVersion)
if !tc.expectErr {
require.NoError(t, err)
libcluster.WaitForLeader(t, cluster, client)
@ -108,9 +114,10 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
}
for _, tc := range tcs {
t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldversion, tc.targetVersion),
t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldVersion, tc.targetVersion),
func(t *testing.T) {
run(t, tc)
})
time.Sleep(1 * time.Second)
}
}

View File

@ -16,7 +16,7 @@ import (
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
libutils "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -31,20 +31,6 @@ import (
// - performs these tests again
func TestIngressGateway_UpgradeToTarget_fromLatest(t *testing.T) {
t.Parallel()
type testcase struct {
oldversion string
targetVersion string
}
tcs := []testcase{
{
oldversion: "1.13",
targetVersion: libutils.TargetVersion,
},
{
oldversion: "1.14",
targetVersion: libutils.TargetVersion,
},
}
run := func(t *testing.T, oldVersion, targetVersion string) {
// setup
@ -283,15 +269,17 @@ func TestIngressGateway_UpgradeToTarget_fromLatest(t *testing.T) {
tests(t)
})
}
for _, tc := range tcs {
for _, oldVersion := range UpgradeFromVersions {
// copy to avoid lint loopclosure
tc := tc
t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldversion, tc.targetVersion),
oldVersion := oldVersion
t.Run(fmt.Sprintf("Upgrade from %s to %s", oldVersion, utils.TargetVersion),
func(t *testing.T) {
t.Parallel()
run(t, tc.oldversion, tc.targetVersion)
run(t, oldVersion, utils.TargetVersion)
})
time.Sleep(3 * time.Second)
time.Sleep(1 * time.Second)
}
}

View File

@ -11,9 +11,8 @@ import (
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
libutils "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
upgrade "github.com/hashicorp/consul/test/integration/consul-container/test/upgrade"
"github.com/hashicorp/go-version"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/test/integration/consul-container/test/upgrade"
"github.com/stretchr/testify/require"
)
@ -330,11 +329,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
Datacenter: "dc1",
InjectAutoEncryption: true,
}
// If version < 1.14 disable AutoEncryption
oldVersionTmp, _ := version.NewVersion(oldVersion)
if oldVersionTmp.LessThan(libutils.Version_1_14) {
buildOpts.InjectAutoEncryption = false
}
cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
NumServers: 1,
NumClients: 1,
@ -391,12 +386,11 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
tc.extraAssertion(staticClientProxy)
}
targetVersion := libutils.TargetVersion
for _, oldVersion := range upgrade.UpgradeFromVersions {
for _, tc := range tcs {
t.Run(fmt.Sprintf("%s upgrade from %s to %s", tc.name, oldVersion, targetVersion),
t.Run(fmt.Sprintf("%s upgrade from %s to %s", tc.name, oldVersion, utils.TargetVersion),
func(t *testing.T) {
run(t, tc, oldVersion, targetVersion)
run(t, tc, oldVersion, utils.TargetVersion)
})
}
}

View File

@ -24,25 +24,8 @@ import (
func TestPeering_Upgrade_ControlPlane_MGW(t *testing.T) {
t.Parallel()
type testcase struct {
oldversion string
targetVersion string
}
tcs := []testcase{
// {
// TODO: the API changed from 1.13 to 1.14: PeerName was renamed to Peer in
// exportConfigEntry
// oldversion: "1.13",
// targetVersion: *utils.TargetVersion,
// },
{
oldversion: "1.14",
targetVersion: utils.TargetVersion,
},
}
run := func(t *testing.T, tc testcase) {
accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, tc.oldversion, true)
run := func(t *testing.T, oldVersion, targetVersion string) {
accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, oldVersion, true)
var (
acceptingCluster = accepting.Cluster
dialingCluster = dialing.Cluster
@ -66,11 +49,11 @@ func TestPeering_Upgrade_ControlPlane_MGW(t *testing.T) {
"upstream_cx_total", 1)
// Upgrade the accepting cluster and assert peering is still ACTIVE
require.NoError(t, acceptingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
require.NoError(t, acceptingCluster.StandardUpgrade(t, context.Background(), targetVersion))
libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)
require.NoError(t, dialingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
require.NoError(t, dialingCluster.StandardUpgrade(t, context.Background(), targetVersion))
libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)
@ -105,10 +88,10 @@ func TestPeering_Upgrade_ControlPlane_MGW(t *testing.T) {
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName, "")
}
for _, tc := range tcs {
t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldversion, tc.targetVersion),
for _, oldVersion := range UpgradeFromVersions {
t.Run(fmt.Sprintf("Upgrade from %s to %s", oldVersion, utils.TargetVersion),
func(t *testing.T) {
run(t, tc)
run(t, oldVersion, utils.TargetVersion)
})
}
}

View File

@ -21,9 +21,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
t.Parallel()
type testcase struct {
oldversion string
targetVersion string
name string
name string
// create creates additional resources in peered clusters depending on cases, e.g., static-client,
// static server, and config-entries. It returns the proxy services, an assertion function to
// be called to verify the resources.
@ -40,18 +38,14 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
// targetVersion: *utils.TargetVersion,
// },
{
oldversion: "1.14",
targetVersion: utils.TargetVersion,
name: "basic",
name: "basic",
create: func(accepting *cluster.Cluster, dialing *cluster.Cluster) (libservice.Service, libservice.Service, func(), error) {
return nil, nil, func() {}, nil
},
extraAssertion: func(clientUpstreamPort int) {},
},
{
oldversion: "1.14",
targetVersion: utils.TargetVersion,
name: "http_router",
name: "http_router",
// Create a second static-service at the client agent of accepting cluster and
// a service-router that routes /static-server-2 to static-server-2
create: func(accepting *cluster.Cluster, dialing *cluster.Cluster) (libservice.Service, libservice.Service, func(), error) {
@ -104,9 +98,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
},
},
{
oldversion: "1.14",
targetVersion: utils.TargetVersion,
name: "http splitter and resolver",
name: "http splitter and resolver",
// In addition to the basic topology, this case provisions the following
// services in the dialing cluster:
//
@ -221,9 +213,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
extraAssertion: func(clientUpstreamPort int) {},
},
{
oldversion: "1.14",
targetVersion: utils.TargetVersion,
name: "http resolver and failover",
name: "http resolver and failover",
// Verify resolver and failover can direct traffic to server in peered cluster
// In addition to the basic topology, this case provisions the following
// services in the dialing cluster:
@ -316,8 +306,8 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
},
}
run := func(t *testing.T, tc testcase) {
accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, tc.oldversion, false)
run := func(t *testing.T, tc testcase, oldVersion, targetVersion string) {
accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, oldVersion, false)
var (
acceptingCluster = accepting.Cluster
dialingCluster = dialing.Cluster
@ -339,11 +329,11 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
tc.extraAssertion(appPort)
// Upgrade the accepting cluster and assert peering is still ACTIVE
require.NoError(t, acceptingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
require.NoError(t, acceptingCluster.StandardUpgrade(t, context.Background(), targetVersion))
libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)
require.NoError(t, dialingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
require.NoError(t, dialingCluster.StandardUpgrade(t, context.Background(), targetVersion))
libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)
@ -382,12 +372,13 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
tc.extraAssertion(appPort)
}
for _, tc := range tcs {
t.Run(fmt.Sprintf("%s upgrade from %s to %s", tc.name, tc.oldversion, tc.targetVersion),
func(t *testing.T) {
run(t, tc)
})
// time.Sleep(3 * time.Second)
for _, oldVersion := range UpgradeFromVersions {
for _, tc := range tcs {
t.Run(fmt.Sprintf("%s upgrade from %s to %s", tc.name, oldVersion, utils.TargetVersion),
func(t *testing.T) {
run(t, tc, oldVersion, utils.TargetVersion)
})
}
}
}

View File

@ -133,17 +133,23 @@ func testLatestGAServersWithCurrentClients_TenancyCRUD(
)
// Create initial cluster
cluster := serversCluster(t, numServers, utils.LatestImageName, utils.LatestVersion)
libservice.ClientsCreate(t, numClients, utils.LatestImageName, utils.LatestVersion, cluster)
cluster, _, _ := libtopology.NewCluster(t, &libtopology.ClusterConfig{
NumServers: numServers,
NumClients: numClients,
BuildOpts: &libcluster.BuildOptions{
Datacenter: "dc1",
ConsulImageName: utils.LatestImageName,
ConsulVersion: utils.LatestVersion,
},
ApplyDefaultProxySettings: true,
})
client := cluster.APIClient(0)
libcluster.WaitForLeader(t, cluster, client)
libcluster.WaitForMembers(t, client, 5)
testutil.RunStep(t, "Create "+tenancyName, func(t *testing.T) {
fmt.Println("!!!!!!!")
createFn(t, client)
fmt.Println("!!!!DONE!!!!")
})
ctx := context.Background()
@ -238,9 +244,16 @@ func testLatestGAServersWithCurrentClients_TenancyCRUD(
}
// Create a fresh cluster from scratch
cluster2 := serversCluster(t, numServers, utils.TargetImageName, utils.TargetVersion)
libservice.ClientsCreate(t, numClients, utils.LatestImageName, utils.LatestVersion, cluster2)
cluster2, _, _ := libtopology.NewCluster(t, &libtopology.ClusterConfig{
NumServers: numServers,
NumClients: numClients,
BuildOpts: &libcluster.BuildOptions{
Datacenter: "dc1",
ConsulImageName: utils.LatestImageName,
ConsulVersion: utils.LatestVersion,
},
ApplyDefaultProxySettings: true,
})
client2 := cluster2.APIClient(0)
testutil.RunStep(t, "Restore saved snapshot", func(t *testing.T) {

View File

@ -1,3 +1,3 @@
package upgrade
// UpgradeFromVersions lists the Consul versions the upgrade tests start
// from before upgrading to utils.TargetVersion.
var UpgradeFromVersions = []string{"1.14", "1.15"}