Backport 1.16.x Upgrade test: remove outdated test and disable log due to verbosity (… (#18413)

Upgrade test: remove outdated test and disable log due to verbosity (#18403)

* remove outdated test

* disable log following, since we have too many parallel tests
Author: cskh, 2023-08-08 18:23:07 -04:00 (committed by GitHub)
parent fd5d0c4929
commit 97d44d170b
2 changed files with 60 additions and 88 deletions


@@ -538,7 +538,9 @@ jobs:
             -p=4 \
             -tags "${{ env.GOTAGS }}" \
             -timeout=30m \
-            -json ./... \
+            -json \
+            ./... \
+            --follow-log=false \
             --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \
             --target-version local \
             --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \
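
The new --follow-log=false argument is a flag on the upgrade test binaries themselves, passed alongside the existing --target-image/--latest-image flags; with -p=4 packages running in parallel, following every container's log made the CI output extremely verbose. As a minimal sketch of how such a flag can gate log streaming, assuming a testcontainers-go based harness (the package name, followLog variable, and maybeStreamLogs function are illustrative, not the repo's actual wiring):

package libcluster // illustrative package name

import (
    "context"
    "flag"
    "io"
    "os"

    "github.com/testcontainers/testcontainers-go"
)

// Assumed flag definition: following logs stays the default, so local runs
// keep the old behavior and CI opts out explicitly via --follow-log=false.
var followLog = flag.Bool("follow-log", true,
    "stream container logs into the test output")

// maybeStreamLogs (hypothetical) copies a container's log stream to stderr,
// but only when --follow-log is enabled.
func maybeStreamLogs(ctx context.Context, c testcontainers.Container) error {
    if !*followLog {
        return nil
    }
    logs, err := c.Logs(ctx) // testcontainers-go returns an io.ReadCloser
    if err != nil {
        return err
    }
    go func() {
        defer logs.Close()
        _, _ = io.Copy(os.Stderr, logs)
    }()
    return nil
}

Keeping true as the default and opting out in CI means only the workflow invocation changes; developers debugging a single test locally still see container logs.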


@@ -32,99 +32,69 @@ var (
 // Test upgrade a cluster of latest version to the target version
 func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
-    tcs = append(tcs,
-        testcase{
-            // Use the case of "1.12.3" ==> "1.13.0" to verify the test can
-            // catch the upgrade bug found in snapshot of 1.13.0
-            oldVersion:    "1.12.3",
-            targetVersion: "1.13.0",
-            expectErr:     true,
-        },
-    )
-    tcs = append(tcs, testcase{
-        oldVersion:    utils.LatestVersion,
-        targetVersion: utils.TargetVersion,
-    },
-    )
-
-    run := func(t *testing.T, tc testcase) {
-        const numServers = 1
-        buildOpts := &libcluster.BuildOptions{
-            ConsulImageName:      utils.GetLatestImageName(),
-            ConsulVersion:        utils.LatestVersion,
-            Datacenter:           "dc1",
-            InjectAutoEncryption: true,
-        }
-        cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
-            NumServers:                numServers,
-            BuildOpts:                 buildOpts,
-            ApplyDefaultProxySettings: true,
-        })
-        client := cluster.APIClient(0)
-
-        libcluster.WaitForLeader(t, cluster, client)
-        libcluster.WaitForMembers(t, client, numServers)
-
-        // Create a service to be stored in the snapshot
-        const serviceName = "api"
-        index := libservice.ServiceCreate(t, client, serviceName)
-
-        require.NoError(t, client.Agent().ServiceRegister(
-            &api.AgentServiceRegistration{Name: serviceName, Port: 9998},
-        ))
-        err := goretry.Do(
-            func() error {
-                ch, errCh := libservice.ServiceHealthBlockingQuery(client, serviceName, index)
-                select {
-                case err := <-errCh:
-                    require.NoError(t, err)
-                case service := <-ch:
-                    index = service[0].Service.ModifyIndex
-                    if len(service) != 1 {
-                        return fmt.Errorf("service is %d, want 1", len(service))
-                    }
-                    if serviceName != service[0].Service.Service {
-                        return fmt.Errorf("service name is %s, want %s", service[0].Service.Service, serviceName)
-                    }
-                    if service[0].Service.Port != 9998 {
-                        return fmt.Errorf("service is %d, want 9998", service[0].Service.Port)
-                    }
-                }
-                return nil
-            },
-            goretry.Attempts(5),
-            goretry.Delay(time.Second),
-        )
-        require.NoError(t, err)
-
-        // upgrade the cluster to the Target version
-        t.Logf("initiating standard upgrade to version=%q", tc.targetVersion)
-        err = cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), tc.targetVersion)
-        if !tc.expectErr {
-            require.NoError(t, err)
-            libcluster.WaitForLeader(t, cluster, client)
-            libcluster.WaitForMembers(t, client, numServers)
-
-            // Verify service is restored from the snapshot
-            retry.RunWith(&retry.Timer{Timeout: 5 * time.Second, Wait: 500 * time.Microsecond}, t, func(r *retry.R) {
-                service, _, err := client.Catalog().Service(serviceName, "", &api.QueryOptions{})
-                require.NoError(r, err)
-                require.Len(r, service, 1)
-                require.Equal(r, serviceName, service[0].ServiceName)
-            })
-        } else {
-            require.ErrorContains(t, err, "context deadline exceeded")
-        }
-    }
-
-    for _, tc := range tcs {
-        t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldVersion, tc.targetVersion),
-            func(t *testing.T) {
-                run(t, tc)
-            })
-        time.Sleep(1 * time.Second)
-    }
+    const numServers = 1
+    buildOpts := &libcluster.BuildOptions{
+        ConsulImageName:      utils.GetLatestImageName(),
+        ConsulVersion:        utils.LatestVersion,
+        Datacenter:           "dc1",
+        InjectAutoEncryption: true,
+    }
+
+    cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
+        NumServers:                numServers,
+        BuildOpts:                 buildOpts,
+        ApplyDefaultProxySettings: true,
+    })
+    client := cluster.APIClient(0)
+
+    libcluster.WaitForLeader(t, cluster, client)
+    libcluster.WaitForMembers(t, client, numServers)
+
+    // Create a service to be stored in the snapshot
+    const serviceName = "api"
+    index := libservice.ServiceCreate(t, client, serviceName)
+
+    require.NoError(t, client.Agent().ServiceRegister(
+        &api.AgentServiceRegistration{Name: serviceName, Port: 9998},
+    ))
+    err := goretry.Do(
+        func() error {
+            ch, errCh := libservice.ServiceHealthBlockingQuery(client, serviceName, index)
+            select {
+            case err := <-errCh:
+                require.NoError(t, err)
+            case service := <-ch:
+                index = service[0].Service.ModifyIndex
+                if len(service) != 1 {
+                    return fmt.Errorf("service is %d, want 1", len(service))
+                }
+                if serviceName != service[0].Service.Service {
+                    return fmt.Errorf("service name is %s, want %s", service[0].Service.Service, serviceName)
+                }
+                if service[0].Service.Port != 9998 {
+                    return fmt.Errorf("service is %d, want 9998", service[0].Service.Port)
+                }
+            }
+            return nil
+        },
+        goretry.Attempts(5),
+        goretry.Delay(time.Second),
+    )
+    require.NoError(t, err)
+
+    // upgrade the cluster to the Target version
+    t.Logf("initiating standard upgrade to version=%q", utils.TargetVersion)
+    err = cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), utils.TargetVersion)
+    require.NoError(t, err)
+    libcluster.WaitForLeader(t, cluster, client)
+    libcluster.WaitForMembers(t, client, numServers)
+
+    // Verify service is restored from the snapshot
+    retry.RunWith(&retry.Timer{Timeout: 5 * time.Second, Wait: 500 * time.Microsecond}, t, func(r *retry.R) {
+        service, _, err := client.Catalog().Service(serviceName, "", &api.QueryOptions{})
+        require.NoError(r, err)
+        require.Len(r, service, 1)
+        require.Equal(r, serviceName, service[0].ServiceName)
+    })
 }
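
The readiness check in the retained test leans on libservice.ServiceHealthBlockingQuery, whose shape can be read off the call site: it takes the API client, a service name, and a last-seen Raft index, and returns a result channel plus an error channel. A minimal sketch of how such a helper could be built on the Consul API client's blocking-query support (the body below is an assumption for illustration, not the repo's implementation):

package libservice // illustrative package name

import "github.com/hashicorp/consul/api"

// ServiceHealthBlockingQuery (sketch) issues a health query with WaitIndex
// set, so Consul holds the request server-side until the service's
// ModifyIndex moves past the given index.
func ServiceHealthBlockingQuery(client *api.Client, service string, index uint64) (chan []*api.ServiceEntry, chan error) {
    ch := make(chan []*api.ServiceEntry, 1) // buffered so the goroutine never leaks
    errCh := make(chan error, 1)
    go func() {
        opts := &api.QueryOptions{WaitIndex: index}
        entries, _, err := client.Health().Service(service, "", false, opts)
        if err != nil {
            errCh <- err
            return
        }
        ch <- entries
    }()
    return ch, errCh
}

Feeding each response's ModifyIndex back into the next call, as the goretry loop above does, is the standard Consul long-polling pattern: each retry waits for a change rather than hammering the endpoint.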