open-consul/api/txn_test.go
hc-github-team-consul-core ea93c7b29c
Backport of Displays Consul version of each node in UI nodes section into release/1.16.x (#18113)
## Backport

This PR is auto-generated from #17754 to be assessed for backporting due
to the inclusion of the label backport/1.16.


🚨
> **Warning** automatic cherry-pick of commits failed. If the first commit failed,
> you will see a blank no-op commit below. If at least one commit succeeded, you
> will see the cherry-picked commits up to, _not including_, the commit where the
> merge conflict occurred.

The person who merged the original PR is @WenInCode. They should manually
cherry-pick the original PR into a new backport PR, and close this one when the
manual backport PR is merged in.

> merge conflict error: unable to process merge commit:
> "1c757b8a2c1160ad53421b7b8bd7f74b205c4b89", automatic backport requires
> rebase workflow



The below text is copied from the body of the original PR.

---

Fixes #17097: display the Consul version of each node in the UI nodes section.

@jkirschner-hashicorp @huikang @team @Maintainers

The agent's Consul version is now included in the node registration request and stored as node metadata. The UI fetches this new metadata and displays it in the nodes section.
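
For illustration only (this is not the code from this PR), registering a node whose metadata carries a `consul-version` key could look like the sketch below. The node name, address, and version string are made up; in the real change the agent supplies its own version when it registers itself.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Connect to a local agent with default settings.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical registration: the node metadata carries the Consul
	// version so the UI can show it in the nodes list.
	reg := &api.CatalogRegistration{
		Node:    "example-node", // made-up node name
		Address: "10.0.0.1",     // made-up address
		NodeMeta: map[string]string{
			"consul-version": "1.16.0", // supplied by the agent itself in the real change
		},
	}
	if _, err := client.Catalog().Register(reg, nil); err != nil {
		log.Fatal(err)
	}
}
```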

<img width="1512" alt="Screenshot 2023-06-15 at 4 21 33 PM"
src="https://github.com/hashicorp/consul/assets/3139634/94f7cf6b-701f-4230-b9f7-d8c4342d0737">

This change is also backward compatible and has been tested.

Backward compatible in this context means: if a Consul binary with the changes
from this PR is deployed to one node and the UI is served from that node, the
version is displayed not only for the current (upgraded) node but also for older
nodes, provided those older nodes are Consul servers.
For older client (non-server) nodes the version is not added to the node
metadata, so no version is displayed for them.
If an old node is a Consul server, the version is displayed, because the
`v1/internal/ui/nodes?dc=dc1` endpoint was already returning the version in the
service meta; the UI changes make use of that.
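
As a rough way to inspect the same data outside the UI, one could list the catalog nodes with the Go API client and read the `consul-version` node-meta key; nodes that do not report a version simply lack the key. This sketch is illustrative and assumes a reachable local agent.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// List all nodes in the catalog and print the version each reports.
	nodes, _, err := client.Catalog().Nodes(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes {
		// Older nodes that do not report their version simply lack the key.
		version, ok := n.Meta["consul-version"]
		if !ok {
			version = "unknown"
		}
		fmt.Printf("%s: %s\n", n.Node, version)
	}
}
```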

<img width="1480" alt="Screenshot 2023-06-16 at 6 58 32 PM"
src="https://github.com/hashicorp/consul/assets/3139634/257942f4-fbed-437d-a492-37849d2bec4c">




---

<details>
<summary> Overview of commits </summary>

- 931fdfc7ecdc26bb7cc20b698c5e14c1b65fcc6e
- b3e2ec1ccaca3832a088ffcac54257fa6653c6c1
- 8d0e9a54907039c09330c6cd7b9e761566af6856
- 04e5d88cca37821f6667be381c16aaa5958b5c92
- 28286a2e98f8cd66ef8593c2e2893b4db6080417
- 43e50ad38207952a9c4d04d45d08b6b8f71b31fe
- 0cf1b7077cdf255596254d9dc1624a269c42b94d
- 27f34ce1c2973591f75b1e38a81ccbe7cee6cee3
- 2ac76d62b8cbae76b1a903021aebb9b865e29d6e
- 3d618df9ef1d10dd5056c8b1ed865839c553a0e0
- 1c757b8a2c1160ad53421b7b8bd7f74b205c4b89
- 23ce82b4cee8f74dd634dbe145313e9a56c0077d
- 4dc1c9b4c5aafdb8883ef977dfa9b39da138b6cb
- 85a12a92528bfa267a039a9bb258170be914abf7
- 25d30a3fa980d130a30d445d26d47ef2356cb553
- 7f1d6192dce3352e92307175848b89f91e728c24
- 5174cbff84b0795d4cb36eb8980d0d5336091ac9

</details>

---------

Co-authored-by: Vijay Srinivas <vijayraghav22@gmail.com>
Co-authored-by: John Murret <john.murret@hashicorp.com>
Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com>
2023-07-17 17:27:50 +00:00


// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/go-uuid"
	"github.com/stretchr/testify/require"
)

func TestAPI_ClientTxn(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
session := c.Session()
txn := c.Txn()
// Set up a test service and health check.
nodeID, err := uuid.GenerateUUID()
require.NoError(t, err)
catalog := c.Catalog()
reg := &CatalogRegistration{
ID: nodeID,
Node: "foo",
Address: "2.2.2.2",
Service: &AgentService{
ID: "foo1",
Service: "foo",
},
Checks: HealthChecks{
{
CheckID: "bar",
Status: "critical",
Definition: HealthCheckDefinition{
TCP: "1.1.1.1",
IntervalDuration: 5 * time.Second,
TimeoutDuration: 10 * time.Second,
DeregisterCriticalServiceAfterDuration: 20 * time.Second,
},
},
{
CheckID: "baz",
Status: "passing",
Definition: HealthCheckDefinition{
TCP: "2.2.2.2",
Interval: ReadableDuration(40 * time.Second),
Timeout: ReadableDuration(80 * time.Second),
DeregisterCriticalServiceAfter: ReadableDuration(160 * time.Second),
},
},
{
CheckID: "bor",
Status: "critical",
Definition: HealthCheckDefinition{
UDP: "1.1.1.1",
Interval: ReadableDuration(5 * time.Second),
Timeout: ReadableDuration(10 * time.Second),
DeregisterCriticalServiceAfter: ReadableDuration(20 * time.Second),
},
Type: "udp",
},
{
CheckID: "bur",
Status: "passing",
Definition: HealthCheckDefinition{
UDP: "2.2.2.2",
Interval: ReadableDuration(5 * time.Second),
Timeout: ReadableDuration(10 * time.Second),
DeregisterCriticalServiceAfter: ReadableDuration(20 * time.Second),
},
Type: "udp",
},
},
}
_, err = catalog.Register(reg, nil)
require.NoError(t, err)
node, _, err := catalog.Node("foo", nil)
require.NoError(t, err)
require.Equal(t, nodeID, node.Node.ID)
// Make a session.
id, _, err := session.CreateNoChecks(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
// Acquire and get the key via a transaction, but don't supply a valid
// session.
key := testKey()
value := []byte("test")
ops := TxnOps{
&TxnOp{
KV: &KVTxnOp{
Verb: KVLock,
Key: key,
Value: value,
},
},
&TxnOp{
KV: &KVTxnOp{
Verb: KVGet,
Key: key,
},
},
&TxnOp{
Node: &NodeTxnOp{
Verb: NodeGet,
Node: Node{Node: "foo"},
},
},
&TxnOp{
Service: &ServiceTxnOp{
Verb: ServiceGet,
Node: "foo",
Service: AgentService{ID: "foo1"},
},
},
&TxnOp{
Check: &CheckTxnOp{
Verb: CheckGet,
Check: HealthCheck{Node: "foo", CheckID: "bar"},
},
},
&TxnOp{
Check: &CheckTxnOp{
Verb: CheckGet,
Check: HealthCheck{Node: "foo", CheckID: "baz"},
},
},
&TxnOp{
Check: &CheckTxnOp{
Verb: CheckGet,
Check: HealthCheck{Node: "foo", CheckID: "bor"},
},
},
&TxnOp{
Check: &CheckTxnOp{
Verb: CheckGet,
Check: HealthCheck{Node: "foo", CheckID: "bur"},
},
},
}
ok, ret, _, err := txn.Txn(ops, nil)
if err != nil {
t.Fatalf("err: %v", err)
} else if ok {
t.Fatalf("transaction should have failed")
}
if ret == nil || len(ret.Errors) != 2 || len(ret.Results) != 0 {
t.Fatalf("bad: %v", ret)
}
if ret.Errors[0].OpIndex != 0 ||
!strings.Contains(ret.Errors[0].What, "missing session") ||
!strings.Contains(ret.Errors[1].What, "doesn't exist") {
t.Fatalf("bad: %v", ret.Errors[0])
}
// Now poke in a real session and try again.
ops[0].KV.Session = id
ok, ret, _, err = txn.Txn(ops, nil)
if err != nil {
t.Fatalf("err: %v", err)
} else if !ok {
t.Fatalf("transaction failure")
}
if ret == nil || len(ret.Errors) != 0 || len(ret.Results) != 8 {
t.Fatalf("bad: %v", ret)
}
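// Raft indexes differ from run to run, so the expected values copy
// CreateIndex/ModifyIndex from the actual results before comparing.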
expected := TxnResults{
&TxnResult{
KV: &KVPair{
Key: key,
Session: id,
LockIndex: 1,
CreateIndex: ret.Results[0].KV.CreateIndex,
ModifyIndex: ret.Results[0].KV.ModifyIndex,
Namespace: ret.Results[0].KV.Namespace,
Partition: defaultPartition,
},
},
&TxnResult{
KV: &KVPair{
Key: key,
Session: id,
Value: []byte("test"),
LockIndex: 1,
CreateIndex: ret.Results[1].KV.CreateIndex,
ModifyIndex: ret.Results[1].KV.ModifyIndex,
Namespace: ret.Results[0].KV.Namespace,
Partition: defaultPartition,
},
},
&TxnResult{
Node: &Node{
ID: nodeID,
Node: "foo",
Partition: defaultPartition,
Address: "2.2.2.2",
Datacenter: "dc1",
CreateIndex: ret.Results[2].Node.CreateIndex,
ModifyIndex: ret.Results[2].Node.CreateIndex,
},
},
&TxnResult{
Service: &CatalogService{
ID: "foo1",
CreateIndex: ret.Results[3].Service.CreateIndex,
ModifyIndex: ret.Results[3].Service.CreateIndex,
Partition: defaultPartition,
Namespace: defaultNamespace,
},
},
&TxnResult{
Check: &HealthCheck{
Node: "foo",
CheckID: "bar",
Status: "critical",
Definition: HealthCheckDefinition{
TCP: "1.1.1.1",
Interval: ReadableDuration(5 * time.Second),
IntervalDuration: 5 * time.Second,
Timeout: ReadableDuration(10 * time.Second),
TimeoutDuration: 10 * time.Second,
DeregisterCriticalServiceAfter: ReadableDuration(20 * time.Second),
DeregisterCriticalServiceAfterDuration: 20 * time.Second,
},
Type: "tcp",
Partition: defaultPartition,
Namespace: defaultNamespace,
CreateIndex: ret.Results[4].Check.CreateIndex,
ModifyIndex: ret.Results[4].Check.CreateIndex,
},
},
&TxnResult{
Check: &HealthCheck{
Node: "foo",
CheckID: "baz",
Status: "passing",
Definition: HealthCheckDefinition{
TCP: "2.2.2.2",
Interval: ReadableDuration(40 * time.Second),
IntervalDuration: 40 * time.Second,
Timeout: ReadableDuration(80 * time.Second),
TimeoutDuration: 80 * time.Second,
DeregisterCriticalServiceAfter: ReadableDuration(160 * time.Second),
DeregisterCriticalServiceAfterDuration: 160 * time.Second,
},
Type: "tcp",
Partition: defaultPartition,
Namespace: defaultNamespace,
CreateIndex: ret.Results[4].Check.CreateIndex,
ModifyIndex: ret.Results[4].Check.CreateIndex,
},
},
&TxnResult{
Check: &HealthCheck{
Node: "foo",
CheckID: "bor",
Status: "critical",
Definition: HealthCheckDefinition{
UDP: "1.1.1.1",
Interval: ReadableDuration(5 * time.Second),
IntervalDuration: 5 * time.Second,
Timeout: ReadableDuration(10 * time.Second),
TimeoutDuration: 10 * time.Second,
DeregisterCriticalServiceAfter: ReadableDuration(20 * time.Second),
DeregisterCriticalServiceAfterDuration: 20 * time.Second,
},
Type: "udp",
Partition: defaultPartition,
Namespace: defaultNamespace,
CreateIndex: ret.Results[4].Check.CreateIndex,
ModifyIndex: ret.Results[4].Check.CreateIndex,
},
},
&TxnResult{
Check: &HealthCheck{
Node: "foo",
CheckID: "bur",
Status: "passing",
Definition: HealthCheckDefinition{
UDP: "2.2.2.2",
Interval: ReadableDuration(5 * time.Second),
IntervalDuration: 5 * time.Second,
Timeout: ReadableDuration(10 * time.Second),
TimeoutDuration: 10 * time.Second,
DeregisterCriticalServiceAfter: ReadableDuration(20 * time.Second),
DeregisterCriticalServiceAfterDuration: 20 * time.Second,
},
Type: "udp",
Partition: defaultPartition,
Namespace: defaultNamespace,
CreateIndex: ret.Results[4].Check.CreateIndex,
ModifyIndex: ret.Results[4].Check.CreateIndex,
},
},
}
require.Equal(t, expected, ret.Results)
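// The agent's own catalog entry (tagged addresses and node meta) may be
// populated asynchronously, so retry the read-only transaction until the
// expected results match.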
retry.Run(t, func(r *retry.R) {
// Run a read-only transaction.
ops = TxnOps{
&TxnOp{
KV: &KVTxnOp{
Verb: KVGet,
Key: key,
},
},
&TxnOp{
Node: &NodeTxnOp{
Verb: NodeGet,
Node: Node{ID: s.Config.NodeID, Node: s.Config.NodeName},
},
},
}
ok, ret, _, err = txn.Txn(ops, nil)
if err != nil {
r.Fatalf("err: %v", err)
} else if !ok {
r.Fatalf("transaction failure")
}
expected = TxnResults{
&TxnResult{
KV: &KVPair{
Key: key,
Session: id,
Value: []byte("test"),
LockIndex: 1,
CreateIndex: ret.Results[0].KV.CreateIndex,
ModifyIndex: ret.Results[0].KV.ModifyIndex,
Namespace: ret.Results[0].KV.Namespace,
Partition: defaultPartition,
},
},
&TxnResult{
Node: &Node{
ID: s.Config.NodeID,
Node: s.Config.NodeName,
Partition: defaultPartition,
Address: "127.0.0.1",
Datacenter: "dc1",
TaggedAddresses: map[string]string{
"lan": s.Config.Bind,
"lan_ipv4": s.Config.Bind,
"wan": s.Config.Bind,
"wan_ipv4": s.Config.Bind,
},
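// The agent's node meta includes its Consul version, which the UI nodes list displays.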
Meta: map[string]string{
"consul-network-segment": "",
"consul-version": s.Config.Version,
},
CreateIndex: ret.Results[1].Node.CreateIndex,
ModifyIndex: ret.Results[1].Node.ModifyIndex,
},
},
}
require.Equal(r, expected, ret.Results)
})
// Sanity check using the regular GET API.
kv := c.KV()
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if pair.LockIndex != 1 {
t.Fatalf("Expected lock: %v", pair)
}
if pair.Session != id {
t.Fatalf("Expected lock: %v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
}