open-nomad/nomad/status_endpoint_test.go
Seth Hoenig f0c3dca49c tests: swap lib/freeport for tweaked helper/freeport
Copy the updated version of freeport (sdk/freeport), and tweak it for use
in Nomad tests. This means staying below port 10000 to avoid conflicts with
the lib/freeport that is still transitively used by the old version of
consul that we vendor. Also provide implementations to find ephemeral ports
on macOS and Windows environments.

Ports acquired through freeport are supposed to be returned to freeport,
which this change now implements. Many tests are modified to include calls
to a cleanup function for their Server objects.

This should help quite a bit with some flaky tests, but not all of them.
Our port problems will not go away completely until we upgrade our vendor
version of consul. With Go modules, we'll probably do a 'replace' to swap
out other copies of freeport with the one now in 'nomad/helper/freeport'.
2019-12-09 08:37:32 -06:00
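
A hypothetical sketch of the take/return pattern the commit describes,
assuming the copied helper keeps sdk/freeport's MustTake/Return API (the
names below are assumptions, not taken from this commit):

package nomad

import (
	"testing"

	"github.com/hashicorp/nomad/helper/freeport"
)

// TestPortLifecycle is a sketch only: it reserves ports from freeport and
// defers returning them, mirroring the cleanup pattern added to these tests.
func TestPortLifecycle(t *testing.T) {
	ports := freeport.MustTake(2) // reserve two free ports (kept below 10000)
	defer freeport.Return(ports)  // return them so later tests can reuse them

	_ = ports // ... bind a test server to ports[0] and ports[1] ...
}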

package nomad

import (
	"testing"

	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
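
// TestStatusVersion asserts that the Status.Version RPC reports the server's
// build string and the protocol and API versions it supports.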
func TestStatusVersion(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)

	arg := &structs.GenericRequest{
		QueryOptions: structs.QueryOptions{
			Region:     "global",
			AllowStale: true,
		},
	}

	var out structs.VersionResponse
	if err := msgpackrpc.CallWithCodec(codec, "Status.Version", arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	if out.Build == "" {
		t.Fatalf("bad: %#v", out)
	}
	if out.Versions[structs.ProtocolVersion] != ProtocolVersionMax {
		t.Fatalf("bad: %#v", out)
	}
	if out.Versions[structs.APIMajorVersion] != structs.ApiMajorVersion {
		t.Fatalf("bad: %#v", out)
	}
	if out.Versions[structs.APIMinorVersion] != structs.ApiMinorVersion {
		t.Fatalf("bad: %#v", out)
	}
}
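
// TestStatusPing asserts that the Status.Ping RPC responds without error.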
func TestStatusPing(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)

	arg := struct{}{}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Status.Ping", arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}
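
// TestStatusLeader asserts that the Status.Leader RPC reports a non-empty
// leader address once an election has completed.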
func TestStatusLeader(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	arg := &structs.GenericRequest{
		QueryOptions: structs.QueryOptions{
			Region:     "global",
			AllowStale: true,
		},
	}

	var leader string
	if err := msgpackrpc.CallWithCodec(codec, "Status.Leader", arg, &leader); err != nil {
		t.Fatalf("err: %v", err)
	}
	if leader == "" {
		t.Fatalf("unexpected leader: %v", leader)
	}
}
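
// TestStatusPeers asserts that the Status.Peers RPC reports the single peer
// of a one-server cluster.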
func TestStatusPeers(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)

	arg := &structs.GenericRequest{
		QueryOptions: structs.QueryOptions{
			Region:     "global",
			AllowStale: true,
		},
	}

	var peers []string
	if err := msgpackrpc.CallWithCodec(codec, "Status.Peers", arg, &peers); err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(peers) != 1 {
		t.Fatalf("no peers: %v", peers)
	}
}
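
// TestStatusMembers asserts that the Status.Members RPC reports the gossip
// members of the cluster.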
func TestStatusMembers(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	assert := assert.New(t)

	arg := &structs.GenericRequest{
		QueryOptions: structs.QueryOptions{
			Region:     "global",
			AllowStale: true,
		},
	}

	var out structs.ServerMembersResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Status.Members", arg, &out))
	assert.Len(out.Members, 1)
}
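
// TestStatusMembers_ACL asserts that when ACLs are enabled, Status.Members
// rejects missing or insufficient tokens and accepts node:read and
// management tokens.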
func TestStatusMembers_ACL(t *testing.T) {
	t.Parallel()

	s1, root, cleanupS1 := TestACLServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	assert := assert.New(t)
	state := s1.fsm.State()

	// Create the namespace policy and tokens
	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.AgentPolicy(acl.PolicyRead))

	arg := &structs.GenericRequest{
		QueryOptions: structs.QueryOptions{
			Region:     "global",
			AllowStale: true,
		},
	}

	// Try without a token and expect failure
	{
		var out structs.ServerMembersResponse
		err := msgpackrpc.CallWithCodec(codec, "Status.Members", arg, &out)
		assert.NotNil(err)
		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with an invalid token and expect failure
	{
		arg.AuthToken = invalidToken.SecretID
		var out structs.ServerMembersResponse
		err := msgpackrpc.CallWithCodec(codec, "Status.Members", arg, &out)
		assert.NotNil(err)
		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a valid token
	{
		arg.AuthToken = validToken.SecretID
		var out structs.ServerMembersResponse
		assert.Nil(msgpackrpc.CallWithCodec(codec, "Status.Members", arg, &out))
		assert.Len(out.Members, 1)
	}

	// Try with a management token
	{
		arg.AuthToken = root.SecretID
		var out structs.ServerMembersResponse
		assert.Nil(msgpackrpc.CallWithCodec(codec, "Status.Members", arg, &out))
		assert.Len(out.Members, 1)
	}
}
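
// TestStatus_HasClientConn asserts that the Status.HasNodeConn RPC requires
// a node ID and reports whether the server holds an open connection for
// that node.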
func TestStatus_HasClientConn(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	require := require.New(t)

	arg := &structs.NodeSpecificRequest{
		QueryOptions: structs.QueryOptions{
			Region:     "global",
			AllowStale: true,
		},
	}

	// Try without setting a node id
	var out structs.NodeConnQueryResponse
	require.NotNil(msgpackrpc.CallWithCodec(codec, "Status.HasNodeConn", arg, &out))

	// Set a bad node id
	arg.NodeID = uuid.Generate()
	var out2 structs.NodeConnQueryResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Status.HasNodeConn", arg, &out2))
	require.False(out2.Connected)

	// Create a connection on that node
	s1.addNodeConn(&RPCContext{
		NodeID: arg.NodeID,
	})
	var out3 structs.NodeConnQueryResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Status.HasNodeConn", arg, &out3))
	require.True(out3.Connected)
	require.NotZero(out3.Established)
}