// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package consul

import (
	"context"
	"fmt"
	"os"
	"strconv"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/agent/consul/authmethod/testauth"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/structs/aclfilter"
	tokenStore "github.com/hashicorp/consul/agent/token"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/consul/testrpc"
)
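
// TestACLReplication_diffACLPolicies exercises the policy diffing logic used
// by the replication routine: remote entries that are new or whose content
// hash changed become local upserts, while local entries absent from the
// remote listing become local deletes.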
func TestACLReplication_diffACLPolicies(t *testing.T) {
	diffACLPolicies := func(local structs.ACLPolicies, remote structs.ACLPolicyListStubs, lastRemoteIndex uint64) ([]string, []string) {
		tr := &aclPolicyReplicator{local: local, remote: remote}
		res := diffACLType(tr, lastRemoteIndex)
		return res.LocalDeletes, res.LocalUpserts
	}
	local := structs.ACLPolicies{
		&structs.ACLPolicy{
			ID:          "44ef9aec-7654-4401-901b-4d4a8b3c80fc",
			Name:        "policy1",
			Description: "policy1 - already in sync",
			Rules:       `acl = "read"`,
			Datacenters: nil,
			Hash:        []byte{1, 2, 3, 4},
			RaftIndex:   structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
		},
		&structs.ACLPolicy{
			ID:          "8ea41efb-8519-4091-bc91-c42da0cda9ae",
			Name:        "policy2",
			Description: "policy2 - updated but not changed",
			Rules:       `acl = "read"`,
			Datacenters: nil,
			Hash:        []byte{1, 2, 3, 4},
			RaftIndex:   structs.RaftIndex{CreateIndex: 1, ModifyIndex: 25},
		},
		&structs.ACLPolicy{
			ID:          "539f1cb6-40aa-464f-ae66-a900d26bc1b2",
			Name:        "policy3",
			Description: "policy3 - updated and changed",
			Rules:       `acl = "read"`,
			Datacenters: nil,
			Hash:        []byte{1, 2, 3, 4},
			RaftIndex:   structs.RaftIndex{CreateIndex: 1, ModifyIndex: 25},
		},
		&structs.ACLPolicy{
			ID:          "e9d33298-6490-4466-99cb-ba93af64fa76",
			Name:        "policy4",
			Description: "policy4 - needs deleting",
			Rules:       `acl = "read"`,
			Datacenters: nil,
			Hash:        []byte{1, 2, 3, 4},
			RaftIndex:   structs.RaftIndex{CreateIndex: 1, ModifyIndex: 25},
		},
	}

	remote := structs.ACLPolicyListStubs{
		&structs.ACLPolicyListStub{
			ID:          "44ef9aec-7654-4401-901b-4d4a8b3c80fc",
			Name:        "policy1",
			Description: "policy1 - already in sync",
			Datacenters: nil,
			Hash:        []byte{1, 2, 3, 4},
			CreateIndex: 1,
			ModifyIndex: 2,
		},
		&structs.ACLPolicyListStub{
			ID:          "8ea41efb-8519-4091-bc91-c42da0cda9ae",
			Name:        "policy2",
			Description: "policy2 - updated but not changed",
			Datacenters: nil,
			Hash:        []byte{1, 2, 3, 4},
			CreateIndex: 1,
			ModifyIndex: 50,
		},
		&structs.ACLPolicyListStub{
			ID:          "539f1cb6-40aa-464f-ae66-a900d26bc1b2",
			Name:        "policy3",
			Description: "policy3 - updated and changed",
			Datacenters: nil,
			Hash:        []byte{5, 6, 7, 8},
			CreateIndex: 1,
			ModifyIndex: 50,
		},
		&structs.ACLPolicyListStub{
			ID:          "c6e8fffd-cbd9-4ecd-99fe-ab2f200c7926",
			Name:        "policy5",
			Description: "policy5 - needs adding",
			Datacenters: nil,
			Hash:        []byte{1, 2, 3, 4},
			CreateIndex: 1,
			ModifyIndex: 50,
		},
	}

	// Do the full diff. This fully exercises the main body of the loop.
	deletions, updates := diffACLPolicies(local, remote, 28)
	require.Len(t, updates, 2)
	require.ElementsMatch(t, updates, []string{
		"c6e8fffd-cbd9-4ecd-99fe-ab2f200c7926",
		"539f1cb6-40aa-464f-ae66-a900d26bc1b2"})

	require.Len(t, deletions, 1)
	require.Equal(t, "e9d33298-6490-4466-99cb-ba93af64fa76", deletions[0])

	deletions, updates = diffACLPolicies(local, nil, 28)
	require.Len(t, updates, 0)
	require.Len(t, deletions, 4)
	require.ElementsMatch(t, deletions, []string{
		"44ef9aec-7654-4401-901b-4d4a8b3c80fc",
		"8ea41efb-8519-4091-bc91-c42da0cda9ae",
		"539f1cb6-40aa-464f-ae66-a900d26bc1b2",
		"e9d33298-6490-4466-99cb-ba93af64fa76"})

	deletions, updates = diffACLPolicies(nil, remote, 28)
	require.Len(t, deletions, 0)
	require.Len(t, updates, 4)
	require.ElementsMatch(t, updates, []string{
		"44ef9aec-7654-4401-901b-4d4a8b3c80fc",
		"8ea41efb-8519-4091-bc91-c42da0cda9ae",
		"539f1cb6-40aa-464f-ae66-a900d26bc1b2",
		"c6e8fffd-cbd9-4ecd-99fe-ab2f200c7926"})
}
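
// TestACLReplication_diffACLTokens mirrors the policy diff test for tokens,
// additionally covering tokens with empty AccessorIDs (pre-1.4 legacy
// entries), which must be skipped on both sides rather than upserted or
// deleted.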
func TestACLReplication_diffACLTokens(t *testing.T) {
	diffACLTokens := func(
		local structs.ACLTokens,
		remote structs.ACLTokenListStubs,
		lastRemoteIndex uint64,
	) itemDiffResults {
		tr := &aclTokenReplicator{local: local, remote: remote}
		return diffACLType(tr, lastRemoteIndex)
	}

	local := structs.ACLTokens{
		// When a just-upgraded (1.3->1.4+) secondary DC is replicating from an
		// upgraded primary DC (1.4+), the local state for tokens predating the
		// upgrade will lack AccessorIDs.
		//
		// The primary DC will lazily perform the update to assign AccessorIDs,
		// and that new update will come across the wire locally as a new
		// insert.
		//
		// We simulate that scenario here with 'token0' having no AccessorID in
		// the secondary (local) DC and having an AccessorID assigned in the
		// payload retrieved from the primary (remote) DC.
		&structs.ACLToken{
			AccessorID:  "",
			SecretID:    "5128289f-c22c-4d32-936e-7662443f1a55",
			Description: "token0 - old and not yet upgraded",
			Hash:        []byte{1, 2, 3, 4},
			RaftIndex:   structs.RaftIndex{CreateIndex: 1, ModifyIndex: 3},
		},
		&structs.ACLToken{
			AccessorID:  "44ef9aec-7654-4401-901b-4d4a8b3c80fc",
			SecretID:    "44ef9aec-7654-4401-901b-4d4a8b3c80fc",
			Description: "token1 - already in sync",
			Hash:        []byte{1, 2, 3, 4},
			RaftIndex:   structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
		},
		&structs.ACLToken{
			AccessorID:  "8ea41efb-8519-4091-bc91-c42da0cda9ae",
			SecretID:    "8ea41efb-8519-4091-bc91-c42da0cda9ae",
			Description: "token2 - updated but not changed",
			Hash:        []byte{1, 2, 3, 4},
			RaftIndex:   structs.RaftIndex{CreateIndex: 1, ModifyIndex: 25},
		},
		&structs.ACLToken{
			AccessorID:  "539f1cb6-40aa-464f-ae66-a900d26bc1b2",
			SecretID:    "539f1cb6-40aa-464f-ae66-a900d26bc1b2",
			Description: "token3 - updated and changed",
			Hash:        []byte{1, 2, 3, 4},
			RaftIndex:   structs.RaftIndex{CreateIndex: 1, ModifyIndex: 25},
		},
		&structs.ACLToken{
			AccessorID:  "e9d33298-6490-4466-99cb-ba93af64fa76",
			SecretID:    "e9d33298-6490-4466-99cb-ba93af64fa76",
			Description: "token4 - needs deleting",
			Hash:        []byte{1, 2, 3, 4},
			RaftIndex:   structs.RaftIndex{CreateIndex: 1, ModifyIndex: 25},
		},
	}

	remote := structs.ACLTokenListStubs{
		&structs.ACLTokenListStub{
			AccessorID: "72fac6a3-a014-41c8-9cb2-8d9a5e935f3d",
			//SecretID: "5128289f-c22c-4d32-936e-7662443f1a55", (formerly)
			Description: "token0 - old and not yet upgraded locally",
			Hash:        []byte{1, 2, 3, 4},
			CreateIndex: 1,
			ModifyIndex: 3,
		},
		&structs.ACLTokenListStub{
			AccessorID:  "44ef9aec-7654-4401-901b-4d4a8b3c80fc",
			Description: "token1 - already in sync",
			Hash:        []byte{1, 2, 3, 4},
			CreateIndex: 1,
			ModifyIndex: 2,
		},
		&structs.ACLTokenListStub{
			AccessorID:  "8ea41efb-8519-4091-bc91-c42da0cda9ae",
			Description: "token2 - updated but not changed",
			Hash:        []byte{1, 2, 3, 4},
			CreateIndex: 1,
			ModifyIndex: 50,
		},
		&structs.ACLTokenListStub{
			AccessorID:  "539f1cb6-40aa-464f-ae66-a900d26bc1b2",
			Description: "token3 - updated and changed",
			Hash:        []byte{5, 6, 7, 8},
			CreateIndex: 1,
			ModifyIndex: 50,
		},
		&structs.ACLTokenListStub{
			AccessorID:  "c6e8fffd-cbd9-4ecd-99fe-ab2f200c7926",
			Description: "token5 - needs adding",
			Hash:        []byte{1, 2, 3, 4},
			CreateIndex: 1,
			ModifyIndex: 50,
		},
		// When a 1.4+ secondary DC is replicating from a 1.4+ primary DC,
		// tokens created using the legacy APIs will not initially have
		// AccessorIDs assigned. That assignment is lazy (but in quick
		// succession).
		//
		// The secondary (local) will see these in the api response as a stub
		// with "" as the AccessorID.
		//
		// We simulate that here to verify that the secondary does the right
		// thing by skipping them until it sees them with nonempty AccessorIDs.
		&structs.ACLTokenListStub{
			AccessorID:  "",
			Description: "token6 - pending async AccessorID assignment",
			Hash:        []byte{1, 2, 3, 4},
			CreateIndex: 51,
			ModifyIndex: 51,
		},
	}

	// Do the full diff. This fully exercises the main body of the loop.
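	// token0 lacks an AccessorID locally and token6 lacks one remotely, so
	// exactly one item is skipped on each side of the diff.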
	t.Run("full-diff", func(t *testing.T) {
		res := diffACLTokens(local, remote, 28)
		require.Equal(t, 1, res.LocalSkipped)
		require.Equal(t, 1, res.RemoteSkipped)
		require.Len(t, res.LocalUpserts, 3)
		require.ElementsMatch(t, res.LocalUpserts, []string{
			"72fac6a3-a014-41c8-9cb2-8d9a5e935f3d",
			"c6e8fffd-cbd9-4ecd-99fe-ab2f200c7926",
			"539f1cb6-40aa-464f-ae66-a900d26bc1b2"})

		require.Len(t, res.LocalDeletes, 1)
		require.Equal(t, "e9d33298-6490-4466-99cb-ba93af64fa76", res.LocalDeletes[0])
	})

	t.Run("only-local", func(t *testing.T) {
		res := diffACLTokens(local, nil, 28)
		require.Equal(t, 1, res.LocalSkipped)
		require.Equal(t, 0, res.RemoteSkipped)
		require.Len(t, res.LocalUpserts, 0)
		require.Len(t, res.LocalDeletes, 4)
		require.ElementsMatch(t, res.LocalDeletes, []string{
			"44ef9aec-7654-4401-901b-4d4a8b3c80fc",
			"8ea41efb-8519-4091-bc91-c42da0cda9ae",
			"539f1cb6-40aa-464f-ae66-a900d26bc1b2",
			"e9d33298-6490-4466-99cb-ba93af64fa76"})
	})

	t.Run("only-remote", func(t *testing.T) {
		res := diffACLTokens(nil, remote, 28)
		require.Equal(t, 0, res.LocalSkipped)
		require.Equal(t, 1, res.RemoteSkipped)
		require.Len(t, res.LocalDeletes, 0)
		require.Len(t, res.LocalUpserts, 5)
		require.ElementsMatch(t, res.LocalUpserts, []string{
			"72fac6a3-a014-41c8-9cb2-8d9a5e935f3d",
			"44ef9aec-7654-4401-901b-4d4a8b3c80fc",
			"8ea41efb-8519-4091-bc91-c42da0cda9ae",
			"539f1cb6-40aa-464f-ae66-a900d26bc1b2",
			"c6e8fffd-cbd9-4ecd-99fe-ab2f200c7926"})
	})
}
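
// TestACLReplication_Tokens stands up a two-datacenter cluster with token
// replication enabled in dc2 and verifies end to end that global tokens
// created, updated, and deleted in the primary converge in the secondary,
// while local tokens on either side are never replicated or removed.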
func TestACLReplication_Tokens(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.PrimaryDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLInitialManagementToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLTokenReplication = true
		c.ACLReplicationRate = 100
		c.ACLReplicationBurst = 100
		c.ACLReplicationApplyLimit = 1000000
	})
	s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig)
	testrpc.WaitForLeader(t, s2.RPC, "dc2")
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join.
	joinWAN(t, s2, s1)
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s1.RPC, "dc2")
	waitForNewACLReplication(t, s2, structs.ACLReplicateTokens, 1, 1, 0)

	// Create a bunch of new tokens and policies
	var tokens structs.ACLTokens
	for i := 0; i < 50; i++ {
		arg := structs.ACLTokenSetRequest{
			Datacenter: "dc1",
			ACLToken: structs.ACLToken{
				Description: fmt.Sprintf("token-%d", i),
				Policies: []structs.ACLTokenPolicyLink{
					{
						ID: structs.ACLPolicyGlobalManagementID,
					},
				},
				Local: false,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var token structs.ACLToken
		require.NoError(t, s1.RPC(context.Background(), "ACL.TokenSet", &arg, &token))
		tokens = append(tokens, &token)
	}

	// Create an auth method in the primary that can create global tokens
	// so that we ensure that these replicate correctly.
	testSessionID := testauth.StartSession()
	defer testauth.ResetSession(testSessionID)
	testauth.InstallSessionToken(testSessionID, "fake-token", "default", "demo", "abc123")
	method1, err := upsertTestCustomizedAuthMethod(client, "root", "dc1", func(method *structs.ACLAuthMethod) {
		method.TokenLocality = "global"
		method.Config = map[string]interface{}{
			"SessionID": testSessionID,
		}
	})
	require.NoError(t, err)
	_, err = upsertTestBindingRule(client, "root", "dc1", method1.Name, "", structs.BindingRuleBindTypeService, "demo")
	require.NoError(t, err)

	// Create one token via this process.
	methodToken := structs.ACLToken{}
	require.NoError(t, s1.RPC(context.Background(), "ACL.Login", &structs.ACLLoginRequest{
		Auth: &structs.ACLLoginParams{
			AuthMethod:  method1.Name,
			BearerToken: "fake-token",
		},
		Datacenter: "dc1",
	}, &methodToken))
	tokens = append(tokens, &methodToken)
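
	// checkSame compares the primary's global tokens against the secondary's
	// and asserts that s2's replication status has caught up to the primary's
	// token index.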
	checkSame := func(t *retry.R) {
		// only account for global tokens - local tokens shouldn't be replicated
		index, remote, err := s1.fsm.State().ACLTokenList(nil, false, true, "", "", "", nil, nil)
		require.NoError(t, err)
		_, local, err := s2.fsm.State().ACLTokenList(nil, false, true, "", "", "", nil, nil)
		require.NoError(t, err)

		require.Len(t, local, len(remote))
		for i, token := range remote {
			require.Equal(t, token.Hash, local[i].Hash)

			if token.AccessorID == methodToken.AccessorID {
				require.Equal(t, method1.Name, token.AuthMethod)
				require.Equal(t, method1.Name, local[i].AuthMethod)
			}
		}

		s2.aclReplicationStatusLock.RLock()
		status := s2.aclReplicationStatus
		s2.aclReplicationStatusLock.RUnlock()

		require.True(t, status.Enabled)
		require.True(t, status.Running)
		require.Equal(t, status.ReplicationType, structs.ACLReplicateTokens)
		require.Equal(t, status.ReplicatedTokenIndex, index)
		require.Equal(t, status.SourceDatacenter, "dc1")
	}
	// Wait for the replica to converge.
	retry.Run(t, func(r *retry.R) {
		checkSame(r)
	})

	// Wait for s2 global-management policy
	retry.Run(t, func(r *retry.R) {
		_, policy, err := s2.fsm.State().ACLPolicyGetByID(nil, structs.ACLPolicyGlobalManagementID, nil)
		require.NoError(r, err)
		require.NotNil(r, policy)
	})

	// add some local tokens to the secondary DC
	// these shouldn't be deleted by replication
	for i := 0; i < 50; i++ {
		arg := structs.ACLTokenSetRequest{
			Datacenter: "dc2",
			ACLToken: structs.ACLToken{
				Description: fmt.Sprintf("token-%d", i),
				Policies: []structs.ACLTokenPolicyLink{
					{
						ID: structs.ACLPolicyGlobalManagementID,
					},
				},
				Local: true,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var token structs.ACLToken
		require.NoError(t, s2.RPC(context.Background(), "ACL.TokenSet", &arg, &token))
	}

	// add some local tokens to the primary DC
	// these shouldn't be replicated to the secondary DC
	for i := 0; i < 50; i++ {
		arg := structs.ACLTokenSetRequest{
			Datacenter: "dc1",
			ACLToken: structs.ACLToken{
				Description: fmt.Sprintf("token-%d", i),
				Policies: []structs.ACLTokenPolicyLink{
					{
						ID: structs.ACLPolicyGlobalManagementID,
					},
				},
				Local: true,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var token structs.ACLToken
		require.NoError(t, s1.RPC(context.Background(), "ACL.TokenSet", &arg, &token))
	}

	// Update those other tokens
	for i := 0; i < 50; i++ {
		arg := structs.ACLTokenSetRequest{
			Datacenter: "dc1",
			ACLToken: structs.ACLToken{
				AccessorID:  tokens[i].AccessorID,
				SecretID:    tokens[i].SecretID,
				Description: fmt.Sprintf("token-%d-modified", i),
				Policies: []structs.ACLTokenPolicyLink{
					{
						ID: structs.ACLPolicyGlobalManagementID,
					},
				},
				Local: false,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var token structs.ACLToken
		require.NoError(t, s1.RPC(context.Background(), "ACL.TokenSet", &arg, &token))
	}

	// Wait for the replica to converge.
	// this time it also verifies the local tokens from the primary were not replicated.
	retry.Run(t, func(r *retry.R) {
		checkSame(r)
	})

	// verify dc2 local tokens didn't get blown away
	_, local, err := s2.fsm.State().ACLTokenList(nil, true, false, "", "", "", nil, nil)
	require.NoError(t, err)
	require.Len(t, local, 50)

	for _, token := range tokens {
		arg := structs.ACLTokenDeleteRequest{
			Datacenter:   "dc1",
			TokenID:      token.AccessorID,
			WriteRequest: structs.WriteRequest{Token: "root"},
		}

		var dontCare string
		require.NoError(t, s1.RPC(context.Background(), "ACL.TokenDelete", &arg, &dontCare))
	}

	// Wait for the replica to converge.
	retry.Run(t, func(r *retry.R) {
		checkSame(r)
	})
}
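
// TestACLReplication_Policies is the policy-only analogue of the token test:
// with token replication disabled, policy replication alone must keep the
// secondary's policy set converged with the primary through creates, updates,
// and deletes.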
func TestACLReplication_Policies(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.PrimaryDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLInitialManagementToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLTokenReplication = false
		c.ACLReplicationRate = 100
		c.ACLReplicationBurst = 100
		c.ACLReplicationApplyLimit = 1000000
	})
	s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig)
	testrpc.WaitForLeader(t, s2.RPC, "dc2")
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join.
	joinWAN(t, s2, s1)
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s1.RPC, "dc2", testrpc.WithToken("root"))
	waitForNewACLReplication(t, s2, structs.ACLReplicatePolicies, 1, 0, 0)

	// Create a bunch of new policies
	var policies structs.ACLPolicies
	for i := 0; i < 50; i++ {
		arg := structs.ACLPolicySetRequest{
			Datacenter: "dc1",
			Policy: structs.ACLPolicy{
				Name:        fmt.Sprintf("token-%d", i),
				Description: fmt.Sprintf("token-%d", i),
				Rules:       fmt.Sprintf(`service "app-%d" { policy = "read" }`, i),
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var policy structs.ACLPolicy
		require.NoError(t, s1.RPC(context.Background(), "ACL.PolicySet", &arg, &policy))
		policies = append(policies, &policy)
	}
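
	// checkSame compares the full policy list in the primary's state store
	// against the secondary's and asserts that s2's replication status has
	// caught up to the primary's policy index.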
	checkSame := func(t *retry.R) {
		// ensure the full policy sets in both datacenters match
		index, remote, err := s1.fsm.State().ACLPolicyList(nil, nil)
		require.NoError(t, err)
		_, local, err := s2.fsm.State().ACLPolicyList(nil, nil)
		require.NoError(t, err)

		require.Len(t, local, len(remote))
		for i, policy := range remote {
			require.Equal(t, policy.Hash, local[i].Hash)
		}

		s2.aclReplicationStatusLock.RLock()
		status := s2.aclReplicationStatus
		s2.aclReplicationStatusLock.RUnlock()

		require.True(t, status.Enabled)
		require.True(t, status.Running)
		require.Equal(t, status.ReplicationType, structs.ACLReplicatePolicies)
		require.Equal(t, status.ReplicatedIndex, index)
		require.Equal(t, status.SourceDatacenter, "dc1")
	}
	// Wait for the replica to converge.
	retry.Run(t, func(r *retry.R) {
		checkSame(r)
	})

	// Update those policies
	for i := 0; i < 50; i++ {
		arg := structs.ACLPolicySetRequest{
			Datacenter: "dc1",
			Policy: structs.ACLPolicy{
				ID:          policies[i].ID,
				Name:        fmt.Sprintf("token-%d-modified", i),
				Description: fmt.Sprintf("token-%d-modified", i),
				Rules:       policies[i].Rules,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var policy structs.ACLPolicy
		require.NoError(t, s1.RPC(context.Background(), "ACL.PolicySet", &arg, &policy))
	}

	// Wait for the replica to converge.
	// this time it verifies that the policy updates were replicated.
	retry.Run(t, func(r *retry.R) {
		checkSame(r)
	})

	for _, policy := range policies {
		arg := structs.ACLPolicyDeleteRequest{
			Datacenter:   "dc1",
			PolicyID:     policy.ID,
			WriteRequest: structs.WriteRequest{Token: "root"},
		}

		var dontCare string
		require.NoError(t, s1.RPC(context.Background(), "ACL.PolicyDelete", &arg, &dontCare))
	}

	// Wait for the replica to converge.
	retry.Run(t, func(r *retry.R) {
		checkSame(r)
	})
}
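
// TestACLReplication_TokensRedacted verifies the failure mode where the
// secondary's replication token loses acl:write in the primary: the primary
// redacts token secrets, and the secondary must refuse to persist the
// redacted copies, stalling replication and reporting the error in its
// status.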
func TestACLReplication_TokensRedacted(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.PrimaryDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLInitialManagementToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	client := rpcClient(t, s1)
	defer client.Close()

	// Create the ACL Write Policy
	policyArg := structs.ACLPolicySetRequest{
		Datacenter: "dc1",
		Policy: structs.ACLPolicy{
			Name:        "token-replication-redacted",
			Description: "token-replication-redacted",
			Rules:       `acl = "write"`,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var policy structs.ACLPolicy
	require.NoError(t, s1.RPC(context.Background(), "ACL.PolicySet", &policyArg, &policy))

	// Create the dc2 replication token
	tokenArg := structs.ACLTokenSetRequest{
		Datacenter: "dc1",
		ACLToken: structs.ACLToken{
			Description: "dc2-replication",
			Policies: []structs.ACLTokenPolicyLink{
				{
					ID: policy.ID,
				},
			},
			Local: false,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}

	var token structs.ACLToken
	require.NoError(t, s1.RPC(context.Background(), "ACL.TokenSet", &tokenArg, &token))

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLTokenReplication = true
		c.ACLReplicationRate = 100
		c.ACLReplicationBurst = 100
		c.ACLReplicationApplyLimit = 1000000
	})
	s2.tokens.UpdateReplicationToken(token.SecretID, tokenStore.TokenSourceConfig)
	testrpc.WaitForLeader(t, s2.RPC, "dc2")
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join.
	joinWAN(t, s2, s1)
	testrpc.WaitForLeader(t, s2.RPC, "dc2")
	testrpc.WaitForLeader(t, s2.RPC, "dc1")

	// ensures replication is working ok
	retry.Run(t, func(r *retry.R) {
		var tokenResp structs.ACLTokenResponse
		req := structs.ACLTokenGetRequest{
			Datacenter:   "dc2",
			TokenID:      "root",
			TokenIDType:  structs.ACLTokenSecret,
			QueryOptions: structs.QueryOptions{Token: "root"},
		}
		err := s2.RPC(context.Background(), "ACL.TokenRead", &req, &tokenResp)
		require.NoError(r, err)
		require.NotNil(r, tokenResp.Token)
		require.Equal(r, "root", tokenResp.Token.SecretID)

		var status structs.ACLReplicationStatus
		statusReq := structs.DCSpecificRequest{
			Datacenter: "dc2",
		}
		require.NoError(r, s2.RPC(context.Background(), "ACL.ReplicationStatus", &statusReq, &status))
		// ensures that token replication has made progress
		require.True(r, status.ReplicatedTokenIndex > 0, "ReplicatedTokenIndex not greater than 0")
	})

	// modify the replication policy so that it only grants read privileges
	policyArg = structs.ACLPolicySetRequest{
		Datacenter: "dc1",
		Policy: structs.ACLPolicy{
			ID:          policy.ID,
			Name:        "token-replication-redacted",
			Description: "token-replication-redacted",
			Rules:       `acl = "read"`,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	require.NoError(t, s1.RPC(context.Background(), "ACL.PolicySet", &policyArg, &policy))

	// Create another token so that replication will attempt to read it.
	tokenArg = structs.ACLTokenSetRequest{
		Datacenter: "dc1",
		ACLToken: structs.ACLToken{
			Description: "management",
			Policies: []structs.ACLTokenPolicyLink{
				{
					ID: structs.ACLPolicyGlobalManagementID,
				},
			},
			Local: false,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var token2 structs.ACLToken

	// record the time right before we are touching the token
	minErrorTime := time.Now()
	require.NoError(t, s1.RPC(context.Background(), "ACL.TokenSet", &tokenArg, &token2))
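
	// With only acl:read, the primary now hands back the new token with its
	// secret redacted. The secondary must treat that as an error rather than
	// storing the redacted copy, so the token never appears in dc2 and the
	// replication status records the failure.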
	retry.Run(t, func(r *retry.R) {
		var tokenResp structs.ACLTokenResponse
		req := structs.ACLTokenGetRequest{
			Datacenter:   "dc2",
			TokenID:      aclfilter.RedactedToken,
			TokenIDType:  structs.ACLTokenSecret,
			QueryOptions: structs.QueryOptions{Token: aclfilter.RedactedToken},
		}
		err := s2.RPC(context.Background(), "ACL.TokenRead", &req, &tokenResp)
		require.Error(r, err)
		require.ErrorContains(r, err, "token does not exist")
		require.Nil(r, tokenResp.Token)

		var status structs.ACLReplicationStatus
		statusReq := structs.DCSpecificRequest{
			Datacenter: "dc2",
		}
		require.NoError(r, s2.RPC(context.Background(), "ACL.ReplicationStatus", &statusReq, &status))
		// ensures that tokens are not being synced
		require.True(r, status.ReplicatedTokenIndex < token2.CreateIndex, "ReplicatedTokenIndex is not less than token2's create index")
		// ensures that token replication is erroring
		require.True(r, status.LastError.After(minErrorTime), "Replication LastError not after the minErrorTime")
		require.Equal(r, status.LastErrorMessage, "failed to retrieve unredacted tokens - replication token in use does not grant acl:write")
	})
}
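
// TestACLReplication_AllTypes verifies that token replication carries
// policies, roles, and tokens together: batches of all three kinds created,
// extended, and partially deleted in the primary must converge in the
// secondary, with local tokens excluded throughout.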
func TestACLReplication_AllTypes(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.PrimaryDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLInitialManagementToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLTokenReplication = true
		c.ACLReplicationRate = 100
		c.ACLReplicationBurst = 25
		c.ACLReplicationApplyLimit = 1000000
	})
	s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig)
	testrpc.WaitForLeader(t, s2.RPC, "dc2")
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join.
	joinWAN(t, s2, s1)
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s1.RPC, "dc2")
	waitForNewACLReplication(t, s2, structs.ACLReplicateTokens, 1, 1, 0)

	const (
		numItems             = 50
		numItemsThatAreLocal = 10
	)

	// Create some data.
	policyIDs, roleIDs, tokenIDs := createACLTestData(t, s1, "b1", numItems, numItemsThatAreLocal)

	checkSameTokens := func(t *retry.R) {
		// only account for global tokens - local tokens shouldn't be replicated
		index, remote, err := s1.fsm.State().ACLTokenList(nil, false, true, "", "", "", nil, nil)
		require.NoError(t, err)
		// Query for all of them, so that we can prove that no local tokens snuck in.
		_, local, err := s2.fsm.State().ACLTokenList(nil, true, true, "", "", "", nil, nil)
		require.NoError(t, err)

		require.Len(t, remote, len(local))
		for i, token := range remote {
			require.Equal(t, token.Hash, local[i].Hash)
		}

		s2.aclReplicationStatusLock.RLock()
		status := s2.aclReplicationStatus
		s2.aclReplicationStatusLock.RUnlock()

		require.True(t, status.Enabled)
		require.True(t, status.Running)
		require.Equal(t, status.ReplicationType, structs.ACLReplicateTokens)
		require.Equal(t, status.ReplicatedTokenIndex, index)
		require.Equal(t, status.SourceDatacenter, "dc1")
	}
	checkSamePolicies := func(t *retry.R) {
		index, remote, err := s1.fsm.State().ACLPolicyList(nil, nil)
		require.NoError(t, err)
		_, local, err := s2.fsm.State().ACLPolicyList(nil, nil)
		require.NoError(t, err)

		require.Len(t, remote, len(local))
		for i, policy := range remote {
			require.Equal(t, policy.Hash, local[i].Hash)
		}

		s2.aclReplicationStatusLock.RLock()
		status := s2.aclReplicationStatus
		s2.aclReplicationStatusLock.RUnlock()

		require.True(t, status.Enabled)
		require.True(t, status.Running)
		require.Equal(t, status.ReplicationType, structs.ACLReplicateTokens)
		require.Equal(t, status.ReplicatedIndex, index)
		require.Equal(t, status.SourceDatacenter, "dc1")
	}
	checkSameRoles := func(t *retry.R) {
		index, remote, err := s1.fsm.State().ACLRoleList(nil, "", nil)
		require.NoError(t, err)
		_, local, err := s2.fsm.State().ACLRoleList(nil, "", nil)
		require.NoError(t, err)

		require.Len(t, remote, len(local))
		for i, role := range remote {
			require.Equal(t, role.Hash, local[i].Hash)
		}

		s2.aclReplicationStatusLock.RLock()
		status := s2.aclReplicationStatus
		s2.aclReplicationStatusLock.RUnlock()

		require.True(t, status.Enabled)
		require.True(t, status.Running)
		require.Equal(t, status.ReplicationType, structs.ACLReplicateTokens)
		require.Equal(t, status.ReplicatedRoleIndex, index)
		require.Equal(t, status.SourceDatacenter, "dc1")
	}
	checkSame := func(t *retry.R) {
		checkSameTokens(t)
		checkSamePolicies(t)
		checkSameRoles(t)
	}
	// Wait for the replica to converge.
	retry.Run(t, func(r *retry.R) {
		checkSame(r)
	})

	// Create additional data to replicate.
	_, _, _ = createACLTestData(t, s1, "b2", numItems, numItemsThatAreLocal)

	// Wait for the replica to converge.
	retry.Run(t, func(r *retry.R) {
		checkSame(r)
	})

	// Delete one piece of each type of data from batch 1.
	const itemToDelete = numItems - 1
	{
		id := tokenIDs[itemToDelete]

		arg := structs.ACLTokenDeleteRequest{
			Datacenter:   "dc1",
			TokenID:      id,
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var dontCare string
		if err := s1.RPC(context.Background(), "ACL.TokenDelete", &arg, &dontCare); err != nil {
			t.Fatalf("err: %v", err)
		}
	}
	{
		id := roleIDs[itemToDelete]

		arg := structs.ACLRoleDeleteRequest{
			Datacenter:   "dc1",
			RoleID:       id,
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var dontCare string
		if err := s1.RPC(context.Background(), "ACL.RoleDelete", &arg, &dontCare); err != nil {
			t.Fatalf("err: %v", err)
		}
	}
	{
		id := policyIDs[itemToDelete]

		arg := structs.ACLPolicyDeleteRequest{
			Datacenter:   "dc1",
			PolicyID:     id,
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var dontCare string
		if err := s1.RPC(context.Background(), "ACL.PolicyDelete", &arg, &dontCare); err != nil {
			t.Fatalf("err: %v", err)
		}
	}
	// Wait for the replica to converge.
	retry.Run(t, func(r *retry.R) {
		checkSame(r)
	})
}
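
// createACLTestData populates the primary with numObjects policies, roles,
// and tokens (one token per policy/role pair), marking the first
// numItemsThatAreLocal tokens as local so tests can confirm they are not
// replicated. It returns the created IDs in creation order.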
func createACLTestData(t *testing.T, srv *Server, namePrefix string, numObjects, numItemsThatAreLocal int) (policyIDs, roleIDs, tokenIDs []string) {
	require.True(t, numItemsThatAreLocal <= numObjects, "numItemsThatAreLocal <= numObjects")

	// Create some policies.
	for i := 0; i < numObjects; i++ {
		str := strconv.Itoa(i)
		arg := structs.ACLPolicySetRequest{
			Datacenter: "dc1",
			Policy: structs.ACLPolicy{
				Name:        namePrefix + "-policy-" + str,
				Description: namePrefix + "-policy " + str,
				Rules:       testACLPolicyNew,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var out structs.ACLPolicy
		if err := srv.RPC(context.Background(), "ACL.PolicySet", &arg, &out); err != nil {
			t.Fatalf("err: %v", err)
		}
		policyIDs = append(policyIDs, out.ID)
	}

	// Create some roles.
	for i := 0; i < numObjects; i++ {
		str := strconv.Itoa(i)
		arg := structs.ACLRoleSetRequest{
			Datacenter: "dc1",
			Role: structs.ACLRole{
				Name:        namePrefix + "-role-" + str,
				Description: namePrefix + "-role " + str,
				Policies: []structs.ACLRolePolicyLink{
					{ID: policyIDs[i]},
				},
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var out structs.ACLRole
		if err := srv.RPC(context.Background(), "ACL.RoleSet", &arg, &out); err != nil {
			t.Fatalf("err: %v", err)
		}
		roleIDs = append(roleIDs, out.ID)
	}

	// Create a bunch of new tokens.
	for i := 0; i < numObjects; i++ {
		str := strconv.Itoa(i)
		arg := structs.ACLTokenSetRequest{
			Datacenter: "dc1",
			ACLToken: structs.ACLToken{
				Description: namePrefix + "-token " + str,
				Policies: []structs.ACLTokenPolicyLink{
					{ID: policyIDs[i]},
				},
				Roles: []structs.ACLTokenRoleLink{
					{ID: roleIDs[i]},
				},
				Local: (i < numItemsThatAreLocal),
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var out structs.ACLToken
		if err := srv.RPC(context.Background(), "ACL.TokenSet", &arg, &out); err != nil {
			t.Fatalf("err: %v", err)
		}
		tokenIDs = append(tokenIDs, out.AccessorID)
	}

	return policyIDs, roleIDs, tokenIDs
}