Merge pull request #11541 from hashicorp/secondary-exports

OSS Backport: Prevent writing partition-exports entries to secondary DCs
Freddy 2021-11-10 09:49:53 -07:00 committed by GitHub
commit b5c46f19f4
5 changed files with 285 additions and 1 deletion

.changelog/11541.txt Normal file

@@ -0,0 +1,3 @@
```release-note:improvement
partitions: Prevent writing partition-exports entries to secondary DCs.
```
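To make the new behavior concrete, here is a hypothetical test-style sketch (the test name and scenario are illustrative and not part of this PR; it reuses the package's existing helpers such as testServerWithConfig, testrpc.WaitForLeader, and the structs types that appear later in this diff). A partition-exports upsert that explicitly targets a secondary datacenter should now fail with the gate's error:

```go
func TestConfigEntry_Apply_RejectsPartitionExportsInSecondary(t *testing.T) {
	// Hypothetical sketch: a single server acting as the primary datacenter.
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.PrimaryDatacenter = "dc1"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// A partition-exports write that explicitly targets a secondary DC.
	args := structs.ConfigEntryRequest{
		Datacenter: "dc2",
		Op:         structs.ConfigEntryUpsert,
		Entry:      &structs.PartitionExportsConfigEntry{Name: "default"},
	}
	var out bool
	err := s1.RPC("ConfigEntry.Apply", &args, &out)
	require.Error(t, err)
	require.Contains(t, err.Error(), "must not target secondary datacenters")
}
```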


@@ -54,6 +54,11 @@ func (c *ConfigEntry) Apply(args *structs.ConfigEntryRequest, reply *bool) error
return err
}
err := gateWriteToSecondary(args.Datacenter, c.srv.config.Datacenter, c.srv.config.PrimaryDatacenter, args.Entry.GetKind())
if err != nil {
return err
}
// Ensure that all config entry writes go to the primary datacenter. These will then
// be replicated to all the other datacenters.
args.Datacenter = c.srv.config.PrimaryDatacenter
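For orientation, a comment-only sketch of the order of operations in Apply after this change; the forwarding step is existing behavior and is not shown in the hunk above:

```go
// 1. gateWriteToSecondary(...)   // new: reject partition-exports writes made from, or targeting, secondary DCs
// 2. args.Datacenter = primaryDC // existing: route every config entry write to the primary
// 3. forward/apply the request   // existing: the primary's state is then replicated to the other DCs
```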
@@ -586,6 +591,33 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
})
}
func gateWriteToSecondary(targetDC, localDC, primaryDC, kind string) error {
// Writes of partition-exports entries involving secondary DCs are rejected
// because non-default partitions cannot be created in secondaries
// and services cannot be exported to another datacenter.
if kind != structs.PartitionExports {
return nil
}
if localDC == "" {
// This should not happen because the datacenter is defaulted in DefaultConfig.
return fmt.Errorf("unknown local datacenter")
}
if primaryDC == "" {
primaryDC = localDC
}
switch {
case targetDC == "" && localDC != primaryDC:
return fmt.Errorf("partition-exports writes in secondary datacenters must target the primary datacenter explicitly.")
case targetDC != "" && targetDC != primaryDC:
return fmt.Errorf("partition-exports writes must not target secondary datacenters.")
}
return nil
}
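For quick reference, a hypothetical helper (the function name is illustrative and not part of the PR) enumerating how the gate resolves the common cases; it mirrors the table-driven test added below:

```go
func gateWriteToSecondaryExamples() {
	// Assuming the primary datacenter is "dc1".
	_ = gateWriteToSecondary("", "dc1", "dc1", structs.PartitionExports)    // nil: write issued in the primary
	_ = gateWriteToSecondary("dc1", "dc2", "dc1", structs.PartitionExports) // nil: secondary explicitly targets the primary
	_ = gateWriteToSecondary("", "dc2", "dc1", structs.PartitionExports)    // error: secondary must target the primary explicitly
	_ = gateWriteToSecondary("dc2", "dc1", "dc1", structs.PartitionExports) // error: writes must not target secondary DCs
	_ = gateWriteToSecondary("", "dc2", "dc1", structs.ServiceDefaults)     // nil: kinds other than partition-exports are not gated
}
```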
// preflightCheck performs kind-specific system validation, separate from
// content validation. The initial use case is restricting writes of
// service-intentions until the system has finished migrating.
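The body of preflightCheck is not part of this diff; purely to illustrate the kind-specific system validation it describes, here is a hypothetical sketch (the migrationComplete parameter is invented for illustration and is not Consul's implementation):

```go
// preflightCheckSketch is a hypothetical illustration of a kind-specific gate:
// only service-intentions writes are restricted, and only until migration is done.
func preflightCheckSketch(kind string, migrationComplete func() bool) error {
	if kind != structs.ServiceIntentions {
		return nil
	}
	if !migrationComplete() {
		return fmt.Errorf("service-intentions config entries cannot be written until the migration has finished")
	}
	return nil
}
```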


@@ -1,6 +1,7 @@
package consul
import (
"fmt"
"os"
"sort"
"testing"
@@ -2058,3 +2059,145 @@ func runStep(t *testing.T, name string, fn func(t *testing.T)) {
t.FailNow()
}
}
func Test_gateWriteToSecondary(t *testing.T) {
type args struct {
targetDC string
localDC string
primaryDC string
kind string
}
type testCase struct {
name string
args args
wantErr string
}
run := func(t *testing.T, tc testCase) {
err := gateWriteToSecondary(tc.args.targetDC, tc.args.localDC, tc.args.primaryDC, tc.args.kind)
if tc.wantErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.wantErr)
return
}
require.NoError(t, err)
}
tt := []testCase{
{
name: "primary to primary with implicit primary and target",
args: args{
targetDC: "",
localDC: "dc1",
primaryDC: "",
kind: structs.PartitionExports,
},
},
{
name: "primary to primary with explicit primary and implicit target",
args: args{
targetDC: "",
localDC: "dc1",
primaryDC: "dc1",
kind: structs.PartitionExports,
},
},
{
name: "primary to primary with all filled in",
args: args{
targetDC: "dc1",
localDC: "dc1",
primaryDC: "dc1",
kind: structs.PartitionExports,
},
},
{
name: "primary to secondary with implicit primary and target",
args: args{
targetDC: "dc2",
localDC: "dc1",
primaryDC: "",
kind: structs.PartitionExports,
},
wantErr: "writes must not target secondary datacenters",
},
{
name: "primary to secondary with all filled in",
args: args{
targetDC: "dc2",
localDC: "dc1",
primaryDC: "dc1",
kind: structs.PartitionExports,
},
wantErr: "writes must not target secondary datacenters",
},
{
name: "secondary to secondary with all filled in",
args: args{
targetDC: "dc2",
localDC: "dc2",
primaryDC: "dc1",
kind: structs.PartitionExports,
},
wantErr: "writes must not target secondary datacenters",
},
{
name: "implicit write to secondary",
args: args{
targetDC: "",
localDC: "dc2",
primaryDC: "dc1",
kind: structs.PartitionExports,
},
wantErr: "must target the primary datacenter explicitly",
},
{
name: "empty local DC",
args: args{
localDC: "",
kind: structs.PartitionExports,
},
wantErr: "unknown local datacenter",
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
run(t, tc)
})
}
}
func Test_gateWriteToSecondary_AllowedKinds(t *testing.T) {
type args struct {
targetDC string
localDC string
primaryDC string
kind string
}
for _, kind := range structs.AllConfigEntryKinds {
if kind == structs.PartitionExports {
continue
}
t.Run(fmt.Sprintf("%s-secondary-to-secondary", kind), func(t *testing.T) {
tcase := args{
targetDC: "",
localDC: "dc2",
primaryDC: "dc1",
kind: kind,
}
require.NoError(t, gateWriteToSecondary(tcase.targetDC, tcase.localDC, tcase.primaryDC, tcase.kind))
})
t.Run(fmt.Sprintf("%s-primary-to-secondary", kind), func(t *testing.T) {
tcase := args{
targetDC: "dc2",
localDC: "dc1",
primaryDC: "dc1",
kind: kind,
}
require.NoError(t, gateWriteToSecondary(tcase.targetDC, tcase.localDC, tcase.primaryDC, tcase.kind))
})
}
}


@@ -92,6 +92,10 @@ func (s *Server) reconcileLocalConfig(ctx context.Context, configs []structs.Con
defer ticker.Stop()
for i, entry := range configs {
// Partition exports only apply to the primary datacenter.
if entry.GetKind() == structs.PartitionExports {
continue
}
req := structs.ConfigEntryRequest{
Op: op,
Datacenter: s.config.Datacenter,
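The same skip expressed as a small standalone filter, for clarity (a sketch, not Consul's API; the function name is illustrative):

```go
// filterReplicatable drops config entries of kinds that must never be written
// into a secondary datacenter's state store; currently only partition-exports.
func filterReplicatable(entries []structs.ConfigEntry) []structs.ConfigEntry {
	out := make([]structs.ConfigEntry, 0, len(entries))
	for _, e := range entries {
		if e.GetKind() == structs.PartitionExports {
			continue // partition-exports only applies to the primary datacenter
		}
		out = append(out, e)
	}
	return out
}
```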


@@ -6,10 +6,11 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/stretchr/testify/require"
)
func TestReplication_ConfigSort(t *testing.T) {
@@ -91,6 +92,107 @@ func TestReplication_ConfigSort(t *testing.T) {
}
}
func TestReplication_DisallowedConfigEntries(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
client := rpcClient(t, s1)
defer client.Close()
dir2, s2 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc1"
c.ConfigReplicationRate = 100
c.ConfigReplicationBurst = 100
c.ConfigReplicationApplyLimit = 1000000
})
testrpc.WaitForLeader(t, s2.RPC, "dc2")
defer os.RemoveAll(dir2)
defer s2.Shutdown()
// Try to join.
joinWAN(t, s2, s1)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
testrpc.WaitForLeader(t, s1.RPC, "dc2")
args := []structs.ConfigEntryRequest{
{
Datacenter: "dc1",
Op: structs.ConfigEntryUpsert,
Entry: &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
Protocol: "http2",
},
},
{
Datacenter: "dc1",
Op: structs.ConfigEntryUpsert,
Entry: &structs.PartitionExportsConfigEntry{
Name: "default",
Services: []structs.ExportedService{
{
Name: structs.WildcardSpecifier,
Consumers: []structs.ServiceConsumer{
{
Partition: "non-default",
},
},
},
},
},
},
{
Datacenter: "dc1",
Op: structs.ConfigEntryUpsert,
Entry: &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: "global",
Config: map[string]interface{}{
"Protocol": "http",
},
},
},
{
Datacenter: "dc1",
Op: structs.ConfigEntryUpsert,
Entry: &structs.MeshConfigEntry{
TransparentProxy: structs.TransparentProxyMeshConfig{
MeshDestinationsOnly: true,
},
},
},
}
for _, arg := range args {
out := false
require.NoError(t, s1.RPC("ConfigEntry.Apply", &arg, &out))
}
retry.Run(t, func(r *retry.R) {
_, local, err := s2.fsm.State().ConfigEntries(nil, structs.ReplicationEnterpriseMeta())
require.NoError(r, err)
require.Len(r, local, 3)
localKinds := make([]string, 0)
for _, entry := range local {
localKinds = append(localKinds, entry.GetKind())
}
// Should have all inserted kinds except for partition-exports.
expectKinds := []string{
structs.ProxyDefaults, structs.ServiceDefaults, structs.MeshConfig,
}
require.ElementsMatch(r, expectKinds, localKinds)
})
}
func TestReplication_ConfigEntries(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")