open-consul/agent/proxycfg-glue/internal_service_dump.go
Freddy e96c0e1dad
Fixup authz for data imported from peers (#15347)
There are a few changes that needed to be made to handle authorizing
reads for imported data:

- If the data was imported from a peer we should not attempt to read the
  data using the traditional authz rules. This is because the names of
  services/nodes in a peer cluster are not equivalent to those of the
  importing cluster.

- If the data was imported from a peer we need to check whether the
  token corresponds to a service, meaning that it has service:write
  permissions, or to a local read-only token that can read all
  nodes/services in a namespace (sketched below).

This required changes at the policyAuthorizer level, since that is the
only view available to OSS Consul, and at the enterprise
partition/namespace level.
2022-11-14 11:36:27 -07:00
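
In OSS terms, the relaxed rule can be sketched roughly as follows. This is an illustrative sketch against the github.com/hashicorp/consul/acl package, not the shipped change: the helper name canReadImported and the package name are hypothetical, and the real logic sits in the policyAuthorizer/aclfilter code paths rather than in the file below.

// Sketch only: a hypothetical helper illustrating the relaxed rule for data
// imported from a peer. Not the shipped implementation.
package sketch

import "github.com/hashicorp/consul/acl"

// canReadImported reports whether the token behind authz may read results
// imported from a peer: either it has service:write on at least one local
// service, or it can already read every node and service in the namespace
// described by authzCtx.
func canReadImported(authz acl.Authorizer, authzCtx *acl.AuthorizerContext) bool {
	if authz.ServiceWriteAny(authzCtx) == acl.Allow {
		return true
	}
	return authz.ServiceReadAll(authzCtx) == acl.Allow &&
		authz.NodeReadAll(authzCtx) == acl.Allow
}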


package proxycfgglue

import (
	"context"
	"fmt"

	"github.com/hashicorp/go-bexpr"
	"github.com/hashicorp/go-memdb"

	"github.com/hashicorp/consul/agent/cache"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/consul/watch"
	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/structs/aclfilter"
)
// CacheInternalServiceDump satisfies the proxycfg.InternalServiceDump
// interface by sourcing data from the agent cache.
func CacheInternalServiceDump(c *cache.Cache) proxycfg.InternalServiceDump {
	return &cacheInternalServiceDump{c}
}

// cacheInternalServiceDump wraps the underlying cache-type to return a simpler
// subset of the response (as this is all we use in proxycfg).
type cacheInternalServiceDump struct {
	c *cache.Cache
}
func (c *cacheInternalServiceDump) Notify(ctx context.Context, req *structs.ServiceDumpRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
	dispatch := dispatchCacheUpdate(ch)

	return c.c.NotifyCallback(ctx, cachetype.InternalServiceDumpName, req, correlationID,
		func(ctx context.Context, event cache.UpdateEvent) {
			if r, _ := event.Result.(*structs.IndexedNodesWithGateways); r != nil {
				event.Result = &structs.IndexedCheckServiceNodes{
					Nodes:     r.Nodes,
					QueryMeta: r.QueryMeta,
				}
			}
			dispatch(ctx, event)
		})
}
// ServerInternalServiceDump satisfies the proxycfg.InternalServiceDump
// interface by sourcing data from a blocking query against the server's
// state store.
func ServerInternalServiceDump(deps ServerDataSourceDeps, remoteSource proxycfg.InternalServiceDump) proxycfg.InternalServiceDump {
	return &serverInternalServiceDump{deps, remoteSource}
}

type serverInternalServiceDump struct {
	deps         ServerDataSourceDeps
	remoteSource proxycfg.InternalServiceDump
}
func (s *serverInternalServiceDump) Notify(ctx context.Context, req *structs.ServiceDumpRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
	if req.Datacenter != s.deps.Datacenter {
		return s.remoteSource.Notify(ctx, req, correlationID, ch)
	}

	filter, err := bexpr.CreateFilter(req.Filter, nil, structs.CheckServiceNodes{})
	if err != nil {
		return err
	}

	// This is just the small subset of the Internal.ServiceDump RPC handler used
	// by proxycfg.
	return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore,
		func(ws memdb.WatchSet, store Store) (uint64, *structs.IndexedCheckServiceNodes, error) {
			authz, err := s.deps.ACLResolver.ResolveTokenAndDefaultMeta(req.Token, &req.EnterpriseMeta, nil)
			if err != nil {
				return 0, nil, err
			}

			idx, nodes, err := store.ServiceDump(ws, req.ServiceKind, req.UseServiceKind, &req.EnterpriseMeta, req.PeerName)
			if err != nil {
				return 0, nil, err
			}

			raw, err := filter.Execute(nodes)
			if err != nil {
				return 0, nil, fmt.Errorf("could not filter local service dump: %w", err)
			}
			nodes = raw.(structs.CheckServiceNodes)

			aclfilter.New(authz, s.deps.Logger).Filter(&nodes)

			return idx, &structs.IndexedCheckServiceNodes{
				Nodes: nodes,
				QueryMeta: structs.QueryMeta{
					Index:   idx,
					Backend: structs.QueryBackendBlocking,
				},
			}, nil
		},
		dispatchBlockingQueryUpdate[*structs.IndexedCheckServiceNodes](ch),
	)
}
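
For context, a rough sketch of how the two constructors above could be combined when building a proxycfg data source. The wrapper function and its parameters are assumptions for illustration; the agent's real wiring lives elsewhere.

// newInternalServiceDumpSource is a hypothetical helper: servers answer
// InternalServiceDump queries from their own state store and fall back to the
// cache-backed source for requests that target another datacenter, while
// client agents use the cache-backed source alone.
func newInternalServiceDumpSource(c *cache.Cache, deps ServerDataSourceDeps, isServer bool) proxycfg.InternalServiceDump {
	remote := CacheInternalServiceDump(c)
	if !isServer {
		return remote
	}
	return ServerInternalServiceDump(deps, remote)
}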