package state

import (
	"fmt"
	"strings"

	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/types"
	"github.com/hashicorp/go-memdb"
)

// nodesTableSchema returns a new table schema used for storing node
// information.
func nodesTableSchema() *memdb.TableSchema {
	return &memdb.TableSchema{
		Name: "nodes",
		Indexes: map[string]*memdb.IndexSchema{
			"id": &memdb.IndexSchema{
				Name:         "id",
				AllowMissing: false,
				Unique:       true,
				Indexer: &memdb.StringFieldIndex{
					Field:     "Node",
					Lowercase: true,
				},
			},
			"uuid": &memdb.IndexSchema{
				Name:         "uuid",
				AllowMissing: true,
				Unique:       true,
				Indexer: &memdb.UUIDFieldIndex{
					Field: "ID",
				},
			},
			"meta": &memdb.IndexSchema{
				Name:         "meta",
				AllowMissing: true,
				Unique:       false,
				Indexer: &memdb.StringMapFieldIndex{
					Field:     "Meta",
					Lowercase: false,
				},
			},
		},
	}
}

// servicesTableSchema returns a new table schema used to store information
// about services.
func servicesTableSchema() *memdb.TableSchema {
	return &memdb.TableSchema{
		Name: "services",
		Indexes: map[string]*memdb.IndexSchema{
			"id": &memdb.IndexSchema{
				Name:         "id",
				AllowMissing: false,
				Unique:       true,
				Indexer: &memdb.CompoundIndex{
					Indexes: []memdb.Indexer{
						&memdb.StringFieldIndex{
							Field:     "Node",
							Lowercase: true,
						},
						&memdb.StringFieldIndex{
							Field:     "ServiceID",
							Lowercase: true,
						},
					},
				},
			},
			"node": &memdb.IndexSchema{
				Name:         "node",
				AllowMissing: false,
				Unique:       false,
				Indexer: &memdb.StringFieldIndex{
					Field:     "Node",
					Lowercase: true,
				},
			},
			"service": &memdb.IndexSchema{
				Name:         "service",
				AllowMissing: true,
				Unique:       false,
				Indexer: &memdb.StringFieldIndex{
					Field:     "ServiceName",
					Lowercase: true,
				},
			},
		},
	}
}

// checksTableSchema returns a new table schema used for storing and indexing
// health check information. Health checks have a number of different attributes
// we want to filter by, so this table is a bit more complex.
func checksTableSchema() *memdb.TableSchema {
	return &memdb.TableSchema{
		Name: "checks",
		Indexes: map[string]*memdb.IndexSchema{
			"id": &memdb.IndexSchema{
				Name:         "id",
				AllowMissing: false,
				Unique:       true,
				Indexer: &memdb.CompoundIndex{
					Indexes: []memdb.Indexer{
						&memdb.StringFieldIndex{
							Field:     "Node",
							Lowercase: true,
						},
						&memdb.StringFieldIndex{
							Field:     "CheckID",
							Lowercase: true,
						},
					},
				},
			},
			"status": &memdb.IndexSchema{
				Name:         "status",
				AllowMissing: false,
				Unique:       false,
				Indexer: &memdb.StringFieldIndex{
					Field:     "Status",
					Lowercase: false,
				},
			},
			"service": &memdb.IndexSchema{
				Name:         "service",
				AllowMissing: true,
				Unique:       false,
				Indexer: &memdb.StringFieldIndex{
					Field:     "ServiceName",
					Lowercase: true,
				},
			},
			"node": &memdb.IndexSchema{
				Name:         "node",
				AllowMissing: true,
				Unique:       false,
				Indexer: &memdb.StringFieldIndex{
					Field:     "Node",
					Lowercase: true,
				},
			},
			"node_service_check": &memdb.IndexSchema{
				Name:         "node_service_check",
				AllowMissing: true,
				Unique:       false,
				Indexer: &memdb.CompoundIndex{
					Indexes: []memdb.Indexer{
						&memdb.StringFieldIndex{
							Field:     "Node",
							Lowercase: true,
						},
						&memdb.FieldSetIndex{
							Field: "ServiceID",
						},
					},
				},
			},
			"node_service": &memdb.IndexSchema{
				Name:         "node_service",
				AllowMissing: true,
				Unique:       false,
				Indexer: &memdb.CompoundIndex{
					Indexes: []memdb.Indexer{
						&memdb.StringFieldIndex{
							Field:     "Node",
							Lowercase: true,
						},
						&memdb.StringFieldIndex{
							Field:     "ServiceID",
							Lowercase: true,
						},
					},
				},
			},
		},
	}
}

func init() {
	registerSchema(nodesTableSchema)
	registerSchema(servicesTableSchema)
	registerSchema(checksTableSchema)
}
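
// The table schemas registered above are merged into the state store's single
// *memdb.DBSchema elsewhere in this package. A minimal sketch of that assembly,
// assuming a package-level `schemas` slice that registerSchema appends to (the
// exact names live in another file and are an assumption here):
//
//	db := &memdb.DBSchema{Tables: make(map[string]*memdb.TableSchema)}
//	for _, fn := range schemas {
//		table := fn()
//		db.Tables[table.Name] = table
//	}
//	memDB, err := memdb.NewMemDB(db) // fails if any index definition is invalid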

const (
	// minUUIDLookupLen is used as a minimum length of a node name required before
	// we test to see if the name is actually a UUID and perform an ID-based node
	// lookup.
	minUUIDLookupLen = 2
)

// resizeNodeLookupKey returns a node lookup key trimmed to an even number of
// characters so it can be used for a UUID prefix lookup.
func resizeNodeLookupKey(s string) string {
	l := len(s)

	if l%2 != 0 {
		return s[0 : l-1]
	}

	return s
}
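
// Illustrative behaviour (values hypothetical): the memdb UUID index effectively
// matches on whole bytes, i.e. pairs of hex characters, so odd-length input is
// trimmed first:
//
//	resizeNodeLookupKey("abcde") // => "abcd" (odd length, last character dropped)
//	resizeNodeLookupKey("abcd")  // => "abcd" (even length, unchanged)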

// Nodes is used to pull the full list of nodes for use during snapshots.
func (s *Snapshot) Nodes() (memdb.ResultIterator, error) {
	iter, err := s.tx.Get("nodes", "id")
	if err != nil {
		return nil, err
	}
	return iter, nil
}

// Services is used to pull the full list of services for a given node for use
// during snapshots.
func (s *Snapshot) Services(node string) (memdb.ResultIterator, error) {
	iter, err := s.tx.Get("services", "node", node)
	if err != nil {
		return nil, err
	}
	return iter, nil
}

// Checks is used to pull the full list of checks for a given node for use
// during snapshots.
func (s *Snapshot) Checks(node string) (memdb.ResultIterator, error) {
	iter, err := s.tx.Get("checks", "node", node)
	if err != nil {
		return nil, err
	}
	return iter, nil
}

// Registration is used to make sure a node, service, and check registration is
// performed within a single transaction to avoid race conditions on state
// updates.
func (s *Restore) Registration(idx uint64, req *structs.RegisterRequest) error {
	if err := s.store.ensureRegistrationTxn(s.tx, idx, req); err != nil {
		return err
	}
	return nil
}

// EnsureRegistration is used to make sure a node, service, and check
// registration is performed within a single transaction to avoid race
// conditions on state updates.
func (s *Store) EnsureRegistration(idx uint64, req *structs.RegisterRequest) error {
	tx := s.db.Txn(true)
	defer tx.Abort()

	if err := s.ensureRegistrationTxn(tx, idx, req); err != nil {
		return err
	}

	tx.Commit()
	return nil
}
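
// A hedged usage sketch for EnsureRegistration, assuming a populated *Store s
// and the Raft index idx at which the entry is applied; the concrete field
// values below are illustrative only:
//
//	err := s.EnsureRegistration(idx, &structs.RegisterRequest{
//		Node:    "node1",
//		Address: "10.0.0.1",
//		Service: &structs.NodeService{ID: "redis", Service: "redis", Port: 6379},
//		Check: &structs.HealthCheck{
//			Node:      "node1",
//			CheckID:   types.CheckID("service:redis"),
//			Name:      "Redis health",
//			ServiceID: "redis",
//		},
//	})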

// ensureRegistrationTxn is used to make sure a node, service, and check
// registration is performed within a single transaction to avoid race
// conditions on state updates.
func (s *Store) ensureRegistrationTxn(tx *memdb.Txn, idx uint64, req *structs.RegisterRequest) error {
	// Create a node structure.
	node := &structs.Node{
		ID:              req.ID,
		Node:            req.Node,
		Address:         req.Address,
		Datacenter:      req.Datacenter,
		TaggedAddresses: req.TaggedAddresses,
		Meta:            req.NodeMeta,
	}

	// Since this gets called for all node operations (service and check
	// updates) and churn on the node itself is basically none after the
	// node updates itself the first time, it's worth seeing if we need to
	// modify the node at all so we prevent watch churn and useless writes
	// and modify index bumps on the node.
	{
		existing, err := tx.First("nodes", "id", node.Node)
		if err != nil {
			return fmt.Errorf("node lookup failed: %s", err)
		}
		if existing == nil || req.ChangesNode(existing.(*structs.Node)) {
			if err := s.ensureNodeTxn(tx, idx, node); err != nil {
				return fmt.Errorf("failed inserting node: %s", err)
			}
		}
	}

	// Add the service, if any. We perform a similar check as we do for the
	// node info above to make sure we actually need to update the service
	// definition in order to prevent useless churn if nothing has changed.
	if req.Service != nil {
		existing, err := tx.First("services", "id", req.Node, req.Service.ID)
		if err != nil {
			return fmt.Errorf("failed service lookup: %s", err)
		}
		if existing == nil || !(existing.(*structs.ServiceNode).ToNodeService()).IsSame(req.Service) {
			if err := s.ensureServiceTxn(tx, idx, req.Node, req.Service); err != nil {
				return fmt.Errorf("failed inserting service: %s", err)
			}
		}
	}

	// Add the checks, if any.
	if req.Check != nil {
		if req.Check.Node != req.Node {
			return fmt.Errorf("check node %q does not match node %q",
				req.Check.Node, req.Node)
		}
		if err := s.ensureCheckTxn(tx, idx, req.Check); err != nil {
			return fmt.Errorf("failed inserting check: %s", err)
		}
	}
	for _, check := range req.Checks {
		if check.Node != req.Node {
			return fmt.Errorf("check node %q does not match node %q",
				check.Node, req.Node)
		}
		if err := s.ensureCheckTxn(tx, idx, check); err != nil {
			return fmt.Errorf("failed inserting check: %s", err)
		}
	}

	return nil
}

// EnsureNode is used to upsert node registration or modification.
func (s *Store) EnsureNode(idx uint64, node *structs.Node) error {
	tx := s.db.Txn(true)
	defer tx.Abort()

	// Call the node upsert
	if err := s.ensureNodeTxn(tx, idx, node); err != nil {
		return err
	}

	tx.Commit()
	return nil
}

// ensureNodeTxn is the inner function called to actually create a node
// registration or modify an existing one in the state store. It allows
// passing in a memdb transaction so it may be part of a larger txn.
func (s *Store) ensureNodeTxn(tx *memdb.Txn, idx uint64, node *structs.Node) error {
	// See if there's an existing node with this UUID, and make sure the
	// name is the same.
	var n *structs.Node
	if node.ID != "" {
		existing, err := tx.First("nodes", "uuid", string(node.ID))
		if err != nil {
			return fmt.Errorf("node lookup failed: %s", err)
		}
		if existing != nil {
			n = existing.(*structs.Node)
			if n.Node != node.Node {
				return fmt.Errorf("node ID %q for node %q aliases existing node %q",
					node.ID, node.Node, n.Node)
			}
		}
	}

	// Check for an existing node by name to support nodes with no IDs.
	if n == nil {
		existing, err := tx.First("nodes", "id", node.Node)
		if err != nil {
			return fmt.Errorf("node name lookup failed: %s", err)
		}
		if existing != nil {
			n = existing.(*structs.Node)
		}
	}

	// Get the indexes.
	if n != nil {
		node.CreateIndex = n.CreateIndex
		node.ModifyIndex = idx
	} else {
		node.CreateIndex = idx
		node.ModifyIndex = idx
	}

	// Insert the node and update the index.
	if err := tx.Insert("nodes", node); err != nil {
		return fmt.Errorf("failed inserting node: %s", err)
	}
	if err := tx.Insert("index", &IndexEntry{"nodes", idx}); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	return nil
}

// GetNode is used to retrieve a node registration by node name.
func (s *Store) GetNode(id string) (uint64, *structs.Node, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "nodes")

	// Retrieve the node from the state store
	node, err := tx.First("nodes", "id", id)
	if err != nil {
		return 0, nil, fmt.Errorf("node lookup failed: %s", err)
	}
	if node != nil {
		return idx, node.(*structs.Node), nil
	}
	return idx, nil, nil
}

// GetNodeID is used to retrieve a node registration by node ID.
func (s *Store) GetNodeID(id types.NodeID) (uint64, *structs.Node, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "nodes")

	// Retrieve the node from the state store
	node, err := tx.First("nodes", "uuid", string(id))
	if err != nil {
		return 0, nil, fmt.Errorf("node lookup failed: %s", err)
	}
	if node != nil {
		return idx, node.(*structs.Node), nil
	}
	return idx, nil, nil
}
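
// A brief usage sketch (the UUID below is hypothetical): GetNode looks nodes up
// by name via the "id" index, while GetNodeID uses the "uuid" index:
//
//	idx, node, err := s.GetNodeID(types.NodeID("4e60a2a6-67e7-4a2a-9a3f-000000000001"))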

// Nodes is used to return all of the known nodes.
func (s *Store) Nodes(ws memdb.WatchSet) (uint64, structs.Nodes, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "nodes")

	// Retrieve all of the nodes
	nodes, err := tx.Get("nodes", "id")
	if err != nil {
		return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
	}
	ws.Add(nodes.WatchCh())

	// Create and return the nodes list.
	var results structs.Nodes
	for node := nodes.Next(); node != nil; node = nodes.Next() {
		results = append(results, node.(*structs.Node))
	}
	return idx, results, nil
}
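
// A minimal sketch of how callers typically consume these watch-enabled reads,
// assuming a *Store s; the blocking loop shown here is illustrative, not the
// exact RPC plumbing used elsewhere in Consul:
//
//	ws := memdb.NewWatchSet()
//	idx, nodes, err := s.Nodes(ws)
//	if err == nil {
//		// ... use nodes ...
//		// Block until anything covered by ws changes or the timeout fires,
//		// then re-query and compare the new index against idx.
//		timedOut := ws.Watch(time.After(10 * time.Second))
//		_ = timedOut
//	}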

// NodesByMeta is used to return all nodes with the given metadata key/value pairs.
func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string) (uint64, structs.Nodes, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "nodes")

	// Retrieve all of the nodes
	var args []interface{}
	for key, value := range filters {
		args = append(args, key, value)
		break
	}
	nodes, err := tx.Get("nodes", "meta", args...)
	if err != nil {
		return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
	}
	ws.Add(nodes.WatchCh())

	// Create and return the nodes list.
	var results structs.Nodes
	for node := nodes.Next(); node != nil; node = nodes.Next() {
		n := node.(*structs.Node)
		if len(filters) <= 1 || structs.SatisfiesMetaFilters(n.Meta, filters) {
			results = append(results, n)
		}
	}
	return idx, results, nil
}

// DeleteNode is used to delete a given node by its name.
func (s *Store) DeleteNode(idx uint64, nodeName string) error {
	tx := s.db.Txn(true)
	defer tx.Abort()

	// Call the node deletion.
	if err := s.deleteNodeTxn(tx, idx, nodeName); err != nil {
		return err
	}

	tx.Commit()
	return nil
}

// deleteNodeTxn is the inner method used for removing a node from
// the store within a given transaction.
func (s *Store) deleteNodeTxn(tx *memdb.Txn, idx uint64, nodeName string) error {
	// Look up the node.
	node, err := tx.First("nodes", "id", nodeName)
	if err != nil {
		return fmt.Errorf("node lookup failed: %s", err)
	}
	if node == nil {
		return nil
	}

	// Delete all services associated with the node and update the service index.
	services, err := tx.Get("services", "node", nodeName)
	if err != nil {
		return fmt.Errorf("failed service lookup: %s", err)
	}
	var sids []string
	for service := services.Next(); service != nil; service = services.Next() {
		svc := service.(*structs.ServiceNode)
		sids = append(sids, svc.ServiceID)
		if err := tx.Insert("index", &IndexEntry{serviceIndexName(svc.ServiceName), idx}); err != nil {
			return fmt.Errorf("failed updating index: %s", err)
		}
	}

	// Do the delete in a separate loop so we don't trash the iterator.
	for _, sid := range sids {
		if err := s.deleteServiceTxn(tx, idx, nodeName, sid); err != nil {
			return err
		}
	}

	// Delete all checks associated with the node. This will invalidate
	// sessions as necessary.
	checks, err := tx.Get("checks", "node", nodeName)
	if err != nil {
		return fmt.Errorf("failed check lookup: %s", err)
	}
	var cids []types.CheckID
	for check := checks.Next(); check != nil; check = checks.Next() {
		cids = append(cids, check.(*structs.HealthCheck).CheckID)
	}

	// Do the delete in a separate loop so we don't trash the iterator.
	for _, cid := range cids {
		if err := s.deleteCheckTxn(tx, idx, nodeName, cid); err != nil {
			return err
		}
	}

	// Delete any coordinates associated with this node.
	coords, err := tx.Get("coordinates", "node", nodeName)
	if err != nil {
		return fmt.Errorf("failed coordinate lookup: %s", err)
	}
	for coord := coords.Next(); coord != nil; coord = coords.Next() {
		if err := tx.Delete("coordinates", coord); err != nil {
			return fmt.Errorf("failed deleting coordinate: %s", err)
		}
		if err := tx.Insert("index", &IndexEntry{"coordinates", idx}); err != nil {
			return fmt.Errorf("failed updating index: %s", err)
		}
	}

	// Delete the node and update the index.
	if err := tx.Delete("nodes", node); err != nil {
		return fmt.Errorf("failed deleting node: %s", err)
	}
	if err := tx.Insert("index", &IndexEntry{"nodes", idx}); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	// Invalidate any sessions for this node.
	sessions, err := tx.Get("sessions", "node", nodeName)
	if err != nil {
		return fmt.Errorf("failed session lookup: %s", err)
	}
	var ids []string
	for sess := sessions.Next(); sess != nil; sess = sessions.Next() {
		ids = append(ids, sess.(*structs.Session).ID)
	}

	// Do the delete in a separate loop so we don't trash the iterator.
	for _, id := range ids {
		if err := s.deleteSessionTxn(tx, idx, id); err != nil {
			return fmt.Errorf("failed session delete: %s", err)
		}
	}

	return nil
}

// EnsureService is called to upsert creation of a given NodeService.
func (s *Store) EnsureService(idx uint64, node string, svc *structs.NodeService) error {
	tx := s.db.Txn(true)
	defer tx.Abort()

	// Call the service registration upsert
	if err := s.ensureServiceTxn(tx, idx, node, svc); err != nil {
		return err
	}

	tx.Commit()
	return nil
}

// ensureServiceTxn is used to upsert a service registration within an
// existing memdb transaction.
func (s *Store) ensureServiceTxn(tx *memdb.Txn, idx uint64, node string, svc *structs.NodeService) error {
	// Check for existing service
	existing, err := tx.First("services", "id", node, svc.ID)
	if err != nil {
		return fmt.Errorf("failed service lookup: %s", err)
	}

	if err = structs.ValidateMetadata(svc.Meta, false); err != nil {
		return fmt.Errorf("Invalid Service Meta for node %s and serviceID %s: %v", node, svc.ID, err)
	}

	// Create the service node entry and populate the indexes. Note that
	// conversion doesn't populate any of the node-specific information.
	// That's always populated when we read from the state store.
	entry := svc.ToServiceNode(node)
	if existing != nil {
		entry.CreateIndex = existing.(*structs.ServiceNode).CreateIndex
		entry.ModifyIndex = idx
	} else {
		entry.CreateIndex = idx
		entry.ModifyIndex = idx
	}

	// Get the node
	n, err := tx.First("nodes", "id", node)
	if err != nil {
		return fmt.Errorf("failed node lookup: %s", err)
	}
	if n == nil {
		return ErrMissingNode
	}

	// Insert the service and update the index
	if err := tx.Insert("services", entry); err != nil {
		return fmt.Errorf("failed inserting service: %s", err)
	}
	if err := tx.Insert("index", &IndexEntry{"services", idx}); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}
	if err := tx.Insert("index", &IndexEntry{serviceIndexName(svc.Service), idx}); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	return nil
}

// Services returns all services along with a list of associated tags.
func (s *Store) Services(ws memdb.WatchSet) (uint64, structs.Services, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "services")

	// List all the services.
	services, err := tx.Get("services", "id")
	if err != nil {
		return 0, nil, fmt.Errorf("failed querying services: %s", err)
	}
	ws.Add(services.WatchCh())

	// Rip through the services and enumerate them and their unique set of
	// tags.
	unique := make(map[string]map[string]struct{})
	for service := services.Next(); service != nil; service = services.Next() {
		svc := service.(*structs.ServiceNode)
		tags, ok := unique[svc.ServiceName]
		if !ok {
			unique[svc.ServiceName] = make(map[string]struct{})
			tags = unique[svc.ServiceName]
		}
		for _, tag := range svc.ServiceTags {
			tags[tag] = struct{}{}
		}
	}

	// Generate the output structure.
	var results = make(structs.Services)
	for service, tags := range unique {
		results[service] = make([]string, 0)
		for tag := range tags {
			results[service] = append(results[service], tag)
		}
	}
	return idx, results, nil
}

// ServicesByNodeMeta returns all services, filtered by the given node metadata.
func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string) (uint64, structs.Services, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "services", "nodes")

	// Retrieve all of the nodes with the meta k/v pair
	var args []interface{}
	for key, value := range filters {
		args = append(args, key, value)
		break
	}
	nodes, err := tx.Get("nodes", "meta", args...)
	if err != nil {
		return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
	}
	ws.Add(nodes.WatchCh())

	// We don't want to track an unlimited number of services, so we pull a
	// top-level watch to use as a fallback.
	allServices, err := tx.Get("services", "id")
	if err != nil {
		return 0, nil, fmt.Errorf("failed services lookup: %s", err)
	}
	allServicesCh := allServices.WatchCh()

	// Populate the services map
	unique := make(map[string]map[string]struct{})
	for node := nodes.Next(); node != nil; node = nodes.Next() {
		n := node.(*structs.Node)
		if len(filters) > 1 && !structs.SatisfiesMetaFilters(n.Meta, filters) {
			continue
		}

		// List all the services on the node
		services, err := tx.Get("services", "node", n.Node)
		if err != nil {
			return 0, nil, fmt.Errorf("failed querying services: %s", err)
		}
		ws.AddWithLimit(watchLimit, services.WatchCh(), allServicesCh)

		// Rip through the services and enumerate them and their unique set of
		// tags.
		for service := services.Next(); service != nil; service = services.Next() {
			svc := service.(*structs.ServiceNode)
			tags, ok := unique[svc.ServiceName]
			if !ok {
				unique[svc.ServiceName] = make(map[string]struct{})
				tags = unique[svc.ServiceName]
			}
			for _, tag := range svc.ServiceTags {
				tags[tag] = struct{}{}
			}
		}
	}

	// Generate the output structure.
	var results = make(structs.Services)
	for service, tags := range unique {
		results[service] = make([]string, 0)
		for tag := range tags {
			results[service] = append(results[service], tag)
		}
	}
	return idx, results, nil
}

// maxIndexForService returns the maximum Raft index for a service.
// If the index is not set for the service, it will return:
// - maxIndex(nodes, services) if checks is false
// - maxIndex(nodes, services, checks) if checks is true
func maxIndexForService(tx *memdb.Txn, serviceName string, checks bool) uint64 {
	transaction, err := tx.First("index", "id", serviceIndexName(serviceName))
	if err == nil {
		if idx, ok := transaction.(*IndexEntry); ok {
			return idx.Value
		}
	}
	if checks {
		return maxIndexTxn(tx, "nodes", "services", "checks")
	}
	return maxIndexTxn(tx, "nodes", "services")
}

// ServiceNodes returns the nodes associated with a given service name.
func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.ServiceNodes, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexForService(tx, serviceName, false)

	// List all the services.
	services, err := tx.Get("services", "service", serviceName)
	if err != nil {
		return 0, nil, fmt.Errorf("failed service lookup: %s", err)
	}
	ws.Add(services.WatchCh())

	var results structs.ServiceNodes
	for service := services.Next(); service != nil; service = services.Next() {
		results = append(results, service.(*structs.ServiceNode))
	}

	// Fill in the node details.
	results, err = s.parseServiceNodes(tx, ws, results)
	if err != nil {
		return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err)
	}
	return idx, results, nil
}

// ServiceTagNodes returns the nodes associated with a given service, filtering
// out services that don't contain the given tag.
func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tag string) (uint64, structs.ServiceNodes, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexForService(tx, service, false)

	// List all the services.
	services, err := tx.Get("services", "service", service)
	if err != nil {
		return 0, nil, fmt.Errorf("failed service lookup: %s", err)
	}
	ws.Add(services.WatchCh())

	// Gather all the services and apply the tag filter.
	var results structs.ServiceNodes
	for service := services.Next(); service != nil; service = services.Next() {
		svc := service.(*structs.ServiceNode)
		if !serviceTagFilter(svc, tag) {
			results = append(results, svc)
		}
	}

	// Fill in the node details.
	results, err = s.parseServiceNodes(tx, ws, results)
	if err != nil {
		return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err)
	}
	return idx, results, nil
}

// serviceTagFilter returns true (should filter) if the given service node
// doesn't contain the given tag.
func serviceTagFilter(sn *structs.ServiceNode, tag string) bool {
	tag = strings.ToLower(tag)

	// Look for the lower cased version of the tag.
	for _, t := range sn.ServiceTags {
		if strings.ToLower(t) == tag {
			return false
		}
	}

	// If we didn't hit the tag above then we should filter.
	return true
}
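
// Illustrative behaviour (values hypothetical): for a ServiceNode whose
// ServiceTags are []string{"Primary", "v1"}, the comparison is case-insensitive:
//
//	serviceTagFilter(sn, "primary") // => false: tag present, keep the node
//	serviceTagFilter(sn, "v2")      // => true:  tag missing, filter it out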

// ServiceAddressNodes returns the nodes associated with a given service, filtering
// out services that don't match the given serviceAddress.
func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string) (uint64, structs.ServiceNodes, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// List all the services.
	services, err := tx.Get("services", "id")
	if err != nil {
		return 0, nil, fmt.Errorf("failed service lookup: %s", err)
	}
	ws.Add(services.WatchCh())

	// Gather all the services and apply the address filter.
	var results structs.ServiceNodes
	for service := services.Next(); service != nil; service = services.Next() {
		svc := service.(*structs.ServiceNode)
		if svc.ServiceAddress == address {
			results = append(results, svc)
		}
	}

	// Fill in the node details.
	results, err = s.parseServiceNodes(tx, ws, results)
	if err != nil {
		return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err)
	}
	return 0, results, nil
}

// parseServiceNodes iterates over a services query and fills in the node details,
// returning a ServiceNodes slice.
func (s *Store) parseServiceNodes(tx *memdb.Txn, ws memdb.WatchSet, services structs.ServiceNodes) (structs.ServiceNodes, error) {
	// We don't want to track an unlimited number of nodes, so we pull a
	// top-level watch to use as a fallback.
	allNodes, err := tx.Get("nodes", "id")
	if err != nil {
		return nil, fmt.Errorf("failed nodes lookup: %s", err)
	}
	allNodesCh := allNodes.WatchCh()

	// Fill in the node data for each service instance.
	var results structs.ServiceNodes
	for _, sn := range services {
		// Note that we have to clone here because we don't want to
		// modify the node-related fields on the object in the database,
		// which is what we are referencing.
		s := sn.PartialClone()

		// Grab the corresponding node record.
		watchCh, n, err := tx.FirstWatch("nodes", "id", sn.Node)
		if err != nil {
			return nil, fmt.Errorf("failed node lookup: %s", err)
		}
		ws.AddWithLimit(watchLimit, watchCh, allNodesCh)

		// Populate the node-related fields. The tagged addresses may be
		// used by agents to perform address translation if they are
		// configured to do that.
		node := n.(*structs.Node)
		s.ID = node.ID
		s.Address = node.Address
		s.Datacenter = node.Datacenter
		s.TaggedAddresses = node.TaggedAddresses
		s.NodeMeta = node.Meta

		results = append(results, s)
	}
	return results, nil
}

// NodeService is used to retrieve a specific service associated with the given
// node.
func (s *Store) NodeService(nodeName string, serviceID string) (uint64, *structs.NodeService, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "services")

	// Query the service
	service, err := tx.First("services", "id", nodeName, serviceID)
	if err != nil {
		return 0, nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err)
	}

	if service != nil {
		return idx, service.(*structs.ServiceNode).ToNodeService(), nil
	}
	return idx, nil, nil
}

// NodeServices is used to query service registrations by node name or UUID.
func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string) (uint64, *structs.NodeServices, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "nodes", "services")

	// Query the node by node name
	watchCh, n, err := tx.FirstWatch("nodes", "id", nodeNameOrID)
	if err != nil {
		return 0, nil, fmt.Errorf("node lookup failed: %s", err)
	}

	if n != nil {
		ws.Add(watchCh)
	} else {
		if len(nodeNameOrID) < minUUIDLookupLen {
			ws.Add(watchCh)
			return 0, nil, nil
		}

		// Attempt to lookup the node by its node ID
		iter, err := tx.Get("nodes", "uuid_prefix", resizeNodeLookupKey(nodeNameOrID))
		if err != nil {
			ws.Add(watchCh)
			// TODO(sean@): We could/should log an error re: the uuid_prefix lookup
			// failing once a logger has been introduced to the catalog.
			return 0, nil, nil
		}

		n = iter.Next()
		if n == nil {
			// No nodes matched, even with the Node ID: add a watch on the node name.
			ws.Add(watchCh)
			return 0, nil, nil
		}

		idWatchCh := iter.WatchCh()
		if iter.Next() != nil {
			// More than one match present: Watch on the node name channel and return
			// an empty result (node lookups can not be ambiguous).
			ws.Add(watchCh)
			return 0, nil, nil
		}

		ws.Add(idWatchCh)
	}

	node := n.(*structs.Node)
	nodeName := node.Node

	// Read all of the services
	services, err := tx.Get("services", "node", nodeName)
	if err != nil {
		return 0, nil, fmt.Errorf("failed querying services for node %q: %s", nodeName, err)
	}
	ws.Add(services.WatchCh())

	// Initialize the node services struct
	ns := &structs.NodeServices{
		Node:     node,
		Services: make(map[string]*structs.NodeService),
	}

	// Add all of the services to the map.
	for service := services.Next(); service != nil; service = services.Next() {
		svc := service.(*structs.ServiceNode).ToNodeService()
		ns.Services[svc.ID] = svc
	}

	return idx, ns, nil
}
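
// A short sketch of the name-or-UUID lookup behaviour above, assuming a *Store s
// and a memdb.WatchSet ws (the identifiers are illustrative):
//
//	// Lookup by node name:
//	idx, ns, err := s.NodeServices(ws, "node1")
//	// Lookup by a UUID prefix of at least minUUIDLookupLen characters; an
//	// ambiguous prefix (more than one match) yields a nil result:
//	idx, ns, err = s.NodeServices(ws, "4e60a2a6")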

// DeleteService is used to delete a given service associated with a node.
func (s *Store) DeleteService(idx uint64, nodeName, serviceID string) error {
	tx := s.db.Txn(true)
	defer tx.Abort()

	// Call the service deletion
	if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID); err != nil {
		return err
	}

	tx.Commit()
	return nil
}

// serviceIndexName returns the name of the per-service index entry used to
// track the latest Raft index at which a given service changed.
func serviceIndexName(name string) string {
	return fmt.Sprintf("service.%s", name)
}
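
// A hedged sketch of how these per-service entries sit in the "index" table
// alongside the table-level entries (the values shown are illustrative):
//
//	&IndexEntry{Key: "services", Value: 42}                 // any service changed at index 42
//	&IndexEntry{Key: serviceIndexName("redis"), Value: 40}  // "redis" last changed at index 40
//
// maxIndexForService prefers the per-service entry so that blocking queries on
// one service are not woken by changes to unrelated services.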

// deleteServiceTxn is the inner method called to remove a service
// registration within an existing transaction.
func (s *Store) deleteServiceTxn(tx *memdb.Txn, idx uint64, nodeName, serviceID string) error {
	// Look up the service.
	service, err := tx.First("services", "id", nodeName, serviceID)
	if err != nil {
		return fmt.Errorf("failed service lookup: %s", err)
	}
	if service == nil {
		return nil
	}

	// Delete any checks associated with the service. This will invalidate
	// sessions as necessary.
	checks, err := tx.Get("checks", "node_service", nodeName, serviceID)
	if err != nil {
		return fmt.Errorf("failed service check lookup: %s", err)
	}
	var cids []types.CheckID
	for check := checks.Next(); check != nil; check = checks.Next() {
		cids = append(cids, check.(*structs.HealthCheck).CheckID)
	}

	// Do the delete in a separate loop so we don't trash the iterator.
	for _, cid := range cids {
		if err := s.deleteCheckTxn(tx, idx, nodeName, cid); err != nil {
			return err
		}
	}

	// Update the index.
	if err := tx.Insert("index", &IndexEntry{"checks", idx}); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	// Delete the service and update the index
	if err := tx.Delete("services", service); err != nil {
		return fmt.Errorf("failed deleting service: %s", err)
	}
	if err := tx.Insert("index", &IndexEntry{"services", idx}); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	svc := service.(*structs.ServiceNode)
	if remainingService, err := tx.First("services", "service", svc.ServiceName); err == nil {
		if remainingService != nil {
			// We have at least one remaining service, update the index
			if err := tx.Insert("index", &IndexEntry{serviceIndexName(svc.ServiceName), idx}); err != nil {
				return fmt.Errorf("failed updating index: %s", err)
			}
		} else {
			// There are no more service instances, cleanup the service.<serviceName> index
			serviceIndex, err := tx.First("index", "id", serviceIndexName(svc.ServiceName))
			if err == nil && serviceIndex != nil {
				// we found service.<serviceName> index, garbage collect it
				if errW := tx.Delete("index", serviceIndex); errW != nil {
					return fmt.Errorf("failed deleting serviceIndex %s: %s", svc.ServiceName, errW)
				}
			}
		}
	} else {
		return fmt.Errorf("Could not find any service %s: %s", svc.ServiceName, err)
	}
	return nil
}

// EnsureCheck is used to store a check registration in the db.
func (s *Store) EnsureCheck(idx uint64, hc *structs.HealthCheck) error {
	tx := s.db.Txn(true)
	defer tx.Abort()

	// Call the check registration
	if err := s.ensureCheckTxn(tx, idx, hc); err != nil {
		return err
	}

	tx.Commit()
	return nil
}
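
// A hedged usage sketch for EnsureCheck, assuming a *Store s whose node "node1"
// and service "redis" are already registered (the values are illustrative):
//
//	err := s.EnsureCheck(idx, &structs.HealthCheck{
//		Node:      "node1",
//		CheckID:   types.CheckID("service:redis"),
//		Name:      "Redis health",
//		Status:    api.HealthPassing,
//		ServiceID: "redis",
//	})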

// updateAllServiceIndexesOfNode updates the Raft index of all the services associated with this node.
func (s *Store) updateAllServiceIndexesOfNode(tx *memdb.Txn, idx uint64, nodeID string) error {
	services, err := tx.Get("services", "node", nodeID)
	if err != nil {
		return fmt.Errorf("failed updating services for node %s: %s", nodeID, err)
	}
	for service := services.Next(); service != nil; service = services.Next() {
		svc := service.(*structs.ServiceNode).ToNodeService()
		if err := tx.Insert("index", &IndexEntry{serviceIndexName(svc.Service), idx}); err != nil {
			return fmt.Errorf("failed updating index: %s", err)
		}
	}
	return nil
}

// ensureCheckTxn is used as the inner method to handle inserting
// a health check into the state store. It ensures safety against inserting
// checks with no matching node or service.
func (s *Store) ensureCheckTxn(tx *memdb.Txn, idx uint64, hc *structs.HealthCheck) error {
	// Check if we have an existing health check
	existing, err := tx.First("checks", "id", hc.Node, string(hc.CheckID))
	if err != nil {
		return fmt.Errorf("failed health check lookup: %s", err)
	}

	// Set the indexes
	if existing != nil {
		hc.CreateIndex = existing.(*structs.HealthCheck).CreateIndex
		hc.ModifyIndex = idx
	} else {
		hc.CreateIndex = idx
		hc.ModifyIndex = idx
	}

	// Use the default check status if none was provided
	if hc.Status == "" {
		hc.Status = api.HealthCritical
	}

	// Get the node
	node, err := tx.First("nodes", "id", hc.Node)
	if err != nil {
		return fmt.Errorf("failed node lookup: %s", err)
	}
	if node == nil {
		return ErrMissingNode
	}

	// If the check is associated with a service, check that we have
	// a registration for the service.
	if hc.ServiceID != "" {
		service, err := tx.First("services", "id", hc.Node, hc.ServiceID)
		if err != nil {
			return fmt.Errorf("failed service lookup: %s", err)
		}
		if service == nil {
			return ErrMissingService
		}

		// Copy in the service name and tags
		svc := service.(*structs.ServiceNode)
		hc.ServiceName = svc.ServiceName
		hc.ServiceTags = svc.ServiceTags
		if err = tx.Insert("index", &IndexEntry{serviceIndexName(svc.ServiceName), idx}); err != nil {
			return fmt.Errorf("failed updating index: %s", err)
		}
	} else {
		// Update the index for all the services associated with this node
		err = s.updateAllServiceIndexesOfNode(tx, idx, hc.Node)
		if err != nil {
			return err
		}
	}

	// Delete any sessions for this check if the health is critical.
	if hc.Status == api.HealthCritical {
		mappings, err := tx.Get("session_checks", "node_check", hc.Node, string(hc.CheckID))
		if err != nil {
			return fmt.Errorf("failed session checks lookup: %s", err)
		}

		var ids []string
		for mapping := mappings.Next(); mapping != nil; mapping = mappings.Next() {
			ids = append(ids, mapping.(*sessionCheck).Session)
		}

		// Delete the session in a separate loop so we don't trash the
		// iterator.
		for _, id := range ids {
			if err := s.deleteSessionTxn(tx, idx, id); err != nil {
				return fmt.Errorf("failed deleting session: %s", err)
			}
		}
	}

	// Persist the check registration in the db.
	if err := tx.Insert("checks", hc); err != nil {
		return fmt.Errorf("failed inserting check: %s", err)
	}
	if err := tx.Insert("index", &IndexEntry{"checks", idx}); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	return nil
}

// NodeCheck is used to retrieve a specific check associated with the given
// node.
func (s *Store) NodeCheck(nodeName string, checkID types.CheckID) (uint64, *structs.HealthCheck, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "checks")

	// Return the check.
	check, err := tx.First("checks", "id", nodeName, string(checkID))
	if err != nil {
		return 0, nil, fmt.Errorf("failed check lookup: %s", err)
	}

	if check != nil {
		return idx, check.(*structs.HealthCheck), nil
	}
	return idx, nil, nil
}

// NodeChecks is used to retrieve checks associated with the
// given node from the state store.
func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string) (uint64, structs.HealthChecks, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "checks")

	// Return the checks.
	iter, err := tx.Get("checks", "node", nodeName)
	if err != nil {
		return 0, nil, fmt.Errorf("failed check lookup: %s", err)
	}
	ws.Add(iter.WatchCh())

	var results structs.HealthChecks
	for check := iter.Next(); check != nil; check = iter.Next() {
		results = append(results, check.(*structs.HealthCheck))
	}
	return idx, results, nil
}

// ServiceChecks is used to get all checks associated with a
// given service ID. The query is performed against a service
// _name_ instead of a service ID.
func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string) (uint64, structs.HealthChecks, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "checks")

	// Return the checks.
	iter, err := tx.Get("checks", "service", serviceName)
	if err != nil {
		return 0, nil, fmt.Errorf("failed check lookup: %s", err)
	}
	ws.Add(iter.WatchCh())

	var results structs.HealthChecks
	for check := iter.Next(); check != nil; check = iter.Next() {
		results = append(results, check.(*structs.HealthCheck))
	}
	return idx, results, nil
}

// ServiceChecksByNodeMeta is used to get all checks associated with a
// given service ID, filtered by the given node metadata values. The query
// is performed against a service _name_ instead of a service ID.
func (s *Store) ServiceChecksByNodeMeta(ws memdb.WatchSet, serviceName string,
	filters map[string]string) (uint64, structs.HealthChecks, error) {

	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexForService(tx, serviceName, true)

	// Return the checks.
	iter, err := tx.Get("checks", "service", serviceName)
	if err != nil {
		return 0, nil, fmt.Errorf("failed check lookup: %s", err)
	}
	ws.Add(iter.WatchCh())

	return s.parseChecksByNodeMeta(tx, ws, idx, iter, filters)
}

// ChecksInState is used to query the state store for all checks
// which are in the provided state.
func (s *Store) ChecksInState(ws memdb.WatchSet, state string) (uint64, structs.HealthChecks, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "checks")

	// Query all checks if HealthAny is passed, otherwise use the index.
	var iter memdb.ResultIterator
	var err error
	if state == api.HealthAny {
		iter, err = tx.Get("checks", "status")
	} else {
		iter, err = tx.Get("checks", "status", state)
	}
	if err != nil {
		return 0, nil, fmt.Errorf("failed check lookup: %s", err)
	}
	ws.Add(iter.WatchCh())

	var results structs.HealthChecks
	for check := iter.Next(); check != nil; check = iter.Next() {
		results = append(results, check.(*structs.HealthCheck))
	}
	return idx, results, nil
}
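
// A brief usage sketch (identifiers illustrative): passing api.HealthAny walks
// the whole "status" index, while a concrete state narrows the scan:
//
//	idx, critical, err := s.ChecksInState(ws, api.HealthCritical)
//	idx, all, err := s.ChecksInState(ws, api.HealthAny)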
|
|
|
|
|
2017-01-14 01:45:34 +00:00
|
|
|
// ChecksInStateByNodeMeta is used to query the state store for all checks
|
|
|
|
// which are in the provided state, filtered by the given node metadata values.
|
2017-04-21 00:46:29 +00:00
|
|
|
func (s *Store) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, filters map[string]string) (uint64, structs.HealthChecks, error) {
|
2017-01-14 01:08:43 +00:00
|
|
|
tx := s.db.Txn(false)
|
|
|
|
defer tx.Abort()
|
|
|
|
|
|
|
|
// Get the table index.
|
2017-01-24 07:37:21 +00:00
|
|
|
idx := maxIndexTxn(tx, "nodes", "checks")
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2017-01-24 07:37:21 +00:00
|
|
|
// Query all checks if HealthAny is passed, otherwise use the index.
|
|
|
|
var iter memdb.ResultIterator
|
2017-01-14 01:08:43 +00:00
|
|
|
var err error
|
2017-04-19 23:00:11 +00:00
|
|
|
if state == api.HealthAny {
|
2017-01-24 07:37:21 +00:00
|
|
|
iter, err = tx.Get("checks", "status")
|
2017-01-14 01:08:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed check lookup: %s", err)
|
|
|
|
}
|
|
|
|
} else {
|
2017-01-24 07:37:21 +00:00
|
|
|
iter, err = tx.Get("checks", "status", state)
|
2017-01-14 01:08:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed check lookup: %s", err)
|
|
|
|
}
|
|
|
|
}
|
2017-01-24 07:37:21 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2017-01-24 07:37:21 +00:00
|
|
|
return s.parseChecksByNodeMeta(tx, ws, idx, iter, filters)
|
2017-01-13 19:47:16 +00:00
|
|
|
}
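// Another illustrative sketch (not part of the original file): combining a
// health state with node metadata filters. A check is only returned when its
// node's metadata satisfies the filters (see parseChecksByNodeMeta below).
// The "dc-tier" key and its value are hypothetical.
func exampleChecksInStateByNodeMeta(s *Store) (uint64, structs.HealthChecks, error) {
	ws := memdb.NewWatchSet()

	// Passing checks on nodes tagged with dc-tier=edge.
	filters := map[string]string{"dc-tier": "edge"}
	return s.ChecksInStateByNodeMeta(ws, api.HealthPassing, filters)
}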
|
|
|
|
|
2017-01-14 01:08:43 +00:00
|
|
|
// parseChecksByNodeMeta is a helper function used to deduplicate some
|
|
|
|
// repetitive code for returning health checks filtered by node metadata fields.
|
2017-04-21 00:46:29 +00:00
|
|
|
func (s *Store) parseChecksByNodeMeta(tx *memdb.Txn, ws memdb.WatchSet,
|
2017-01-24 07:37:21 +00:00
|
|
|
idx uint64, iter memdb.ResultIterator, filters map[string]string) (uint64, structs.HealthChecks, error) {
|
|
|
|
|
|
|
|
// We don't want to track an unlimited number of nodes, so we pull a
|
|
|
|
// top-level watch to use as a fallback.
|
|
|
|
allNodes, err := tx.Get("nodes", "id")
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
|
|
|
|
}
|
|
|
|
allNodesCh := allNodes.WatchCh()
|
|
|
|
|
|
|
|
// Only take results for nodes that satisfy the node metadata filters.
|
2017-01-14 01:08:43 +00:00
|
|
|
var results structs.HealthChecks
|
|
|
|
for check := iter.Next(); check != nil; check = iter.Next() {
|
|
|
|
healthCheck := check.(*structs.HealthCheck)
|
2017-01-24 07:37:21 +00:00
|
|
|
watchCh, node, err := tx.FirstWatch("nodes", "id", healthCheck.Node)
|
2017-01-14 01:08:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
|
|
|
|
}
|
|
|
|
if node == nil {
|
|
|
|
return 0, nil, ErrMissingNode
|
|
|
|
}
|
2017-01-24 07:37:21 +00:00
|
|
|
|
|
|
|
// Add even the filtered nodes so we wake up if the node metadata
|
|
|
|
// changes.
|
|
|
|
ws.AddWithLimit(watchLimit, watchCh, allNodesCh)
|
2017-01-14 01:08:43 +00:00
|
|
|
if structs.SatisfiesMetaFilters(node.(*structs.Node).Meta, filters) {
|
|
|
|
results = append(results, healthCheck)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return idx, results, nil
|
|
|
|
}
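// Illustrative only (not part of the original file): the helper above avoids
// tracking one watch channel per node without bound by passing a coarse
// fallback channel to ws.AddWithLimit. The sketch below shows the same idea
// in isolation; the parameter names are hypothetical, while watchLimit is the
// package-level cap used throughout this file.
func exampleWatchWithFallback(ws memdb.WatchSet, fineGrained, coarse <-chan struct{}) {
	// Fine-grained channels are tracked individually up to watchLimit;
	// past that, the WatchSet falls back to the coarse channel so memory
	// use stays bounded, at the cost of more spurious wake-ups.
	ws.AddWithLimit(watchLimit, fineGrained, coarse)
}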
|
|
|
|
|
2017-01-13 19:47:16 +00:00
|
|
|
// DeleteCheck is used to delete a health check registration.
|
2017-04-21 00:46:29 +00:00
|
|
|
func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID) error {
|
2017-01-13 19:47:16 +00:00
|
|
|
tx := s.db.Txn(true)
|
|
|
|
defer tx.Abort()
|
|
|
|
|
|
|
|
// Call the check deletion
|
2017-01-24 19:53:02 +00:00
|
|
|
if err := s.deleteCheckTxn(tx, idx, node, checkID); err != nil {
|
2017-01-13 19:47:16 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
tx.Commit()
|
|
|
|
return nil
|
|
|
|
}
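// Illustrative sketch (not part of the original file): DeleteCheck opens its
// own write transaction, so a caller only supplies the Raft index, the node
// name, and the check ID. The index value and identifiers below are
// hypothetical.
func exampleDeleteCheck(s *Store) error {
	// Remove the (hypothetical) "mem-usage" check from node "node1",
	// recording index 42 as the modify index for the affected tables.
	return s.DeleteCheck(42, "node1", types.CheckID("mem-usage"))
}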
|
|
|
|
|
|
|
|
// deleteCheckTxn is the inner method used to perform a health
|
|
|
|
// check deletion within an existing transaction.
|
2017-04-21 00:46:29 +00:00
|
|
|
func (s *Store) deleteCheckTxn(tx *memdb.Txn, idx uint64, node string, checkID types.CheckID) error {
|
2017-01-13 19:47:16 +00:00
|
|
|
// Try to retrieve the existing health check.
|
|
|
|
hc, err := tx.First("checks", "id", node, string(checkID))
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("check lookup failed: %s", err)
|
|
|
|
}
|
|
|
|
if hc == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2018-02-19 17:29:22 +00:00
|
|
|
existing := hc.(*structs.HealthCheck)
|
2018-03-19 13:14:03 +00:00
|
|
|
if existing != nil {
|
2018-03-19 15:12:54 +00:00
|
|
|
// If the check is associated with a service, update only that service's
// index; otherwise update the indexes of all services on the node.
|
|
|
|
if existing.ServiceID != "" {
|
|
|
|
if err = tx.Insert("index", &IndexEntry{serviceIndexName(existing.ServiceName), idx}); err != nil {
|
|
|
|
return fmt.Errorf("failed updating index: %s", err)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
err = s.updateAllServiceIndexesOfNode(tx, idx, existing.Node)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Failed to update services linked to deleted healthcheck: %s", err)
|
|
|
|
}
|
|
|
|
if err := tx.Insert("index", &IndexEntry{"services", idx}); err != nil {
|
|
|
|
return fmt.Errorf("failed updating index: %s", err)
|
|
|
|
}
|
2018-02-19 17:29:22 +00:00
|
|
|
}
|
|
|
|
}
|
2017-01-13 19:47:16 +00:00
|
|
|
|
|
|
|
// Delete the check from the DB and update the index.
|
|
|
|
if err := tx.Delete("checks", hc); err != nil {
|
|
|
|
return fmt.Errorf("failed removing check: %s", err)
|
|
|
|
}
|
|
|
|
if err := tx.Insert("index", &IndexEntry{"checks", idx}); err != nil {
|
|
|
|
return fmt.Errorf("failed updating index: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete any sessions for this check.
|
|
|
|
mappings, err := tx.Get("session_checks", "node_check", node, string(checkID))
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed session checks lookup: %s", err)
|
|
|
|
}
|
|
|
|
var ids []string
|
|
|
|
for mapping := mappings.Next(); mapping != nil; mapping = mappings.Next() {
|
|
|
|
ids = append(ids, mapping.(*sessionCheck).Session)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Do the delete in a separate loop so we don't trash the iterator.
|
|
|
|
for _, id := range ids {
|
2017-01-24 19:53:02 +00:00
|
|
|
if err := s.deleteSessionTxn(tx, idx, id); err != nil {
|
2017-01-13 19:47:16 +00:00
|
|
|
return fmt.Errorf("failed deleting session: %s", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-01-16 18:28:46 +00:00
|
|
|
// CheckServiceNodes is used to query all nodes and checks for a given service.
|
2017-04-21 00:46:29 +00:00
|
|
|
func (s *Store) CheckServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.CheckServiceNodes, error) {
|
2017-01-13 19:47:16 +00:00
|
|
|
tx := s.db.Txn(false)
|
|
|
|
defer tx.Abort()
|
|
|
|
|
|
|
|
// Get the table index.
|
2018-03-01 13:09:36 +00:00
|
|
|
idx := maxIndexForService(tx, serviceName, true)
|
2017-01-13 19:47:16 +00:00
|
|
|
|
|
|
|
// Query the state store for the service.
|
2017-01-24 07:37:21 +00:00
|
|
|
iter, err := tx.Get("services", "service", serviceName)
|
2017-01-13 19:47:16 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed service lookup: %s", err)
|
|
|
|
}
|
2017-01-24 07:37:21 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
2017-01-13 19:47:16 +00:00
|
|
|
|
|
|
|
// Return the results.
|
|
|
|
var results structs.ServiceNodes
|
2017-01-24 07:37:21 +00:00
|
|
|
for service := iter.Next(); service != nil; service = iter.Next() {
|
2017-01-13 19:47:16 +00:00
|
|
|
results = append(results, service.(*structs.ServiceNode))
|
|
|
|
}
|
2017-01-24 07:37:21 +00:00
|
|
|
return s.parseCheckServiceNodes(tx, ws, idx, serviceName, results, err)
|
2017-01-13 19:47:16 +00:00
|
|
|
}
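// Illustrative sketch (not part of the original file): iterating the rich
// results of CheckServiceNodes, where each entry carries the node, the
// service instance, and the combined node-level and service-level checks.
// The "web" service name and the notion of "healthy" are assumptions made
// for the example.
func exampleCheckServiceNodes(s *Store) ([]string, error) {
	ws := memdb.NewWatchSet()
	_, nodes, err := s.CheckServiceNodes(ws, "web")
	if err != nil {
		return nil, err
	}

	// Collect the addresses of instances with no critical checks.
	var addrs []string
	for _, csn := range nodes {
		healthy := true
		for _, check := range csn.Checks {
			if check.Status == api.HealthCritical {
				healthy = false
				break
			}
		}
		if healthy {
			addrs = append(addrs, csn.Node.Address)
		}
	}
	return addrs, nil
}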
|
|
|
|
|
|
|
|
// CheckServiceTagNodes is used to query all nodes and checks for a given
|
2017-01-16 18:28:46 +00:00
|
|
|
// service, filtering out services that don't contain the given tag.
|
2017-04-21 00:46:29 +00:00
|
|
|
func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName, tag string) (uint64, structs.CheckServiceNodes, error) {
|
2017-01-13 19:47:16 +00:00
|
|
|
tx := s.db.Txn(false)
|
|
|
|
defer tx.Abort()
|
|
|
|
|
|
|
|
// Get the table index.
|
2018-03-01 13:09:36 +00:00
|
|
|
idx := maxIndexForService(tx, serviceName, true)
|
2017-01-13 19:47:16 +00:00
|
|
|
|
|
|
|
// Query the state store for the service.
|
2017-01-24 07:37:21 +00:00
|
|
|
iter, err := tx.Get("services", "service", serviceName)
|
2017-01-13 19:47:16 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed service lookup: %s", err)
|
|
|
|
}
|
2017-01-24 07:37:21 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
2017-01-13 19:47:16 +00:00
|
|
|
|
|
|
|
// Return the results, filtering by tag.
|
|
|
|
var results structs.ServiceNodes
|
2017-01-24 07:37:21 +00:00
|
|
|
for service := iter.Next(); service != nil; service = iter.Next() {
|
2017-01-13 19:47:16 +00:00
|
|
|
svc := service.(*structs.ServiceNode)
|
|
|
|
if !serviceTagFilter(svc, tag) {
|
|
|
|
results = append(results, svc)
|
|
|
|
}
|
|
|
|
}
|
2017-01-24 07:37:21 +00:00
|
|
|
return s.parseCheckServiceNodes(tx, ws, idx, serviceName, results, err)
|
2017-01-13 19:47:16 +00:00
|
|
|
}
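// Illustrative sketch (not part of the original file): the tag-filtered
// variant returns the same rich CheckServiceNode entries, restricted to
// instances registered with the given tag. "web" and "primary" are made-up
// values.
func exampleCheckServiceTagNodes(s *Store) (uint64, structs.CheckServiceNodes, error) {
	ws := memdb.NewWatchSet()
	return s.CheckServiceTagNodes(ws, "web", "primary")
}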
|
|
|
|
|
|
|
|
// parseCheckServiceNodes is used to parse through a given set of services,
|
|
|
|
// and query for an associated node and a set of checks. This is the inner
|
|
|
|
// method used to return a rich set of results from a simpler query.
|
2017-04-21 00:46:29 +00:00
|
|
|
func (s *Store) parseCheckServiceNodes(
|
2017-01-24 07:37:21 +00:00
|
|
|
tx *memdb.Txn, ws memdb.WatchSet, idx uint64,
|
|
|
|
serviceName string, services structs.ServiceNodes,
|
2017-01-13 19:47:16 +00:00
|
|
|
err error) (uint64, structs.CheckServiceNodes, error) {
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Special-case the zero return value to nil, since this ends up in
|
|
|
|
// external APIs.
|
|
|
|
if len(services) == 0 {
|
|
|
|
return idx, nil, nil
|
|
|
|
}
|
|
|
|
|
2017-01-24 07:37:21 +00:00
|
|
|
// We don't want to track an unlimited number of nodes, so we pull a
|
|
|
|
// top-level watch to use as a fallback.
|
|
|
|
allNodes, err := tx.Get("nodes", "id")
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
|
|
|
|
}
|
|
|
|
allNodesCh := allNodes.WatchCh()
|
|
|
|
|
|
|
|
// We need a similar fallback for checks. Since services need the
|
|
|
|
// status of node + service-specific checks, we pull in a top-level
|
|
|
|
// watch over all checks.
|
|
|
|
allChecks, err := tx.Get("checks", "id")
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed checks lookup: %s", err)
|
|
|
|
}
|
|
|
|
allChecksCh := allChecks.WatchCh()
|
|
|
|
|
2017-01-13 19:47:16 +00:00
|
|
|
results := make(structs.CheckServiceNodes, 0, len(services))
|
|
|
|
for _, sn := range services {
|
|
|
|
// Retrieve the node.
|
2017-01-24 07:37:21 +00:00
|
|
|
watchCh, n, err := tx.FirstWatch("nodes", "id", sn.Node)
|
2017-01-13 19:47:16 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
|
|
|
|
}
|
2017-01-24 07:37:21 +00:00
|
|
|
ws.AddWithLimit(watchLimit, watchCh, allNodesCh)
|
|
|
|
|
2017-01-13 19:47:16 +00:00
|
|
|
if n == nil {
|
|
|
|
return 0, nil, ErrMissingNode
|
|
|
|
}
|
|
|
|
node := n.(*structs.Node)
|
|
|
|
|
2017-01-24 07:37:21 +00:00
|
|
|
// First add the node-level checks. These always apply to any
|
|
|
|
// service on the node.
|
2017-01-13 19:47:16 +00:00
|
|
|
var checks structs.HealthChecks
|
2017-01-24 07:37:21 +00:00
|
|
|
iter, err := tx.Get("checks", "node_service_check", sn.Node, false)
|
2017-01-13 19:47:16 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, nil, err
|
|
|
|
}
|
2017-01-24 07:37:21 +00:00
|
|
|
ws.AddWithLimit(watchLimit, iter.WatchCh(), allChecksCh)
|
2017-01-13 19:47:16 +00:00
|
|
|
for check := iter.Next(); check != nil; check = iter.Next() {
|
2017-01-24 07:37:21 +00:00
|
|
|
checks = append(checks, check.(*structs.HealthCheck))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now add the service-specific checks.
|
|
|
|
iter, err = tx.Get("checks", "node_service", sn.Node, sn.ServiceID)
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, err
|
|
|
|
}
|
|
|
|
ws.AddWithLimit(watchLimit, iter.WatchCh(), allChecksCh)
|
|
|
|
for check := iter.Next(); check != nil; check = iter.Next() {
|
|
|
|
checks = append(checks, check.(*structs.HealthCheck))
|
2017-01-13 19:47:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Append to the results.
|
|
|
|
results = append(results, structs.CheckServiceNode{
|
|
|
|
Node: node,
|
|
|
|
Service: sn.ToNodeService(),
|
|
|
|
Checks: checks,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return idx, results, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// NodeInfo is used to generate a dump of a single node. The dump includes
|
|
|
|
// all services and checks which are registered against the node.
|
2017-04-21 00:46:29 +00:00
|
|
|
func (s *Store) NodeInfo(ws memdb.WatchSet, node string) (uint64, structs.NodeDump, error) {
|
2017-01-13 19:47:16 +00:00
|
|
|
tx := s.db.Txn(false)
|
|
|
|
defer tx.Abort()
|
|
|
|
|
|
|
|
// Get the table index.
|
2017-01-24 17:06:51 +00:00
|
|
|
idx := maxIndexTxn(tx, "nodes", "services", "checks")
|
2017-01-13 19:47:16 +00:00
|
|
|
|
|
|
|
// Query the node by the passed node name
|
|
|
|
nodes, err := tx.Get("nodes", "id", node)
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
|
|
|
|
}
|
2017-01-24 17:06:51 +00:00
|
|
|
ws.Add(nodes.WatchCh())
|
|
|
|
return s.parseNodes(tx, ws, idx, nodes)
|
2017-01-13 19:47:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// NodeDump is used to generate a dump of all nodes. This call is expensive
|
|
|
|
// as it has to query every node, service, and check. The response can also
|
|
|
|
// be quite large since there is currently no filtering applied.
|
2017-04-21 00:46:29 +00:00
|
|
|
func (s *Store) NodeDump(ws memdb.WatchSet) (uint64, structs.NodeDump, error) {
|
2017-01-13 19:47:16 +00:00
|
|
|
tx := s.db.Txn(false)
|
|
|
|
defer tx.Abort()
|
|
|
|
|
|
|
|
// Get the table index.
|
2017-01-24 19:53:02 +00:00
|
|
|
idx := maxIndexTxn(tx, "nodes", "services", "checks")
|
2017-01-13 19:47:16 +00:00
|
|
|
|
|
|
|
// Fetch all of the registered nodes
|
|
|
|
nodes, err := tx.Get("nodes", "id")
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
|
|
|
|
}
|
2017-01-24 17:06:51 +00:00
|
|
|
ws.Add(nodes.WatchCh())
|
|
|
|
return s.parseNodes(tx, ws, idx, nodes)
|
2017-01-13 19:47:16 +00:00
|
|
|
}
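// Illustrative sketch (not part of the original file): walking a full node
// dump and counting registered service instances per node. Because NodeDump
// touches every node, service, and check, callers would typically reserve it
// for operator-facing endpoints rather than hot paths.
func exampleNodeDump(s *Store) (map[string]int, error) {
	ws := memdb.NewWatchSet()
	_, dump, err := s.NodeDump(ws)
	if err != nil {
		return nil, err
	}

	counts := make(map[string]int, len(dump))
	for _, info := range dump {
		counts[info.Node] = len(info.Services)
	}
	return counts, nil
}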
|
|
|
|
|
|
|
|
// parseNodes takes an iterator over a set of nodes and returns a struct
|
|
|
|
// containing the nodes along with all of their associated services
|
|
|
|
// and/or health checks.
|
2017-04-21 00:46:29 +00:00
|
|
|
func (s *Store) parseNodes(tx *memdb.Txn, ws memdb.WatchSet, idx uint64,
|
2017-01-13 19:47:16 +00:00
|
|
|
iter memdb.ResultIterator) (uint64, structs.NodeDump, error) {
|
|
|
|
|
2017-01-24 17:06:51 +00:00
|
|
|
// We don't want to track an unlimited number of services, so we pull a
|
|
|
|
// top-level watch to use as a fallback.
|
|
|
|
allServices, err := tx.Get("services", "id")
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed services lookup: %s", err)
|
|
|
|
}
|
|
|
|
allServicesCh := allServices.WatchCh()
|
|
|
|
|
|
|
|
// We need a similar fallback for checks.
|
|
|
|
allChecks, err := tx.Get("checks", "id")
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed checks lookup: %s", err)
|
|
|
|
}
|
|
|
|
allChecksCh := allChecks.WatchCh()
|
|
|
|
|
2017-01-13 19:47:16 +00:00
|
|
|
var results structs.NodeDump
|
|
|
|
for n := iter.Next(); n != nil; n = iter.Next() {
|
|
|
|
node := n.(*structs.Node)
|
|
|
|
|
|
|
|
// Create the wrapped node
|
|
|
|
dump := &structs.NodeInfo{
|
2017-01-18 22:26:42 +00:00
|
|
|
ID: node.ID,
|
2017-01-13 19:47:16 +00:00
|
|
|
Node: node.Node,
|
|
|
|
Address: node.Address,
|
|
|
|
TaggedAddresses: node.TaggedAddresses,
|
|
|
|
Meta: node.Meta,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Query the node services
|
|
|
|
services, err := tx.Get("services", "node", node.Node)
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed services lookup: %s", err)
|
|
|
|
}
|
2017-01-24 17:06:51 +00:00
|
|
|
ws.AddWithLimit(watchLimit, services.WatchCh(), allServicesCh)
|
2017-01-13 19:47:16 +00:00
|
|
|
for service := services.Next(); service != nil; service = services.Next() {
|
|
|
|
ns := service.(*structs.ServiceNode).ToNodeService()
|
|
|
|
dump.Services = append(dump.Services, ns)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Query the node checks
|
|
|
|
checks, err := tx.Get("checks", "node", node.Node)
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
|
|
|
|
}
|
2017-01-24 17:06:51 +00:00
|
|
|
ws.AddWithLimit(watchLimit, checks.WatchCh(), allChecksCh)
|
2017-01-13 19:47:16 +00:00
|
|
|
for check := checks.Next(); check != nil; check = checks.Next() {
|
|
|
|
hc := check.(*structs.HealthCheck)
|
|
|
|
dump.Checks = append(dump.Checks, hc)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add the result to the slice
|
|
|
|
results = append(results, dump)
|
|
|
|
}
|
|
|
|
return idx, results, nil
|
|
|
|
}
|