package state

import (
	"fmt"

	"github.com/hashicorp/go-memdb"

	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/lib"
)

// coordinatesTableSchema returns a new table schema used for storing
// network coordinates.
func coordinatesTableSchema() *memdb.TableSchema {
	return &memdb.TableSchema{
		Name: "coordinates",
		Indexes: map[string]*memdb.IndexSchema{
			"id": {
				Name:         "id",
				AllowMissing: false,
				Unique:       true,
				Indexer: &memdb.CompoundIndex{
					// AllowMissing is required since we allow
					// Segment to be an empty string.
					AllowMissing: true,
					Indexes: []memdb.Indexer{
						&memdb.StringFieldIndex{
							Field:     "Node",
							Lowercase: true,
						},
						&memdb.StringFieldIndex{
							Field:     "Segment",
							Lowercase: true,
						},
					},
				},
			},
			"node": {
				Name:         "node",
				AllowMissing: false,
				Unique:       false,
				Indexer: &memdb.StringFieldIndex{
					Field:     "Node",
					Lowercase: true,
				},
			},
		},
	}
}
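
// The compound "id" index keys entries on the lowercased (Node, Segment)
// pair, so a point lookup passes both values as separate arguments. A
// minimal read sketch, assuming a read transaction tx over a store built
// from this schema (the names here are illustrative, not part of this file):
//
//	raw, err := tx.First("coordinates", "id", "node1", "")
//	if err == nil && raw != nil {
//		coord := raw.(*structs.Coordinate)
//		_ = coord // coordinate for node1 in the default (empty) segment
//	}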

// Coordinates is used to pull all the coordinates from the snapshot.
func (s *Snapshot) Coordinates() (memdb.ResultIterator, error) {
	iter, err := s.tx.Get("coordinates", "id")
	if err != nil {
		return nil, err
	}
	return iter, nil
}
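
// A minimal sketch of how the snapshot iterator is typically drained (the
// snap variable and the encoding step are assumptions; the real snapshot
// plumbing lives outside this package):
//
//	iter, err := snap.Coordinates()
//	if err != nil {
//		return err
//	}
//	for raw := iter.Next(); raw != nil; raw = iter.Next() {
//		coord := raw.(*structs.Coordinate)
//		// encode coord into the snapshot stream here
//		_ = coord
//	}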

// Coordinates is used when restoring from a snapshot. For general inserts, use
// CoordinateBatchUpdate. We do less vetting of the updates here because they
// already got checked on the way in during a batch update.
func (s *Restore) Coordinates(idx uint64, updates structs.Coordinates) error {
	for _, update := range updates {
		// Skip any bad data that may have gotten into the database from
		// a bad client in the past.
		if !update.Coord.IsValid() {
			continue
		}

		if err := s.tx.Insert("coordinates", update); err != nil {
			return fmt.Errorf("failed restoring coordinate: %s", err)
		}
	}

	if err := indexUpdateMaxTxn(s.tx, idx, "coordinates"); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	return nil
}
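
// A minimal restore-side sketch, assuming a batch already decoded from the
// snapshot stream (the req and index variables are illustrative assumptions;
// the real restore plumbing lives in the FSM):
//
//	var req structs.Coordinates
//	// ... decode req from the snapshot ...
//	if err := restore.Coordinates(index, req); err != nil {
//		return err
//	}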

// Coordinate returns a map of coordinates for the given node, indexed by
// network segment.
func (s *Store) Coordinate(node string, ws memdb.WatchSet) (uint64, lib.CoordinateSet, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	tableIdx := maxIndexTxn(tx, "coordinates")

	iter, err := tx.Get("coordinates", "node", node)
	if err != nil {
		return 0, nil, fmt.Errorf("failed coordinate lookup: %s", err)
	}
	ws.Add(iter.WatchCh())

	results := make(lib.CoordinateSet)
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		coord := raw.(*structs.Coordinate)
		results[coord.Segment] = coord.Coord
	}
	return tableIdx, results, nil
}
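
// A minimal read sketch for one node's coordinates (the store and node name
// are illustrative assumptions):
//
//	ws := memdb.NewWatchSet()
//	idx, coords, err := store.Coordinate("node1", ws)
//	if err != nil {
//		return err
//	}
//	// coords maps network segment name -> coordinate; idx is the
//	// coordinates table's max index, usable for blocking queries.
//	_, _ = idx, coords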

// Coordinates queries for all nodes with coordinates.
func (s *Store) Coordinates(ws memdb.WatchSet) (uint64, structs.Coordinates, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the table index.
	idx := maxIndexTxn(tx, "coordinates")

	// Pull all the coordinates.
	iter, err := tx.Get("coordinates", "id")
	if err != nil {
		return 0, nil, fmt.Errorf("failed coordinate lookup: %s", err)
	}
	ws.Add(iter.WatchCh())

	var results structs.Coordinates
	for coord := iter.Next(); coord != nil; coord = iter.Next() {
		results = append(results, coord.(*structs.Coordinate))
	}
	return idx, results, nil
}
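
// A minimal sketch of a blocking read over the full table, assuming a
// context.Context ctx supplied by the caller (illustrative, not part of
// this file):
//
//	ws := memdb.NewWatchSet()
//	idx, all, err := store.Coordinates(ws)
//	if err != nil {
//		return err
//	}
//	// ... serve idx/all, then block until the table changes ...
//	if err := ws.WatchCtx(ctx); err != nil {
//		return err
//	}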

// CoordinateBatchUpdate processes a batch of coordinate updates and applies
// them in a single transaction.
func (s *Store) CoordinateBatchUpdate(idx uint64, updates structs.Coordinates) error {
	tx := s.db.WriteTxn(idx)
	defer tx.Abort()

	// Upsert the coordinates.
	for _, update := range updates {
		// Skip any bad data that may have gotten into the database from
		// a bad client in the past.
		if !update.Coord.IsValid() {
			continue
		}

		// Since the cleanup of coordinates is tied to deletion of
		// nodes, we silently drop any updates for nodes that we don't
		// know about. This might be possible during normal operation
		// if we happen to get a coordinate update for a node that
		// hasn't been able to add itself to the catalog yet. Since we
		// don't carefully sequence this, and since it will fix itself
		// on the next coordinate update from that node, we don't return
		// an error or log anything.
		node, err := tx.First(tableNodes, indexID, Query{Value: update.Node})
		if err != nil {
			return fmt.Errorf("failed node lookup: %s", err)
		}
		if node == nil {
			continue
		}

		if err := tx.Insert("coordinates", update); err != nil {
			return fmt.Errorf("failed inserting coordinate: %s", err)
		}
	}

	// Update the index.
	if err := tx.Insert("index", &IndexEntry{"coordinates", idx}); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	return tx.Commit()
}
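
// A minimal write-path sketch, assuming a Raft index and a coordinate built
// with the serf coordinate package (github.com/hashicorp/serf/coordinate);
// the names below are illustrative, not part of this file:
//
//	updates := structs.Coordinates{
//		&structs.Coordinate{
//			Node:  "node1",
//			Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()),
//		},
//	}
//	if err := store.CoordinateBatchUpdate(raftIndex, updates); err != nil {
//		return err
//	}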