Moves sort to a query-time decision and adds back the limit.
parent 25fac70924
commit 0141438e6c

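In short: the Sort field and its validation go away, and whether to sort is decided at execute time based on whether the request carries a Source, with the Limit applied after shuffling/sorting and also forwarded to remote datacenters during failover. A rough, self-contained sketch of that execute-time flow (simplified stand-in types for illustration, not the Consul code changed below):

// A minimal, runnable sketch of what "query-time" means after this change:
// results are always shuffled, sorted by round-trip time only when the caller
// supplied a source to measure from, and then trimmed to the limit carried on
// the execute request. node and executeArgs are stand-ins, not Consul structs.
package main

import (
	"fmt"
	"math/rand"
	"sort"
)

// node is a stand-in for a health-checked service instance.
type node struct {
	Name string
	RTT  float64 // estimated round-trip time from the requesting source
}

// executeArgs mirrors the idea of PreparedQueryExecuteRequest: the sort and
// the limit are chosen by the caller at execute time, not stored on the query.
type executeArgs struct {
	SourceNode string // empty means "no source given, keep the shuffled order"
	Limit      int    // zero means "no limit"
}

func postProcess(nodes []node, args executeArgs) []node {
	// Shuffle first so there's load balancing even when no sort happens.
	rand.Shuffle(len(nodes), func(i, j int) { nodes[i], nodes[j] = nodes[j], nodes[i] })

	// Sorting is a query-time decision: only sort when a source was supplied.
	if args.SourceNode != "" {
		sort.Slice(nodes, func(i, j int) bool { return nodes[i].RTT < nodes[j].RTT })
	}

	// Apply the limit if given.
	if args.Limit > 0 && len(nodes) > args.Limit {
		nodes = nodes[:args.Limit]
	}
	return nodes
}

func main() {
	nodes := []node{{"a", 3.2}, {"b", 1.1}, {"c", 2.5}}
	fmt.Println(postProcess(nodes, executeArgs{SourceNode: "web-1", Limit: 2}))
}
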
@@ -160,17 +160,6 @@ func parseService(svc *structs.ServiceQuery) error {
	// - OnlyPassing is just a boolean so doesn't need further validation.
	// - Tags is a free-form list of tags and doesn't need further validation.

	// Sort order must be one of the allowed values, or if not given we
	// default to "shuffle" so there's load balancing.
	switch svc.Sort {
	case structs.QueryOrderShuffle:
	case structs.QueryOrderSort:
	case "":
		svc.Sort = structs.QueryOrderShuffle
	default:
		return fmt.Errorf("Bad Sort '%s'", svc.Sort)
	}

	return nil
}

@@ -226,10 +215,13 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
	// Shuffle the results in case coordinates are not available if they
	// requested an RTT sort.
	reply.Nodes.Shuffle()
	if query.Service.Sort == structs.QueryOrderSort {
	if err := p.srv.sortNodesByDistanceFrom(args.Source, reply.Nodes); err != nil {
		return err
	}

	// Apply the limit if given.
	if args.Limit > 0 && len(reply.Nodes) > args.Limit {
		reply.Nodes = reply.Nodes[:args.Limit]
	}

	// In the happy path where we found some healthy nodes we go with that

@@ -237,7 +229,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
	// by the query setup.
	if len(reply.Nodes) == 0 {
		wrapper := &queryServerWrapper{p.srv}
		if err := queryFailover(wrapper, query, args.QueryOptions, reply); err != nil {
		if err := queryFailover(wrapper, query, args, reply); err != nil {
			return err
		}
	}

@@ -274,6 +266,11 @@ func (p *PreparedQuery) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRe
	// balance the load across the results.
	reply.Nodes.Shuffle()

	// Apply the limit if given.
	if args.Limit > 0 && len(reply.Nodes) > args.Limit {
		reply.Nodes = reply.Nodes[:args.Limit]
	}

	return nil
}

@@ -403,7 +400,7 @@ func (q *queryServerWrapper) GetOtherDatacentersByDistance() ([]string, error) {
// queryFailover runs an algorithm to determine which DCs to try and then calls
// them to try to locate alternative services.
func queryFailover(q queryServer, query *structs.PreparedQuery,
	options structs.QueryOptions,
	args *structs.PreparedQueryExecuteRequest,
	reply *structs.PreparedQueryExecuteResponse) error {

	// Build a candidate list of DCs, starting with the nearest N from RTTs.

@@ -433,12 +430,16 @@ func queryFailover(q queryServer, query *structs.PreparedQuery,
		}
	}

	// Now try the selected DCs in priority order.
	// Now try the selected DCs in priority order. Note that we pass along
	// the limit since it can be applied remotely to save bandwidth. We also
	// pass along the consistency mode information we were given, so that
	// applies to the remote query as well.
	for _, dc := range dcs {
		remote := &structs.PreparedQueryExecuteRemoteRequest{
			Datacenter: dc,
			Query: *query,
			QueryOptions: options,
			Limit: args.Limit,
			QueryOptions: args.QueryOptions,
		}
		if err := q.ForwardDC("PreparedQuery.ExecuteRemote", dc, remote, reply); err != nil {
			return err

@@ -4,11 +4,6 @@ import (
	"time"
)

const (
	QueryOrderShuffle = "shuffle"
	QueryOrderSort = "near_agent"
)

const (
	QueryTTLMax = 24 * time.Hour
	QueryTTLMin = 10 * time.Second

@@ -52,10 +47,6 @@ type ServiceQuery struct {
	// this list it must be present. If the tag is preceded with "~" then
	// it is disallowed.
	Tags []string

	// Sort has one of the QueryOrder* options which control how the output
	// is sorted. If this is left blank we default to "shuffle".
	Sort string
}

// PreparedQuery defines a complete prepared query, and is the structure we

@@ -115,9 +106,22 @@ func (q *PreparedQueryRequest) RequestDatacenter() string {

// PreparedQueryExecuteRequest is used to execute a prepared query.
type PreparedQueryExecuteRequest struct {
	// Datacenter is the target this request is intended for.
	Datacenter string

	// QueryIDOrName is the ID of a query _or_ the name of one, either can
	// be provided.
	QueryIDOrName string

	// Limit will trim the resulting list down to the given limit.
	Limit int

	// Source is used to sort the results relative to a given node using
	// network coordinates.
	Source QuerySource

	// QueryOptions (unfortunately named here) controls the consistency
	// settings for the query lookup itself, as well as the service lookups.
	QueryOptions
}

@@ -127,11 +131,20 @@ func (q *PreparedQueryExecuteRequest) RequestDatacenter() string {
}

// PreparedQueryExecuteRemoteRequest is used when running a local query in a
// remote datacenter. We have to ship the entire query over since it won't be
// present in the remote state store.
// remote datacenter.
type PreparedQueryExecuteRemoteRequest struct {
	// Datacenter is the target this request is intended for.
	Datacenter string

	// Query is a copy of the query to execute. We have to ship the entire
	// query over since it won't be present in the remote state store.
	Query PreparedQuery

	// Limit will trim the resulting list down to the given limit.
	Limit int

	// QueryOptions (unfortunately named here) controls the consistency
	// settings for the service lookups.
	QueryOptions
}