f61f801e77
Upcoming work to instrument the rate of RPC requests by consumer (and eventually rate limit them) requires that we thread the `RPCContext` through all RPC handlers so that we can access the underlying connection. This changeset adds the context everywhere we intend to support it initially and intentionally excludes streaming RPCs and client RPCs.

To improve the ergonomics of adding the context everywhere it's needed and to clarify the requirements of dynamic vs static handlers, I've also done a good bit of refactoring here:

* canonicalized the RPC handler fields so they're as close to identical as possible without introducing unused fields (i.e. I didn't add loggers if the handler doesn't use them already).
* canonicalized the imports in the handler files.
* added a `NewExampleEndpoint` function for each handler that ensures we're constructing the handlers with the required arguments.
* reordered the registration in server.go to match the order of the files (to make it easier to see if we've missed one), and added a bunch of commentary there as to what the difference between static and dynamic handlers is.
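For readers unfamiliar with the static/dynamic split the last bullet refers to, here is a minimal sketch of why handlers that take the per-connection context must be constructed and registered per connection rather than once at startup. It is not Nomad's actual server.go code: `Server`, `RPCContext`, `setupRPCServer`, and `handleConn` are illustrative stand-ins under assumed names.

package main

import (
    "net"
    "net/rpc"
)

// Minimal stand-ins for the real nomad.Server and per-connection RPCContext.
type Server struct{}

type RPCContext struct {
    Conn net.Conn
}

// Plan mirrors the handler shape this changeset standardizes: a server
// reference, the per-connection context, and a constructor that takes both.
type Plan struct {
    srv *Server
    ctx *RPCContext
}

func NewPlanEndpoint(srv *Server, ctx *RPCContext) *Plan {
    return &Plan{srv: srv, ctx: ctx}
}

// Placeholder request/response types so Submit satisfies the net/rpc
// method signature (two exported argument types, error return).
type PlanRequest struct{}
type PlanResponse struct{}

func (p *Plan) Submit(args *PlanRequest, reply *PlanResponse) error {
    return nil
}

// setupRPCServer registers the "dynamic" handlers: because they hold the
// connection context, registration has to happen once per connection.
func setupRPCServer(s *Server, server *rpc.Server, ctx *RPCContext) error {
    return server.Register(NewPlanEndpoint(s, ctx))
}

// handleConn sketches the per-connection path: build a fresh rpc.Server,
// register the dynamic handlers with this connection's context, then serve.
func handleConn(s *Server, conn net.Conn) error {
    rpcServer := rpc.NewServer()
    if err := setupRPCServer(s, rpcServer, &RPCContext{Conn: conn}); err != nil {
        return err
    }
    rpcServer.ServeConn(conn)
    return nil
}

func main() {}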
69 lines
1.6 KiB
Go
package nomad

import (
    "fmt"
    "time"

    "github.com/armon/go-metrics"
    "github.com/hashicorp/go-hclog"

    "github.com/hashicorp/nomad/nomad/structs"
)

// Plan endpoint is used for plan interactions
type Plan struct {
    srv    *Server
    ctx    *RPCContext
    logger hclog.Logger
}

func NewPlanEndpoint(srv *Server, ctx *RPCContext) *Plan {
    return &Plan{srv: srv, ctx: ctx, logger: srv.logger.Named("plan")}
}

// Submit is used to submit a plan to the leader
func (p *Plan) Submit(args *structs.PlanRequest, reply *structs.PlanResponse) error {
    // Ensure the connection was initiated by another server if TLS is used.
    err := validateTLSCertificateLevel(p.srv, p.ctx, tlsCertificateLevelServer)
    if err != nil {
        return err
    }

    if done, err := p.srv.forward("Plan.Submit", args, args, reply); done {
        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "plan", "submit"}, time.Now())

    if args.Plan == nil {
        return fmt.Errorf("cannot submit nil plan")
    }

    // Pause the Nack timer for the eval as it is making progress as long as it
    // is in the plan queue. We resume immediately after we get a result to
    // handle the case that the receiving worker dies.
    plan := args.Plan
    id := plan.EvalID
    token := plan.EvalToken
    if err := p.srv.evalBroker.PauseNackTimeout(id, token); err != nil {
        return err
    }
    defer p.srv.evalBroker.ResumeNackTimeout(id, token)

    // Submit the plan to the queue
    future, err := p.srv.planQueue.Enqueue(plan)
    if err != nil {
        return err
    }

    // Wait for the results
    result, err := future.Wait()
    if err != nil {
        return err
    }

    // Package the result
    reply.Result = result
    reply.Index = result.AllocIndex
    return nil
}
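For context on where the instrumentation mentioned at the top is headed, here is a minimal, hedged sketch (not part of this diff) of how a handler holding its `RPCContext` could label a request counter by consumer. The `RPCContext` field shown and the metric key are assumptions for illustration only, not the instrumentation this changeset adds.

package main

import (
    "net"

    "github.com/armon/go-metrics"
)

// RPCContext stands in for Nomad's per-connection context; only the field
// needed for this sketch is included (an assumption, not the real struct).
type RPCContext struct {
    Conn net.Conn
}

// countRequest shows how a handler holding its RPCContext could label a
// request counter with the remote address of the underlying connection.
func countRequest(ctx *RPCContext, endpoint string) {
    consumer := "unknown"
    if ctx != nil && ctx.Conn != nil {
        consumer = ctx.Conn.RemoteAddr().String()
    }
    metrics.IncrCounterWithLabels(
        []string{"nomad", "rpc", "request"}, 1,
        []metrics.Label{
            {Name: "endpoint", Value: endpoint},
            {Name: "consumer", Value: consumer},
        },
    )
}

func main() {}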