Add gosimple linter (#9590)

Kris Hicks 2020-12-09 11:05:18 -08:00 committed by GitHub
parent c4ae5aab9a
commit 0a3a748053
77 changed files with 111 additions and 186 deletions


@ -73,6 +73,7 @@ linters:
- structcheck
- unconvert
- gofmt
- gosimple
# Stretch Goal
#- maligned
fast: false
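
Note: gosimple is the code-simplification linter from the staticcheck suite; enabling it in the list above (this hunk appears to be the repo's golangci-lint configuration) makes golangci-lint run report simplifiable constructs, and the remaining hunks in this commit apply those suggestions. As one representative check, a minimal sketch of the boolean-constant comparison it flags (package and names are illustrative, not from this commit):

package demo

import "strings"

// gosimple reports comparisons against bool constants; the call already
// yields a bool, so the comparison is redundant.
func missingHardLimit(ulimit string) bool {
	// flagged form:   strings.Contains(ulimit, ":") == false
	// suggested form:
	return !strings.Contains(ulimit, ":")
}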


@ -135,10 +135,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) {
cancel()
return
}
select {
case <-ctx.Done():
return
}
<-ctx.Done()
}()
logCh := monitor.Start()
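
Note: the hunk above removes a select statement with a single case and no default, which is just a blocking receive, so gosimple suggests the direct form. A minimal sketch (names are illustrative):

package demo

// waitForCancel blocks until the channel receives a value or is closed.
// A select with one case and no default adds nothing over a plain receive.
func waitForCancel(done <-chan struct{}) {
	<-done
}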


@ -55,8 +55,7 @@ func createSecretDir(dir string) error {
return nil
}
var flags uintptr
flags = syscall.MS_NOEXEC
flags := uintptr(syscall.MS_NOEXEC)
options := fmt.Sprintf("size=%dm", secretDirTmpfsSize)
if err := syscall.Mount("tmpfs", dir, "tmpfs", flags, options); err != nil {
return os.NewSyscallError("mount", err)
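
Note: here a var declaration followed by an assignment on the next line is collapsed into one short variable declaration, with the uintptr conversion keeping the type the same. A minimal sketch (the constant value is illustrative):

package demo

func mountFlags() uintptr {
	// flagged form:
	//   var flags uintptr
	//   flags = 0x8 // e.g. some syscall flag constant
	// suggested form: declare and assign in one step, keeping the type explicit.
	flags := uintptr(0x8)
	return flags
}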


@ -215,7 +215,7 @@ func (h *allocHealthWatcherHook) watchHealth(ctx context.Context, deadline time.
// Allocation has stopped so no need to set health
return
case <-time.After(deadline.Sub(time.Now())):
case <-time.After(time.Until(deadline)):
// Time is up! Fallthrough to set unhealthy.
h.logger.Trace("deadline reached; setting unhealthy", "deadline", deadline)
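
Note: this substitution recurs throughout the commit. The standard library defines time.Until(t) as shorthand for t.Sub(time.Now()) and time.Since(t) as shorthand for time.Now().Sub(t), so gosimple prefers the named helpers. A minimal sketch (names are illustrative):

package demo

import "time"

// remaining replaces deadline.Sub(time.Now()).
func remaining(deadline time.Time) time.Duration {
	return time.Until(deadline)
}

// elapsed replaces time.Now().Sub(start).
func elapsed(start time.Time) time.Duration {
	return time.Since(start)
}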


@ -161,7 +161,7 @@ func (h *csiPluginSupervisorHook) Poststart(_ context.Context, _ *interfaces.Tas
// Deeper fingerprinting of the plugin is implemented by the csimanager.
func (h *csiPluginSupervisorHook) ensureSupervisorLoop(ctx context.Context) {
h.runningLock.Lock()
if h.running == true {
if h.running {
h.runningLock.Unlock()
return
}


@ -1613,7 +1613,7 @@ func (c *Client) getHeartbeatRetryIntv(err error) time.Duration {
}
// Determine how much time we have left to heartbeat
left := last.Add(ttl).Sub(time.Now())
left := time.Until(last.Add(ttl))
// Logic for retrying is:
// * Do not retry faster than once a second


@ -406,8 +406,6 @@ func (i *instanceManager) handleFingerprintError() {
// Cancel the context so we cleanup all goroutines
i.cancel()
return
}
// handleFingerprint stores the new devices and triggers the fingerprint output


@ -373,9 +373,7 @@ func (p *pluginEventBroadcaster) run() {
case msg := <-p.publishCh:
p.subscriptionsLock.RLock()
for msgCh := range p.subscriptions {
select {
case msgCh <- msg:
}
msgCh <- msg
}
p.subscriptionsLock.RUnlock()
}


@ -12,6 +12,4 @@ func newEnterpriseClient(logger hclog.Logger) *EnterpriseClient {
}
// SetFeatures is used for enterprise builds to configure enterprise features
func (ec *EnterpriseClient) SetFeatures(features uint64) {
return
}
func (ec *EnterpriseClient) SetFeatures(features uint64) {}


@ -30,7 +30,7 @@ type FingerprintResponse struct {
func (f *FingerprintResponse) AddAttribute(name, value string) {
// initialize Attributes if it has not been already
if f.Attributes == nil {
f.Attributes = make(map[string]string, 0)
f.Attributes = make(map[string]string)
}
f.Attributes[name] = value
@ -41,7 +41,7 @@ func (f *FingerprintResponse) AddAttribute(name, value string) {
func (f *FingerprintResponse) RemoveAttribute(name string) {
// initialize Attributes if it has not been already
if f.Attributes == nil {
f.Attributes = make(map[string]string, 0)
f.Attributes = make(map[string]string)
}
f.Attributes[name] = ""
@ -51,7 +51,7 @@ func (f *FingerprintResponse) RemoveAttribute(name string) {
func (f *FingerprintResponse) AddLink(name, value string) {
// initialize Links if it has not been already
if f.Links == nil {
f.Links = make(map[string]string, 0)
f.Links = make(map[string]string)
}
f.Links[name] = value
@ -62,7 +62,7 @@ func (f *FingerprintResponse) AddLink(name, value string) {
func (f *FingerprintResponse) RemoveLink(name string) {
// initialize Links if it has not been already
if f.Links == nil {
f.Links = make(map[string]string, 0)
f.Links = make(map[string]string)
}
f.Links[name] = ""
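
Note: for maps, the size argument to make is only an optional allocation hint, and a hint of zero is the same as omitting it, which is why the four hunks above drop it. A minimal sketch (names are illustrative):

package demo

// newAttributes shows the simplified form; make(map[string]string, 0) is
// equivalent, but the zero hint is redundant.
func newAttributes() map[string]string {
	return make(map[string]string)
}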


@ -295,7 +295,7 @@ func (f *FileRotator) purgeOldFiles() {
// Sorting the file indexes so that we can purge the older files and keep
// only the number of files as configured by the user
sort.Sort(sort.IntSlice(fIndexes))
sort.Ints(fIndexes)
toDelete := fIndexes[0 : len(fIndexes)-f.MaxFiles]
for _, fIndex := range toDelete {
fname := filepath.Join(f.path, fmt.Sprintf("%s.%d", f.baseFileName, fIndex))
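
Note: sort.Ints sorts an []int in increasing order, so wrapping the slice in sort.IntSlice and calling sort.Sort is unnecessary. A minimal sketch (names are illustrative):

package demo

import "sort"

func sortFileIndexes(indexes []int) {
	// flagged form:   sort.Sort(sort.IntSlice(indexes))
	// suggested form: the dedicated helper does the same thing.
	sort.Ints(indexes)
}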


@ -253,7 +253,6 @@ func (l *logRotatorWrapper) start(openFn func() (io.ReadCloser, error)) {
reader.Close()
}
}()
return
}
// Close closes the rotator and the process writer to ensure that the Wait
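
Note: a bare return as the last statement of a function with no result parameters is a no-op, and many of the hunks below are exactly this deletion. A minimal sketch (names are illustrative):

package demo

import "fmt"

func logStart(name string) {
	fmt.Println("starting", name)
	// flagged form would add a trailing "return" here; the function falls through anyway.
}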


@ -65,14 +65,12 @@ func (m *PluginGroup) WaitForFirstFingerprint(ctx context.Context) (<-chan struc
go func() {
defer wg.Done()
logger.Debug("waiting on plugin manager initial fingerprint")
<-manager.WaitForFirstFingerprint(ctx)
select {
case <-manager.WaitForFirstFingerprint(ctx):
select {
case <-ctx.Done():
logger.Warn("timeout waiting for plugin manager to be ready")
default:
logger.Debug("finished plugin manager initial fingerprint")
}
case <-ctx.Done():
logger.Warn("timeout waiting for plugin manager to be ready")
default:
logger.Debug("finished plugin manager initial fingerprint")
}
}()
}


@ -114,7 +114,7 @@ TRY:
case <-time.After(lib.RandomStagger(c.config.RPCHoldTimeout / structs.JitterFraction)):
// If we are going to retry a blocking query we need to update the time to block so it finishes by our deadline.
if info, ok := args.(structs.RPCInfo); ok && info.TimeToBlock() > 0 {
newBlockTime := deadline.Sub(time.Now())
newBlockTime := time.Until(deadline)
// We can get below 0 here on slow computers because we slept for jitter so at least try to get an immediate response
if newBlockTime < 0 {
newBlockTime = 0


@ -108,10 +108,7 @@ func (t *taskRunnerState08) Upgrade(allocID, taskName string) (*state.LocalState
// The docker driver prefixed the handle with 'DOCKER:'
// Strip so that it can be unmarshalled
data := t.HandleID
if strings.HasPrefix(data, "DOCKER:") {
data = data[7:]
}
data := strings.TrimPrefix(t.HandleID, "DOCKER:")
// The pre09 driver handle ID is given to the driver. It is unmarshalled
// here to check for errors
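
Note: strings.TrimPrefix returns the string unchanged when the prefix is absent, so the HasPrefix guard and the manual data[7:] slice collapse into one call. A minimal sketch (names are illustrative):

package demo

import "strings"

func stripDockerPrefix(handleID string) string {
	// flagged form:
	//   if strings.HasPrefix(handleID, "DOCKER:") {
	//       handleID = handleID[7:]
	//   }
	// suggested form: TrimPrefix is a no-op when the prefix is missing.
	return strings.TrimPrefix(handleID, "DOCKER:")
}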


@ -331,7 +331,7 @@ type HealthCheckIntervalResponse struct {
func (h *HealthCheckResponse) AddDriverInfo(name string, driverInfo *structs.DriverInfo) {
// initialize Drivers if it has not been already
if h.Drivers == nil {
h.Drivers = make(map[string]*structs.DriverInfo, 0)
h.Drivers = make(map[string]*structs.DriverInfo)
}
h.Drivers[name] = driverInfo


@ -479,7 +479,7 @@ func (c *vaultClient) run() {
// Compute the duration after which the item
// needs renewal and set the renewalCh to fire
// at that time.
renewalDuration := renewalTime.Sub(time.Now())
renewalDuration := time.Until(renewalTime)
renewalCh = time.After(renewalDuration)
} else {
// If the renewals of multiple items are too


@ -111,7 +111,7 @@ func formatPolicies(policies []*api.ACLPolicyListStub) string {
}
output := make([]string, 0, len(policies)+1)
output = append(output, fmt.Sprintf("Name|Description"))
output = append(output, "Name|Description")
for _, p := range policies {
output = append(output, fmt.Sprintf("%s|%s", p.Name, p.Description))
}
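
Note: fmt.Sprintf with a constant format string and no arguments just returns that string, so the literal is used directly; the same change appears in many of the CLI hunks that follow. A minimal sketch (names are illustrative):

package demo

import "fmt"

func tableHeaders() (flagged, simplified string) {
	flagged = fmt.Sprintf("Name|Description") // no verbs, no args: a no-op wrapper
	simplified = "Name|Description"           // equivalent literal
	return flagged, simplified
}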


@ -108,7 +108,7 @@ func formatTokens(tokens []*api.ACLTokenListStub) string {
}
output := make([]string, 0, len(tokens)+1)
output = append(output, fmt.Sprintf("Name|Type|Global|Accessor ID"))
output = append(output, "Name|Type|Global|Accessor ID")
for _, p := range tokens {
output = append(output, fmt.Sprintf("%s|%s|%t|%s", p.Name, p.Type, p.Global, p.AccessorID))
}


@ -882,7 +882,7 @@ func (c *Command) handleReload() {
c.Ui.Output("Reloading configuration...")
newConf := c.readConfig()
if newConf == nil {
c.Ui.Error(fmt.Sprintf("Failed to reload configs"))
c.Ui.Error("Failed to reload configs")
return
}


@ -400,7 +400,6 @@ func (s *HTTPServer) handleUI(h http.Handler) http.Handler {
header := w.Header()
header.Add("Content-Security-Policy", "default-src 'none'; connect-src *; img-src 'self' data:; script-src 'self'; style-src 'self' 'unsafe-inline'; form-action 'none'; frame-ancestors 'none'")
h.ServeHTTP(w, req)
return
})
}
@ -448,7 +447,7 @@ func (s *HTTPServer) wrap(handler func(resp http.ResponseWriter, req *http.Reque
reqURL := req.URL.String()
start := time.Now()
defer func() {
s.logger.Debug("request complete", "method", req.Method, "path", reqURL, "duration", time.Now().Sub(start))
s.logger.Debug("request complete", "method", req.Method, "path", reqURL, "duration", time.Since(start))
}()
obj, err := s.auditHandler(handler)(resp, req)
@ -524,7 +523,7 @@ func (s *HTTPServer) wrapNonJSON(handler func(resp http.ResponseWriter, req *htt
reqURL := req.URL.String()
start := time.Now()
defer func() {
s.logger.Debug("request complete", "method", req.Method, "path", reqURL, "duration", time.Now().Sub(start))
s.logger.Debug("request complete", "method", req.Method, "path", reqURL, "duration", time.Since(start))
}()
obj, err := s.auditNonJSONHandler(handler)(resp, req)
@ -595,7 +594,7 @@ func setMeta(resp http.ResponseWriter, m *structs.QueryMeta) {
// setHeaders is used to set canonical response header fields
func setHeaders(resp http.ResponseWriter, headers map[string]string) {
for field, value := range headers {
resp.Header().Set(http.CanonicalHeaderKey(field), value)
resp.Header().Set(field, value)
}
}


@ -79,7 +79,7 @@ func (c *AgentInfoCommand) Run(args []string) int {
for _, key := range statsKeys {
c.Ui.Output(key)
statsData, _ := info.Stats[key]
statsData := info.Stats[key]
statsDataKeys := make([]string, len(statsData))
i := 0
for key := range statsData {
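
Note: reading from a map with the two-value form and discarding the second value is the same as a plain index expression, which yields the zero value for missing keys, so the blank identifier is dropped. A minimal sketch (names are illustrative):

package demo

func statsFor(stats map[string]map[string]string, key string) map[string]string {
	// flagged form:   statsData, _ := stats[key]
	// suggested form: indexing alone already returns nil when the key is absent.
	return stats[key]
}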


@ -97,7 +97,7 @@ func (c *MonitorCommand) Run(args []string) int {
// Query the node info and lookup prefix
if len(nodeID) == 1 {
c.Ui.Error(fmt.Sprintf("Node identifier must contain at least two characters."))
c.Ui.Error("Node identifier must contain at least two characters.")
return 1
}


@ -168,7 +168,7 @@ func (l *AllocExecCommand) Run(args []string) int {
// Query the allocation info
if len(allocID) == 1 {
l.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
l.Ui.Error("Alloc ID must contain at least two characters.")
return 1
}


@ -178,7 +178,7 @@ func (f *AllocFSCommand) Run(args []string) int {
}
// Query the allocation info
if len(allocID) == 1 {
f.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
f.Ui.Error("Alloc ID must contain at least two characters.")
return 1
}


@ -158,7 +158,7 @@ func (l *AllocLogsCommand) Run(args []string) int {
}
// Query the allocation info
if len(allocID) == 1 {
l.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
l.Ui.Error("Alloc ID must contain at least two characters.")
return 1
}


@ -68,7 +68,7 @@ func (c *AllocRestartCommand) Run(args []string) int {
// Query the allocation info
if len(allocID) == 1 {
c.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
c.Ui.Error("Alloc ID must contain at least two characters.")
return 1
}


@ -73,7 +73,7 @@ func (c *AllocSignalCommand) Run(args []string) int {
// Query the allocation info
if len(allocID) == 1 {
c.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
c.Ui.Error("Alloc ID must contain at least two characters.")
return 1
}


@ -149,7 +149,7 @@ func (c *AllocStatusCommand) Run(args []string) int {
// Query the allocation info
if len(allocID) == 1 {
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
c.Ui.Error("Identifier must contain at least two characters.")
return 1
}


@ -76,7 +76,7 @@ func (c *AllocStopCommand) Run(args []string) int {
// Query the allocation info
if len(allocID) == 1 {
c.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
c.Ui.Error("Alloc ID must contain at least two characters.")
return 1
}


@ -139,7 +139,7 @@ func (c *EvalStatusCommand) Run(args []string) int {
// Query the allocation info
if len(evalID) == 1 {
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
c.Ui.Error("Identifier must contain at least two characters.")
return 1
}


@ -255,7 +255,7 @@ func (c *JobPlanCommand) addPreemptions(resp *api.JobPlanResponse) {
c.Ui.Output(c.Colorize().Color("[bold][yellow]Preemptions:\n[reset]"))
if len(resp.Annotations.PreemptedAllocs) < preemptionDisplayThreshold {
var allocs []string
allocs = append(allocs, fmt.Sprintf("Alloc ID|Job ID|Task Group"))
allocs = append(allocs, "Alloc ID|Job ID|Task Group")
for _, alloc := range resp.Annotations.PreemptedAllocs {
allocs = append(allocs, fmt.Sprintf("%s|%s|%s", alloc.ID, alloc.JobID, alloc.TaskGroup))
}
@ -284,7 +284,7 @@ func (c *JobPlanCommand) addPreemptions(resp *api.JobPlanResponse) {
// Show counts grouped by job ID if its less than a threshold
var output []string
if numJobs < preemptionDisplayThreshold {
output = append(output, fmt.Sprintf("Job ID|Namespace|Job Type|Preemptions"))
output = append(output, "Job ID|Namespace|Job Type|Preemptions")
for jobType, jobCounts := range allocDetails {
for jobId, count := range jobCounts {
output = append(output, fmt.Sprintf("%s|%s|%s|%d", jobId.id, jobId.namespace, jobType, count))
@ -292,7 +292,7 @@ func (c *JobPlanCommand) addPreemptions(resp *api.JobPlanResponse) {
}
} else {
// Show counts grouped by job type
output = append(output, fmt.Sprintf("Job Type|Preemptions"))
output = append(output, "Job Type|Preemptions")
for jobType, jobCounts := range allocDetails {
total := 0
for _, count := range jobCounts {


@ -188,7 +188,7 @@ func (c *JobStatusCommand) Run(args []string) int {
if periodic && !parameterized {
if *job.Stop {
basic = append(basic, fmt.Sprintf("Next Periodic Launch|none (job stopped)"))
basic = append(basic, "Next Periodic Launch|none (job stopped)")
} else {
location, err := job.Periodic.GetLocation()
if err == nil {


@ -97,7 +97,7 @@ func (c *NodeConfigCommand) Run(args []string) int {
c.Ui.Error(fmt.Sprintf("Error updating server list: %s", err))
return 1
}
c.Ui.Output(fmt.Sprint("Updated server list"))
c.Ui.Output("Updated server list")
return 0
}


@ -223,7 +223,7 @@ func (c *NodeDrainCommand) Run(args []string) int {
// Check if node exists
if len(nodeID) == 1 {
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
c.Ui.Error("Identifier must contain at least two characters.")
return 1
}


@ -123,7 +123,7 @@ func (c *NodeEligibilityCommand) Run(args []string) int {
// Check if node exists
if len(nodeID) == 1 {
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
c.Ui.Error("Identifier must contain at least two characters.")
return 1
}


@ -239,7 +239,7 @@ func (c *NodeStatusCommand) Run(args []string) int {
}
}
if len(nodeID) == 1 {
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
c.Ui.Error("Identifier must contain at least two characters.")
return 1
}


@ -373,9 +373,7 @@ func (c *OperatorDebugCommand) Run(args []string) int {
c.serverIDs = append(c.serverIDs, member.Name)
}
} else {
for _, id := range argNodes(serverIDs) {
c.serverIDs = append(c.serverIDs, id)
}
c.serverIDs = append(c.serverIDs, argNodes(serverIDs)...)
}
serversFound := 0
@ -590,7 +588,7 @@ func (c *OperatorDebugCommand) collectAgentHost(path, id string, client *api.Cli
if strings.Contains(err.Error(), structs.ErrPermissionDenied.Error()) {
// Drop a hint to help the operator resolve the error
c.Ui.Warn(fmt.Sprintf("Agent host retrieval requires agent:read ACL or enable_debug=true. See https://www.nomadproject.io/api-docs/agent#host for more information."))
c.Ui.Warn("Agent host retrieval requires agent:read ACL or enable_debug=true. See https://www.nomadproject.io/api-docs/agent#host for more information.")
}
return // exit on any error
}
@ -630,7 +628,7 @@ func (c *OperatorDebugCommand) collectPprof(path, id string, client *api.Client)
// one permission failure before we bail.
// But lets first drop a hint to help the operator resolve the error
c.Ui.Warn(fmt.Sprintf("Pprof retrieval requires agent:write ACL or enable_debug=true. See https://www.nomadproject.io/api-docs/agent#agent-runtime-profiles for more information."))
c.Ui.Warn("Pprof retrieval requires agent:write ACL or enable_debug=true. See https://www.nomadproject.io/api-docs/agent#agent-runtime-profiles for more information.")
return // only exit on 403
}
} else {


@ -38,7 +38,7 @@ func (c *OperatorKeygenCommand) Run(_ []string) int {
return 1
}
if n != 32 {
c.Ui.Error(fmt.Sprintf("Couldn't read enough entropy. Generate more entropy!"))
c.Ui.Error("Couldn't read enough entropy. Generate more entropy!")
return 1
}


@ -171,7 +171,7 @@ func (c *OperatorKeyringCommand) handleKeyResponse(resp *api.KeyringResponse) {
out[0] = "Key"
i := 1
for k := range resp.Keys {
out[i] = fmt.Sprintf("%s", k)
out[i] = k
i = i + 1
}
c.Ui.Output(formatList(out))


@ -96,9 +96,7 @@ func (r *RecommendationApplyCommand) Run(args []string) int {
// Create a list of recommendations to apply.
ids := make([]string, len(args))
for i, id := range args {
ids[i] = id
}
copy(ids, args)
resp, _, err := client.Recommendations().Apply(ids, override)
if err != nil {


@ -93,9 +93,7 @@ func (r *RecommendationDismissCommand) Run(args []string) int {
// Create a list of recommendations to dismiss.
ids := make([]string, len(args))
for i, id := range args {
ids[i] = id
}
copy(ids, args)
_, err = client.Recommendations().Delete(ids, nil)
if err != nil {


@ -168,8 +168,5 @@ func (s scalingPolicyStubList) Less(i, j int) bool {
stringList := []string{iTarget, jTarget}
sort.Strings(stringList)
if stringList[0] == iTarget {
return true
}
return false
return stringList[0] == iTarget
}
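
Note: an if statement that returns true in one branch and false in the other can return the condition itself; the hunk above and a later attribute-comparison hunk make exactly that change. A minimal sketch (names are illustrative):

package demo

import "sort"

// lessByTarget reports whether a sorts before b.
func lessByTarget(a, b string) bool {
	pair := []string{a, b}
	sort.Strings(pair)
	// flagged form:
	//   if pair[0] == a {
	//       return true
	//   }
	//   return false
	return pair[0] == a
}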


@ -184,6 +184,6 @@ func (c *StatusCommand) logMultiMatchError(id string, matches map[contexts.Conte
}
c.Ui.Error(fmt.Sprintf("\n%s:", strings.Title(string(ctx))))
c.Ui.Error(fmt.Sprintf("%s", strings.Join(vers, ", ")))
c.Ui.Error(strings.Join(vers, ", "))
}
}


@ -182,6 +182,6 @@ func (c *UiCommand) logMultiMatchError(id string, matches map[contexts.Context][
}
c.Ui.Error(fmt.Sprintf("\n%s:", strings.Title(string(ctx))))
c.Ui.Error(fmt.Sprintf("%s", strings.Join(vers, ", ")))
c.Ui.Error(strings.Join(vers, ", "))
}
}


@ -52,9 +52,7 @@ func (c *VolumeDetachCommand) AutocompleteArgs() complete.Predictor {
if err != nil {
return []string{}
}
for _, match := range resp.Matches[contexts.Nodes] {
matches = append(matches, match)
}
matches = append(matches, resp.Matches[contexts.Nodes]...)
return matches
})
}


@ -154,9 +154,7 @@ func (d *dockerCoordinator) PullImage(image string, authOptions *docker.AuthConf
// Delete the future since we don't need it and we don't want to cache an
// image being there if it has possibly been manually deleted (outside of
// Nomad).
if _, ok := d.pullFutures[image]; ok {
delete(d.pullFutures, image)
}
delete(d.pullFutures, image)
// If we are cleaning up, we increment the reference count on the image
if err == nil && d.cleanup {
@ -220,7 +218,6 @@ func (d *dockerCoordinator) pullImageImpl(image string, authOptions *docker.Auth
}
future.set(dockerImage.ID, nil)
return
}
// IncrementImageReference is used to increment an image reference count


@ -1657,7 +1657,7 @@ func sliceMergeUlimit(ulimitsRaw map[string]string) ([]docker.ULimit, error) {
return []docker.ULimit{}, fmt.Errorf("Malformed ulimit specification %v: %q, cannot be empty", name, ulimitRaw)
}
// hard limit is optional
if strings.Contains(ulimitRaw, ":") == false {
if !strings.Contains(ulimitRaw, ":") {
ulimitRaw = ulimitRaw + ":" + ulimitRaw
}


@ -103,7 +103,7 @@ func (p *imageProgress) get() (string, time.Time) {
}
}
elapsed := time.Now().Sub(p.pullStart)
elapsed := time.Since(p.pullStart)
cur := p.currentBytes()
total := p.totalBytes()
var est int64


@ -94,5 +94,4 @@ func (h *taskHandle) run() {
}
h.exitResult = runCommand(h.command, stdout, stderr, h.killCh, pluginExitTimer, h.logger)
return
}


@ -680,7 +680,7 @@ func (d *Driver) getMonitorPath(dir string, fingerPrint *drivers.Fingerprint) (s
d.logger.Debug("long socket paths available in this version of QEMU", "version", currentQemuVer)
}
fullSocketPath := fmt.Sprintf("%s/%s", dir, qemuMonitorSocketName)
if len(fullSocketPath) > qemuLegacyMaxMonitorPathLen && longPathSupport == false {
if len(fullSocketPath) > qemuLegacyMaxMonitorPathLen && !longPathSupport {
return "", fmt.Errorf("monitor path is too long for this version of qemu")
}
return fullSocketPath, nil


@ -172,7 +172,7 @@ func (c *Run) Run(args []string) int {
}
if len(failedEnvs) > 0 {
c.Ui.Error(fmt.Sprintf("The following environments ***FAILED***"))
c.Ui.Error("The following environments ***FAILED***")
for name, report := range failedEnvs {
c.Ui.Error(fmt.Sprintf(" [%s]: %d out of %d suite failures",
name, report.TotalFailedSuites, report.TotalSuites))


@ -259,10 +259,5 @@ func (f *Framework) runCase(t *testing.T, s *TestSuite, c TestCase) {
}
func isTestMethod(m string) bool {
if !strings.HasPrefix(m, "Test") {
return false
}
// THINKING: adding flag to target a specific test or test regex?
return true
return strings.HasPrefix(m, "Test")
}


@ -189,7 +189,7 @@ func (tc *VaultSecretsTest) TestVaultSecrets(f *framework.F) {
fmt.Sprintf("%s/myapp", tc.secretsPath), "key=UPDATED")
f.NoError(err, out)
elapsed := time.Now().Sub(ttlStart)
elapsed := time.Since(ttlStart)
time.Sleep((time.Second * 60) - elapsed)
// tokens will not be updated


@ -17,9 +17,7 @@ func Flatten(obj interface{}, filter []string, primitiveOnly bool) map[string]st
flatten("", v, primitiveOnly, false, flat)
for _, f := range filter {
if _, ok := flat[f]; ok {
delete(flat, f)
}
delete(flat, f)
}
return flat
}
@ -116,7 +114,7 @@ func getSubPrefix(curPrefix, subField string) string {
if curPrefix != "" {
return fmt.Sprintf("%s.%s", curPrefix, subField)
}
return fmt.Sprintf("%s", subField)
return subField
}
// getSubKeyPrefix takes the current prefix and the next subfield and returns an
@ -125,5 +123,5 @@ func getSubKeyPrefix(curPrefix, subField string) string {
if curPrefix != "" {
return fmt.Sprintf("%s[%s]", curPrefix, subField)
}
return fmt.Sprintf("%s", subField)
return subField
}


@ -366,9 +366,7 @@ func CopySliceString(s []string) []string {
}
c := make([]string, l)
for i, v := range s {
c[i] = v
}
copy(c, s)
return c
}
@ -379,9 +377,7 @@ func CopySliceInt(s []int) []int {
}
c := make([]int, l)
for i, v := range s {
c[i] = v
}
copy(c, s)
return c
}
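
Note: the built-in copy copies min(len(dst), len(src)) elements, so after allocating a destination of the same length, the element-by-element index loop above is unnecessary; the recommendation commands earlier get the same change. A minimal sketch (names are illustrative):

package demo

func cloneInts(src []int) []int {
	dst := make([]int, len(src))
	copy(dst, src) // replaces: for i, v := range src { dst[i] = v }
	return dst
}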


@ -35,7 +35,7 @@ func StreamClientInterceptor(logger hclog.Logger, opts ...Option) grpc.StreamCli
func emitClientLog(logger hclog.Logger, o *options, fullMethodString string, startTime time.Time, err error, msg string) {
code := status.Code(err)
logLevel := o.levelFunc(code)
reqDuration := time.Now().Sub(startTime)
reqDuration := time.Since(startTime)
service := path.Dir(fullMethodString)[1:]
method := path.Base(fullMethodString)
logger.Log(logLevel, msg, "grpc.code", code, "duration", reqDuration, "grpc.service", service, "grpc.method", method)


@ -85,7 +85,7 @@ func formattedDiagnosticErrors(diag hcl.Diagnostics) []error {
if d.Summary == "Extraneous JSON object property" {
d.Summary = "Invalid label"
}
err := errors.New(fmt.Sprintf("%s: %s", d.Summary, d.Detail))
err := fmt.Errorf("%s: %s", d.Summary, d.Detail)
errs = append(errs, err)
}
return errs
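
Note: errors.New(fmt.Sprintf(...)) builds the string and then wraps it, while fmt.Errorf does both in one call, so gosimple suggests the latter; the Spread validation hunks later get the same treatment. A minimal sketch (names are illustrative):

package demo

import (
	"errors"
	"fmt"
)

func diagError(summary, detail string) (flagged, simplified error) {
	flagged = errors.New(fmt.Sprintf("%s: %s", summary, detail))
	simplified = fmt.Errorf("%s: %s", summary, detail) // equivalent, one call
	return flagged, simplified
}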


@ -309,18 +309,15 @@ func (c *Config) OutgoingTLSWrapper() (RegionWrapper, error) {
// no longer supports this mode of operation, we have to do it
// manually.
func WrapTLSClient(conn net.Conn, tlsConfig *tls.Config) (net.Conn, error) {
var err error
var tlsConn *tls.Conn
tlsConn = tls.Client(conn, tlsConfig)
tlsConn := tls.Client(conn, tlsConfig)
// If crypto/tls is doing verification, there's no need to do
// our own.
if tlsConfig.InsecureSkipVerify == false {
if !tlsConfig.InsecureSkipVerify {
return tlsConn, nil
}
if err = tlsConn.Handshake(); err != nil {
if err := tlsConn.Handshake(); err != nil {
tlsConn.Close()
return nil, err
}
@ -342,13 +339,13 @@ func WrapTLSClient(conn net.Conn, tlsConfig *tls.Config) (net.Conn, error) {
opts.Intermediates.AddCert(cert)
}
_, err = certs[0].Verify(opts)
_, err := certs[0].Verify(opts)
if err != nil {
tlsConn.Close()
return nil, err
}
return tlsConn, err
return tlsConn, nil
}
// IncomingTLSConfig generates a TLS configuration for incoming requests


@ -332,7 +332,7 @@ func parseSpread(result *[]*api.Spread, list *ast.ObjectList) error {
// Parse spread target
if o := listVal.Filter("target"); len(o.Items) > 0 {
if err := parseSpreadTarget(&s.SpreadTarget, o); err != nil {
return multierror.Prefix(err, fmt.Sprintf("target ->"))
return multierror.Prefix(err, "target ->")
}
}


@ -198,10 +198,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) {
cancel()
return
}
select {
case <-ctx.Done():
return
}
<-ctx.Done()
}()
logCh := monitor.Start()
@ -351,7 +348,6 @@ func (a *Agent) forwardMonitorClient(conn io.ReadWriteCloser, args cstructs.Moni
}
structs.Bridge(conn, clientConn)
return
}
func (a *Agent) forwardMonitorServer(conn io.ReadWriteCloser, server *serverParts, args cstructs.MonitorRequest, encoder *codec.Encoder, decoder *codec.Decoder) {
@ -373,8 +369,8 @@ func (a *Agent) forwardMonitorServer(conn io.ReadWriteCloser, server *serverPart
}
structs.Bridge(conn, serverConn)
return
}
func (a *Agent) forwardProfileClient(args *structs.AgentPprofRequest, reply *structs.AgentPprofResponse) error {
state, srv, err := a.findClientConn(args.NodeID)


@ -389,5 +389,4 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) {
}
structs.Bridge(conn, clientConn)
return
}


@ -318,7 +318,6 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) {
}
structs.Bridge(conn, clientConn)
return
}
// logs is used to access an task's logs for a given allocation
@ -440,5 +439,4 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) {
}
structs.Bridge(conn, clientConn)
return
}


@ -412,7 +412,7 @@ func (w *deploymentWatcher) watch() {
<-deadlineTimer.C
}
} else {
deadlineTimer = time.NewTimer(currentDeadline.Sub(time.Now()))
deadlineTimer = time.NewTimer(time.Until(currentDeadline))
}
allocIndex := uint64(1)
@ -474,7 +474,7 @@ FAIL:
// rollout, the next progress deadline becomes zero, so we want
// to avoid resetting, causing a deployment failure.
if !next.IsZero() {
deadlineTimer.Reset(next.Sub(time.Now()))
deadlineTimer.Reset(time.Until(next))
}
}


@ -95,7 +95,7 @@ func (d *deadlineHeap) watch() {
// deadline is in the future, see if we already have a timer setup to
// handle it. If we don't create the timer.
if deadline.IsZero() || !deadline.Equal(nextDeadline) {
timer.Reset(deadline.Sub(time.Now()))
timer.Reset(time.Until(deadline))
nextDeadline = deadline
}
}


@ -559,7 +559,7 @@ CHECK_LEADER:
if firstCheck.IsZero() {
firstCheck = time.Now()
}
if time.Now().Sub(firstCheck) < r.config.RPCHoldTimeout {
if time.Since(firstCheck) < r.config.RPCHoldTimeout {
jitter := lib.RandomStagger(r.config.RPCHoldTimeout / structs.JitterFraction)
select {
case <-time.After(jitter):
@ -749,7 +749,7 @@ func (r *rpcHandler) setQueryMeta(m *structs.QueryMeta) {
m.LastContact = 0
m.KnownLeader = true
} else {
m.LastContact = time.Now().Sub(r.raft.LastContact())
m.LastContact = time.Since(r.raft.LastContact())
m.KnownLeader = (r.raft.Leader() != "")
}
}


@ -287,7 +287,7 @@ func jobIsPeriodic(obj interface{}) (bool, error) {
return false, fmt.Errorf("Unexpected type: %v", obj)
}
if j.Periodic != nil && j.Periodic.Enabled == true {
if j.Periodic != nil && j.Periodic.Enabled {
return true, nil
}


@ -185,13 +185,13 @@ func (t *TLSConfig) IsEmpty() bool {
return true
}
return t.EnableHTTP == false &&
t.EnableRPC == false &&
t.VerifyServerHostname == false &&
return !t.EnableHTTP &&
!t.EnableRPC &&
!t.VerifyServerHostname &&
t.CAFile == "" &&
t.CertFile == "" &&
t.KeyFile == "" &&
t.VerifyHTTPSClient == false
!t.VerifyHTTPSClient
}
// Merge is used to merge two TLS configs together


@ -1060,12 +1060,12 @@ func connectGatewayProxyDiff(prev, next *ConsulGatewayProxy, contextual bool) *O
if prev.ConnectTimeout == nil {
oldPrimitiveFlat["ConnectTimeout"] = ""
} else {
oldPrimitiveFlat["ConnectTimeout"] = fmt.Sprintf("%s", *prev.ConnectTimeout)
oldPrimitiveFlat["ConnectTimeout"] = prev.ConnectTimeout.String()
}
if next.ConnectTimeout == nil {
newPrimitiveFlat["ConnectTimeout"] = ""
} else {
newPrimitiveFlat["ConnectTimeout"] = fmt.Sprintf("%s", *next.ConnectTimeout)
newPrimitiveFlat["ConnectTimeout"] = next.ConnectTimeout.String()
}
}


@ -3527,11 +3527,7 @@ func (a *AllocatedTaskResources) Comparable() *ComparableResources {
},
},
}
if len(a.Networks) > 0 {
for _, net := range a.Networks {
ret.Flattened.Networks = append(ret.Flattened.Networks, net)
}
}
ret.Flattened.Networks = append(ret.Flattened.Networks, a.Networks...)
return ret
}
@ -4722,9 +4718,7 @@ func (m *Multiregion) Copy() *Multiregion {
Datacenters: []string{},
Meta: map[string]string{},
}
for _, dc := range region.Datacenters {
copyRegion.Datacenters = append(copyRegion.Datacenters, dc)
}
copyRegion.Datacenters = append(copyRegion.Datacenters, region.Datacenters...)
for k, v := range region.Meta {
copyRegion.Meta[k] = v
}
@ -8367,15 +8361,15 @@ func (s *Spread) Validate() error {
if !ok {
seen[target.Value] = struct{}{}
} else {
mErr.Errors = append(mErr.Errors, errors.New(fmt.Sprintf("Spread target value %q already defined", target.Value)))
mErr.Errors = append(mErr.Errors, fmt.Errorf("Spread target value %q already defined", target.Value))
}
if target.Percent < 0 || target.Percent > 100 {
mErr.Errors = append(mErr.Errors, errors.New(fmt.Sprintf("Spread target percentage for value %q must be between 0 and 100", target.Value)))
mErr.Errors = append(mErr.Errors, fmt.Errorf("Spread target percentage for value %q must be between 0 and 100", target.Value))
}
sumPercent += uint32(target.Percent)
}
if sumPercent > 100 {
mErr.Errors = append(mErr.Errors, errors.New(fmt.Sprintf("Sum of spread target percentages must not be greater than 100%%; got %d%%", sumPercent)))
mErr.Errors = append(mErr.Errors, fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", sumPercent))
}
return mErr.ErrorOrNil()
}
@ -9608,11 +9602,9 @@ func (a *AllocListStub) SetEventDisplayMessages() {
}
func setDisplayMsg(taskStates map[string]*TaskState) {
if taskStates != nil {
for _, taskState := range taskStates {
for _, event := range taskState.Events {
event.PopulateEventDisplayMessage()
}
for _, taskState := range taskStates {
for _, event := range taskState.Events {
event.PopulateEventDisplayMessage()
}
}
}
@ -10504,7 +10496,7 @@ func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
expected := 0
actual := 0
for name, allocList := range plan.NodeAllocation {
didAlloc, _ := p.NodeAllocation[name]
didAlloc := p.NodeAllocation[name]
expected += len(allocList)
actual += len(didAlloc)
}


@ -330,8 +330,6 @@ func (v *vaultClient) SetActive(active bool) {
v.revLock.Lock()
v.revoking = make(map[*structs.VaultAccessor]time.Time)
v.revLock.Unlock()
return
}
// flush is used to reset the state of the vault client
@ -557,7 +555,7 @@ func (v *vaultClient) renewalLoop() {
// Successfully renewed
if err == nil {
// Attempt to renew the token at half the expiration time
durationUntilRenew := currentExpiration.Sub(time.Now()) / 2
durationUntilRenew := time.Until(currentExpiration) / 2
v.logger.Info("successfully renewed token", "next_renewal", durationUntilRenew)
authRenewTimer.Reset(durationUntilRenew)


@ -197,7 +197,7 @@ func TestExecFSIsolation(t *testing.T, driver *DriverHarness, taskID string) {
// we always run in a cgroup - testing freezer cgroup
r = execTask(t, driver, taskID,
fmt.Sprintf("cat /proc/self/cgroup"),
"cat /proc/self/cgroup",
false, "")
require.Zero(t, r.exitCode)


@ -296,16 +296,11 @@ func (a *Attribute) Comparable(b *Attribute) bool {
}
if a.String != nil {
if b.String != nil {
return true
}
return false
return b.String != nil
}
if a.Bool != nil {
if b.Bool != nil {
return true
}
return false
return b.Bool != nil
}
return true


@ -348,12 +348,8 @@ func (c *NetworkChecker) SetNetwork(network *structs.NetworkResource) {
}
c.ports = make([]structs.Port, len(network.DynamicPorts)+len(network.ReservedPorts))
for _, port := range network.DynamicPorts {
c.ports = append(c.ports, port)
}
for _, port := range network.ReservedPorts {
c.ports = append(c.ports, port)
}
c.ports = append(c.ports, network.DynamicPorts...)
c.ports = append(c.ports, network.ReservedPorts...)
}
func (c *NetworkChecker) Feasible(option *structs.Node) bool {
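
Note: appending the contents of one slice to another can be written with the variadic form append(dst, src...), replacing the per-element loop; several earlier hunks (server IDs, allocated networks, placements) use the same idiom. A minimal sketch (names are illustrative):

package demo

func collectPorts(dynamic, reserved []int) []int {
	ports := make([]int, 0, len(dynamic)+len(reserved))
	ports = append(ports, dynamic...) // replaces: for _, p := range dynamic { ports = append(ports, p) }
	ports = append(ports, reserved...)
	return ports
}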


@ -705,7 +705,7 @@ func updateRescheduleTracker(alloc *structs.Allocation, prev *structs.Allocation
// findPreferredNode finds the preferred node for an allocation
func (s *GenericScheduler) findPreferredNode(place placementResult) (*structs.Node, error) {
if prev := place.PreviousAllocation(); prev != nil && place.TaskGroup().EphemeralDisk.Sticky == true {
if prev := place.PreviousAllocation(); prev != nil && place.TaskGroup().EphemeralDisk.Sticky {
var preferredNode *structs.Node
ws := memdb.NewWatchSet()
preferredNode, err := s.state.NodeByID(ws, prev.NodeID)


@ -466,9 +466,7 @@ func (a *allocReconciler) computeGroup(group string, all allocSet) bool {
if deploymentPlaceReady {
desiredChanges.Place += uint64(len(place))
for _, p := range place {
a.result.place = append(a.result.place, p)
}
a.result.place = append(a.result.place, place...)
a.markStop(rescheduleNow, "", allocRescheduled)
desiredChanges.Stop += uint64(len(rescheduleNow))
@ -481,9 +479,7 @@ func (a *allocReconciler) computeGroup(group string, all allocSet) bool {
if len(lost) != 0 {
allowed := helper.IntMin(len(lost), len(place))
desiredChanges.Place += uint64(allowed)
for _, p := range place[:allowed] {
a.result.place = append(a.result.place, p)
}
a.result.place = append(a.result.place, place[:allowed]...)
}
// Handle rescheduling of failed allocations even if the deployment is


@ -50,7 +50,7 @@ func GetVersion() *VersionInfo {
}
func (c *VersionInfo) VersionNumber() string {
version := fmt.Sprintf("%s", c.Version)
version := c.Version
if c.VersionPrerelease != "" {
version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease)