Add gocritic to golangci-lint config (#9556)
parent 48c150ec6c
commit 93155ba3da
@@ -53,10 +53,18 @@ linters-settings:
     # simplify code: gofmt with `-s` option, true by default
     simplify: true
 
+issues:
+  exclude:
+    - ifElseChain
+    - singleCaseSwitch
+    - assignOp
+    - unlambda
+
 linters:
   disable-all: true
   enable:
     - goimports
+    - gocritic
    - misspell
    - govet
    - deadcode
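The code hunks below are the cleanups that let the build pass with gocritic enabled. For context, the four excluded names are stylistic gocritic checks the project opted out of. As a rough, hypothetical illustration (not taken from the commit), a program like the following would trip three of them:

package main

import (
	"fmt"
	"strings"
)

func main() {
	n := 1
	n = n + 1 // assignOp: simplifiable to n += 1 (or n++)

	var v interface{} = n
	switch x := v.(type) { // singleCaseSwitch: a lone case reads better as a plain if with a type assertion
	case int:
		fmt.Println("int:", x)
	}

	upper := func(s string) string { return strings.ToUpper(s) } // unlambda: the literal just forwards to strings.ToUpper
	fmt.Println(upper("ok"))
}

The checks that do fire in the hunks below are the non-excluded ones: elseif, unslice, captLocal, typeSwitchVar, and appendAssign.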
@@ -2785,12 +2785,10 @@ func (c *Client) emitStats() {
 			next.Reset(c.config.StatsCollectionInterval)
 			if err != nil {
 				c.logger.Warn("error fetching host resource usage stats", "error", err)
-			} else {
-				// Publish Node metrics if operator has opted in
-				if c.config.PublishNodeMetrics {
-					c.emitHostStats()
-				}
+			} else if c.config.PublishNodeMetrics {
+				// Publish Node metrics if operator has opted in
+				c.emitHostStats()
 			}
 
 			c.emitClientMetrics()
 		case <-c.shutdownCh:
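This is gocritic's elseif check: an else block whose entire body is a nested if collapses into an else-if, dropping one indent level without changing behavior. A minimal runnable sketch of the same rewrite, with hypothetical names:

package main

import (
	"errors"
	"log"
)

func emit(err error, publish bool) {
	// Before:  } else { if publish { ... } }
	// After:   } else if publish { ... }
	if err != nil {
		log.Println("error:", err)
	} else if publish {
		log.Println("publishing node metrics")
	}
}

func main() {
	emit(nil, true)
	emit(errors.New("boom"), true)
}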
@@ -1564,10 +1564,10 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
 		result.BridgeNetworkSubnet = b.BridgeNetworkSubnet
 	}
 
-	if len(b.HostNetworks) != 0 {
-		result.HostNetworks = append(a.HostNetworks, b.HostNetworks...)
-	} else {
-		result.HostNetworks = a.HostNetworks
-	}
+	result.HostNetworks = a.HostNetworks
+
+	if len(b.HostNetworks) != 0 {
+		result.HostNetworks = append(result.HostNetworks, b.HostNetworks...)
+	}
 
 	if b.BindWildcardDefaultHostNetwork {
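Here the trigger is appendAssign, which fires when the result of an append on one slice is assigned to a different variable (result.HostNetworks = append(a.HostNetworks, ...)). The rewrite assigns the base value first and appends onto the same variable. A minimal sketch of the shape, with hypothetical names:

package main

import "fmt"

func merge(base, overrides []string) []string {
	// Assign the default unconditionally, then append onto the same
	// variable, so the append target and the assignee agree.
	result := base
	if len(overrides) != 0 {
		result = append(result, overrides...)
	}
	return result
}

func main() {
	fmt.Println(merge([]string{"eth0"}, []string{"eth1"})) // [eth0 eth1]
	fmt.Println(merge([]string{"eth0"}, nil))              // [eth0]
}

Note that append may still reuse base's backing array when capacity allows; the rewrite satisfies the linter and clarifies intent rather than changing allocation behavior.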
@@ -140,7 +140,8 @@ func (s *HTTPServer) nodeToggleDrain(resp http.ResponseWriter, req *http.Request
 			drainRequest.MarkEligible = true
 		}
 	} else {
-		if err := decodeBody(req, &drainRequest); err != nil {
+		err := decodeBody(req, &drainRequest)
+		if err != nil {
 			return nil, CodedError(400, err.Error())
 		}
 	}
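Several hunks in this commit split the combined `if err := f(); err != nil` form into a separate assignment plus a bare `if`. The observable behavior is the same; the only semantic difference is that `err` now outlives the if statement. A tiny sketch with a hypothetical decode function:

package main

import (
	"errors"
	"fmt"
)

func decodeBody() error { return errors.New("bad payload") }

func main() {
	// Split form, as adopted in this commit; `err` is scoped to the
	// surrounding block rather than to the if statement.
	err := decodeBody()
	if err != nil {
		fmt.Println("400:", err)
	}
}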
@@ -43,11 +43,8 @@ func (s *HTTPServer) scalingPoliciesListRequest(resp http.ResponseWriter, req *h
 
 func (s *HTTPServer) ScalingPolicySpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
 	path := strings.TrimPrefix(req.URL.Path, "/v1/scaling/policy/")
-	switch {
-	default:
-		return s.scalingPolicyCRUD(resp, req, path)
-	}
+	return s.scalingPolicyCRUD(resp, req, path)
 }
 
 func (s *HTTPServer) scalingPolicyCRUD(resp http.ResponseWriter, req *http.Request,
 	policyID string) (interface{}, error) {
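A switch containing only a default arm adds nothing over calling the handler directly. gocritic's singleCaseSwitch (excluded above) targets the one-case variant; a default-only switch is the same idea taken further, and the hunk simply inlines the call. Sketch with hypothetical names:

package main

import "fmt"

func crud(path string) string { return "handled " + path }

// before: a switch that can only ever hit default
func handleOld(path string) string {
	switch {
	default:
		return crud(path)
	}
}

// after: the call stands alone
func handleNew(path string) string {
	return crud(path)
}

func main() {
	fmt.Println(handleOld("/v1/scaling/policy/abc"))
	fmt.Println(handleNew("/v1/scaling/policy/abc"))
}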
@@ -122,7 +122,7 @@ func (c *OperatorMetricsCommand) Run(args []string) int {
 		return 1
 	}
 
-	resp := string(bs[:])
+	resp := string(bs)
 	c.Ui.Output(resp)
 }
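gocritic's unslice check: `bs[:]` over the full slice is a no-op, so converting it to a string is identical to converting the slice itself:

package main

import "fmt"

func main() {
	bs := []byte("metrics output")
	a := string(bs[:]) // flagged: the full re-slice adds nothing
	b := string(bs)    // equivalent and simpler
	fmt.Println(a == b) // true
}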
@@ -634,7 +634,8 @@ func (c *OperatorDebugCommand) collectPprof(path, id string, client *api.Client)
 			return // only exit on 403
 		}
 	} else {
-		if err := c.writeBytes(path, "profile.prof", bs); err != nil {
+		err := c.writeBytes(path, "profile.prof", bs)
+		if err != nil {
 			c.Ui.Error(err.Error())
 		}
 	}
@@ -643,7 +644,8 @@ func (c *OperatorDebugCommand) collectPprof(path, id string, client *api.Client)
 	if err != nil {
 		c.Ui.Error(fmt.Sprintf("%s: Failed to retrieve pprof trace.prof, err: %v", path, err))
 	} else {
-		if err := c.writeBytes(path, "trace.prof", bs); err != nil {
+		err := c.writeBytes(path, "trace.prof", bs)
+		if err != nil {
 			c.Ui.Error(err.Error())
 		}
 	}
@@ -652,7 +654,8 @@ func (c *OperatorDebugCommand) collectPprof(path, id string, client *api.Client)
 	if err != nil {
 		c.Ui.Error(fmt.Sprintf("%s: Failed to retrieve pprof goroutine.prof, err: %v", path, err))
 	} else {
-		if err := c.writeBytes(path, "goroutine.prof", bs); err != nil {
+		err := c.writeBytes(path, "goroutine.prof", bs)
+		if err != nil {
 			c.Ui.Error(err.Error())
 		}
 	}
@@ -664,7 +667,8 @@ func (c *OperatorDebugCommand) collectPprof(path, id string, client *api.Client)
 	if err != nil {
 		c.Ui.Error(fmt.Sprintf("%s: Failed to retrieve pprof goroutine-debug1.txt, err: %v", path, err))
 	} else {
-		if err := c.writeBytes(path, "goroutine-debug1.txt", bs); err != nil {
+		err := c.writeBytes(path, "goroutine-debug1.txt", bs)
+		if err != nil {
 			c.Ui.Error(err.Error())
 		}
 	}
@@ -677,7 +681,8 @@ func (c *OperatorDebugCommand) collectPprof(path, id string, client *api.Client)
 	if err != nil {
 		c.Ui.Error(fmt.Sprintf("%s: Failed to retrieve pprof goroutine-debug2.txt, err: %v", path, err))
 	} else {
-		if err := c.writeBytes(path, "goroutine-debug2.txt", bs); err != nil {
+		err := c.writeBytes(path, "goroutine-debug2.txt", bs)
+		if err != nil {
 			c.Ui.Error(err.Error())
 		}
 	}
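All five pprof hunks above apply the same mechanical split of the `if err := ...` form. As a purely hypothetical refactor (not part of this commit), the repeated retrieve-report-write pattern could be captured once; the sketch below uses os.WriteFile in place of the command's writeBytes helper:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// savePprof is hypothetical: it expresses the shared shape of the
// five hunks — report a retrieval error, otherwise write the bytes
// and report any write error.
func savePprof(dir, name string, bs []byte, retrieveErr error) {
	if retrieveErr != nil {
		fmt.Fprintf(os.Stderr, "%s: failed to retrieve pprof %s: %v\n", dir, name, retrieveErr)
		return
	}
	err := os.WriteFile(filepath.Join(dir, name), bs, 0o644)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

func main() {
	savePprof(os.TempDir(), "profile.prof", []byte("fake profile"), nil)
}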
@@ -81,10 +81,10 @@ func (d *NvidiaDevice) stats(ctx context.Context, stats chan<- *device.StatsResp
 
 // filterStatsByID accepts list of StatsData and set of IDs
 // this function would return entries from StatsData with IDs found in the set
-func filterStatsByID(stats []*nvml.StatsData, IDs map[string]struct{}) []*nvml.StatsData {
+func filterStatsByID(stats []*nvml.StatsData, ids map[string]struct{}) []*nvml.StatsData {
 	var filteredStats []*nvml.StatsData
 	for _, statsItem := range stats {
-		if _, ok := IDs[statsItem.UUID]; ok {
+		if _, ok := ids[statsItem.UUID]; ok {
 			filteredStats = append(filteredStats, statsItem)
 		}
 	}
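This one is captLocal: parameters are local identifiers, and a leading capital wrongly suggests an exported name, so `IDs` becomes `ids`. The rename is purely cosmetic:

package main

import "fmt"

// captLocal would flag a parameter named `IDs` here.
func filter(items []string, ids map[string]struct{}) []string {
	var out []string
	for _, it := range items {
		if _, ok := ids[it]; ok {
			out = append(out, it)
		}
	}
	return out
}

func main() {
	set := map[string]struct{}{"a": {}, "c": {}}
	fmt.Println(filter([]string{"a", "b", "c"}, set)) // [a c]
}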
@@ -139,13 +139,11 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {
 
 			if n.IPAM.Config[0].Gateway != "" {
 				fp.Attributes["driver.docker.bridge_ip"] = pstructs.NewStringAttribute(n.IPAM.Config[0].Gateway)
-			} else {
+			} else if d.fingerprintSuccess == nil {
 				// Docker 17.09.0-ce dropped the Gateway IP from the bridge network
 				// See https://github.com/moby/moby/issues/32648
-				if d.fingerprintSuccess == nil {
-					d.logger.Debug("bridge_ip could not be discovered")
-				}
+				d.logger.Debug("bridge_ip could not be discovered")
 			}
 			break
 		}
 	}
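The same elseif collapse as in emitStats above; the two explanatory comments about Docker 17.09.0-ce simply move inside the new else-if body, and the guarded Debug log fires under exactly the same conditions as before.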
@@ -307,7 +307,8 @@ func (l *LibcontainerExecutor) Shutdown(signal string, grace time.Duration) erro
 			}
 		}
 	} else {
-		if err := l.container.Signal(os.Kill, true); err != nil {
+		err := l.container.Signal(os.Kill, true)
+		if err != nil {
 			return err
 		}
 	}

@@ -114,15 +114,13 @@ func (tc *SystemSchedTest) TestJobUpdateOnIneligbleNode(f *framework.F) {
 			if alloc.ID == dAlloc.ID {
 				foundPreviousAlloc = true
 				require.Equal(t, uint64(0), alloc.JobVersion)
-			} else {
+			} else if alloc.ClientStatus == structs.AllocClientStatusRunning {
 				// Ensure allocs running on non disabled node are
 				// newer version
-				if alloc.ClientStatus == structs.AllocClientStatusRunning {
-					require.Equal(t, uint64(1), alloc.JobVersion)
-				}
+				require.Equal(t, uint64(1), alloc.JobVersion)
 			}
 		}
 	}
 	require.True(t, foundPreviousAlloc, "unable to find previous alloc for ineligible node")
 }
 
@@ -160,7 +160,6 @@ func (w *NoXSSResponseWriter) Write(p []byte) (int, error) {
 	// Headers and buffer were written, this writer has been
 	// flushed and can be a passthrough
 	w.flushed = true
-	w.buf = w.buf[:]
 
 	// Write p
 	return w.orig.Write(p)
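`w.buf[:]` re-slices the entire buffer, so assigning it back to `w.buf` was a self-assignment and the line can be deleted outright (unslice again). If truncation had been intended, it would have needed `w.buf[:0]`. A quick demonstration that a full re-slice leaves the slice header untouched:

package main

import "fmt"

func main() {
	buf := []byte{1, 2, 3}
	before := fmt.Sprintf("%p len=%d cap=%d", buf, len(buf), cap(buf))
	buf = buf[:] // no-op: same pointer, length, and capacity
	after := fmt.Sprintf("%p len=%d cap=%d", buf, len(buf), cap(buf))
	fmt.Println(before == after) // true
}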
@@ -394,11 +394,11 @@ func parseSpreadTarget(result *[]*api.SpreadTarget, list *ast.ObjectList) error
 func parseBool(value interface{}) (bool, error) {
 	var enabled bool
 	var err error
-	switch value.(type) {
+	switch data := value.(type) {
 	case string:
-		enabled, err = strconv.ParseBool(value.(string))
+		enabled, err = strconv.ParseBool(data)
 	case bool:
-		enabled = value.(bool)
+		enabled = data
 	default:
 		err = fmt.Errorf("%v couldn't be converted to boolean value", value)
 	}
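typeSwitchVar: binding the switched value (`switch data := value.(type)`) hands each case the already-asserted value, removing the redundant `value.(string)` and `value.(bool)` assertions in the case bodies. A runnable distillation of the rewritten function:

package main

import (
	"fmt"
	"strconv"
)

func parseBool(value interface{}) (bool, error) {
	switch data := value.(type) {
	case string:
		return strconv.ParseBool(data) // data is already a string here
	case bool:
		return data, nil // and already a bool here
	default:
		return false, fmt.Errorf("%v couldn't be converted to boolean value", value)
	}
}

func main() {
	fmt.Println(parseBool("true")) // true <nil>
	fmt.Println(parseBool(false))  // false <nil>
	fmt.Println(parseBool(42))     // false, conversion error
}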
@@ -40,17 +40,16 @@ func (h delayedHeapImp) Len() int {
 	return len(h)
 }
 
 // Less sorts zero WaitUntil times at the end of the list, and normally
 // otherwise
 func (h delayedHeapImp) Less(i, j int) bool {
-	// Two zero times should return false.
-	// Otherwise, zero is "greater" than any other time.
-	// (To sort it at the end of the list.)
-	if h[i].WaitUntil.IsZero() {
-		// 0,? => ?,0
-		return false
-	}
-
-	if h[j].WaitUntil.IsZero() {
-		// ?,0 => ?,0
-		return true
-	}
+	// Sort such that zero times are at the end of the list.
+	iZero, jZero := h[i].WaitUntil.IsZero(), h[j].WaitUntil.IsZero()
+	if iZero && jZero {
+		return false
+	} else if iZero {
+		// 0,? => ?,0
+		return false
+	} else if jZero {
+		// ?,0 => ?,0
+		return true
+	}
 
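Hoisting the two IsZero calls into iZero and jZero makes the three zero-time orderings explicit: both zero compares false, a zero left operand sorts after anything, and a zero right operand sorts before. The non-zero fallthrough comparison sits outside this hunk; assuming it is a plain Before, a standalone sketch of the comparator behaves like this:

package main

import (
	"fmt"
	"time"
)

type entry struct{ WaitUntil time.Time }

// less mirrors the rewritten comparator: zero WaitUntil times sort to
// the end of the list. The final Before call is an assumption, since
// it falls outside the diff context.
func less(a, b entry) bool {
	iZero, jZero := a.WaitUntil.IsZero(), b.WaitUntil.IsZero()
	if iZero && jZero {
		return false
	} else if iZero {
		return false
	} else if jZero {
		return true
	}
	return a.WaitUntil.Before(b.WaitUntil)
}

func main() {
	now := time.Now()
	fmt.Println(less(entry{}, entry{}))                        // false: both zero
	fmt.Println(less(entry{}, entry{now}))                     // false: zero sorts last
	fmt.Println(less(entry{now}, entry{}))                     // true: non-zero sorts first
	fmt.Println(less(entry{now}, entry{now.Add(time.Second)})) // true: earlier first
}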
@@ -620,24 +620,20 @@ func (v *CSIVolume) nodeUnpublishVolume(vol *structs.CSIVolume, claim *structs.C
 			if ok && rclaim.NodeID == claim.NodeID {
 				allocIDs = append(allocIDs, allocID)
 			}
-		} else {
-			if alloc.NodeID == claim.NodeID && alloc.TerminalStatus() {
-				allocIDs = append(allocIDs, allocID)
-			}
+		} else if alloc.NodeID == claim.NodeID && alloc.TerminalStatus() {
+			allocIDs = append(allocIDs, allocID)
 		}
 	}
 	for allocID, alloc := range vol.WriteAllocs {
 		if alloc == nil {
 			wclaim, ok := vol.WriteClaims[allocID]
 			if ok && wclaim.NodeID == claim.NodeID {
 				allocIDs = append(allocIDs, allocID)
 			}
-		} else {
-			if alloc.NodeID == claim.NodeID && alloc.TerminalStatus() {
-				allocIDs = append(allocIDs, allocID)
-			}
+		} else if alloc.NodeID == claim.NodeID && alloc.TerminalStatus() {
+			allocIDs = append(allocIDs, allocID)
 		}
 	}
 	var merr multierror.Error
 	for _, allocID := range allocIDs {
 		claim.AllocationID = allocID
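Both the read-claim and write-claim loops receive the identical elseif collapse; terminal allocations on the claiming node are collected exactly as before, just one indent level shallower.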
@@ -6043,8 +6043,8 @@ func (s *StateSnapshot) DenormalizeAllocationDiffSlice(allocDiffs []*structs.All
 	return denormalizedAllocs, nil
 }
 
-func getPreemptedAllocDesiredDescription(PreemptedByAllocID string) string {
-	return fmt.Sprintf("Preempted by alloc ID %v", PreemptedByAllocID)
+func getPreemptedAllocDesiredDescription(preemptedByAllocID string) string {
+	return fmt.Sprintf("Preempted by alloc ID %v", preemptedByAllocID)
 }
 
 // StateRestore is used to optimize the performance when
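Another captLocal rename, mirroring filterStatsByID above: the parameter loses its leading capital so it no longer reads as an exported identifier.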
@@ -5375,12 +5375,10 @@ func (p *ScalingPolicy) Validate() error {
 	if p.Max < 0 {
 		mErr.Errors = append(mErr.Errors,
 			fmt.Errorf("maximum count must be specified and non-negative"))
-	} else {
-		if p.Max < p.Min {
-			mErr.Errors = append(mErr.Errors,
-				fmt.Errorf("maximum count must not be less than minimum count"))
-		}
+	} else if p.Max < p.Min {
+		mErr.Errors = append(mErr.Errors,
+			fmt.Errorf("maximum count must not be less than minimum count"))
 	}
 
 	if p.Min < 0 {
 		mErr.Errors = append(mErr.Errors,
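The collapsed bounds check reads as: a negative Max is one error, and Max < Min is only worth reporting when Max itself is valid. A small sketch of the decision table (the error string for the Min branch is an assumption, since that hunk is cut off):

package main

import "fmt"

func validate(min, max int) []string {
	var errs []string
	if max < 0 {
		errs = append(errs, "maximum count must be specified and non-negative")
	} else if max < min {
		errs = append(errs, "maximum count must not be less than minimum count")
	}
	if min < 0 {
		errs = append(errs, "minimum count must be non-negative") // assumed message
	}
	return errs
}

func main() {
	fmt.Println(validate(1, -1)) // negative max wins; min/max ordering not reported
	fmt.Println(validate(5, 3))  // max below min
	fmt.Println(validate(1, 3))  // [] — valid
}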