diff --git a/.golangci.yml b/.golangci.yml
index d71c93d16..afa2de0d4 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -23,10 +23,10 @@ issues:
       text: 'SA9004:'
 
     - linters: [staticcheck]
-      text: 'SA1019: Package github.com/golang/protobuf/jsonpb is deprecated'
+      text: 'SA1019: "github.com/golang/protobuf/jsonpb" is deprecated: Use the "google.golang.org/protobuf/encoding/protojson" package instead.'
 
     - linters: [staticcheck]
-      text: 'SA1019: Package github.com/golang/protobuf/proto is deprecated'
+      text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
 
     - linters: [staticcheck]
       text: 'SA1019: ptypes.MarshalAny is deprecated'
@@ -35,7 +35,7 @@ issues:
       text: 'SA1019: ptypes.UnmarshalAny is deprecated'
 
     - linters: [staticcheck]
-      text: 'SA1019: package github.com/golang/protobuf/ptypes is deprecated'
+      text: 'SA1019: "github.com/golang/protobuf/ptypes" is deprecated: Well-known types have specialized functionality directly injected into the generated packages for each message type. See the deprecation notice for each function for the suggested alternative.'
 
     # An argument that always receives the same value is often not a problem.
     - linters: [unparam]
@@ -67,6 +67,14 @@ issues:
       path: '(_oss.go|_oss_test.go|_ent.go|_ent_test.go)'
 
 linters-settings:
+  govet:
+    check-shadowing: true
+    enable-all: true
+    disable:
+      - fieldalignment
+      - nilness
+      - shadow
+      - unusedwrite
   gofmt:
     simplify: true
   forbidigo:
diff --git a/GNUmakefile b/GNUmakefile
index cfa3191e5..66570f439 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -7,7 +7,7 @@ SHELL = bash
 # These version variables can either be a valid string for "go install <module>@<version>"
 # or the string @DEV to imply use what is currently installed locally.
 ###
-GOLANGCI_LINT_VERSION='v1.46.2'
+GOLANGCI_LINT_VERSION='v1.50.1'
 MOCKERY_VERSION='v2.12.2'
 BUF_VERSION='v1.4.0'
 PROTOC_GEN_GO_GRPC_VERSION="v1.2.0"
diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go
index bfa24c6ce..c2e8a40a4 100644
--- a/agent/consul/leader_peering.go
+++ b/agent/consul/leader_peering.go
@@ -90,14 +90,14 @@ func (s *Server) runPeeringMetrics(ctx context.Context) error {
 			metrics.SetGauge(leaderExportedServicesCountKey, float32(0))
 			return nil
 		case <-ticker.C:
-			if err := s.emitPeeringMetricsOnce(logger, defaultMetrics()); err != nil {
+			if err := s.emitPeeringMetricsOnce(defaultMetrics()); err != nil {
 				s.logger.Error("error emitting peering stream metrics", "error", err)
 			}
 		}
 	}
 }
 
-func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metrics.Metrics) error {
+func (s *Server) emitPeeringMetricsOnce(metricsImpl *metrics.Metrics) error {
 	_, peers, err := s.fsm.State().PeeringList(nil, *structs.NodeEnterpriseMetaInPartition(structs.WildcardSpecifier))
 	if err != nil {
 		return err
diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go
index 4183a9f30..fd01737b8 100644
--- a/agent/consul/leader_peering_test.go
+++ b/agent/consul/leader_peering_test.go
@@ -1414,7 +1414,7 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
 	met, err := metrics.New(cfg, sink)
 	require.NoError(t, err)
 
-	errM := s2.emitPeeringMetricsOnce(s2.logger, met)
+	errM := s2.emitPeeringMetricsOnce(met)
 	require.NoError(t, errM)
 
 	retry.Run(t, func(r *retry.R) {
diff --git a/agent/consul/state/config_entry_test.go b/agent/consul/state/config_entry_test.go
index e32b18534..5253a2027 100644
--- a/agent/consul/state/config_entry_test.go
+++ b/agent/consul/state/config_entry_test.go
@@ -910,7 +910,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
 		if err := entry.Validate(); err != nil {
 			return err
 		}
-		return s.EnsureConfigEntry(0, entry)
+		return s.EnsureConfigEntry(idx, entry)
 	}
 
 	type tcase struct {
diff --git a/agent/consul/state/peering.go b/agent/consul/state/peering.go
index be2aa1c73..b84c7acbd 100644
--- a/agent/consul/state/peering.go
+++ b/agent/consul/state/peering.go
@@ -946,6 +946,7 @@ func listAllExportedServices(
 	return idx, found, nil
 }
 
+//nolint:unparam
 func listServicesExportedToAnyPeerByConfigEntry(
 	ws memdb.WatchSet,
 	tx ReadTxn,
diff --git a/agent/grpc-external/services/peerstream/replication.go b/agent/grpc-external/services/peerstream/replication.go
index 2a62f74a6..c43262b05 100644
--- a/agent/grpc-external/services/peerstream/replication.go
+++ b/agent/grpc-external/services/peerstream/replication.go
@@ -63,10 +63,7 @@ func makeExportedServiceListResponse(
 // makeServiceResponse handles preparing exported service instance updates to the peer cluster.
 // Each cache.UpdateEvent will contain all instances for a service name.
 // If there are no instances in the event, we consider that to be a de-registration.
-func makeServiceResponse(
-	mst *MutableStatus,
-	update cache.UpdateEvent,
-) (*pbpeerstream.ReplicationMessage_Response, error) {
+func makeServiceResponse(update cache.UpdateEvent) (*pbpeerstream.ReplicationMessage_Response, error) {
 	serviceName := strings.TrimPrefix(update.CorrelationID, subExportedService)
 	csn, ok := update.Result.(*pbservice.IndexedCheckServiceNodes)
 	if !ok {
diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go
index bf9d1b791..64960f5f4 100644
--- a/agent/grpc-external/services/peerstream/stream_resources.go
+++ b/agent/grpc-external/services/peerstream/stream_resources.go
@@ -658,7 +658,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
 				continue
 			}
 		case strings.HasPrefix(update.CorrelationID, subExportedService):
-			resp, err = makeServiceResponse(status, update)
+			resp, err = makeServiceResponse(update)
 			if err != nil {
 				// Log the error and skip this response to avoid locking up peering due to a bad update event.
logger.Error("failed to create service response", "error", err) diff --git a/agent/grpc-external/services/peerstream/subscription_manager.go b/agent/grpc-external/services/peerstream/subscription_manager.go index ef31f850b..8205ec315 100644 --- a/agent/grpc-external/services/peerstream/subscription_manager.go +++ b/agent/grpc-external/services/peerstream/subscription_manager.go @@ -9,11 +9,12 @@ import ( "time" "github.com/golang/protobuf/proto" - "github.com/hashicorp/consul/ipaddr" - "github.com/hashicorp/consul/lib/retry" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/ipaddr" + "github.com/hashicorp/consul/lib/retry" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/connect" @@ -142,7 +143,7 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti pending := &pendingPayload{} m.syncNormalServices(ctx, state, evt.Services) if m.config.ConnectEnabled { - m.syncDiscoveryChains(ctx, state, pending, evt.ListAllDiscoveryChains()) + m.syncDiscoveryChains(state, pending, evt.ListAllDiscoveryChains()) } err := pending.Add( @@ -254,7 +255,7 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti if state.exportList != nil { // Trigger public events for all synthetic discovery chain replies. for chainName, info := range state.connectServices { - m.collectPendingEventForDiscoveryChain(ctx, state, pending, chainName, info) + m.collectPendingEventForDiscoveryChain(state, pending, chainName, info) } } @@ -475,7 +476,6 @@ func (m *subscriptionManager) syncNormalServices( } func (m *subscriptionManager) syncDiscoveryChains( - ctx context.Context, state *subscriptionState, pending *pendingPayload, chainsByName map[structs.ServiceName]structs.ExportedDiscoveryChainInfo, @@ -488,7 +488,7 @@ func (m *subscriptionManager) syncDiscoveryChains( state.connectServices[chainName] = info - m.collectPendingEventForDiscoveryChain(ctx, state, pending, chainName, info) + m.collectPendingEventForDiscoveryChain(state, pending, chainName, info) } // if it was dropped, try to emit an DELETE event @@ -516,7 +516,6 @@ func (m *subscriptionManager) syncDiscoveryChains( } func (m *subscriptionManager) collectPendingEventForDiscoveryChain( - ctx context.Context, state *subscriptionState, pending *pendingPayload, chainName structs.ServiceName, @@ -786,7 +785,7 @@ func (m *subscriptionManager) notifyMeshConfigUpdates(ctx context.Context) <-cha const meshConfigWatch = "mesh-config-entry" notifyCh := make(chan cache.UpdateEvent, 1) - go m.syncViaBlockingQuery(ctx, meshConfigWatch, func(ctx_ context.Context, store StateStore, ws memdb.WatchSet) (interface{}, error) { + go m.syncViaBlockingQuery(ctx, meshConfigWatch, func(_ context.Context, store StateStore, ws memdb.WatchSet) (interface{}, error) { _, rawEntry, err := store.ConfigEntry(ws, structs.MeshConfig, structs.MeshConfigMesh, acl.DefaultEnterpriseMeta()) if err != nil { return nil, fmt.Errorf("failed to get mesh config entry: %w", err) diff --git a/agent/grpc-external/services/peerstream/subscription_manager_test.go b/agent/grpc-external/services/peerstream/subscription_manager_test.go index 615d72030..644215607 100644 --- a/agent/grpc-external/services/peerstream/subscription_manager_test.go +++ b/agent/grpc-external/services/peerstream/subscription_manager_test.go @@ -7,10 +7,11 @@ import ( "testing" "time" - "github.com/hashicorp/consul/types" "github.com/stretchr/testify/mock" 
"github.com/stretchr/testify/require" + "github.com/hashicorp/consul/types" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/connect" @@ -843,11 +844,13 @@ func newTestSubscriptionBackend(t *testing.T) *testSubscriptionBackend { return backend } +//nolint:unparam func (b *testSubscriptionBackend) ensurePeering(t *testing.T, name string) (uint64, string) { b.lastIdx++ return b.lastIdx, setupTestPeering(t, b.store, name, b.lastIdx) } +//nolint:unparam func (b *testSubscriptionBackend) ensureConfigEntry(t *testing.T, entry structs.ConfigEntry) uint64 { require.NoError(t, entry.Normalize()) require.NoError(t, entry.Validate()) @@ -863,24 +866,28 @@ func (b *testSubscriptionBackend) deleteConfigEntry(t *testing.T, kind, name str return b.lastIdx } +//nolint:unparam func (b *testSubscriptionBackend) ensureNode(t *testing.T, node *structs.Node) uint64 { b.lastIdx++ require.NoError(t, b.store.EnsureNode(b.lastIdx, node)) return b.lastIdx } +//nolint:unparam func (b *testSubscriptionBackend) ensureService(t *testing.T, node string, svc *structs.NodeService) uint64 { b.lastIdx++ require.NoError(t, b.store.EnsureService(b.lastIdx, node, svc)) return b.lastIdx } +//nolint:unparam func (b *testSubscriptionBackend) ensureCheck(t *testing.T, hc *structs.HealthCheck) uint64 { b.lastIdx++ require.NoError(t, b.store.EnsureCheck(b.lastIdx, hc)) return b.lastIdx } +//nolint:unparam func (b *testSubscriptionBackend) deleteService(t *testing.T, nodeName, serviceID string) uint64 { b.lastIdx++ require.NoError(t, b.store.DeleteService(b.lastIdx, nodeName, serviceID, nil, "")) diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index 9d5ea602f..2a576f0ef 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -1749,6 +1749,7 @@ func upsertTestACLs(t *testing.T, store *state.Store) { require.NoError(t, store.ACLTokenBatchSet(101, tokens, state.ACLTokenSetOptions{})) } +//nolint:unparam func setupTestPeering(t *testing.T, store *state.Store, name string, index uint64) string { t.Helper() err := store.PeeringWrite(index, &pbpeering.PeeringWriteRequest{ diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 4c23d0705..577e3f51b 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -1235,7 +1235,7 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot } } - err := s.finalizePublicListenerFromConfig(l, cfgSnap, cfg, useHTTPFilter) + err := s.finalizePublicListenerFromConfig(l, cfgSnap, useHTTPFilter) if err != nil { return nil, fmt.Errorf("failed to attach Consul filters and TLS context to custom public listener: %v", err) } @@ -1330,7 +1330,7 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot } } - err = s.finalizePublicListenerFromConfig(l, cfgSnap, cfg, useHTTPFilter) + err = s.finalizePublicListenerFromConfig(l, cfgSnap, useHTTPFilter) if err != nil { return nil, fmt.Errorf("failed to attach Consul filters and TLS context to custom public listener: %v", err) } @@ -1340,7 +1340,7 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot // finalizePublicListenerFromConfig is used for best-effort injection of Consul filter-chains onto listeners. // This include L4 authorization filters and TLS context. 
-func (s *ResourceGenerator) finalizePublicListenerFromConfig(l *envoy_listener_v3.Listener, cfgSnap *proxycfg.ConfigSnapshot, proxyCfg ProxyConfig, useHTTPFilter bool) error {
+func (s *ResourceGenerator) finalizePublicListenerFromConfig(l *envoy_listener_v3.Listener, cfgSnap *proxycfg.ConfigSnapshot, useHTTPFilter bool) error {
 	if !useHTTPFilter {
 		// Best-effort injection of L4 intentions
 		if err := s.injectConnectFilters(cfgSnap, l); err != nil {