chore: remove use of "err" a log line context key for errors. (#14433)
Log lines which include an error should use the full term "error" as the context key. This provides consistency across the codebase and avoids a Go style which operators might not be aware of.
This commit is contained in:
parent
9f8a3824c4
commit
4b9bcf94da
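
For reference, the convention in practice looks like the sketch below. It uses hashicorp/go-hclog, the structured logger these components wrap; the logger name and error value are invented for illustration.

package main

import (
	"errors"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// Hypothetical logger; Nomad components receive a named hclog.Logger.
	logger := hclog.New(&hclog.LoggerOptions{Name: "example"})
	err := errors.New("connection refused") // illustrative error value

	// Preferred: the full "error" context key, self-explanatory to operators
	// reading structured log output.
	logger.Warn("failed to configure network", "error", err, "attempt", 1)

	// Avoided by this change: the Go-style abbreviation "err" as the key.
	// logger.Warn("failed to configure network", "err", err, "attempt", 1)
}
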
@@ -98,7 +98,7 @@ func (c *cniNetworkConfigurator) Setup(ctx context.Context, alloc *structs.Alloc
 	for attempt := 1; ; attempt++ {
 		var err error
 		if res, err = c.cni.Setup(ctx, alloc.ID, spec.Path, cni.WithCapabilityPortMap(getPortMapping(alloc, c.ignorePortMappingHostIP))); err != nil {
-			c.logger.Warn("failed to configure network", "err", err, "attempt", attempt)
+			c.logger.Warn("failed to configure network", "error", err, "attempt", attempt)
 			switch attempt {
 			case 1:
 				firstError = err
@@ -2469,7 +2469,7 @@ func (c *Client) updateAlloc(update *structs.Allocation) {
 	if update.ClientStatus == structs.AllocClientStatusUnknown && update.AllocModifyIndex > ar.Alloc().AllocModifyIndex {
 		err = ar.Reconnect(update)
 		if err != nil {
-			c.logger.Error("error reconnecting alloc", "alloc_id", update.ID, "alloc_modify_index", update.AllocModifyIndex, "err", err)
+			c.logger.Error("error reconnecting alloc", "alloc_id", update.ID, "alloc_modify_index", update.AllocModifyIndex, "error", err)
		}
		return
	}
@@ -127,7 +127,7 @@ func (f *EnvDigitalOceanFingerprint) Fingerprint(request *FingerprintRequest, re
 		resp, err := f.Get(attr.path, "text")
 		v := strings.TrimSpace(resp)
 		if err != nil {
-			f.logger.Warn("failed to read attribute", "attribute", k, "err", err)
+			f.logger.Warn("failed to read attribute", "attribute", k, "error", err)
 			continue
 		} else if v == "" {
 			f.logger.Debug("read an empty value", "attribute", k)
@@ -232,7 +232,7 @@ func (c *cpusetManagerV2) cleanup() {

 		return nil
 	}); err != nil {
-		c.logger.Error("failed to cleanup cgroup", "err", err)
+		c.logger.Error("failed to cleanup cgroup", "error", err)
 	}
 }

@@ -248,7 +248,7 @@ func (c *cpusetManagerV2) pathOf(id identity) string {
 func (c *cpusetManagerV2) remove(path string) {
 	mgr, err := fs2.NewManager(nil, path)
 	if err != nil {
-		c.logger.Warn("failed to create manager", "path", path, "err", err)
+		c.logger.Warn("failed to create manager", "path", path, "error", err)
 		return
 	}

@@ -264,7 +264,7 @@ func (c *cpusetManagerV2) remove(path string) {

 	// remove the cgroup
 	if err3 := mgr.Destroy(); err3 != nil {
-		c.logger.Warn("failed to cleanup cgroup", "path", path, "err", err)
+		c.logger.Warn("failed to cleanup cgroup", "path", path, "error", err)
 		return
 	}
 }
@@ -276,13 +276,13 @@ func (c *cpusetManagerV2) write(id identity, set cpuset.CPUSet) {
 	// make a manager for the cgroup
 	m, err := fs2.NewManager(new(configs.Cgroup), path)
 	if err != nil {
-		c.logger.Error("failed to manage cgroup", "path", path, "err", err)
+		c.logger.Error("failed to manage cgroup", "path", path, "error", err)
 		return
 	}

 	// create the cgroup
 	if err = m.Apply(CreationPID); err != nil {
-		c.logger.Error("failed to apply cgroup", "path", path, "err", err)
+		c.logger.Error("failed to apply cgroup", "path", path, "error", err)
 		return
 	}

@@ -290,7 +290,7 @@ func (c *cpusetManagerV2) write(id identity, set cpuset.CPUSet) {
 	if err = m.Set(&configs.Resources{
 		CpusetCpus: set.String(),
 	}); err != nil {
-		c.logger.Error("failed to set cgroup", "path", path, "err", err)
+		c.logger.Error("failed to set cgroup", "path", path, "error", err)
 		return
 	}
 }
@@ -96,7 +96,7 @@ func (f *FileRotator) Write(p []byte) (n int, err error) {
 		f.flushBuffer()
 		f.currentFile.Close()
 		if err := f.nextFile(); err != nil {
-			f.logger.Error("error creating next file", "err", err)
+			f.logger.Error("error creating next file", "error", err)
 			return 0, err
 		}
 	}
@@ -144,7 +144,7 @@ func (f *FileRotator) Write(p []byte) (n int, err error) {
 		// Increment the total number of bytes in the file
 		f.currentWr += int64(n)
 		if err != nil {
-			f.logger.Error("error writing to file", "err", err)
+			f.logger.Error("error writing to file", "error", err)

 			// As bufio writer does not automatically recover in case of any
 			// io error, we need to recover from it manually resetting the
@@ -277,7 +277,7 @@ func (f *FileRotator) purgeOldFiles() {
 			var fIndexes []int
 			files, err := ioutil.ReadDir(f.path)
 			if err != nil {
-				f.logger.Error("error getting directory listing", "err", err)
+				f.logger.Error("error getting directory listing", "error", err)
 				return
 			}
 			// Inserting all the rotated files in a slice
@@ -286,7 +286,7 @@ func (f *FileRotator) purgeOldFiles() {
 				fileIdx := strings.TrimPrefix(fi.Name(), fmt.Sprintf("%s.", f.baseFileName))
 				n, err := strconv.Atoi(fileIdx)
 				if err != nil {
-					f.logger.Error("error extracting file index", "err", err)
+					f.logger.Error("error extracting file index", "error", err)
 					continue
 				}
 				fIndexes = append(fIndexes, n)
@@ -307,7 +307,7 @@ func (f *FileRotator) purgeOldFiles() {
 				fname := filepath.Join(f.path, fmt.Sprintf("%s.%d", f.baseFileName, fIndex))
 				err := os.RemoveAll(fname)
 				if err != nil {
-					f.logger.Error("error removing file", "filename", fname, "err", err)
+					f.logger.Error("error removing file", "filename", fname, "error", err)
 				}
 			}
 			f.oldestLogFileIdx = fIndexes[0]
@@ -280,7 +280,7 @@ func (l *logRotatorWrapper) Close() {
 	if l.processOutReader != nil {
 		err := l.processOutReader.Close()
 		if err != nil && !strings.Contains(err.Error(), "file already closed") {
-			l.logger.Warn("error closing read-side of process output pipe", "err", err)
+			l.logger.Warn("error closing read-side of process output pipe", "error", err)
 		}
 	}

@@ -196,7 +196,7 @@ func (i *instanceManager) dispense() (plugin drivers.DriverPlugin, err error) {

 		// If reattachment fails, get a new plugin instance
 		if err != nil {
-			i.logger.Warn("failed to reattach to plugin, starting new instance", "err", err)
+			i.logger.Warn("failed to reattach to plugin, starting new instance", "error", err)
 			pluginInstance, err = dispenseFn()
 		}
 	} else {
@@ -87,7 +87,7 @@ func (cf *cpusetFixer) fix(c coordinate) {
 	source := c.NomadCgroup()
 	destination := c.DockerCgroup()
 	if err := cgutil.CopyCpuset(source, destination); err != nil {
-		cf.logger.Debug("failed to copy cpuset", "err", err)
+		cf.logger.Debug("failed to copy cpuset", "error", err)
 	}
 }

@@ -612,7 +612,7 @@ func (d *Driver) DestroyTask(taskID string, force bool) error {

 	if !handle.pluginClient.Exited() {
 		if err := handle.exec.Shutdown("", 0); err != nil {
-			handle.logger.Error("destroying executor failed", "err", err)
+			handle.logger.Error("destroying executor failed", "error", err)
 		}

 		handle.pluginClient.Kill()
@@ -636,7 +636,7 @@ func (d *Driver) DestroyTask(taskID string, force bool) error {

 	if !handle.pluginClient.Exited() {
 		if err := handle.exec.Shutdown("", 0); err != nil {
-			handle.logger.Error("destroying executor failed", "err", err)
+			handle.logger.Error("destroying executor failed", "error", err)
 		}

 		handle.pluginClient.Kill()
@@ -674,7 +674,7 @@ func (d *Driver) DestroyTask(taskID string, force bool) error {

 	if !handle.pluginClient.Exited() {
 		if err := handle.exec.Shutdown("", 0); err != nil {
-			handle.logger.Error("destroying executor failed", "err", err)
+			handle.logger.Error("destroying executor failed", "error", err)
 		}

 		handle.pluginClient.Kill()
@@ -446,7 +446,7 @@ func (d *Driver) DestroyTask(taskID string, force bool) error {

 	if !handle.pluginClient.Exited() {
 		if err := handle.exec.Shutdown("", 0); err != nil {
-			handle.logger.Error("destroying executor failed", "err", err)
+			handle.logger.Error("destroying executor failed", "error", err)
 		}

 		handle.pluginClient.Kill()
@@ -158,7 +158,7 @@ func (a *ClientCSI) sendCSIControllerRPC(pluginID, method, fwdMethod string, arg
 		}
 		if a.isRetryable(err) {
 			a.logger.Debug("failed to reach controller on client",
-				"nodeID", clientID, "err", err)
+				"nodeID", clientID, "error", err)
 			continue
 		}
 		return err
@@ -1268,7 +1268,8 @@ func (a *allocReconciler) createTimeoutLaterEvals(disconnecting allocSet, tgName

 	timeoutDelays, err := disconnecting.delayByMaxClientDisconnect(a.now)
 	if err != nil || len(timeoutDelays) != len(disconnecting) {
-		a.logger.Error("error computing disconnecting timeouts for task_group", "task_group", tgName, "err", err)
+		a.logger.Error("error computing disconnecting timeouts for task_group",
+			"task_group", tgName, "error", err)
 		return map[string]string{}
 	}
