cli: stream both stdout and stderr when following an alloc. (#16556)

This update changes the behaviour when following logs from an
allocation, so that both the stdout and stderr files are streamed when
the operator supplies the follow flag. The previous behaviour is
preserved for all other flag combinations and situations.

Co-authored-by: Luiz Aoqui <luiz@hashicorp.com>
James Rasell 2023-04-04 10:42:27 +01:00 committed by GitHub
parent b5a1051fe6
commit cb6ba80f0f
11 changed files with 520 additions and 75 deletions

.changelog/16556.txt

@ -0,0 +1,3 @@
```release-note:improvement
cli: stream both stdout and stderr logs by default when following an allocation
```


@ -18,6 +18,14 @@ const (
// and end of a file.
OriginStart = "start"
OriginEnd = "end"
// FSLogNameStdout is the name given to the stdout log stream of a task. It
// can be used when calling AllocFS.Logs as the logType parameter.
FSLogNameStdout = "stdout"
// FSLogNameStderr is the name given to the stderr log stream of a task. It
// can be used when calling AllocFS.Logs as the logType parameter.
FSLogNameStderr = "stderr"
)
// AllocFileInfo holds information about a file inside the AllocDir

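The two new constants let callers of the Go API name the log stream they want without hard-coding strings. Below is a minimal, hedged sketch of how they might be used with `AllocFS().Logs`; the configured `api.Client`, the resolved `*api.Allocation`, and the task name are assumed to come from elsewhere, and error handling is trimmed to the essentials.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

// streamStdout follows the stdout log of a task in the given allocation and
// prints each frame as it arrives. It returns when the stream ends or errors.
func streamStdout(client *api.Client, alloc *api.Allocation, task string) error {
	cancel := make(chan struct{})
	defer close(cancel)

	// Follow the stream from the end of the file, as the CLI change below does.
	frames, errCh := client.AllocFS().Logs(
		alloc, true, task, api.FSLogNameStdout, api.OriginEnd, 0, cancel, nil)

	for {
		select {
		case err := <-errCh:
			return err
		case frame, ok := <-frames:
			if !ok {
				return nil
			}
			fmt.Print(string(frame.Data))
		}
	}
}

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Resolving an allocation and task name is left out of this sketch.
	_ = client
}
```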

@ -28,6 +28,7 @@
"client/taskenv/...",
"command/agent/...",
"command/raft_tools/...",
"command/ui/...",
"helper/...",
"internal/...",
"jobspec/...",


@ -12,11 +12,18 @@ import (
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/api/contexts"
"github.com/hashicorp/nomad/command/ui"
"github.com/posener/complete"
)
type AllocLogsCommand struct {
Meta
// The fields below represent the command's flags.
verbose, job, tail, stderr, stdout, follow bool
numLines int64
numBytes int64
task string
}
func (l *AllocLogsCommand) Help() string {
@ -35,6 +42,11 @@ General Options:
Logs Specific Options:
-stdout
Display stdout logs. This is the default in all cases except when using
the "-f" flag, where both stdout and stderr are used as the default.
-stderr
Display stderr logs.
@ -50,7 +62,9 @@ Logs Specific Options:
-f
Causes the output to not stop when the end of the logs are reached, but
rather to wait for additional output. When supplied with no other flags
except optionally "-job" and "-task", both stdout and stderr logs will be
followed.
-tail
Show the logs contents with offsets relative to the end of the logs. If no
@ -79,6 +93,7 @@ func (l *AllocLogsCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(l.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-stderr": complete.PredictNothing,
"-stdout": complete.PredictNothing,
"-verbose": complete.PredictNothing,
"-task": complete.PredictAnything,
"-job": complete.PredictAnything,
@ -107,20 +122,18 @@ func (l *AllocLogsCommand) AutocompleteArgs() complete.Predictor {
func (l *AllocLogsCommand) Name() string { return "alloc logs" }
func (l *AllocLogsCommand) Run(args []string) int {
var verbose, job, tail, stderr, follow bool
var numLines, numBytes int64
var task string
flags := l.Meta.FlagSet(l.Name(), FlagSetClient)
flags.Usage = func() { l.Ui.Output(l.Help()) }
flags.BoolVar(&verbose, "verbose", false, "")
flags.BoolVar(&job, "job", false, "")
flags.BoolVar(&tail, "tail", false, "")
flags.BoolVar(&follow, "f", false, "")
flags.BoolVar(&stderr, "stderr", false, "")
flags.Int64Var(&numLines, "n", -1, "")
flags.Int64Var(&numBytes, "c", -1, "")
flags.StringVar(&task, "task", "", "")
flags.BoolVar(&l.verbose, "verbose", false, "")
flags.BoolVar(&l.job, "job", false, "")
flags.BoolVar(&l.tail, "tail", false, "")
flags.BoolVar(&l.follow, "f", false, "")
flags.BoolVar(&l.stderr, "stderr", false, "")
flags.BoolVar(&l.stdout, "stdout", false, "")
flags.Int64Var(&l.numLines, "n", -1, "")
flags.Int64Var(&l.numBytes, "c", -1, "")
flags.StringVar(&l.task, "task", "", "")
if err := flags.Parse(args); err != nil {
return 1
@ -128,7 +141,7 @@ func (l *AllocLogsCommand) Run(args []string) int {
args = flags.Args()
if numArgs := len(args); numArgs < 1 {
if job {
if l.job {
l.Ui.Error("A job ID is required")
} else {
l.Ui.Error("An allocation ID is required")
@ -150,7 +163,7 @@ func (l *AllocLogsCommand) Run(args []string) int {
// If -job is specified, use random allocation, otherwise use provided allocation
allocID := args[0]
if job {
if l.job {
allocID, err = getRandomJobAllocID(client, args[0])
if err != nil {
l.Ui.Error(fmt.Sprintf("Error fetching allocations: %v", err))
@ -160,7 +173,7 @@ func (l *AllocLogsCommand) Run(args []string) int {
// Truncate the id unless full length is requested
length := shortId
if verbose {
if l.verbose {
length = fullId
}
// Query the allocation info
@ -181,7 +194,7 @@ func (l *AllocLogsCommand) Run(args []string) int {
}
if len(allocs) > 1 {
// Format the allocs
out := formatAllocListStubs(allocs, verbose, length)
out := formatAllocListStubs(allocs, l.verbose, length)
l.Ui.Error(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", out))
return 1
}
@ -195,17 +208,17 @@ func (l *AllocLogsCommand) Run(args []string) int {
// If -task isn't provided fallback to reading the task name
// from args.
if task != "" {
err = validateTaskExistsInAllocation(task, alloc)
if l.task != "" {
err = validateTaskExistsInAllocation(l.task, alloc)
} else {
if len(args) >= 2 {
task = args[1]
if task == "" {
l.task = args[1]
if l.task == "" {
l.Ui.Error("Task name required")
return 1
}
} else {
task, err = lookupAllocTask(alloc)
l.task, err = lookupAllocTask(alloc)
}
}
if err != nil {
@ -213,92 +226,183 @@ func (l *AllocLogsCommand) Run(args []string) int {
return 1
}
logType := "stdout"
if stderr {
logType = "stderr"
}
// We have a file, output it.
var r io.ReadCloser
var readErr error
if !tail {
r, readErr = l.followFile(client, alloc, follow, task, logType, api.OriginStart, 0)
if readErr != nil {
readErr = fmt.Errorf("Error reading file: %v", readErr)
// In order to run the mixed log output, we can only follow the files from
// their current positions. There is no way to interleave previous log
// lines as there are no timestamp references.
if l.follow && !(l.stderr || l.stdout || l.tail || l.numLines > 0 || l.numBytes > 0) {
if err := l.tailMultipleFiles(client, alloc); err != nil {
l.Ui.Error(fmt.Sprintf("Failed to tail stdout and stderr files: %v", err))
return 1
}
} else {
// Parse the offset
var offset int64 = defaultTailLines * bytesToLines
if nLines, nBytes := numLines != -1, numBytes != -1; nLines && nBytes {
l.Ui.Error("Both -n and -c set")
// If we are not strictly following the two files, we cannot support
// specifying both as targets.
if l.stderr && l.stdout {
l.Ui.Error("Unable to support both stdout and stderr")
return 1
} else if nLines {
offset = numLines * bytesToLines
} else if nBytes {
offset = numBytes
} else {
numLines = defaultTailLines
}
r, readErr = l.followFile(client, alloc, follow, task, logType, api.OriginEnd, offset)
// If numLines is set, wrap the reader
if numLines != -1 {
r = NewLineLimitReader(r, int(numLines), int(numLines*bytesToLines), 1*time.Second)
logType := api.FSLogNameStdout
if l.stderr {
logType = api.FSLogNameStderr
}
if readErr != nil {
readErr = fmt.Errorf("Error tailing file: %v", readErr)
if err := l.handleSingleFile(client, alloc, logType); err != nil {
l.Ui.Error(fmt.Sprintf("Failed to read %s file: %v", logType, err))
return 1
}
}
if readErr != nil {
l.Ui.Error(readErr.Error())
return 1
}
defer r.Close()
_, err = io.Copy(os.Stdout, r)
if err != nil {
l.Ui.Error(fmt.Sprintf("error following logs: %s", err))
return 1
}
return 0
}
func (l *AllocLogsCommand) handleSingleFile(client *api.Client, alloc *api.Allocation, logType string) error {
// We have a file, output it.
var r io.ReadCloser
var readErr error
if !l.tail {
r, readErr = l.followFile(client, alloc, logType, api.OriginStart, 0)
if readErr != nil {
return fmt.Errorf("error reading file: %v", readErr)
}
} else {
// Parse the offset
var offset = defaultTailLines * bytesToLines
if nLines, nBytes := l.numLines != -1, l.numBytes != -1; nLines && nBytes {
return errors.New("both -n and -c set")
} else if nLines {
offset = l.numLines * bytesToLines
} else if nBytes {
offset = l.numBytes
} else {
l.numLines = defaultTailLines
}
r, readErr = l.followFile(client, alloc, logType, api.OriginEnd, offset)
// If numLines is set, wrap the reader
if l.numLines != -1 {
r = NewLineLimitReader(r, int(l.numLines), int(l.numLines*bytesToLines), 1*time.Second)
}
if readErr != nil {
return fmt.Errorf("error tailing file: %v", readErr)
}
}
defer r.Close()
if _, err := io.Copy(os.Stdout, r); err != nil {
return fmt.Errorf("error following logs: %s", err)
}
return nil
}
// followFile outputs the contents of the file to stdout relative to the end of
// the file.
func (l *AllocLogsCommand) followFile(client *api.Client, alloc *api.Allocation,
follow bool, task, logType, origin string, offset int64) (io.ReadCloser, error) {
logType, origin string, offset int64) (io.ReadCloser, error) {
cancel := make(chan struct{})
frames, errCh := client.AllocFS().Logs(alloc, follow, task, logType, origin, offset, cancel, nil)
frames, errCh := client.AllocFS().Logs(alloc, l.follow, l.task, logType, origin, offset, cancel, nil)
// Setting up the logs stream can fail, therefore we need to check the
// error channel before continuing further.
select {
case err := <-errCh:
return nil, err
default:
}
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
// Create a reader
// Create a reader but don't initially cast it to an io.ReadCloser so that
// we can set the unblock time.
var r io.ReadCloser
frameReader := api.NewFrameReader(frames, errCh, cancel)
frameReader.SetUnblockTime(500 * time.Millisecond)
r = frameReader
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
// This goroutine blocks until the command receives an interrupt or
// terminate signal, at which point we close the ReadCloser.
go func() {
<-signalCh
// End the streaming
r.Close()
_ = r.Close()
}()
return r, nil
}
// tailMultipleFiles will follow both stdout and stderr log files of the passed
// allocation. Each stream will be output to the user's console via stdout and
// stderr until the user cancels it.
func (l *AllocLogsCommand) tailMultipleFiles(client *api.Client, alloc *api.Allocation) error {
// Use a single cancel channel for both log streams, so we only have to
// close one.
cancel := make(chan struct{})
// Ensure the channel is closed in order to notify listeners whenever we
// exit.
defer close(cancel)
stdoutFrames, stdoutErrCh := client.AllocFS().Logs(
alloc, true, l.task, api.FSLogNameStdout, api.OriginEnd, 1, cancel, nil)
// Setting up the logs stream can fail, therefore we need to check the
// error channel before continuing further.
select {
case err := <-stdoutErrCh:
return fmt.Errorf("failed to setup stdout log tailing: %v", err)
default:
}
stderrFrames, stderrErrCh := client.AllocFS().Logs(
alloc, true, l.task, api.FSLogNameStderr, api.OriginEnd, 1, cancel, nil)
// Setting up the logs stream can fail, therefore we need to check the
// error channel before continuing further.
select {
case err := <-stderrErrCh:
return fmt.Errorf("failed to setup stderr log tailing: %v", err)
default:
}
// Trap user signals, so we know when to exit and cancel the log streams
// running in the background.
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
// Generate our logging UI that doesn't add any additional formatting to
// output strings.
logUI, err := ui.NewLogUI(l.Ui)
if err != nil {
return err
}
// Enter the main loop where we listen for log frames, errors, and a cancel
// signal. Any error at this point ends the stream and should therefore
// cause this command to exit; otherwise we would just be printing a single
// stream, which might be hard for the user to notice.
for {
select {
case <-signalCh:
return nil
case stdoutErr := <-stdoutErrCh:
return fmt.Errorf("received an error from stdout log stream: %v", stdoutErr)
case stdoutFrame := <-stdoutFrames:
logUI.Output(string(stdoutFrame.Data))
case stderrErr := <-stderrErrCh:
return fmt.Errorf("received an error from stderr log stream: %v", stderrErr)
case stderrFrame := <-stderrFrames:
logUI.Warn(string(stderrFrame.Data))
}
}
}
func lookupAllocTask(alloc *api.Allocation) (string, error) {
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
if tg == nil {

command/ui/ui.go

@ -0,0 +1,114 @@
package ui
import (
"errors"
"fmt"
"io"
"github.com/fatih/color"
"github.com/mitchellh/cli"
)
// LogUI is an implementation of the cli.Ui interface which can be used for
// logging outputs. It differs from cli.BasicUi only in that it does not add
// a newline after each UI write.
type LogUI struct {
reader io.Reader
writer io.Writer
errorWriter io.Writer
// underlyingUI stores the UI that was used to create this LogUI. It allows
// us to call the Ask functions without implementing them again.
underlyingUI cli.Ui
isColor bool
outputColor cli.UiColor
infoColor cli.UiColor
errorColor cli.UiColor
warnColor cli.UiColor
}
// NewLogUI generates a new cli.Ui that can be used for commands that write log
// lines to the terminal. The caller is required to pass a cli.BasicUi so we
// have access to the underlying writers.
//
// Currently, the passed ui needs to be either *cli.ColoredUi or *cli.BasicUi
// to work correctly. If more are needed, please add them.
func NewLogUI(ui cli.Ui) (cli.Ui, error) {
var found bool
logUI := LogUI{}
if coloredUI, ok := ui.(*cli.ColoredUi); ok {
logUI.isColor = true
logUI.outputColor = coloredUI.OutputColor
logUI.infoColor = coloredUI.InfoColor
logUI.errorColor = coloredUI.ErrorColor
logUI.warnColor = coloredUI.WarnColor
logUI.underlyingUI = coloredUI.Ui
if basicUI, ok := coloredUI.Ui.(*cli.BasicUi); ok {
logUI.reader = basicUI.Reader
logUI.writer = basicUI.Writer
logUI.errorWriter = basicUI.ErrorWriter
found = true
}
} else if basicUI, ok := ui.(*cli.BasicUi); ok && !found {
logUI.reader = basicUI.Reader
logUI.writer = basicUI.Writer
logUI.errorWriter = basicUI.ErrorWriter
logUI.underlyingUI = basicUI
found = true
}
if !found {
return nil, errors.New("failed to generate logging UI")
}
return &logUI, nil
}
// Ask implements the Ask function of the cli.Ui interface.
func (l *LogUI) Ask(query string) (string, error) {
return l.underlyingUI.Ask(l.colorize(query, l.outputColor))
}
// AskSecret implements the AskSecret function of the cli.Ui interface.
func (l *LogUI) AskSecret(query string) (string, error) {
return l.underlyingUI.AskSecret(l.colorize(query, l.outputColor))
}
// Output implements the Output function of the cli.Ui interface.
func (l *LogUI) Output(message string) {
_, _ = fmt.Fprint(l.writer, l.colorize(message, l.outputColor))
}
// Info implements the Info function of the cli.Ui interface.
func (l *LogUI) Info(message string) { l.Output(l.colorize(message, l.infoColor)) }
// Error implements the Error function of the cli.Ui interface.
func (l *LogUI) Error(message string) {
w := l.writer
if l.errorWriter != nil {
w = l.errorWriter
}
_, _ = fmt.Fprint(w, l.colorize(message, l.errorColor))
}
// Warn implements the Warn function of the cli.Ui interface.
func (l *LogUI) Warn(message string) { l.Error(l.colorize(message, l.warnColor)) }
func (l *LogUI) colorize(message string, uc cli.UiColor) string {
if !l.isColor {
return message
}
attr := []color.Attribute{color.Attribute(uc.Code)}
if uc.Bold {
attr = append(attr, color.Bold)
}
return color.New(attr...).SprintFunc()(message)
}

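For context, here is a minimal, hedged sketch of how a command could construct the new LogUI; the `os.Stdin`/`os.Stdout`/`os.Stderr` wiring is assumed, and the import path simply mirrors the `command/ui` package added above.

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/nomad/command/ui"
	"github.com/mitchellh/cli"
)

func main() {
	// Wrap a plain BasicUi; a *cli.ColoredUi wrapping a *cli.BasicUi would
	// also be accepted by NewLogUI.
	logUI, err := ui.NewLogUI(&cli.BasicUi{
		Reader:      os.Stdin,
		Writer:      os.Stdout,
		ErrorWriter: os.Stderr,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Unlike cli.BasicUi, LogUI does not append a newline, so log frames that
	// already end in "\n" are written through verbatim.
	logUI.Output("a stdout log line\n")
	logUI.Warn("a stderr log line\n")
}
```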
command/ui/ui_test.go

@ -0,0 +1,124 @@
package ui
import (
"bytes"
"io"
"testing"
"github.com/mitchellh/cli"
"github.com/shoenig/test/must"
)
func TestLogUI_Implements(t *testing.T) {
var _ cli.Ui = new(LogUI)
}
func TestLogUI_Ask(t *testing.T) {
testCases := []struct {
name string
query string
input string
expectedQuery string
expectedResult string
}{
{
name: "EmptyString",
query: "Middle Name?",
input: "\n",
expectedQuery: "Middle Name? ",
expectedResult: "",
},
{
name: "NonEmptyString",
query: "Name?",
input: "foo bar\nbaz\n",
expectedQuery: "Name? ",
expectedResult: "foo bar",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
inReader, inWriter := io.Pipe()
defer inReader.Close()
defer inWriter.Close()
writer := new(bytes.Buffer)
logUI, err := NewLogUI(&cli.BasicUi{
Reader: inReader,
Writer: writer,
})
must.NoError(t, err)
go inWriter.Write([]byte(tc.input))
result, err := logUI.Ask(tc.query)
must.NoError(t, err)
must.Eq(t, writer.String(), tc.expectedQuery)
must.Eq(t, result, tc.expectedResult)
})
}
}
func TestLogUI_AskSecret(t *testing.T) {
inReader, inWriter := io.Pipe()
defer inReader.Close()
defer inWriter.Close()
writer := new(bytes.Buffer)
logUI, err := NewLogUI(&cli.BasicUi{
Reader: inReader,
Writer: writer,
})
must.NoError(t, err)
go inWriter.Write([]byte("foo bar\nbaz\n"))
result, err := logUI.AskSecret("Name?")
must.NoError(t, err)
must.Eq(t, writer.String(), "Name? ")
must.Eq(t, result, "foo bar")
}
func TestLogUI_Error(t *testing.T) {
writer := new(bytes.Buffer)
logUI, err := NewLogUI(&cli.BasicUi{Writer: writer})
must.NoError(t, err)
logUI.Error("ERROR")
must.Eq(t, writer.String(), "ERROR")
writer = new(bytes.Buffer)
logUI, err = NewLogUI(&cli.ColoredUi{Ui: &cli.BasicUi{Writer: writer}})
must.NoError(t, err)
logUI.Error("ERROR")
must.Eq(t, writer.String(), "ERROR")
}
func TestLogUI_Output(t *testing.T) {
writer := new(bytes.Buffer)
logUI, err := NewLogUI(&cli.BasicUi{Writer: writer})
must.NoError(t, err)
logUI.Error("OUTPUT")
must.Eq(t, writer.String(), "OUTPUT")
writer = new(bytes.Buffer)
logUI, err = NewLogUI(&cli.ColoredUi{Ui: &cli.BasicUi{Writer: writer}})
must.NoError(t, err)
logUI.Error("OUTPUT")
must.Eq(t, writer.String(), "OUTPUT")
}
func TestLogUI_Warn(t *testing.T) {
writer := new(bytes.Buffer)
logUI, err := NewLogUI(&cli.BasicUi{Writer: writer})
must.NoError(t, err)
logUI.Error("WARN")
must.Eq(t, writer.String(), "WARN")
writer = new(bytes.Buffer)
logUI, err = NewLogUI(&cli.ColoredUi{Ui: &cli.BasicUi{Writer: writer}})
must.NoError(t, err)
logUI.Error("WARN")
must.Eq(t, writer.String(), "WARN")
}


@ -0,0 +1,45 @@
package alloc_logs
import (
"testing"
"github.com/hashicorp/nomad/e2e/e2eutil"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/shoenig/test/must"
)
func TestAllocLogs(t *testing.T) {
// Wait until we have a usable cluster before running the tests.
nomadClient := e2eutil.NomadClient(t)
e2eutil.WaitForLeader(t, nomadClient)
e2eutil.WaitForNodesReady(t, nomadClient, 1)
// Run our test cases.
t.Run("TestAllocLogs_MixedFollow", testMixedFollow)
}
func testMixedFollow(t *testing.T) {
nomadClient := e2eutil.NomadClient(t)
// Generate our job ID which will be used for the entire test.
jobID := "alloc-logs-mixed-follow-" + uuid.Short()
jobIDs := []string{jobID}
// Ensure jobs are cleaned.
t.Cleanup(e2eutil.CleanupJobsAndGC(t, &jobIDs))
allocStubs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, "./input/mixed-output.nomad", jobID, "")
must.Len(t, 1, allocStubs)
// Run the alloc logs command which we expect to capture both stdout and
// stderr logs. The command will reach its timeout and therefore return an
// error. We want to ignore this, as it's expected. Any other error is
// terminal.
out, err := e2eutil.Command("nomad", "alloc", "logs", "-f", allocStubs[0].ID)
if err != nil {
must.ErrorContains(t, err, "failed: signal: killed")
}
must.StrContains(t, out, "stdout\nstderr")
}

e2e/alloc_logs/doc.go

@ -0,0 +1,6 @@
// Package alloc_logs provides end-to-end tests for Nomad's allocation logging
// functionality.
//
// To run only this test suite, from the e2e directory run:
// go test -v -run '^TestAllocLogs' ./alloc_logs
package alloc_logs


@ -0,0 +1,35 @@
job "alloc-logs" {
datacenters = ["dc1"]
type = "service"
#constraint {
# attribute = "${attr.kernel.name}"
# value = "linux"
#}
group "alloc-logs" {
task "test" {
driver = "raw_exec"
template {
data = <<EOH
while true
do
echo stdout >&1
sleep 1
echo stderr >&2
sleep 1
done
EOH
destination = "local/echo.sh"
}
config {
command = "bash"
args = ["local/echo.sh"]
}
}
}
}

go.mod

@ -187,7 +187,7 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/fatih/color v1.13.0
github.com/felixge/httpsnoop v1.0.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect


@ -22,8 +22,8 @@ allocation is only running a single task, the task name can be omitted.
Optionally, the `-job` option may be used in which case a random allocation from
the given job will be chosen.
Task name may also be specified using the `-task` option rather than a command
argument. If task name is given with both an argument and the `-task` option,
preference is given to the `-task` option.
When ACLs are enabled, this command requires a token with the `read-logs`,
@ -35,6 +35,10 @@ When ACLs are enabled, this command requires a token with the `read-logs`,
## Logs Options
- `-stdout`: Display stdout logs. This is the default in all cases except
when using the `-f` flag, where both stdout and stderr are used as the
default.
- `-stderr`: Display stderr logs.
- `-verbose`: Display verbose output.
@ -45,7 +49,8 @@ When ACLs are enabled, this command requires a token with the `read-logs`,
- `-task`: Specify the task to view the logs.
- `-f`: Causes the output to not stop when the end of the logs are reached, but
rather to wait for additional output. When supplied with no other flags except
optionally `-job` and `-task`, both stdout and stderr logs will be followed.
- `-tail`: Show the logs contents with offsets relative to the end of the logs.
If no offset is given, -n is defaulted to 10.